parquet-converter committed on
Commit 6ea2723 · 1 Parent(s): d42fae7

Update parquet files (step 11 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/Provider/Providers/Aichat.py +0 -44
  2. spaces/1gistliPinn/ChatGPT4/Examples/Anwar Shah Kashmiri Books Urdu P.md +0 -19
  3. spaces/1gistliPinn/ChatGPT4/Examples/Cstpatcher11 Exe.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Francine Dee Pornstar Book.md +0 -6
  5. spaces/1phancelerku/anime-remove-background/Baby Cat Breeds Which One is Right for You?..md +0 -156
  6. spaces/1phancelerku/anime-remove-background/Bloons TD 6 APK 36.3 el juego de torres de defensa ms divertido y adictivo.md +0 -146
  7. spaces/1phancelerku/anime-remove-background/Build Your Dream City with Idle Island - City Idle Tycoon Mod APK - No Ads No Root.md +0 -81
  8. spaces/1phancelerku/anime-remove-background/Download Driven The Movie That Changed the Face of Motorsports.md +0 -162
  9. spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_karras_ve.py +0 -232
  10. spaces/2ndelement/voicevox/Dockerfile +0 -296
  11. spaces/7hao/bingo/src/components/button-scroll-to-bottom.tsx +0 -34
  12. spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Getting Started 6bc871dcdd4a4554b5b22c0c40740841/Example sub-page 48f64d6186ec4428b2e4180475245a9c.md +0 -5
  13. spaces/AI-Naga/Parking_Space_Counter/app.py +0 -91
  14. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/node.py +0 -263
  15. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/blocks.py +0 -342
  16. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/models/melgan.py +0 -458
  17. spaces/AIWaves/SOP_Generation-single/Environment/base_environment.py +0 -177
  18. spaces/Abhilashvj/planogram-compliance/utils/aws/userdata.sh +0 -27
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/ColorPicker.js +0 -101
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/CloseListPanel.js +0 -11
  21. spaces/AkitoP/umamusume_bert_vits2/app0.py +0 -344
  22. spaces/Amrrs/DragGan-Inversion/PTI/training/coaches/base_coach.py +0 -158
  23. spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/models_utils.py +0 -28
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/model_editing.md +0 -35
  25. spaces/Andy1621/uniformer_image_detection/configs/detr/detr_r50_8x2_150e_coco.py +0 -131
  26. spaces/Andy1621/uniformer_image_detection/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py +0 -16
  27. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/fast_scnn.py +0 -57
  28. spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/utils.py +0 -189
  29. spaces/AntNikYab/NaturalLanguageProcessing/function/lstm_preprocessing.py +0 -162
  30. spaces/Ariharasudhan/YoloV5/utils/loggers/comet/__init__.py +0 -508
  31. spaces/Arsenii2023/Demo1/demo1.py +0 -73
  32. spaces/Awesimo/jojogan/e4e/criteria/w_norm.py +0 -14
  33. spaces/Bart92/RVC_HF/go-applio.bat +0 -92
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/constants.py +0 -30
  35. spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/README.md +0 -11
  36. spaces/BwayKC/darkstorm2150-Protogen_v2.2_Official_Release/app.py +0 -3
  37. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/samplers/distributed_sampler.py +0 -199
  38. spaces/CVPR/LIVE/pybind11/tests/test_callbacks.cpp +0 -168
  39. spaces/CVPR/LIVE/thrust/testing/unittest/special_types.h +0 -184
  40. spaces/CVPR/LIVE/thrust/thrust/device_reference.h +0 -983
  41. spaces/CVPR/Object-Detection-With-DETR-and-YOLOS/app.py +0 -153
  42. spaces/CVPR/WALT/mmdet/core/bbox/samplers/sampling_result.py +0 -152
  43. spaces/CVPR/flava-multimodal-zero-shot/app.py +0 -131
  44. spaces/CVPR/lama-example/bin/calc_dataset_stats.py +0 -88
  45. spaces/Cpp4App/Cpp4App/CDM/run_single.py +0 -212
  46. spaces/Cvandi/remake/realesrgan/__init__.py +0 -6
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cu2qu/errors.py +0 -77
  48. spaces/Detomo/Image-Classification/app.py +0 -81
  49. spaces/EdBianchi/ThemeParksAccidents_RDF-SPARQL/app.py +0 -297
  50. spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/nets_new.py +0 -133
spaces/101-5/gpt4free/g4f/Provider/Providers/Aichat.py DELETED
@@ -1,44 +0,0 @@
- import os, requests
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://chat-gpt.org/chat'
- model = ['gpt-3.5-turbo']
- supports_stream = False
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     base = ''
-     for message in messages:
-         base += '%s: %s\n' % (message['role'], message['content'])
-     base += 'assistant:'
-
-
-     headers = {
-         'authority': 'chat-gpt.org',
-         'accept': '*/*',
-         'cache-control': 'no-cache',
-         'content-type': 'application/json',
-         'origin': 'https://chat-gpt.org',
-         'pragma': 'no-cache',
-         'referer': 'https://chat-gpt.org/chat',
-         'sec-ch-ua-mobile': '?0',
-         'sec-ch-ua-platform': '"macOS"',
-         'sec-fetch-dest': 'empty',
-         'sec-fetch-mode': 'cors',
-         'sec-fetch-site': 'same-origin',
-         'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
-     }
-
-     json_data = {
-         'message':base,
-         'temperature': 1,
-         'presence_penalty': 0,
-         'top_p': 1,
-         'frequency_penalty': 0
-     }
-
-     response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
-     yield response.json()['message']
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
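For context (not part of the diff above): a minimal sketch of how the deleted provider's _create_completion generator was typically consumed. The import path assumes the old g4f package layout this file belonged to, and the example messages are illustrative only.

# Hypothetical consumer of the removed Aichat provider (assumed pre-commit g4f layout).
from g4f.Provider.Providers import Aichat

messages = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Say hello.'},
]

# _create_completion is a generator; this provider does not stream,
# so it yields the complete reply text in a single chunk.
for text in Aichat._create_completion(model='gpt-3.5-turbo', messages=messages, stream=False):
    print(text)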
 
spaces/1gistliPinn/ChatGPT4/Examples/Anwar Shah Kashmiri Books Urdu P.md DELETED
@@ -1,19 +0,0 @@
- <br />
- <h1>Anwar Shah Kashmiri: A Renowned Scholar and Jurist of Kashmir</h1>
- <p>Anwar Shah Kashmiri (1875-1933) was a prominent Muslim scholar and jurist who belonged to Kashmir, a region disputed between India and Pakistan. He was known for his mastery of various Islamic sciences, such as Hadith, Fiqh, Tafsir, and Kalam. He wrote many books and commentaries on these subjects, some of which are considered authoritative and influential in the Islamic world.</p>
- <p>Anwar Shah Kashmiri was born in a Sayyid family that traced its lineage to Imam Husayn, the grandson of Prophet Muhammad. He received his early education from his father and other local scholars in Kashmir. He then traveled to India and studied at various madrasas, including Darul Uloom Deoband, where he became a disciple of Mahmud al-Hasan, a leading figure of the Deobandi movement. He also studied under other eminent scholars, such as Rashid Ahmad Gangohi, Muhammad Qasim Nanautawi, and Ashraf Ali Thanwi.</p>
- <h2>Anwar Shah Kashmiri Books Urdu P</h2><br /><p><b><b>Download</b> &#9913; <a href="https://imgfil.com/2uy1JE">https://imgfil.com/2uy1JE</a></b></p><br /><br />
- <p>Anwar Shah Kashmiri served as the first principal of Madrasa Aminia in Delhi, where he taught Hadith and Fiqh. He also served as the fourth principal of Darul Uloom Deoband, where he taught Tafsir and Kalam. He was respected and admired by his students and colleagues for his vast knowledge, eloquence, piety, and humility. He also participated in the Khilafat Movement, a political campaign to restore the Ottoman Caliphate after World War I.</p>
- <p>Anwar Shah Kashmiri authored more than 100 books and treatises on various Islamic topics. Some of his most famous works are:</p>
- <ul>
- <li>Al-Arf al-Shadhi: A commentary on Sunan al-Tirmidhi, one of the six major collections of Hadith.</li>
- <li>Fayd al-Bari: A commentary on Sahih al-Bukhari, the most authentic collection of Hadith.</li>
- <li>Tafsir al-Quran al-Azim: A commentary on the Quran that combines rational and traditional approaches.</li>
- <li>Al-Urf al-Shadhi: A commentary on Al-Hidayah, a classical manual of Hanafi Fiqh.</li>
- <li>Anwar al-Kalam: A refutation of the arguments of the Mu'tazila, a rationalist school of Islamic theology.</li>
- </ul>
- <p>Anwar Shah Kashmiri died in Deoband at the age of 58. He was buried in the graveyard of Darul Uloom Deoband. His legacy lives on through his books and his students, who include some of the most prominent scholars of the 20th century, such as Muhammad Yusuf Banuri, Muhammad Zakariyya Kandhlawi, Husain Ahmad Madani, and Shabbir Ahmad Usmani.</p><p>Anwar Shah Kashmiri was not only a scholar and a jurist, but also a poet and a mystic. He composed many poems in Arabic, Persian, and Urdu, expressing his love for Allah and His Messenger. He also wrote some poems in praise of his teachers and his homeland. He was influenced by the Sufi teachings of Imam al-Ghazali, Ibn al-Arabi, and Abdul Qadir Jilani. He practiced various forms of dhikr (remembrance of Allah) and tasawwuf (spiritual purification).</p>
- <p>Anwar Shah Kashmiri was also a reformer and a revivalist. He advocated for the revival of the Islamic sciences and the preservation of the Islamic heritage. He opposed the innovations and deviations that had crept into the Muslim community over time. He also defended the Sunni creed and the Hanafi school of law from the attacks of the Shia, the Ahl al-Hadith, and the Salafi movements. He was a staunch supporter of the Ahl al-Sunnah wa al-Jama'ah (the people of the Sunnah and the consensus).</p>
- <p>Anwar Shah Kashmiri was a man of great vision and wisdom. He foresaw the challenges and opportunities that the Muslim world would face in the modern era. He urged the Muslims to unite under the banner of Islam and to cooperate with each other for the common good. He also encouraged them to seek knowledge from all sources and to benefit from the advancements of science and technology. He believed that Islam was compatible with reason and progress, and that it was the only solution for the problems of humanity.</p> d5da3c52bf<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Cstpatcher11 Exe.md DELETED
@@ -1,6 +0,0 @@
- <h2>cstpatcher11 exe</h2><br /><p><b><b>Download File</b> &#9999; <a href="https://imgfil.com/2uxZOk">https://imgfil.com/2uxZOk</a></b></p><br /><br />
- <br />
- View Cstpatcher11 Exe from the same97uyl by Julie Croft. Cst Er11 Exe 32bit Serial Registration Windows Download. Download: 31, 2020 - (x86 and x64) with *.exe, *.dll extensions and in files. The file CSTpatcher11.exe is 6144 bytes (6KB). Links for downloading this file ... Read more View Cstpatcher11 Exe from the same97uyl by Julie Croft. ... Cst Er11 Exe 32bit Serial Registration Windows Download. Download: 31, 2020 - (x86 and x64) with *.exe, *.dll extensions and in files. The file CSTpatcher11.exe is 6144 bytes (6KB). Links to download this file can be found below the page. This file is classified as dangerous! Be careful and use our antivirus products to prevent infecting your computer. Download CSTpatcher11.exe. .torrent file. 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Francine Dee Pornstar Book.md DELETED
@@ -1,6 +0,0 @@
- <h2>francine dee pornstar book</h2><br /><p><b><b>Download</b> &rArr; <a href="https://imgfil.com/2uxY2H">https://imgfil.com/2uxY2H</a></b></p><br /><br />
- <br />
- 3cee63e6c2<br />
- <br />
- <br />
- <p></p>
 
spaces/1phancelerku/anime-remove-background/Baby Cat Breeds Which One is Right for You?..md DELETED
@@ -1,156 +0,0 @@
-
- <h1>Baby Cats: Everything You Need to Know About These Cute Furry Friends</h1>
- <p>Have you ever wondered what makes baby cats so adorable? Or how to take care of them properly? Or what breeds of baby cats are best for your family? If you answered yes to any of these questions, then this article is for you. In this article, we will explore the fascinating world of baby cats, also known as kittens, and share some facts, tips, and stories that will make you fall in love with them even more.</p>
- <h2>Facts about Baby Cats</h2>
- <p>Baby cats are not just miniature versions of adult cats. They have their own unique characteristics, behaviors, and needs that make them special. Here are some facts that you may not know about baby cats.</p>
- <h2>baby cat</h2><br /><p><b><b>Download File</b> &raquo; <a href="https://jinyurl.com/2uNUdK">https://jinyurl.com/2uNUdK</a></b></p><br /><br />
- <h3>Development Stages of Baby Cats</h3>
- <p>Baby cats go through different development stages from birth to adulthood. According to Wikipedia, these stages are:</p>
- <ul>
- <li>Newborn stage (0 to 2 weeks): Baby cats are born with their eyes and ears closed, and they depend on their mother for survival. They cannot regulate their body temperature, walk, or meow well. They only drink their mother's milk and need to be stimulated by her to urinate or defecate.</li>
- <li>Transition stage (2 to 4 weeks): Baby cats start to open their eyes and ears, and they begin to explore their surroundings. They develop their sense of smell and taste, and they start to eat solid food. They also learn to groom themselves and others, and they play with their littermates.</li>
- <li>Socialization stage (4 to 8 weeks): Baby cats become more active and curious, and they interact with people and other animals. They learn to use the litter box, and they develop their hunting and stalking skills. They also form bonds with their mother and siblings, as well as their human caregivers.</li>
- <li>Juvenile stage (8 to 26 weeks): Baby cats grow rapidly and reach sexual maturity. They become more independent and adventurous, but they still need guidance and supervision. They also develop their personality and preferences, and they may show signs of territoriality or aggression.</li>
- <li>Adult stage (26 weeks onwards): Baby cats reach their full size and weight, and they establish their social status and territory. They may become less playful and more settled, but they still need attention and stimulation. They also need regular health check-ups and vaccinations.</li>
- </ul>
- <h3>Unusual Stories of Baby Cats</h3>
- <p>Baby cats are not only cute but also amazing. They can sometimes surprise us with their extraordinary abilities or experiences. Here are some unusual stories of baby cats that will make you smile or wonder.</p>
- <ul>
- <li>A kitten in Bali was adopted by a monkey! According to A-Z Animals, a wild long-tailed macaque found a tiny kitten abandoned in the forest and took care of it as his own. The monkey cuddled, carried, and protected the kitten, and introduced it to his family. The kitten seemed happy and healthy in his new home.</li>
- <li>A litter of kittens can have multiple fathers! According to WebMD, female cats can ovulate multiple times during a heat cycle, which means that they can mate with different males and produce offspring with different genetic fathers. This phenomenon is called superfecundity, and it can result in kittens with different colors or patterns.</li>
- <li>A kitten was born with two faces! According to Yahoo News, a rare kitten named Biscuits and Gravy was born with a condition called diprosopus, which means <p>that he had two faces, each with a mouth, nose, and eye. The kitten was born in Oregon, USA, and was named after a famous breakfast dish. The kitten's owner said that he ate well and was very affectionate. Sadly, the kitten passed away after four days due to health complications.</p>
- <h3>Differences between Baby Cats and Adult Cats</h3>
- <p>Baby cats and adult cats have some obvious differences, such as size, weight, and appearance. But they also have some less noticeable differences, such as metabolism, immunity, and behavior. Here are some of the main differences between baby cats and adult cats:</p>
- <table>
- <tr>
- <th>Baby Cats</th>
- <th>Adult Cats</th>
- </tr>
- <tr>
- <td>Have a higher metabolism and need more calories per pound of body weight</td>
- <td>Have a lower metabolism and need fewer calories per pound of body weight</td>
- </tr>
- <tr>
- <td>Have a weaker immune system and are more susceptible to infections and diseases</td>
- <td>Have a stronger immune system and are more resistant to infections and diseases</td>
- </tr>
- <tr>
- <td>Have softer, finer fur that may change color or texture as they grow older</td>
- <td>Have coarser, thicker fur that usually stays the same color and texture throughout their lives</td>
- </tr>
- <tr>
- <td>Have blue eyes that may change color as they mature</td>
- <td>Have various eye colors that are usually fixed by the time they are six months old</td>
- </tr>
- <tr>
- <td>Have more teeth (26) that are smaller and sharper than adult teeth</td>
- <td>Have fewer teeth (30) that are larger and duller than baby teeth</td>
- </tr>
- <tr>
- <td>Are more curious, playful, and energetic, and need more stimulation and socialization</td>
- <td>Are more calm, relaxed, and independent, and need less stimulation and socialization</td>
- </tr></table>
- <h2>Care Tips for Baby Cats</h2>
- <p>Baby cats require special care and attention to ensure their health and happiness. They depend on their mother or human caregiver for their basic needs, such as food, warmth, safety, and hygiene. Here are some care tips for baby cats that will help you provide the best possible environment for your furry friend.</p>
- <h3>Feeding and Grooming Baby Cats</h3>
- <p>Baby cats need proper nutrition to support their growth and development. If the mother cat is present, she will nurse her kittens until they are ready to wean at around four to six weeks of age. If the mother cat is absent or unable to nurse, you will have to bottle-feed the kittens with a special formula designed for kittens. You can purchase kitten milk replacement formula (KMR) at your local pet store or vet's office. Never feed a kitten cow's milk or other types of milk, as they can cause diarrhea, dehydration, and nutritional deficiencies. Follow the instructions on the package for how much and how often to feed the kittens. You may also need to stimulate the kittens' urination and defecation by gently rubbing their genital area with a warm, damp cloth after each feeding. As the kittens grow older, you can introduce them to solid food by offering them wet or dry kitten food mixed with some water or formula. Gradually reduce the amount of liquid until the kittens are eating solid food only by eight weeks of age.</p>
- <p>Baby cats also need regular grooming to keep their coat clean and healthy. If the mother cat is present, she will lick her kittens to groom them and remove any dirt or debris. If the mother cat is absent or unable to groom, you will have to do it yourself by using a soft brush or comb to gently remove any loose hair or mats. You can also use a damp cloth or cotton ball to wipe the kittens' eyes, ears, nose, and mouth if they are dirty or crusty. Be careful not to use any harsh chemicals or products that could irritate the kittens' skin or eyes. You can also trim the kittens' nails with a pair of nail clippers designed for cats if they are too long or sharp. Be careful not to cut too close to the quick (the pink part of the nail), as this could cause bleeding and pain.</p>
- <h3>Keeping Baby Cats Warm and Safe</h3>
- <p>Baby cats cannot regulate their body temperature well until they are about four weeks old. They rely on their mother or external sources of heat to keep them warm. If the mother cat is present, she will cuddle with her kittens in a cozy nest made of blankets or towels. If the mother cat is absent or unable to provide warmth, you will have to create a comfortable bed for the kittens in a draft-free corner of your home. You can use a cardboard box lined with soft materials, such as blankets, towels, or fleece. You can also add a heating pad, a hot water bottle, or a rice sock to provide extra warmth. Make sure to cover the heating device with a cloth and leave some space for the kittens to move away if they get too hot. Check the temperature of the bed regularly and adjust it as needed. The ideal temperature for newborn kittens is around 90°F (32°C), and it can be gradually lowered to 80°F (27°C) by the time they are four weeks old.</p>
- <p>Baby cat synonyms<br />
- Kitten pictures and facts<br />
- How to care for a newborn kitten<br />
- Best kitten food and toys<br />
- Baby cat breeds and characteristics<br />
- Kitten adoption and rescue<br />
- How to train a kitten to use the litter box<br />
- Baby cat names and meanings<br />
- Kitten health and vaccination<br />
- Baby cat videos and memes<br />
- How to introduce a kitten to other pets<br />
- Kitten behavior and development<br />
- Baby cat costumes and accessories<br />
- Kitten grooming and nail trimming<br />
- Baby cat sounds and communication<br />
- Kitten socialization and play<br />
- Baby cat allergies and remedies<br />
- Kitten growth and weight chart<br />
- Baby cat games and apps<br />
- Kitten nutrition and feeding schedule<br />
- How to choose a kitten from a litter<br />
- Baby cat wallpapers and backgrounds<br />
- Kitten anatomy and physiology<br />
- Baby cat crafts and DIY projects<br />
- Kitten dental care and teething<br />
- How to make a kitten feel comfortable at home<br />
- Baby cat quotes and sayings<br />
- Kitten eye color and vision<br />
- Baby cat coloring pages and activities<br />
- Kitten ear care and cleaning<br />
- How to travel with a kitten safely<br />
- Baby cat calendar and planner<br />
- Kitten genetics and coat patterns<br />
- Baby cat jokes and puns<br />
- Kitten enrichment and stimulation<br />
- How to bond with a kitten emotionally<br />
- Baby cat gifts and merchandise<br />
- Kitten flea treatment and prevention<br />
- Baby cat art and photography<br />
- Kitten fur types and textures<br />
- How to deal with a kitten's separation anxiety<br />
- Baby cat poetry and songs<br />
- Kitten personality types and traits<br />
- Baby cat history and folklore<br />
- Kitten skin care and grooming products</p>
- <p>Baby cats also need a safe and secure environment to prevent them from getting injured or lost. If the mother cat is present, she will protect her kittens from any potential threats or dangers. If the mother cat is absent or unable to provide safety, you will have to keep the kittens in a confined area, such as a room, a crate, or a pen. Make sure that the area is clean, quiet, and free of any hazards, such as wires, cords, sharp objects, toxic substances, or other pets. You can also provide some toys and scratching posts for the kittens to play with and exercise their claws. Monitor the kittens closely and do not let them roam around the house unsupervised until they are old enough and fully vaccinated.</p>
- <h3>Teaching Baby Cats to Use the Litter Box</h3>
- <p>Baby cats need to learn how to use the litter box properly to avoid making a mess in your home. If the mother cat is present, she will teach her kittens how to use the litter box by example. If the mother cat is absent or unable to train, you will have to do it yourself by following these steps:</p>
- <ol>
- <li>Choose a suitable litter box and litter for your kittens. The litter box should be large enough for the kittens to fit comfortably, but low enough for them to enter and exit easily. The litter should be unscented and clumping, as some kittens may try to eat scented or non-clumping litter.</li>
- <li>Place the litter box in a convenient and accessible location for your kittens. The location should be quiet, private, and away from their food and water bowls. You may need to place multiple litter boxes in different areas of your home if you have more than one kitten or a large space.</li>
- <li>Fill the litter box with about two inches of litter and scoop it daily. You can also sprinkle some baking soda or odor-neutralizing powder on the bottom of the litter box to reduce any unpleasant smells.</li>
- <li>Show your kittens where the litter box is and how to use it. You can do this by gently placing them in the litter box after they wake up, eat, or play, and praising them when they use it correctly. You can also scratch the litter with your finger or a toy to encourage them to dig and cover their waste.</li>
- <li>Avoid scolding or punishing your kittens if they have accidents outside the litter box. This may only make them fearful or confused. Instead, clean up the mess with an enzyme-based cleaner that eliminates any traces of odor, and redirect your kittens to the litter box.</li>
- </ol>
- <h2>Breeds of Baby Cats</h2>
- <p>Baby cats come in different shapes, sizes, colors, and personalities. Some breeds of baby cats are more popular than others because of their distinctive features or traits. Here are some of the most common breeds of baby cats that you may encounter or consider adopting.</p>
- <h3>Small Cat Breeds</h3>
- <p>Some breeds of baby cats are naturally small even when they grow up. These breeds are ideal for people who live in small spaces or prefer petite pets. Some examples of small cat breeds are:</p>
- <ul>
- <li>Singapura: This breed is considered the smallest domestic cat breed in the world, weighing only four to eight pounds on average. They have large ears, almond-shaped eyes, and short coats that come in one color: sepia agouti (brown ticked tabby). They are also very active, curious, and affectionate.</li>
- <li>Cornish Rex: This breed is known for its curly coat that feels like velvet. They have slender bodies, long legs, large ears, and oval-shaped eyes. They come in various colors and patterns, such as black, white, red, blue, cream, <p>Fluffy Cat Breeds</p>
- <p>If you love fluffy cats, you are not alone. Many people adore cats with long, soft, and fluffy fur that make them look like plush toys. Fluffy cats can be great cuddlers and companions, as well as beautiful to look at. However, they also require more grooming and care than short-haired cats, so you need to be prepared for that. Here are some of the most popular fluffy cat breeds that you may want to consider.</p>
- <h3>Somali Cat</h3>
- <p>The Somali cat is a long-haired version of the Abyssinian cat. They have the same ticked coat pattern, but with longer and silkier fur. They also have plumed tails, tufted ears, and ruffs around their necks. They come in various colors, such as ruddy, red, blue, and fawn. They are very active, playful, and intelligent cats that love to explore and interact with people. They also have a distinctive voice that they use to communicate their needs and feelings.</p>
- <h3>Birman Cat</h3>
- <p>The Birman cat is a sacred cat of Burma, where they were believed to be the companions of priests and temple guardians. They have semi-long fur that is silky and does not mat easily. They also have striking blue eyes and white \"gloves\" on their paws. They come in various colors, such as seal, blue, chocolate, lilac, red, cream, and tortie. They are very gentle, affectionate, and loyal cats that enjoy being with their human family. They are also very quiet and calm cats that do not demand much attention.</p>
- <h3>Siberian Cat</h3>
- <p>The Siberian cat is a natural breed from Russia, where they have adapted to the harsh climate and terrain. They have thick, water-repellent coats that protect them from the cold and snow. They also have large paws that act like snowshoes and help them balance on trees. They come in various colors and patterns, such as solid, tabby, tortie, smoke, and silver. They are very strong, agile, and athletic cats that love to climb and jump. They are also very friendly, sociable, and playful cats that get along well with children and other pets.</p>
- <h3>Norwegian Forest Cat</h3>
- <p>The Norwegian Forest cat is another natural breed from Scandinavia, where they have also developed thick coats to survive the cold weather. They have long guard hairs that cover a dense undercoat, as well as bushy tails and ruffs around their necks. They come in various colors and patterns, such as black, white, red, blue, cream, silver, tabby, <p>Kid-Friendly Cat Breeds</p>
- <p>If you have children or plan to have them in the future, you may want to choose a cat breed that is known for being kid-friendly. These breeds are typically gentle, patient, tolerant, and playful with kids of all ages. They also enjoy being part of a family and can adapt to different lifestyles and environments. Here are some of the best cat breeds for kids that you may want to consider.</p>
- <h3>Birman Cat</h3>
- <p>We already mentioned the birman cat as one of the best fluffy cat breeds, but it is also one of the best cat breeds for kids. The birman cat is very gentle, affectionate, and loyal to its human family. It loves to cuddle and be petted, but it is not demanding or clingy. It is also very smart and curious, and can learn tricks and games easily. The birman cat gets along well with other pets and strangers, and can handle loud noises and changes in routine. It is also very beautiful, with its long silky coat, blue eyes, and white gloves.</p>
- <h3>Ragdoll Cat</h3>
- <p>The ragdoll cat is another fluffy breed that is great for kids. The ragdoll cat is named for its habit of going limp when picked up, like a ragdoll. It is very relaxed, laid-back, and easygoing, and does not mind being carried around or dressed up by kids. It is also very affectionate, friendly, and sociable, and loves to be with its human family. It is not very vocal or active, but it enjoys playing with toys and following its people around the house. The ragdoll cat has a semi-long coat that does not shed much or mat easily, and comes in various colors and patterns.</p>
- <h3>Himalayan Cat</h3>
- <p>The Himalayan cat is a cross between a Persian cat and a Siamese cat. It has the long fluffy coat and flat face of a Persian, and the pointed coloration and blue eyes of a Siamese. It is a medium-sized cat that weighs about 10 pounds on average. The Himalayan cat is very sweet, gentle, and affectionate, and loves to be pampered and petted by its human family. It is also very quiet, calm, and docile, and does not mind being left alone for short periods of time. The Himalayan cat needs regular grooming to keep its coat healthy and prevent mats and tangles.</p>
- <h3>Maine Coon Cat</h3>
- <p>The Maine coon cat is one of the largest domestic cat breeds in the world, weighing up to 20 pounds or more. It has a thick long coat that protects it from the cold weather of its native Maine, as well as large paws, ears, and tail. It comes in various colors and patterns, such as solid, tabby, tortie, smoke, or silver. The Maine coon cat is very friendly, playful, and intelligent, and loves to interact with its human family. It is also very adaptable and can live in different climates and environments. The Maine coon cat needs regular brushing to keep its coat shiny and smooth.</p>
- <h3>Abyssinian Cat</h3>
- <p>The Abyssinian cat is a small but athletic cat that weighs about 10 pounds on average. It has a short ticked coat that comes in various colors, such as ruddy, red, blue, or cinnamon. It has large ears, almond-shaped eyes, and a slender body. The Abyssinian cat is very active, curious, and outgoing, and loves to explore and play with its human family. It is also very smart and can learn tricks and games easily. The Abyssinian cat needs a lot of stimulation and attention to keep it happy and healthy.</p> <h2>Conclusion</h2>
- <p>Baby cats are wonderful creatures that can bring joy and happiness to your life. They are adorable, fascinating, and diverse, and they deserve the best care and love possible. Whether you are looking for a small, fluffy, or kid-friendly cat breed, you can find the perfect match for your family and lifestyle. If you are ready to adopt a baby cat, you can visit your local shelter or rescue group and give a home to a furry friend in need. You will not regret it!</p>
- <h2>FAQs</h2>
- <p>Here are some of the most frequently asked questions and answers about baby cats that you may find helpful.</p>
- <h3>How long do baby cats stay with their mother?</h3>
- <p>Baby cats usually stay with their mother until they are about eight to twelve weeks old. This is the ideal time for them to learn social and survival skills from their mother and siblings, as well as to be fully weaned and vaccinated. However, some circumstances may require separating the kittens from their mother earlier or later than this period. For example, if the mother cat is sick or injured, or if the kittens are orphaned or in danger, they may need to be taken care of by a human caregiver as soon as possible. On the other hand, if the mother cat and kittens are in a safe and comfortable environment, they may stay together longer than twelve weeks until they find suitable homes.</p>
- <h3>How often do baby cats sleep?</h3>
- <p>Baby cats sleep a lot more than adult cats. They can sleep up to 20 hours a day, depending on their age and activity level. Newborn kittens sleep almost all the time, waking up only to feed and eliminate. As they grow older, they become more awake and playful, but they still need plenty of rest to support their growth and development. Sleeping is also a way for kittens to bond with their mother and littermates, as well as to feel safe and secure.</p>
- <h3>How can I tell the gender of a baby cat?</h3>
- <p>Telling the gender of a baby cat can be tricky, especially when they are very young. The easiest way to tell the difference is by looking at the distance between the anus and the genital opening. Male kittens have a greater distance between these two openings than female kittens, and they also have a small bump that will become the scrotum as they mature. Female kittens have a smaller distance between these two openings than male kittens, and they also have a slit-like opening that will become the vulva as they mature. You can also look at the color of the kitten's coat, as some colors are more common in one gender than the other. For example, tortoiseshell and calico kittens are almost always female, while orange tabby kittens are more likely to be male.</p>
- <h3>How can I name my baby cat?</h3>
- <p>Naming your baby cat is a fun and creative process that can reflect your personality and preferences. You can choose a name based on your kitten's appearance, behavior, breed, or origin. You can also choose a name based on your favorite characters, celebrities, places, or things. You can also use online tools or books to generate or browse through thousands of possible names for your kitten. The most important thing is to choose a name that you like and that suits your kitten's personality.</p>
- <h3>How can I train my baby cat?</h3>
- <p>Training your baby cat is important to teach it good manners and habits, as well as to prevent or correct any unwanted behaviors. You can start training your kitten as early as possible, using positive reinforcement and gentle guidance. You can use treats, toys, praise, or affection as rewards for good behavior, and avoid using punishment or force for bad behavior. You can also use clicker training or target training to teach your kitten various commands or tricks. Some of the basic things that you can train your kitten are: how to use the litter box, how to scratch appropriately, how to come when called, how to sit or stay on command, how to walk on a leash, how to get along with other pets or people.</p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Bloons TD 6 APK 36.3 el juego de torres de defensa ms divertido y adictivo.md DELETED
@@ -1,146 +0,0 @@
- <br />
- <h1>Bloons TD 6 APK Ultima Version: A Guide for Android Users</h1>
- <p>If you are a fan of tower defense games, you might have heard of Bloons TD 6, a popular game developed by Ninja Kiwi. Bloons TD 6 is a game where you have to craft your perfect defense from a combination of powerful Monkey Towers and awesome Heroes, then pop every last invading Bloon. It is a game that offers endless hours of strategy gaming with regular updates, boss events, odysseys, quests, trophy store, content browser, and more.</p>
- <p>But what if you want to play Bloons TD 6 on your Android device without spending any money? Well, there is a way to do that. You can download and install Bloons TD 6 APK ultima version, which is a modified version of the original game that allows you to enjoy all the features and content for free. In this article, we will show you how to do that and why you should choose Bloons TD 6 APK ultima version over the official version. We will also give you some tips and tricks for playing Bloons TD 6 on your Android device.</p>
- <h2>bloons td 6 apk ultima version</h2><br /><p><b><b>Download</b> &#10001; <a href="https://jinyurl.com/2uNPFK">https://jinyurl.com/2uNPFK</a></b></p><br /><br />
- <h2>What is Bloons TD 6?</h2>
- <h3>A smash hit tower defense game</h3>
- <p>Bloons TD 6 is the latest installment in the Bloons Tower Defense series, which has been around for over a decade. It is a game that challenges you to stop the invasion of colorful balloons (called Bloons) by placing various types of Monkey Towers along their path. Each Monkey Tower has its own unique abilities and upgrades that can help you pop the Bloons more effectively. You can also use Heroes, which are powerful characters that have special skills and can level up during the game.</p>
- <p>Bloons TD 6 has several game modes and difficulty levels that can suit different preferences and skill levels. You can play solo or with up to three other players in co-op mode. You can also create your own challenges and odysseys using the content browser and share them with other players online.</p>
- <h3>Features and content of Bloons TD 6</h3>
- <p>Bloons TD 6 is a game that offers a lot of features and content that make it fun and engaging. Some of the features and content are:</p>
- <ul>
- <li>23 powerful Monkey Towers, each with 3 upgrade paths and unique activated abilities.</li>
- <li>Paragons! Explore the incredible power of the newest Paragon upgrades.</li>
- <li>14 diverse Heroes, with 20 signature upgrades and 2 special abilities. Plus, unlockable skins and voiceovers!</li>
- <li>Regular updates! Ninja Kiwi releases several updates every year with new characters, features, and gameplay.</li>
- <li>Boss Events! Fearsome Boss Bloons will challenge even the strongest defenses.</li>
- <li>Odysseys! Battle through a series of maps connected by their theme, rules, and rewards.</li>
- <li>Contested Territory! Join forces with other players and battle for territory against five other teams. Capture tiles on a shared map and compete on the leaderboards.</li>
- <li>Quests! Delve into what makes the Monkeys tick with Quests, crafted to tell tales and share knowledge.</li>
- <li>Trophy Store! Earn Trophies to unlock dozens of cosmetic items that let <p>you customize your Monkeys, Bloons, and the world around you.</li>
- <li>Content Browser! Create your own challenges and odysseys using the in-game editor and share them with other players online.</li>
- <li>100+ original maps, each with their own unique shape, size, and theme.</li>
- <li>10 special types of Bloons, each with their own abilities and resistances.</li>
- <li>Colorblind mode, cloud save, offline play, and more accessibility options.</li>
- </ul>
- <h2>How to download and install Bloons TD 6 APK ultima version?</h2>
- <h3>Requirements and compatibility</h3>
- <p>To download and install Bloons TD 6 APK ultima version, you need to have an Android device that meets the following requirements:</p>
- <ul>
- <li>Android version 5.0 or higher</li>
- <li>At least 2 GB of RAM</li>
- <li>At least 1 GB of free storage space</li>
- <li>A stable internet connection</li>
- </ul>
- <p>Bloons TD 6 APK ultima version is compatible with most Android devices, including smartphones, tablets, and emulators. However, some devices may not support the game or may experience performance issues. If you encounter any problems, you can contact Ninja Kiwi support for assistance.</p>
- <h3>Steps to download and install</h3>
- <p>To download and install Bloons TD 6 APK ultima version, you need to follow these steps:</p>
- <p>bloons td 6 mod apk ultima version gratis<br />
- descargar bloons td 6 apk ultima version full<br />
- bloons td 6 apk ultima version mega<br />
- bloons td 6 apk ultima version android<br />
- bloons td 6 apk ultima version mediafire<br />
- bloons td 6 apk ultima version sin internet<br />
- bloons td 6 apk ultima version hackeado<br />
- bloons td 6 apk ultima version actualizado<br />
- bloons td 6 apk ultima version premium<br />
- bloons td 6 apk ultima version español<br />
- bloons td 6 apk ultima version infinito<br />
- bloons td 6 apk ultima version online<br />
- bloons td 6 apk ultima version todo desbloqueado<br />
- bloons td 6 apk ultima version para pc<br />
- bloons td 6 apk ultima version uptodown<br />
- bloons td 6 apk ultima version ilimitado<br />
- bloons td 6 apk ultima version original<br />
- bloons td 6 apk ultima version sin anuncios<br />
- bloons td 6 apk ultima version facil<br />
- bloons td 6 apk ultima version divertido<br />
- bloons td 6 apk ultima version nuevo<br />
- bloons td 6 apk ultima version rapido<br />
- bloons td 6 apk ultima version seguro<br />
- bloons td 6 apk ultima version oficial<br />
- bloons td 6 apk ultima version completo<br />
- bloons td 6 apk ultima version mejorado<br />
- bloons td 6 apk ultima version clasico<br />
- bloons td 6 apk ultima version moderno<br />
- bloons td 6 apk ultima version increible<br />
- bloons td 6 apk ultima version fantastico<br />
- bloons td 6 apk ultima version genial<br />
- bloons td 6 apk ultima version divertidisimo<br />
- bloons td 6 apk ultima version adictivo<br />
- bloons td 6 apk ultima version entretenido<br />
- bloons td 6 apk ultima version emocionante<br />
- bloons td 6 apk ultima version espectacular<br />
- bloons td 6 apk ultima version maravilloso<br />
- bloons td 6 apk ultima version sorprendente<br />
- bloons td 6 apk ultima version impresionante<br />
- bloons td 6 apk ultima version extraordinario<br />
- bloons td 6 apk ultima version magnifico<br />
- bloons td 6 apk ultima version asombroso<br />
- bloons td 6 apk ultima version estupendo<br />
- bloons td 6 apk ultima version fabuloso<br />
- bloons td 6 apk ultima version sensacional<br />
- bloons td 6 apk ultima version formidable<br />
- bloons td 6 apk ultima version excelente<br />
- bloons td 6 apk ultima version sublime<br />
- bloons td 6 apk ultima version perfecto</p>
- <ol>
- <li>Go to a trusted website that offers Bloons TD 6 APK ultima version for download. For example, you can use this link: [text].</li>
- <li>Click on the download button and wait for the APK file to be downloaded to your device.</li>
- <li>Once the download is complete, locate the APK file in your device's file manager and tap on it to start the installation process.</li>
- <li>If you see a warning message that says "Install blocked", go to your device's settings and enable the option to install apps from unknown sources.</li>
- <li>Follow the on-screen instructions to complete the installation process.</li>
- <li>Launch the game and enjoy playing Bloons TD 6 APK ultima version for free!</li>
- </ol>
- <h2>Why choose Bloons TD 6 APK ultima version?</h2>
- <h3>Benefits of using APK files</h3>
- <p>An APK file is an Android application package file that contains all the files and data needed to run an app on an Android device. By using APK files, you can enjoy some benefits that are not available in the official version of the app. Some of these benefits are:</p>
- <ul>
- <li>You can access apps that are not available in your region or country.</li>
- <li>You can get apps that are not compatible with your device or operating system.</li>
- <li>You can get apps that are no longer supported or updated by the developers.</li>
- <li>You can get apps that have extra features or modifications that are not present in the official version.</li>
- <li>You can get apps that are free of charge or have no in-app purchases or ads.</li>
- </ul>
- <h3>Advantages of playing Bloons TD 6 on Android</h3>
- <p>Bloons TD 6 is a game that can be played on various platforms, including PC, iOS, and Android. However, playing Bloons TD 6 on Android has some advantages that make it more enjoyable and convenient. Some of these advantages are:</p>
- <ul>
- <li>You can play Bloons TD 6 anytime and anywhere with your Android device, as long as you have a battery and an internet connection.</li>
- <li>You can play Bloons TD 6 with touch controls that are intuitive and responsive, giving you more control over your Monkeys and Heroes.</li>
- <li>You can play Bloons TD 6 with other Android users in co-op mode or contested territory mode, as well as cross-platform players on PC and iOS.</li>
- <li>You can play Bloons TD 6 with high-quality graphics and sound effects that are optimized for your Android device's screen size and resolution.</li>
- <li>You can play Bloons TD 6 with cloud save functionality that allows you to sync your progress across multiple devices using your Ninja Kiwi account.</li>
- </ul>
- <h2>Tips and tricks for playing Bloons TD 6</h2>
- <h3>How to use Monkey Towers and Heroes effectively</h3>
- <p>Bloons TD 6 is a game that requires strategic thinking and planning to pop all the Bloons before they reach the end of the map. To do that, you need to use Monkey Towers and Heroes effectively. Here are some tips and tricks for doing so:</p>
- <ul>
- <li>Choose Monkey Towers that match the type of Bloons you are facing. For example, use Dart Mon keys to pop regular Bloons, use Bomb Shooters to pop Lead Bloons, use Ice Monkeys to slow down Bloons, and use Monkey Subs to detect Camo Bloons.</li>
- <li>Upgrade your Monkey Towers wisely. Each Monkey Tower has three upgrade paths that offer different benefits and trade-offs. You can only choose two paths per tower, so you need to decide which ones suit your strategy best. For example, you can upgrade the Dart Monkey to have a Crossbow that shoots faster and pierces more Bloons, or a Juggernaut that shoots giant spiked balls that can pop Lead and Frozen Bloons.</li>
- <li>Use your Heroes strategically. Heroes are powerful units that can make a big difference in your defense. Each Hero has a unique skill set and personality that can complement your Monkey Towers. For example, you can use Quincy, the Archer, to deal extra damage to MOAB-class Bloons, or use Obyn Greenfoot, the Forest Guardian, to buff nearby Magic Monkeys and summon Brambles and Wall of Trees.</li>
- <li>Place your Monkey Towers and Heroes in optimal locations. You need to consider the range, line of sight, and placement bonuses of your Monkey Towers and Heroes when placing them on the map. For example, you can place Sniper Monkeys on high ground to increase their range and visibility, or place Banana Farms near the entrance to collect more bananas.</li>
- </ul>
- <h3>How to earn Trophies and unlock cosmetic items</h3>
- <p>Bloons TD 6 is a game that rewards you for your achievements and progress. You can earn Trophies by completing various tasks and challenges in the game, such as popping a certain number of Bloons, winning a certain number of games, or reaching a certain level. You can then use Trophies to unlock cosmetic items in the Trophy Store, such as skins, decals, music tracks, profile icons, and more. Here are some tips and tricks for earning Trophies and unlocking cosmetic items:</p>
- <ul>
- <li>Play different game modes and difficulty levels. You can earn more Trophies by playing harder game modes and difficulty levels, such as Impoppable mode or CHIMPS mode. You can also earn more Trophies by playing different maps and challenges.</li>
- <li>Complete Quests and Boss Events. Quests are special missions that give you specific objectives and rewards. Boss Events are limited-time events that pit you against powerful Boss Bloons with unique abilities. You can earn Trophies by completing Quests and Boss Events.</li>
- <li>Participate in Contested Territory. Contested Territory is a competitive mode where you have to capture tiles on a shared map and compete with other players on the leaderboards. You can earn Trophies by capturing tiles and holding them for as long as possible.</li>
- <li>Create and share your own challenges and odysseys. You can use the Content Browser to create your own challenges and odysseys using the in-game editor. You can then share them with other players online and earn Trophies by getting likes and plays.</li>
- </ul>
- <h2>Conclusion and FAQs</h2>
- <p>Bloons TD 6 is a fun and addictive tower defense game that offers a lot of features and content for Android users. You can download and install Bloons TD 6 APK ultima version for free and enjoy all the benefits of using APK files. You can also use our tips and tricks to improve your gameplay and earn more Trophies.</p>
- <p>If you have any questions about Bloons TD 6 APK ultima version or the game itself, you can check out these FAQs:</p>
- <h4>Q: Is Bloons TD 6 APK ultima version safe to use?</h4>
- <p>A: Yes, Bloons TD 6 APK ultima version is safe to use as long as you download it from a trusted website that does not contain any viruses or malware. However, you should always be careful when downloading any APK files from unknown sources and scan them with an antivirus app before installing them.</p>
- <h4>Q: Can I play Bloons TD 6 APK ultima version online with other players?</h4>
- <p>A: Yes, you can play Bloons TD 6 APK ultima version online with other players in co-op mode or contested territory mode. However, you may not be able to play with players who are using the official version of the game or a different version of the APK file.</p>
- <h4>Q: Can I update Bloons TD 6 APK ultima version to get the latest features and content?</h4>
- <p>A: Yes, you can update Bloons TD 6 APK ultima version to get the latest features and content by downloading the new version of the APK file from the same website where you got the previous one. However, you may lose your progress or data if you uninstall the old version before installing the new one.</p>
- <h4>Q: Can I transfer my progress or data from Bloons TD 6 APK ultima version to the official version or another device?</h4>
- <p>A: Yes, you can transfer your progress or data from Bloons TD 6 APK ultima version to the official version or another device by using your Ninja Kiwi account. You need to create a Ninja Kiwi account and link it to your game in the settings menu. Then, you can log in to your Ninja Kiwi account on any device or platform and sync your progress and data.</p>
- <h4>Q: How can I contact Ninja Kiwi support if I have any issues or feedback about Bloons TD 6?</h4>
- <p>A: You can contact Ninja Kiwi support by using the in-game support button in the settings menu. You can also visit their website at [text] or their social media pages at [text] and [text]. Ninja Kiwi is always happy to hear from their players and will try to help you as soon as possible.</p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Build Your Dream City with Idle Island - City Idle Tycoon Mod APK - No Ads No Root.md DELETED
@@ -1,81 +0,0 @@
- <br />
- <h1>Idle Island City Idle Tycoon Mod APK: Build Your Dream City</h1>
- <p>Do you love city building games? Do you want to create your own island paradise and become a tycoon? If yes, then you should try Idle Island City Idle Tycoon, a popular mobile simulator game that allows you to build your own city and become the ultimate tycoon.</p>
- <h2>idle island city idle tycoon mod apk</h2><br /><p><b><b>Download File</b> &#9745; <a href="https://jinyurl.com/2uNQju">https://jinyurl.com/2uNQju</a></b></p><br /><br />
- <h2>What is Idle Island City Idle Tycoon?</h2>
- <p>Idle Island City Idle Tycoon is a game developed by RSGapps - Idle Tycoon Games. It is available for Android devices and can be downloaded from Google Play Store. In this game, you start with a small island and a few buildings. Your goal is to expand your city by building more houses, factories, shops, hotels, airports, and other facilities. You also have to manage your economy and resources, such as money, energy, population, and happiness. You can unlock new islands and buildings as you progress in the game. You can also hire managers and advisors to help you run your city more efficiently. The game has stunning graphics and animations that make your city look realistic and lively.</p>
- <h3>Features of Idle Island City Idle Tycoon</h3>
- <h4>- Build and upgrade your city</h4>
- <p>You can build various types of buildings in your city, such as residential, commercial, industrial, recreational, and cultural. You can also upgrade them to increase their productivity and profitability. You can customize your city by choosing different styles and themes for your buildings. You can also decorate your city with parks, trees, roads, bridges, monuments, and other items.</p>
- <h4>- Manage your economy and resources</h4>
- <p>You have to balance your income and expenses in your city. You have to collect money from your buildings and use it to build more facilities or upgrade them. You also have to pay taxes, salaries, maintenance costs, and other expenses. You have to monitor your energy consumption and production, as well as your population growth and happiness level. You have to make sure that your city is sustainable and profitable.</p>
- <p>idle island city building tycoon mod apk unlimited money<br />
- idle island city idle tycoon hack apk download<br />
- idle island city builder tycoon mod apk latest version<br />
- idle island city building idle tycoon cheats<br />
- idle island city idle tycoon mod apk happymod<br />
- idle island city builder tycoon mod apk android 1<br />
- idle island city building idle tycoon tips and tricks<br />
- idle island city idle tycoon mod apk wendgames<br />
- idle island city builder tycoon mod apk revdl<br />
- idle island city building idle tycoon gameplay<br />
- idle island city idle tycoon mod apk free shopping<br />
- idle island city builder tycoon mod apk rexdl<br />
- idle island city building idle tycoon guide<br />
- idle island city idle tycoon mod apk 1.13.10<br />
- idle island city builder tycoon mod apk 1.13.12<br />
- idle island city building idle tycoon review<br />
- idle island city idle tycoon mod menu apk<br />
29
- idle island city builder tycoon mod apk 1.06<br />
30
- idle island city building idle tycoon wiki<br />
31
- idle island city idle tycoon unlimited currency<br />
32
- idle island city builder tycoon mod apk 1.0.6<br />
33
- idle island city building idle tycoon reddit<br />
34
- idle island city idle tycoon hack version download<br />
35
- idle island city builder tycoon mod apk 1.13.9<br />
36
- idle island city building idle tycoon best layout<br />
37
- how to play idle island city idle tycoon<br />
38
- how to download idle island city builder tycoon mod apk<br />
39
- how to get free gems in idle island city building idle tycoon<br />
40
- how to reset progress in idle island city idle tycoon<br />
41
- how to update idle island city builder tycoon mod apk<br />
42
- is there a pc version of idle island city building idle tycoon<br />
43
- what is the max level in idle island city idle tycoon<br />
44
- when was the last update of idle island city builder tycoon mod apk<br />
45
- where to find promo codes for idle island city building idle tycoon<br />
46
- who developed the game of idle island city idl</p>
47
- <h4>- Unlock new islands and buildings</h4>
48
- <p>You can unlock new islands as you progress in the game. Each island has its own theme and challenges. You can also unlock new buildings that offer different benefits and features. You can discover more than 100 buildings in the game.</p>
49
- <h4>- Hire managers and advisors</h4>
50
- <p>You can hire managers to automate your buildings and increase their efficiency. You can also hire advisors to give you tips and advice on how to improve your city. They will also reward you with bonuses and gifts.</p>
51
- <h4>- Enjoy stunning graphics and animations</h4>
52
- <p>The game has amazing graphics and animations that make your city look realistic and lively. You can see the day-night cycle, weather effects, traffic movements, people activities, and other details in your city. You can also zoom in and out to see your city from different angles.</p>
53
- <h3>Why use Idle Island City Idle Tycoon Mod APK?</h3>
54
- <p>If you want to enjoy the game without any limitations or interruptions, you should use Idle Island City Idle Tycoon Mod APK. This is a modified version of the game that gives you access to unlimited money, no ads, and easy installation.</p>
55
- <h4>- Unlimited money</h4>
56
- <p>With Idle Island City Idle Tycoon Mod APK, you will have unlimited money in the game. This means that you can build or upgrade anything you want without worrying about the cost. You can also buy any items or boosts that you want from the shop. You can also skip the waiting time for building or upgrading your facilities. You can enjoy the game without any financial constraints.</p>
57
- <h4>- No ads</h4>
58
- <p>With Idle Island City Idle Tycoon Mod APK, you will not see any ads in the game. This means that you can play the game without any interruptions or distractions. You can also save your data and battery life by avoiding the ads. You can enjoy the game without any annoyance.</p>
59
- <h4>- Easy to install and use</h4>
60
- <p>With Idle Island City Idle Tycoon Mod APK, you will not have any trouble installing or using the game. You just need to download the APK file from a reliable source and install it on your device. You do not need to root your device or use any other tools. You can also update the game easily whenever there is a new version available. You can enjoy the game without any hassle.</p>
61
- <h3>How to download and install Idle Island City Idle Tycoon Mod APK?</h3>
62
- <p>If you want to download and install Idle Island City Idle Tycoon Mod APK, you can follow these simple steps:</p>
63
- <ol>
64
- <li>Click on this link to download the APK file: <a href="">Idle Island City Idle Tycoon Mod APK Download</a></li>
65
- <li>Allow your device to install apps from unknown sources by going to Settings > Security > Unknown Sources and enabling it.</li>
66
- <li>Locate the downloaded APK file in your file manager and tap on it to install it.</li>
67
- <li>Launch the game and enjoy building your dream city.</li>
68
- </ol>
69
- <h3>Conclusion</h3>
70
- <p>Idle Island City Idle Tycoon is a fun and addictive city building game that lets you create your own island paradise and become a tycoon. You can build various types of buildings, manage your economy and resources, unlock new islands and buildings, hire managers and advisors, and enjoy stunning graphics and animations. If you want to play the game without any limitations or interruptions, you should use Idle Island City Idle Tycoon Mod APK. This will give you access to unlimited money, no ads, and easy installation. You can download and install the game easily by following the steps above. So, what are you waiting for? Download Idle Island City Idle Tycoon Mod APK now and start building your dream city.</p>
71
- <h3>FAQs</h3>
72
- <p>Here are some frequently asked questions about Idle Island City Idle Tycoon Mod APK:</p>
73
- <table>
74
- <tr><td><b>Q: Is Idle Island City Idle Tycoon Mod APK safe to use?</b></td><td><b>A: Yes, Idle Island City Idle Tycoon Mod APK is safe to use as long as you download it from a trusted source. It does not contain any viruses or malware that can harm your device or data.</b></td></tr>
75
- <tr><td><b>Q: Do I need an internet connection to play Idle Island City Idle Tycoon Mod APK?</b></td><td><b>A: No, you do not need an internet connection to play Idle Island City Idle Tycoon Mod APK. You can play the game offline without any problem.</b></td></tr>
76
- <tr><td><b>Q: How can I update Idle Island City Idle Tycoon Mod APK?</b></td><td><b>A: You can update Idle Island City Idle Tycoon Mod APK by downloading the latest version of the APK file from the same source and installing it over the existing one. You do not need to uninstall the previous version.</b></td></tr>
77
- <tr><td><b>Q: Can I play Idle Island City Idle Tycoon Mod APK on PC?</b></td><td><b>A: Yes, you can play Idle Island City Idle Tycoon Mod APK on PC by using an Android emulator such as Bluestacks or Nox Player. You just need to install the emulator on your PC and then install the APK file on it.</b></td></tr>
78
- <tr><td><b>Q: Can I transfer my progress from the original game to Idle Island City Idle Tycoon Mod APK?</b></td><td><b>A: Yes, you can transfer your progress from the original game to Idle Island City Idle Tycoon Mod APK by using a cloud save feature. You just need to connect your game account to Google Play Games or Facebook and then sync your data across devices.</b></td></tr>
79
- </table></p> 401be4b1e0<br />
80
- <br />
81
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Driven The Movie That Changed the Face of Motorsports.md DELETED
@@ -1,162 +0,0 @@
1
-
2
- <h1>Download Driven Marketing: How to Use Data to Boost Your Marketing ROI</h1>
3
- <p>Data is the new oil in the digital economy. It fuels innovation, growth, and competitive advantage. But how can you use data to power up your marketing efforts?</p>
4
- <p>One way is to leverage the data from downloads. Downloads are any actions that involve downloading a file, such as an ebook, a report, a podcast, or a video. Downloads are valuable sources of data because they reveal a lot about your audience's interests, preferences, behaviors, and needs.</p>
5
- <h2>download driven</h2><br /><p><b><b>Download Zip</b> &rarr;&rarr;&rarr; <a href="https://jinyurl.com/2uNORn">https://jinyurl.com/2uNORn</a></b></p><br /><br />
6
- <p>In this article, we will explain what download driven marketing is and why it is important for your business. We will also show you how to implement download driven marketing in your business and share some examples of successful download driven marketing campaigns.</p>
7
- <h2>What is download driven marketing and why is it important?</h2>
8
- <h3>Download driven marketing is the use of data from downloads to optimize marketing campaigns and strategies.</h3>
9
- <p>Download driven marketing is a type of data-driven marketing that focuses on using the data from downloads to improve your marketing performance. Download driven marketing involves collecting, analyzing, and using the data from downloads to:</p>
10
- <ul>
11
- <li>Create relevant and valuable content and offers that attract and engage your audience.</li>
12
- <li>Segment your audience based on their download behavior and interests.</li>
13
- <li>Personalize your content and offers based on their download history and profile.</li>
14
- <li>Measure the effectiveness of your marketing efforts and improve your conversion rates.</li>
15
- </ul>
16
- <h3>Download driven marketing can help you:</h3>
17
- <h4>- Understand your audience's needs, preferences, and behaviors.</h4>
18
- <p>By analyzing the data from downloads, you can gain insights into what your audience is looking for, what they like, what they dislike, how they consume content, and how they make decisions. This can help you create content and offers that match their needs and expectations.</p>
19
- <h4>- Segment your audience based on their download behavior and interests.</h4>
20
- <p>By using the data from downloads, you can segment your audience into different groups based on their download behavior and interests. For example, you can segment them by:</p>
21
- <ul>
22
- <li>The type of content they download (e.g., ebooks, podcasts, videos).</li>
23
- <li>The topic of the content they download (e.g., social media, SEO, email marketing).</li>
24
- <li>The frequency of their downloads (e.g., once a month, once a week, once a day).</li></ul> <h4>- Personalize your content and offers based on their download history and profile.</h4>
25
- <p>By using the data from downloads, you can personalize your content and offers based on their download history and profile. For example, you can:</p>
26
- <ul>
27
- <li>Send them follow-up emails with more content and offers related to their previous downloads.</li>
28
- <li>Show them personalized recommendations and suggestions based on their download preferences.</li>
29
- <li>Create dynamic landing pages and web pages that display content and offers tailored to their download interests.</li>
30
- </ul>
31
- <h4>- Measure the effectiveness of your marketing efforts and improve your conversion rates.</h4>
32
- <p>By using the data from downloads, you can measure the effectiveness of your marketing efforts and improve your conversion rates. For example, you can:</p>
33
- <p>download driven: how to create a high-converting lead magnet<br />
34
- download driven: the ultimate guide to email marketing<br />
35
- download driven: how to optimize your landing pages for conversions<br />
36
- download driven: how to use content upgrades to grow your email list<br />
37
- download driven: how to create a killer lead magnet in 5 easy steps<br />
38
- download driven: how to use SEO keywords to rank higher on Google<br />
39
- download driven: how to create a content marketing strategy that drives downloads<br />
40
- download driven: how to use social media to promote your lead magnets<br />
41
- download driven: how to measure and improve your conversion rate<br />
42
- download driven: how to use webinars to generate more leads and sales<br />
43
- download driven: how to create a viral ebook that gets shared and downloaded<br />
44
- download driven: how to use video marketing to attract and engage your audience<br />
45
- download driven: how to create a podcast that drives downloads and subscribers<br />
46
- download driven: how to use quizzes and surveys to generate leads and feedback<br />
47
- download driven: how to create a blog that drives traffic and downloads<br />
48
- download driven: how to use influencer marketing to boost your credibility and reach<br />
49
- download driven: how to create a free course that educates and converts<br />
50
- download driven: how to use email automation to nurture and sell to your leads<br />
51
- download driven: how to create a membership site that drives recurring revenue<br />
52
- download driven: how to use gamification to increase engagement and retention<br />
53
- download driven: how to create a mobile app that drives downloads and reviews<br />
54
- download driven: how to use chatbots and live chat to capture and qualify leads<br />
55
- download driven: how to create a landing page that converts like crazy<br />
56
- download driven: how to use testimonials and case studies to increase trust and conversions<br />
57
- download driven: how to create a white paper that showcases your expertise and authority<br />
58
- download driven: how to use analytics and split testing to optimize your campaigns<br />
59
- download driven: how to create a checklist that simplifies and solves your audience's problems<br />
60
- download driven: how to use Facebook ads to drive targeted traffic and leads<br />
61
- download driven: how to create a webinar replay that generates more downloads and sales<br />
62
- download driven: how to use Pinterest pins to drive traffic and downloads<br />
63
- download driven: how to create an infographic that gets shared and downloaded<br />
64
- download driven: how to use Instagram stories to showcase your lead magnets and drive downloads<br />
65
- download driven: how to create a swipe file that saves your audience time and money<br />
66
- download driven: how to use LinkedIn articles to drive traffic and downloads<br />
67
- download driven: how to create a template that makes your audience's life easier<br />
68
- download driven: how to use YouTube videos to drive traffic and downloads<br />
69
- download driven: how to create a cheat sheet that gives your audience quick wins<br />
70
- download driven: how to use Twitter threads to drive traffic and downloads<br />
71
- download driven: how to create a toolkit that provides your audience with valuable resources<br />
72
- download driven: how to use Reddit posts to drive traffic and downloads</p>
73
- <ul>
74
- <li>Track and analyze the key performance indicators (KPIs) of your download campaigns and strategies, such as download rate, click-through rate, bounce rate, and conversion rate.</li>
75
- <li>Identify the best practices and the areas of improvement for your download campaigns and strategies, such as content quality, design, format, distribution, and promotion.</li>
76
- <li>Test and optimize your download campaigns and strategies based on data-driven insights, such as A/B testing, multivariate testing, and user feedback.</li>
77
- </ul>
78
- <h2>How to implement download driven marketing in your business?</h2>
79
- <h3>To implement download driven marketing, you need to:</h3>
80
- <h4>- Identify your download goals and metrics.</h4>
81
- <p>The first step to implement download driven marketing is to identify your download goals and metrics. You need to define what you want to achieve with your downloads and how you will measure your success. For example, your download goals could be:</p>
82
- <ul>
83
- <li>To generate more leads for your business.</li>
84
- <li>To increase brand awareness and authority in your industry.</li>
85
- <li>To educate and inform your audience about your products or services.</li> <li>To nurture and convert your leads into customers.</li>
86
- </ul>
87
- <p>Your download metrics could be (a short calculation sketch follows this list):</p>
88
- <ul>
89
- <li>The number of downloads per content type, topic, or channel.</li>
90
- <li>The percentage of downloads that result in leads, subscribers, or customers.</li>
91
- <li>The cost per download, lead, subscriber, or customer.</li>
92
- <li>The revenue per download, lead, subscriber, or customer.</li>
93
- </ul>
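To make the metrics above concrete, here is a minimal calculation sketch. The event fields, spend, and revenue figures are illustrative assumptions, not numbers from the article.

```python
# Hedged sketch: computing the download metrics listed above from raw events.
# All field names and figures are hypothetical examples.
downloads = [
    {"content_type": "ebook", "became_lead": True},
    {"content_type": "ebook", "became_lead": False},
    {"content_type": "podcast", "became_lead": True},
]
ad_spend = 120.0   # assumed campaign cost
revenue = 300.0    # assumed revenue attributed to these downloads

total = len(downloads)
leads = sum(d["became_lead"] for d in downloads)

lead_rate = leads / total               # share of downloads that become leads
cost_per_download = ad_spend / total    # cost per download
revenue_per_download = revenue / total  # revenue per download

print(lead_rate, cost_per_download, revenue_per_download)
```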
94
- <h4>- Choose the right tools and platforms to collect, store, and analyze your download data.</h4>
95
- <p>The second step to implement download driven marketing is to choose the right tools and platforms to collect, store, and analyze your download data. You need to have a system that allows you to:</p>
96
- <ul>
97
- <li>Capture the data from downloads, such as the user's name, email, location, device, browser, etc.</li>
98
- <li>Store the data from downloads in a secure and accessible database or cloud service.</li>
99
- <li>Analyze the data from downloads using tools such as Google Analytics, Microsoft Power BI, Tableau, etc.</li>
100
- </ul>
101
- <h4>- Create relevant and valuable content and offers that attract and engage your audience.</h4>
102
- <p>The third step to implement download driven marketing is to create relevant and valuable content and offers that attract and engage your audience. You need to produce content and offers that:</p>
103
- <ul>
104
- <li>Solve a problem or answer a question that your audience has.</li>
105
- <li>Provide useful information or insights that your audience can benefit from.</li> <li>Match the tone and style of your brand and your audience.</li>
106
- <li>Include a clear and compelling call to action that encourages your audience to download your content or offer.</li>
107
- </ul>
108
- <h4>- Test and optimize your download campaigns and strategies based on data-driven insights.</h4>
109
- <p>The fourth step to implement download driven marketing is to test and optimize your download campaigns and strategies based on data-driven insights. You need to monitor and evaluate your download performance and use the data to:</p>
110
- <ul>
111
- <li>Identify the best practices and the areas of improvement for your download campaigns and strategies.</li>
112
- <li>Experiment with different variables and factors that affect your download results, such as content type, topic, format, design, distribution, promotion, etc.</li>
113
- <li>Implement the changes and improvements that lead to better download outcomes and higher marketing ROI.</li>
114
- </ul>
115
- <h2>Examples of successful download driven marketing campaigns</h2>
116
- <h3>Here are some examples of how brands have used download driven marketing to achieve their marketing goals:</h3>
117
- <h4>- Netflix used download data to create personalized recommendations and increase customer retention.</h4>
118
- <p>Netflix is one of the most popular streaming services in the world, with over 200 million subscribers. One of the reasons for its success is its ability to use download data to create personalized recommendations for its users. Netflix analyzes the data from downloads, such as the genres, titles, ratings, and viewing habits of its users, to provide them with tailored suggestions and recommendations based on their preferences and interests. This helps Netflix to increase customer satisfaction, loyalty, and retention.</p>
119
- <h4>- HubSpot used download data to generate leads and nurture them through email marketing.</h4>
120
- <p>HubSpot is a leading software company that provides tools and solutions for inbound marketing, sales, and customer service. One of the ways HubSpot generates leads and nurtures them through email marketing is by using download data. HubSpot offers various types of content and offers for download, such as ebooks, reports, webinars, templates, etc. HubSpot collects the data from downloads, such as the user's name, email, company, industry, etc., to segment them into different groups based on their download behavior and interests. HubSpot then sends them personalized emails with more content and offers related to their previous downloads. This helps HubSpot to build trust and rapport with its leads and move them along the sales funnel.</p>
121
- <h4>- Spotify used download data to create customized playlists and enhance user experience.</h4> <p>Spotify is a popular music streaming service that has over 300 million users. One of the features that makes Spotify stand out is its ability to use download data to create customized playlists and enhance user experience. Spotify analyzes the data from downloads, such as the songs, artists, genres, and moods of its users, to create personalized playlists and recommendations based on their preferences and tastes. Spotify also allows its users to download songs and playlists for offline listening, which helps them save data and enjoy music anytime and anywhere.</p>
122
- <h2>Conclusion</h2>
123
- <h3>Download driven marketing is a powerful way to use data to improve your marketing ROI. By using download data, you can:</h3>
124
- <h4>- Know your audience better and tailor your content and offers to their needs and interests.</h4>
125
- <p>Download data can help you understand your audience's needs, preferences, behaviors, and expectations. This can help you create content and offers that solve their problems, answer their questions, and provide them with value.</p>
126
- <h4>- Segment your audience based on their download behavior and deliver personalized messages and experiences.</h4>
127
- <p>Download data can help you segment your audience into different groups based on their download behavior and interests. This can help you deliver personalized messages and experiences that match their download preferences and profile.</p>
128
- <h4>- Track and measure the impact of your download campaigns and strategies and optimize them accordingly.</h4>
129
- <p>Download data can help you track and measure the impact of your download campaigns and strategies on your marketing goals and metrics. This can help you identify the best practices and the areas of improvement for your download campaigns and strategies and optimize them accordingly.</p>
130
- <p>If you want to learn more about how to use download driven marketing to boost your marketing ROI, download our free ebook: "The Ultimate Guide to Download Driven Marketing".</p>
131
- <h2>FAQs</h2>
132
- <h3>What is download driven marketing?</h3>
133
- <p>Download driven marketing is the use of data from downloads to optimize marketing campaigns and strategies.</p>
134
- <h3>What are the benefits of download driven marketing?</h3>
135
- <p>Download driven marketing can help you understand your audience better, segment your audience based on their download behavior, personalize your content and offers based on their download history, and measure the effectiveness of your marketing efforts.</p>
136
- <h3>What are some examples of download driven marketing campaigns?</h3> <p>Some examples of download driven marketing campaigns are:</p>
137
- <ul>
138
- <li>Netflix used download data to create personalized recommendations and increase customer retention.</li>
139
- <li>HubSpot used download data to generate leads and nurture them through email marketing.</li>
140
- <li>Spotify used download data to create customized playlists and enhance user experience.</li>
141
- </ul>
142
- <h3>What are the best tools and platforms for download driven marketing?</h3>
143
- <p>There are many tools and platforms that can help you with download driven marketing, such as:</p>
144
- <ul>
145
- <li>Google Analytics: A web analytics tool that can help you track and analyze your download data and performance.</li>
146
- <li>Microsoft Power BI: A business intelligence tool that can help you visualize and report your download data and insights.</li>
147
- <li>Tableau: A data visualization tool that can help you create interactive dashboards and charts based on your download data.</li>
148
- <li>Mailchimp: An email marketing tool that can help you segment your audience based on their download behavior and send them personalized emails with more content and offers.</li>
149
- <li>WordPress: A content management system that can help you create and manage your content and offers for download, such as ebooks, reports, webinars, etc.</li>
150
- </ul>
151
- <h3>How to create content and offers that attract and engage your audience?</h3> <p>To create content and offers that attract and engage your audience, you need to:</p>
152
- <ul>
153
- <li>Research your audience and understand their pain points, challenges, goals, and interests.</li>
154
- <li>Create content and offers that solve their problems, answer their questions, and provide them with value.</li>
155
- <li>Use catchy headlines, compelling introductions, and clear conclusions to capture their attention and interest.</li>
156
- <li>Use simple, conversational, and engaging language to communicate your message and connect with your audience.</li>
157
- <li>Use visuals, such as images, videos, infographics, etc., to enhance your content and offer and make them more appealing and memorable.</li>
158
- <li>Include a clear and compelling call to action that encourages your audience to download your content or offer.</li>
159
- </ul>
160
- <p>I hope this article has helped you understand what download driven marketing is and how to use it to boost your marketing ROI. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p> 197e85843d<br />
161
- <br />
162
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_karras_ve.py DELETED
@@ -1,232 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- from dataclasses import dataclass
17
- from typing import Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import paddle
21
-
22
- from ..configuration_utils import ConfigMixin, register_to_config
23
- from ..utils import BaseOutput
24
- from .scheduling_utils import SchedulerMixin
25
-
26
-
27
- @dataclass
28
- class KarrasVeOutput(BaseOutput):
29
- """
30
- Output class for the scheduler's step function output.
31
-
32
- Args:
33
- prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
34
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
35
- denoising loop.
36
- derivative (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
37
- Derivative of predicted original image sample (x_0).
38
- pred_original_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
39
- The predicted denoised sample (x_{0}) based on the model output from the current timestep.
40
- `pred_original_sample` can be used to preview progress or for guidance.
41
- """
42
-
43
- prev_sample: paddle.Tensor
44
- derivative: paddle.Tensor
45
- pred_original_sample: Optional[paddle.Tensor] = None
46
-
47
-
48
- class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
49
- """
50
- Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and
51
- the VE column of Table 1 from [1] for reference.
52
-
53
- [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
54
- https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic
55
- differential equations." https://arxiv.org/abs/2011.13456
56
-
57
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
58
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
59
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
60
- [`~SchedulerMixin.from_pretrained`] functions.
61
-
62
- For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of
63
- Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the
64
- optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper.
65
-
66
- Args:
67
- sigma_min (`float`): minimum noise magnitude
68
- sigma_max (`float`): maximum noise magnitude
69
- s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling.
70
- A reasonable range is [1.000, 1.011].
71
- s_churn (`float`): the parameter controlling the overall amount of stochasticity.
72
- A reasonable range is [0, 100].
73
- s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity).
74
- A reasonable range is [0, 10].
75
- s_max (`float`): the end value of the sigma range where we add noise.
76
- A reasonable range is [0.2, 80].
77
-
78
- """
79
-
80
- order = 2
81
-
82
- @register_to_config
83
- def __init__(
84
- self,
85
- sigma_min: float = 0.02,
86
- sigma_max: float = 100,
87
- s_noise: float = 1.007,
88
- s_churn: float = 80,
89
- s_min: float = 0.05,
90
- s_max: float = 50,
91
- ):
92
- # standard deviation of the initial noise distribution
93
- self.init_noise_sigma = sigma_max
94
-
95
- # setable values
96
- self.num_inference_steps: int = None
97
- self.timesteps: paddle.Tensor = None
98
- self.schedule: paddle.Tensor = None # sigma(t_i)
99
-
100
- def scale_model_input(self, sample: paddle.Tensor, timestep: Optional[int] = None) -> paddle.Tensor:
101
- """
102
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
103
- current timestep.
104
-
105
- Args:
106
- sample (`paddle.Tensor`): input sample
107
- timestep (`int`, optional): current timestep
108
-
109
- Returns:
110
- `paddle.Tensor`: scaled input sample
111
- """
112
- return sample
113
-
114
- def set_timesteps(self, num_inference_steps: int):
115
- """
116
- Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
117
-
118
- Args:
119
- num_inference_steps (`int`):
120
- the number of diffusion steps used when generating samples with a pre-trained model.
121
-
122
- """
123
- self.num_inference_steps = num_inference_steps
124
- timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
125
- self.timesteps = paddle.to_tensor(timesteps)
126
- schedule = [
127
- (
128
- self.config.sigma_max**2
129
- * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
130
- )
131
- for i in self.timesteps
132
- ]
133
- self.schedule = paddle.to_tensor(schedule, dtype="float32")
134
-
135
- def add_noise_to_input(
136
- self, sample: paddle.Tensor, sigma: float, generator: Optional[paddle.Generator] = None
137
- ) -> Tuple[paddle.Tensor, float]:
138
- """
139
- Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a
140
- higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.
141
-
142
- TODO Args:
143
- """
144
- if self.config.s_min <= sigma <= self.config.s_max:
145
- gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
146
- else:
147
- gamma = 0
148
-
149
- # sample eps ~ N(0, S_noise^2 * I)
150
- eps = self.config.s_noise * paddle.randn(sample.shape, generator=generator)
151
- sigma_hat = sigma + gamma * sigma
152
- sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
153
-
154
- return sample_hat, sigma_hat
155
-
156
- def step(
157
- self,
158
- model_output: paddle.Tensor,
159
- sigma_hat: float,
160
- sigma_prev: float,
161
- sample_hat: paddle.Tensor,
162
- return_dict: bool = True,
163
- ) -> Union[KarrasVeOutput, Tuple]:
164
- """
165
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
166
- process from the learned model outputs (most often the predicted noise).
167
-
168
- Args:
169
- model_output (`paddle.Tensor`): direct output from learned diffusion model.
170
- sigma_hat (`float`): TODO
171
- sigma_prev (`float`): TODO
172
- sample_hat (`paddle.Tensor`): TODO
173
- return_dict (`bool`): option for returning tuple rather than KarrasVeOutput class
174
-
175
- KarrasVeOutput: updated sample in the diffusion chain and derivative (TODO double check).
176
- Returns:
177
- [`~schedulers.scheduling_karras_ve.KarrasVeOutput`] or `tuple`:
178
- [`~schedulers.scheduling_karras_ve.KarrasVeOutput`] if `return_dict` is True, otherwise a `tuple`. When
179
- returning a tuple, the first element is the sample tensor.
180
-
181
- """
182
-
183
- pred_original_sample = sample_hat + sigma_hat * model_output
184
- derivative = (sample_hat - pred_original_sample) / sigma_hat
185
- sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
186
-
187
- if not return_dict:
188
- return (sample_prev, derivative)
189
-
190
- return KarrasVeOutput(
191
- prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
192
- )
193
-
194
- def step_correct(
195
- self,
196
- model_output: paddle.Tensor,
197
- sigma_hat: float,
198
- sigma_prev: float,
199
- sample_hat: paddle.Tensor,
200
- sample_prev: paddle.Tensor,
201
- derivative: paddle.Tensor,
202
- return_dict: bool = True,
203
- ) -> Union[KarrasVeOutput, Tuple]:
204
- """
205
- Correct the predicted sample based on the output model_output of the network. TODO complete description
206
-
207
- Args:
208
- model_output (`paddle.Tensor`): direct output from learned diffusion model.
209
- sigma_hat (`float`): TODO
210
- sigma_prev (`float`): TODO
211
- sample_hat (`paddle.Tensor`): TODO
212
- sample_prev (`paddle.Tensor`): TODO
213
- derivative (`paddle.Tensor`): TODO
214
- return_dict (`bool`): option for returning tuple rather than KarrasVeOutput class
215
-
216
- Returns:
217
- prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO
218
-
219
- """
220
- pred_original_sample = sample_prev + sigma_prev * model_output
221
- derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
222
- sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
223
-
224
- if not return_dict:
225
- return (sample_prev, derivative)
226
-
227
- return KarrasVeOutput(
228
- prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
229
- )
230
-
231
- def add_noise(self, original_samples, noise, timesteps):
232
- raise NotImplementedError()
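For orientation, the deleted scheduler above is meant to be driven in a churn-then-Euler-step loop. The sketch below shows that flow under stated assumptions: `model(x, sigma)` is a placeholder callable, and the exact model-input scaling used by the real pipeline is omitted.

```python
import paddle

# Hedged sketch of a sampling loop for KarrasVeScheduler (not the removed pipeline code).
scheduler = KarrasVeScheduler()
scheduler.set_timesteps(num_inference_steps=50)

# Start from pure noise at the largest sigma.
sample = paddle.randn([1, 3, 64, 64]) * scheduler.init_noise_sigma

for i in range(len(scheduler.schedule)):
    sigma = float(scheduler.schedule[i])
    sigma_prev = float(scheduler.schedule[i + 1]) if i + 1 < len(scheduler.schedule) else 0.0

    # Stochastic "churn": raise the noise level from sigma to sigma_hat.
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)

    # One Euler step from sigma_hat down to sigma_prev (step_correct would add a 2nd-order fix).
    model_output = model(sample_hat, sigma_hat)  # assumed denoiser call
    out = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
    sample = out.prev_sample
```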
 
spaces/2ndelement/voicevox/Dockerfile DELETED
@@ -1,296 +0,0 @@
1
- # syntax=docker/dockerfile:1.4
2
-
3
- ARG BASE_IMAGE=ubuntu:20.04
4
- ARG BASE_RUNTIME_IMAGE=$BASE_IMAGE
5
-
6
- # Download VOICEVOX Core shared object
7
- FROM ${BASE_IMAGE} AS download-core-env
8
- ARG DEBIAN_FRONTEND=noninteractive
9
-
10
- WORKDIR /work
11
-
12
- RUN <<EOF
13
- set -eux
14
-
15
- apt-get update
16
- apt-get install -y \
17
- wget \
18
- unzip
19
- apt-get clean
20
- rm -rf /var/lib/apt/lists/*
21
- EOF
22
-
23
- # assert VOICEVOX_CORE_VERSION >= 0.11.0 (ONNX)
24
- ARG TARGETPLATFORM
25
- ARG USE_GPU=false
26
- ARG VOICEVOX_CORE_VERSION=0.14.3
27
-
28
- RUN <<EOF
29
- set -eux
30
-
31
- # Processing Switch
32
- if [ "${USE_GPU}" = "true" ]; then
33
- VOICEVOX_CORE_ASSET_ASSET_PROCESSING="gpu"
34
- else
35
- VOICEVOX_CORE_ASSET_ASSET_PROCESSING="cpu"
36
- fi
37
-
38
- # TARGETARCH Switch
39
- if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then
40
- VOICEVOX_CORE_ASSET_TARGETARCH="x64"
41
- else
42
- VOICEVOX_CORE_ASSET_TARGETARCH="arm64"
43
- fi
44
-
45
- VOICEVOX_CORE_ASSET_PREFIX="voicevox_core-linux-${VOICEVOX_CORE_ASSET_TARGETARCH}-${VOICEVOX_CORE_ASSET_ASSET_PROCESSING}"
46
-
47
- # Download Core
48
- VOICEVOX_CORE_ASSET_NAME=${VOICEVOX_CORE_ASSET_PREFIX}-${VOICEVOX_CORE_VERSION}
49
- wget -nv --show-progress -c -O "./${VOICEVOX_CORE_ASSET_NAME}.zip" "https://github.com/VOICEVOX/voicevox_core/releases/download/${VOICEVOX_CORE_VERSION}/${VOICEVOX_CORE_ASSET_NAME}.zip"
50
- unzip "./${VOICEVOX_CORE_ASSET_NAME}.zip"
51
- mkdir -p core
52
- mv "${VOICEVOX_CORE_ASSET_NAME}"/* core
53
- rm -rf $VOICEVOX_CORE_ASSET_NAME
54
- rm "./${VOICEVOX_CORE_ASSET_NAME}.zip"
55
-
56
- # Move Core to /opt/voicevox_core/
57
- mkdir /opt/voicevox_core
58
- mv ./core/* /opt/voicevox_core/
59
-
60
- # Add /opt/voicevox_core to dynamic library search path
61
- echo "/opt/voicevox_core" > /etc/ld.so.conf.d/voicevox_core.conf
62
-
63
- # Update dynamic library search cache
64
- ldconfig
65
- EOF
66
-
67
-
68
- # Download ONNX Runtime
69
- FROM ${BASE_IMAGE} AS download-onnxruntime-env
70
- ARG DEBIAN_FRONTEND=noninteractive
71
-
72
- WORKDIR /work
73
-
74
- RUN <<EOF
75
- set -eux
76
-
77
- apt-get update
78
- apt-get install -y \
79
- wget \
80
- tar
81
- apt-get clean
82
- rm -rf /var/lib/apt/lists/*
83
- EOF
84
-
85
- ARG TARGETPLATFORM
86
- ARG USE_GPU=false
87
- ARG ONNXRUNTIME_VERSION=1.13.1
88
- RUN <<EOF
89
- set -eux
90
-
91
- # Processing Switch
92
- if [ "${USE_GPU}" = "true" ]; then
93
- ONNXRUNTIME_PROCESSING="gpu-"
94
- else
95
- ONNXRUNTIME_PROCESSING=""
96
- fi
97
-
98
- # TARGETARCH Switch
99
- if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then
100
- ONNXRUNTIME_TARGETARCH=x64
101
- else
102
- ONNXRUNTIME_TARGETARCH=aarch64
103
- fi
104
-
105
- ONNXRUNTIME_URL="https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-${ONNXRUNTIME_TARGETARCH}-${ONNXRUNTIME_PROCESSING}${ONNXRUNTIME_VERSION}.tgz"
106
-
107
- # Download ONNX Runtime
108
- wget -nv --show-progress -c -O "./onnxruntime.tgz" "${ONNXRUNTIME_URL}"
109
-
110
- # Extract ONNX Runtime to /opt/onnxruntime
111
- mkdir -p /opt/onnxruntime
112
- tar xf "./onnxruntime.tgz" -C "/opt/onnxruntime" --strip-components 1
113
- rm ./onnxruntime.tgz
114
-
115
- # Add /opt/onnxruntime/lib to dynamic library search path
116
- echo "/opt/onnxruntime/lib" > /etc/ld.so.conf.d/onnxruntime.conf
117
-
118
- # Update dynamic library search cache
119
- ldconfig
120
- EOF
121
-
122
-
123
- # Compile Python (version locked)
124
- FROM ${BASE_IMAGE} AS compile-python-env
125
-
126
- ARG DEBIAN_FRONTEND=noninteractive
127
-
128
- RUN <<EOF
129
- set -eux
130
- apt-get update
131
- apt-get install -y \
132
- build-essential \
133
- libssl-dev \
134
- zlib1g-dev \
135
- libbz2-dev \
136
- libreadline-dev \
137
- libsqlite3-dev \
138
- curl \
139
- libncursesw5-dev \
140
- xz-utils \
141
- tk-dev \
142
- libxml2-dev \
143
- libxmlsec1-dev \
144
- libffi-dev \
145
- liblzma-dev \
146
- git
147
- apt-get clean
148
- rm -rf /var/lib/apt/lists/*
149
- EOF
150
-
151
- ARG PYTHON_VERSION=3.11.3
152
- ARG PYENV_VERSION=v2.3.17
153
- ARG PYENV_ROOT=/tmp/.pyenv
154
- ARG PYBUILD_ROOT=/tmp/python-build
155
- RUN <<EOF
156
- set -eux
157
-
158
- git clone -b "${PYENV_VERSION}" https://github.com/pyenv/pyenv.git "$PYENV_ROOT"
159
- PREFIX="$PYBUILD_ROOT" "$PYENV_ROOT"/plugins/python-build/install.sh
160
- "$PYBUILD_ROOT/bin/python-build" -v "$PYTHON_VERSION" /opt/python
161
-
162
- rm -rf "$PYBUILD_ROOT" "$PYENV_ROOT"
163
- EOF
164
-
165
- # FIXME: add /opt/python to PATH
166
- # not working: /etc/profile read only on login shell
167
- # not working: /etc/environment is the same
168
- # not suitable: `ENV` is ignored by docker-compose
169
- # RUN <<EOF
170
- # set -eux
171
- # echo "export PATH=/opt/python/bin:\$PATH" > /etc/profile.d/python-path.sh
172
- # echo "export LD_LIBRARY_PATH=/opt/python/lib:\$LD_LIBRARY_PATH" >> /etc/profile.d/python-path.sh
173
- # echo "export C_INCLUDE_PATH=/opt/python/include:\$C_INCLUDE_PATH" >> /etc/profile.d/python-path.sh
174
- #
175
- # rm -f /etc/ld.so.cache
176
- # ldconfig
177
- # EOF
178
-
179
-
180
- # Runtime
181
- FROM ${BASE_RUNTIME_IMAGE} AS runtime-env
182
- ARG DEBIAN_FRONTEND=noninteractive
183
-
184
- WORKDIR /opt/voicevox_engine
185
-
186
- # libsndfile1: soundfile shared object
187
- # ca-certificates: pyopenjtalk dictionary download
188
- # build-essential: pyopenjtalk local build
189
- RUN <<EOF
190
- set -eux
191
-
192
- apt-get update
193
- apt-get install -y \
194
- git \
195
- wget \
196
- cmake \
197
- libsndfile1 \
198
- ca-certificates \
199
- build-essential \
200
- gosu
201
- apt-get clean
202
- rm -rf /var/lib/apt/lists/*
203
-
204
- # Create a general user
205
- useradd --create-home user
206
- EOF
207
-
208
- # Copy python env
209
- COPY --from=compile-python-env /opt/python /opt/python
210
-
211
- # Install Python dependencies
212
- ADD ./requirements.txt /tmp/
213
- RUN <<EOF
214
- # Install requirements
215
- gosu user /opt/python/bin/pip3 install -r /tmp/requirements.txt
216
- EOF
217
-
218
- # Copy VOICEVOX Core release
219
- # COPY --from=download-core-env /etc/ld.so.conf.d/voicevox_core.conf /etc/ld.so.conf.d/voicevox_core.conf
220
- COPY --from=download-core-env /opt/voicevox_core /opt/voicevox_core
221
-
222
- # Copy ONNX Runtime
223
- # COPY --from=download-onnxruntime-env /etc/ld.so.conf.d/onnxruntime.conf /etc/ld.so.conf.d/onnxruntime.conf
224
- COPY --from=download-onnxruntime-env /opt/onnxruntime /opt/onnxruntime
225
-
226
- # Add local files
227
- ADD ./voicevox_engine /opt/voicevox_engine/voicevox_engine
228
- ADD ./docs /opt/voicevox_engine/docs
229
- ADD ./run.py ./generate_licenses.py ./presets.yaml ./default.csv ./default_setting.yml ./engine_manifest.json /opt/voicevox_engine/
230
- ADD ./speaker_info /opt/voicevox_engine/speaker_info
231
- ADD ./ui_template /opt/voicevox_engine/ui_template
232
- ADD ./engine_manifest_assets /opt/voicevox_engine/engine_manifest_assets
233
-
234
- # Replace version
235
- ARG VOICEVOX_ENGINE_VERSION=latest
236
- RUN sed -i "s/__version__ = \"latest\"/__version__ = \"${VOICEVOX_ENGINE_VERSION}\"/" /opt/voicevox_engine/voicevox_engine/__init__.py
237
- RUN sed -i "s/\"version\": \"999\\.999\\.999\"/\"version\": \"${VOICEVOX_ENGINE_VERSION}\"/" /opt/voicevox_engine/engine_manifest.json
238
-
239
- # Generate licenses.json
240
- ADD ./requirements-license.txt /tmp/
241
- RUN <<EOF
242
- set -eux
243
-
244
- cd /opt/voicevox_engine
245
-
246
- # Define temporary env vars
247
- # /home/user/.local/bin is required to use the commands installed by pip
248
- export PATH="/home/user/.local/bin:${PATH:-}"
249
-
250
- gosu user /opt/python/bin/pip3 install -r /tmp/requirements-license.txt
251
- gosu user /opt/python/bin/python3 generate_licenses.py > /opt/voicevox_engine/engine_manifest_assets/dependency_licenses.json
252
- cp /opt/voicevox_engine/engine_manifest_assets/dependency_licenses.json /opt/voicevox_engine/licenses.json
253
- EOF
254
-
255
- # Keep this layer separated to use layer cache on download failed in local build
256
- RUN <<EOF
257
- set -eux
258
-
259
- # Download openjtalk dictionary
260
- # try 5 times, sleep 5 seconds before retry
261
- for i in $(seq 5); do
262
- EXIT_CODE=0
263
- gosu user /opt/python/bin/python3 -c "import pyopenjtalk; pyopenjtalk._lazy_init()" || EXIT_CODE=$?
264
- if [ "$EXIT_CODE" = "0" ]; then
265
- break
266
- fi
267
- sleep 5
268
- done
269
-
270
- if [ "$EXIT_CODE" != "0" ]; then
271
- exit "$EXIT_CODE"
272
- fi
273
- EOF
274
-
275
- # Download Resource
276
- ARG VOICEVOX_RESOURCE_VERSION=0.14.3-preview.1
277
- RUN <<EOF
278
- set -eux
279
-
280
- # README
281
- wget -nv --show-progress -c -O "/opt/voicevox_engine/README.md" "https://raw.githubusercontent.com/VOICEVOX/voicevox_resource/${VOICEVOX_RESOURCE_VERSION}/engine/README.md"
282
- EOF
283
-
284
- # Create container start shell
285
- COPY --chmod=775 <<EOF /entrypoint.sh
286
- #!/bin/bash
287
- set -eux
288
-
289
- # Display README for engine
290
- cat /opt/voicevox_engine/README.md > /dev/stderr
291
-
292
- exec "\$@"
293
- EOF
294
- USER user
295
- ENTRYPOINT [ "/entrypoint.sh" ]
296
- CMD [ "/opt/python/bin/python3", "./run.py", "--voicelib_dir", "/opt/voicevox_core/", "--runtime_dir", "/opt/onnxruntime/lib", "--host", "0.0.0.0","--port","7860" ]
 
spaces/7hao/bingo/src/components/button-scroll-to-bottom.tsx DELETED
@@ -1,34 +0,0 @@
1
- 'use client'
2
-
3
- import * as React from 'react'
4
-
5
- import { cn } from '@/lib/utils'
6
- import { useAtBottom } from '@/lib/hooks/use-at-bottom'
7
- import { Button, type ButtonProps } from '@/components/ui/button'
8
- import { IconArrowDown } from '@/components/ui/icons'
9
-
10
- export function ButtonScrollToBottom({ className, ...props }: ButtonProps) {
11
- const isAtBottom = useAtBottom()
12
-
13
- return (
14
- <Button
15
- variant="outline"
16
- size="icon"
17
- className={cn(
18
- 'fixed right-4 bottom-24 z-50 bg-background transition-opacity duration-300 sm:right-20',
19
- isAtBottom ? 'opacity-0' : 'opacity-100',
20
- className
21
- )}
22
- onClick={() =>
23
- window.scrollTo({
24
- top: document.body.offsetHeight,
25
- behavior: 'smooth'
26
- })
27
- }
28
- {...props}
29
- >
30
- <IconArrowDown />
31
- <span className="sr-only">Scroll to bottom</span>
32
- </Button>
33
- )
34
- }
 
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Getting Started 6bc871dcdd4a4554b5b22c0c40740841/Example sub-page 48f64d6186ec4428b2e4180475245a9c.md DELETED
@@ -1,5 +0,0 @@
1
- # Example sub-page
2
-
3
- Last edited time: March 31, 2023 1:49 PM
4
- Owner: Anonymous
5
- Tags: Testing
 
spaces/AI-Naga/Parking_Space_Counter/app.py DELETED
@@ -1,91 +0,0 @@
1
- import gradio as gr
2
- import cv2
3
- import requests
4
- import os
5
- import torch
6
- import numpy as np
7
-
8
- from ultralytics import YOLO
9
-
10
- model = torch.hub.load('ultralytics/yolov5', 'yolov5l', pretrained=True)
11
- path = [['image_0.jpg'], ['image_1.jpg']]
12
- video_path = [['video_test.mp4']]
13
- # area = [(25,430), (10, 515), (407,485), (750,425), (690,370)]
14
- area = [(48,430), (18, 515), (407,485), (750,425), (690,370)]
15
- total_space = 12
16
- count=0
17
-
18
- def show_preds_video():
19
- cap = cv2.VideoCapture('Video_1.mp4')
20
- count=0
21
- while(cap.isOpened()):
22
- ret, frame = cap.read()
23
- if not ret:
24
- break
25
- count += 1
26
- if count % 2 != 0:
27
- continue
28
-
29
- frame=cv2.resize(frame,(1020,600))
30
- frame_copy = frame.copy()
31
- Vehicle_cnt = 0
32
-
33
- results=model(frame)
34
- for index, row in results.pandas().xyxy[0].iterrows():
35
- x1 = int(row['xmin'])
36
- y1 = int(row['ymin'])
37
- x2 = int(row['xmax'])
38
- y2 = int(row['ymax'])
39
- d=(row['name'])
40
-
41
- cx=int(x1+x2)//2
42
- cy=int(y1+y2)//2
43
-
44
- if d in ('car', 'truck'):  # note: ('car' or 'truck') in d only ever tested for 'car'
45
- results = cv2.pointPolygonTest(np.array(area, np.int32), ((cx,cy)), False)
46
- if results >0:
47
- cv2.rectangle(frame_copy,(x1,y1),(x2,y2),(0,0,255),2)
48
- cv2.putText(frame_copy,str(d),(x1,y1),cv2.FONT_HERSHEY_PLAIN,2,(255,255,0),2)
49
- Vehicle_cnt += 1
50
-
51
- # elif ('truck') in d:
52
- # results = cv2.pointPolygonTest(np.array(area, np.int32), ((cx,cy)), False)
53
- # if results >0:
54
- # cv2.rectangle(frame_copy,(x1,y1),(x2,y2),(0,0,255),2)
55
- # cv2.putText(frame_copy,str(d),(x1,y1),cv2.FONT_HERSHEY_PLAIN,2,(255,0,0),2)
56
- # truck_cnt += 1
57
-
58
- free_space = total_space - Vehicle_cnt
59
- cv2.putText(frame_copy, ("Free space: " + str(free_space)), (50,50) ,cv2.FONT_HERSHEY_PLAIN,2,(0,255,0),2)
60
- # cv2.putText(frame_copy, str(str(" car: ")+ str(car_cnt) + str(" truck: ") +str(truck_cnt)), (50,75) ,cv2.FONT_HERSHEY_PLAIN,2,(0,255,0),2)
61
- cv2.putText(frame_copy, str(str("vehicles: ")+ str(Vehicle_cnt) ), (50,85) ,cv2.FONT_HERSHEY_PLAIN,2,(0,255,0),2)
62
-
63
- cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0,255,0), 2)
64
-
65
- # fps = cap.get(cv2.CAP_PROP_FPS)
66
- # cv2.putText(frame_copy,str("fps: ") + str(np.round(fps,0)),(50,100),cv2.FONT_HERSHEY_PLAIN,2,(0,255,0),2)
67
-
68
- yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
69
-
70
-
71
- inputs_video = [
72
- #gr.components.Video(type="filepath", label="Input Video"),
73
-
74
- ]
75
- outputs_video = [
76
- gr.components.Image(type="numpy", label="Output Image"),
77
- ]
78
- interface_video = gr.Interface(
79
- fn=show_preds_video,
80
- inputs=inputs_video,
81
- outputs=outputs_video,
82
- title="Parking space counter",
83
- description="Click generate !!!'",
84
- # examples=video_path,
85
- cache_examples=False,
86
- )
87
-
88
- gr.TabbedInterface(
89
- [interface_video],
90
- tab_names=['Video inference']
91
- ).queue().launch()
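The core of the counter above is a centre-in-polygon test: a detection only counts toward occupancy if its box centre lies inside the hand-drawn parking area. Isolated and simplified, with made-up detection boxes, the logic looks roughly like this:

```python
import numpy as np
import cv2

# Hedged sketch of the occupancy check used above; the detection boxes are invented.
area = np.array([(48, 430), (18, 515), (407, 485), (750, 425), (690, 370)], np.int32)
total_space = 12
detections = [(100, 440, 180, 500), (600, 380, 680, 430)]  # hypothetical (x1, y1, x2, y2) boxes

occupied = 0
for x1, y1, x2, y2 in detections:
    cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
    # pointPolygonTest returns a positive value when the point is inside the contour.
    if cv2.pointPolygonTest(area, (float(cx), float(cy)), False) > 0:
        occupied += 1

print("Free space:", total_space - occupied)
```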
 
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/node.py DELETED
@@ -1,263 +0,0 @@
1
- """Nodes, conforming to the glTF 2.0 standards as specified in
2
- https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-node
3
-
4
- Author: Matthew Matl
5
- """
6
- import numpy as np
7
-
8
- import trimesh.transformations as transformations
9
-
10
- from .camera import Camera
11
- from .mesh import Mesh
12
- from .light import Light
13
-
14
-
15
- class Node(object):
16
- """A node in the node hierarchy.
17
-
18
- Parameters
19
- ----------
20
- name : str, optional
21
- The user-defined name of this object.
22
- camera : :class:`Camera`, optional
23
- The camera in this node.
24
- children : list of :class:`Node`
25
- The children of this node.
26
- skin : int, optional
27
- The index of the skin referenced by this node.
28
- matrix : (4,4) float, optional
29
- A floating-point 4x4 transformation matrix.
30
- mesh : :class:`Mesh`, optional
31
- The mesh in this node.
32
- rotation : (4,) float, optional
33
- The node's unit quaternion in the order (x, y, z, w), where
34
- w is the scalar.
35
- scale : (3,) float, optional
36
- The node's non-uniform scale, given as the scaling factors along the x,
37
- y, and z axes.
38
- translation : (3,) float, optional
39
- The node's translation along the x, y, and z axes.
40
- weights : (n,) float
41
- The weights of the instantiated Morph Target. Number of elements must
42
- match number of Morph Targets of used mesh.
43
- light : :class:`Light`, optional
44
- The light in this node.
45
- """
46
-
47
- def __init__(self,
48
- name=None,
49
- camera=None,
50
- children=None,
51
- skin=None,
52
- matrix=None,
53
- mesh=None,
54
- rotation=None,
55
- scale=None,
56
- translation=None,
57
- weights=None,
58
- light=None):
59
- # Set defaults
60
- if children is None:
61
- children = []
62
-
63
- self._matrix = None
64
- self._scale = None
65
- self._rotation = None
66
- self._translation = None
67
- if matrix is None:
68
- if rotation is None:
69
- rotation = np.array([0.0, 0.0, 0.0, 1.0])
70
- if translation is None:
71
- translation = np.zeros(3)
72
- if scale is None:
73
- scale = np.ones(3)
74
- self.rotation = rotation
75
- self.translation = translation
76
- self.scale = scale
77
- else:
78
- self.matrix = matrix
79
-
80
- self.name = name
81
- self.camera = camera
82
- self.children = children
83
- self.skin = skin
84
- self.mesh = mesh
85
- self.weights = weights
86
- self.light = light
87
-
88
- @property
89
- def name(self):
90
- """str : The user-defined name of this object.
91
- """
92
- return self._name
93
-
94
- @name.setter
95
- def name(self, value):
96
- if value is not None:
97
- value = str(value)
98
- self._name = value
99
-
100
- @property
101
- def camera(self):
102
- """:class:`Camera` : The camera in this node.
103
- """
104
- return self._camera
105
-
106
- @camera.setter
107
- def camera(self, value):
108
- if value is not None and not isinstance(value, Camera):
109
- raise TypeError('Value must be a camera')
110
- self._camera = value
111
-
112
- @property
113
- def children(self):
114
- """list of :class:`Node` : The children of this node.
115
- """
116
- return self._children
117
-
118
- @children.setter
119
- def children(self, value):
120
- self._children = value
121
-
122
- @property
123
- def skin(self):
124
- """int : The skin index for this node.
125
- """
126
- return self._skin
127
-
128
- @skin.setter
129
- def skin(self, value):
130
- self._skin = value
131
-
132
- @property
133
- def mesh(self):
134
- """:class:`Mesh` : The mesh in this node.
135
- """
136
- return self._mesh
137
-
138
- @mesh.setter
139
- def mesh(self, value):
140
- if value is not None and not isinstance(value, Mesh):
141
- raise TypeError('Value must be a mesh')
142
- self._mesh = value
143
-
144
- @property
145
- def light(self):
146
- """:class:`Light` : The light in this node.
147
- """
148
- return self._light
149
-
150
- @light.setter
151
- def light(self, value):
152
- if value is not None and not isinstance(value, Light):
153
- raise TypeError('Value must be a light')
154
- self._light = value
155
-
156
- @property
157
- def rotation(self):
158
- """(4,) float : The xyzw quaternion for this node.
159
- """
160
- return self._rotation
161
-
162
- @rotation.setter
163
- def rotation(self, value):
164
- value = np.asanyarray(value)
165
- if value.shape != (4,):
166
- raise ValueError('Quaternion must be a (4,) vector')
167
- if np.abs(np.linalg.norm(value) - 1.0) > 1e-3:
168
- raise ValueError('Quaternion must have norm == 1.0')
169
- self._rotation = value
170
- self._matrix = None
171
-
172
- @property
173
- def translation(self):
174
- """(3,) float : The translation for this node.
175
- """
176
- return self._translation
177
-
178
- @translation.setter
179
- def translation(self, value):
180
- value = np.asanyarray(value)
181
- if value.shape != (3,):
182
- raise ValueError('Translation must be a (3,) vector')
183
- self._translation = value
184
- self._matrix = None
185
-
186
- @property
187
- def scale(self):
188
- """(3,) float : The scale for this node.
189
- """
190
- return self._scale
191
-
192
- @scale.setter
193
- def scale(self, value):
194
- value = np.asanyarray(value)
195
- if value.shape != (3,):
196
- raise ValueError('Scale must be a (3,) vector')
197
- self._scale = value
198
- self._matrix = None
199
-
200
- @property
201
- def matrix(self):
202
- """(4,4) float : The homogenous transform matrix for this node.
203
-
204
- Note that this matrix's elements are not settable,
205
- it's just a copy of the internal matrix. You can set the whole
206
- matrix, but not an individual element.
207
- """
208
- if self._matrix is None:
209
- self._matrix = self._m_from_tqs(
210
- self.translation, self.rotation, self.scale
211
- )
212
- return self._matrix.copy()
213
-
214
- @matrix.setter
215
- def matrix(self, value):
216
- value = np.asanyarray(value)
217
- if value.shape != (4,4):
218
- raise ValueError('Matrix must be a 4x4 numpy ndarray')
219
- if not np.allclose(value[3,:], np.array([0.0, 0.0, 0.0, 1.0])):
220
- raise ValueError('Bottom row of matrix must be [0,0,0,1]')
221
- self.rotation = Node._q_from_m(value)
222
- self.scale = Node._s_from_m(value)
223
- self.translation = Node._t_from_m(value)
224
- self._matrix = value
225
-
226
- @staticmethod
227
- def _t_from_m(m):
228
- return m[:3,3]
229
-
230
- @staticmethod
231
- def _r_from_m(m):
232
- U = m[:3,:3]
233
- norms = np.linalg.norm(U.T, axis=1)
234
- return U / norms
235
-
236
- @staticmethod
237
- def _q_from_m(m):
238
- M = np.eye(4)
239
- M[:3,:3] = Node._r_from_m(m)
240
- q_wxyz = transformations.quaternion_from_matrix(M)
241
- return np.roll(q_wxyz, -1)
242
-
243
- @staticmethod
244
- def _s_from_m(m):
245
- return np.linalg.norm(m[:3,:3].T, axis=1)
246
-
247
- @staticmethod
248
- def _r_from_q(q):
249
- q_wxyz = np.roll(q, 1)
250
- return transformations.quaternion_matrix(q_wxyz)[:3,:3]
251
-
252
- @staticmethod
253
- def _m_from_tqs(t, q, s):
254
- S = np.eye(4)
255
- S[:3,:3] = np.diag(s)
256
-
257
- R = np.eye(4)
258
- R[:3,:3] = Node._r_from_q(q)
259
-
260
- T = np.eye(4)
261
- T[:3,3] = t
262
-
263
- return T.dot(R.dot(S))
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/blocks.py DELETED
@@ -1,342 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
- from .vit import (
5
- _make_pretrained_vitb_rn50_384,
6
- _make_pretrained_vitl16_384,
7
- _make_pretrained_vitb16_384,
8
- forward_vit,
9
- )
10
-
11
- def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
12
- if backbone == "vitl16_384":
13
- pretrained = _make_pretrained_vitl16_384(
14
- use_pretrained, hooks=hooks, use_readout=use_readout
15
- )
16
- scratch = _make_scratch(
17
- [256, 512, 1024, 1024], features, groups=groups, expand=expand
18
- ) # ViT-L/16 - 85.0% Top1 (backbone)
19
- elif backbone == "vitb_rn50_384":
20
- pretrained = _make_pretrained_vitb_rn50_384(
21
- use_pretrained,
22
- hooks=hooks,
23
- use_vit_only=use_vit_only,
24
- use_readout=use_readout,
25
- )
26
- scratch = _make_scratch(
27
- [256, 512, 768, 768], features, groups=groups, expand=expand
28
- ) # ViT-H/16 - 85.0% Top1 (backbone)
29
- elif backbone == "vitb16_384":
30
- pretrained = _make_pretrained_vitb16_384(
31
- use_pretrained, hooks=hooks, use_readout=use_readout
32
- )
33
- scratch = _make_scratch(
34
- [96, 192, 384, 768], features, groups=groups, expand=expand
35
- ) # ViT-B/16 - 84.6% Top1 (backbone)
36
- elif backbone == "resnext101_wsl":
37
- pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
38
- scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3
39
- elif backbone == "efficientnet_lite3":
40
- pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
41
- scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
42
- else:
43
- print(f"Backbone '{backbone}' not implemented")
44
- assert False
45
-
46
- return pretrained, scratch
47
-
48
-
49
- def _make_scratch(in_shape, out_shape, groups=1, expand=False):
50
- scratch = nn.Module()
51
-
52
- out_shape1 = out_shape
53
- out_shape2 = out_shape
54
- out_shape3 = out_shape
55
- out_shape4 = out_shape
56
- if expand==True:
57
- out_shape1 = out_shape
58
- out_shape2 = out_shape*2
59
- out_shape3 = out_shape*4
60
- out_shape4 = out_shape*8
61
-
62
- scratch.layer1_rn = nn.Conv2d(
63
- in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
64
- )
65
- scratch.layer2_rn = nn.Conv2d(
66
- in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
67
- )
68
- scratch.layer3_rn = nn.Conv2d(
69
- in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
70
- )
71
- scratch.layer4_rn = nn.Conv2d(
72
- in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
73
- )
74
-
75
- return scratch
76
-
77
-
78
- def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
79
- efficientnet = torch.hub.load(
80
- "rwightman/gen-efficientnet-pytorch",
81
- "tf_efficientnet_lite3",
82
- pretrained=use_pretrained,
83
- exportable=exportable
84
- )
85
- return _make_efficientnet_backbone(efficientnet)
86
-
87
-
88
- def _make_efficientnet_backbone(effnet):
89
- pretrained = nn.Module()
90
-
91
- pretrained.layer1 = nn.Sequential(
92
- effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
93
- )
94
- pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
95
- pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
96
- pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
97
-
98
- return pretrained
99
-
100
-
101
- def _make_resnet_backbone(resnet):
102
- pretrained = nn.Module()
103
- pretrained.layer1 = nn.Sequential(
104
- resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
105
- )
106
-
107
- pretrained.layer2 = resnet.layer2
108
- pretrained.layer3 = resnet.layer3
109
- pretrained.layer4 = resnet.layer4
110
-
111
- return pretrained
112
-
113
-
114
- def _make_pretrained_resnext101_wsl(use_pretrained):
115
- resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
116
- return _make_resnet_backbone(resnet)
117
-
118
-
119
-
120
- class Interpolate(nn.Module):
121
- """Interpolation module.
122
- """
123
-
124
- def __init__(self, scale_factor, mode, align_corners=False):
125
- """Init.
126
-
127
- Args:
128
- scale_factor (float): scaling
129
- mode (str): interpolation mode
130
- """
131
- super(Interpolate, self).__init__()
132
-
133
- self.interp = nn.functional.interpolate
134
- self.scale_factor = scale_factor
135
- self.mode = mode
136
- self.align_corners = align_corners
137
-
138
- def forward(self, x):
139
- """Forward pass.
140
-
141
- Args:
142
- x (tensor): input
143
-
144
- Returns:
145
- tensor: interpolated data
146
- """
147
-
148
- x = self.interp(
149
- x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
150
- )
151
-
152
- return x
153
-
154
-
155
- class ResidualConvUnit(nn.Module):
156
- """Residual convolution module.
157
- """
158
-
159
- def __init__(self, features):
160
- """Init.
161
-
162
- Args:
163
- features (int): number of features
164
- """
165
- super().__init__()
166
-
167
- self.conv1 = nn.Conv2d(
168
- features, features, kernel_size=3, stride=1, padding=1, bias=True
169
- )
170
-
171
- self.conv2 = nn.Conv2d(
172
- features, features, kernel_size=3, stride=1, padding=1, bias=True
173
- )
174
-
175
- self.relu = nn.ReLU(inplace=True)
176
-
177
- def forward(self, x):
178
- """Forward pass.
179
-
180
- Args:
181
- x (tensor): input
182
-
183
- Returns:
184
- tensor: output
185
- """
186
- out = self.relu(x)
187
- out = self.conv1(out)
188
- out = self.relu(out)
189
- out = self.conv2(out)
190
-
191
- return out + x
192
-
193
-
194
- class FeatureFusionBlock(nn.Module):
195
- """Feature fusion block.
196
- """
197
-
198
- def __init__(self, features):
199
- """Init.
200
-
201
- Args:
202
- features (int): number of features
203
- """
204
- super(FeatureFusionBlock, self).__init__()
205
-
206
- self.resConfUnit1 = ResidualConvUnit(features)
207
- self.resConfUnit2 = ResidualConvUnit(features)
208
-
209
- def forward(self, *xs):
210
- """Forward pass.
211
-
212
- Returns:
213
- tensor: output
214
- """
215
- output = xs[0]
216
-
217
- if len(xs) == 2:
218
- output += self.resConfUnit1(xs[1])
219
-
220
- output = self.resConfUnit2(output)
221
-
222
- output = nn.functional.interpolate(
223
- output, scale_factor=2, mode="bilinear", align_corners=True
224
- )
225
-
226
- return output
227
-
228
-
229
-
230
-
231
- class ResidualConvUnit_custom(nn.Module):
232
- """Residual convolution module.
233
- """
234
-
235
- def __init__(self, features, activation, bn):
236
- """Init.
237
-
238
- Args:
239
- features (int): number of features
240
- """
241
- super().__init__()
242
-
243
- self.bn = bn
244
-
245
- self.groups=1
246
-
247
- self.conv1 = nn.Conv2d(
248
- features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
249
- )
250
-
251
- self.conv2 = nn.Conv2d(
252
- features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
253
- )
254
-
255
- if self.bn==True:
256
- self.bn1 = nn.BatchNorm2d(features)
257
- self.bn2 = nn.BatchNorm2d(features)
258
-
259
- self.activation = activation
260
-
261
- self.skip_add = nn.quantized.FloatFunctional()
262
-
263
- def forward(self, x):
264
- """Forward pass.
265
-
266
- Args:
267
- x (tensor): input
268
-
269
- Returns:
270
- tensor: output
271
- """
272
-
273
- out = self.activation(x)
274
- out = self.conv1(out)
275
- if self.bn==True:
276
- out = self.bn1(out)
277
-
278
- out = self.activation(out)
279
- out = self.conv2(out)
280
- if self.bn==True:
281
- out = self.bn2(out)
282
-
283
- if self.groups > 1:
284
- out = self.conv_merge(out)
285
-
286
- return self.skip_add.add(out, x)
287
-
288
- # return out + x
289
-
290
-
291
- class FeatureFusionBlock_custom(nn.Module):
292
- """Feature fusion block.
293
- """
294
-
295
- def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
296
- """Init.
297
-
298
- Args:
299
- features (int): number of features
300
- """
301
- super(FeatureFusionBlock_custom, self).__init__()
302
-
303
- self.deconv = deconv
304
- self.align_corners = align_corners
305
-
306
- self.groups=1
307
-
308
- self.expand = expand
309
- out_features = features
310
- if self.expand==True:
311
- out_features = features//2
312
-
313
- self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
314
-
315
- self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
316
- self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
317
-
318
- self.skip_add = nn.quantized.FloatFunctional()
319
-
320
- def forward(self, *xs):
321
- """Forward pass.
322
-
323
- Returns:
324
- tensor: output
325
- """
326
- output = xs[0]
327
-
328
- if len(xs) == 2:
329
- res = self.resConfUnit1(xs[1])
330
- output = self.skip_add.add(output, res)
331
- # output += res
332
-
333
- output = self.resConfUnit2(output)
334
-
335
- output = nn.functional.interpolate(
336
- output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
337
- )
338
-
339
- output = self.out_conv(output)
340
-
341
- return output
342
-
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/models/melgan.py DELETED
@@ -1,458 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- # Copyright 2020 Tomoki Hayashi
4
- # MIT License (https://opensource.org/licenses/MIT)
5
-
6
- """MelGAN Modules."""
7
-
8
- import logging
9
-
10
- import numpy as np
11
- import torch
12
- from torch import nn
13
-
14
- from text_to_speech.modules.vocoder.parallel_wavegan.layers import CausalConv1d
15
- from text_to_speech.modules.vocoder.parallel_wavegan.layers import CausalConvTranspose1d
16
- from text_to_speech.modules.vocoder.parallel_wavegan.layers import ResidualStack
17
- from text_to_speech.modules.vocoder.parallel_wavegan.models.source import SourceModuleCycNoise_v1
18
-
19
-
20
- class MelGANGenerator(torch.nn.Module):
21
- """MelGAN generator module."""
22
-
23
- def __init__(self,
24
- in_channels=80,
25
- out_channels=1,
26
- kernel_size=7,
27
- channels=512,
28
- bias=True,
29
- upsample_scales=[8, 8, 2, 2],
30
- stack_kernel_size=3,
31
- stacks=3,
32
- nonlinear_activation="LeakyReLU",
33
- nonlinear_activation_params={"negative_slope": 0.2},
34
- pad="ReflectionPad1d",
35
- pad_params={},
36
- use_final_nonlinear_activation=True,
37
- use_weight_norm=True,
38
- use_causal_conv=False,
39
- use_pitch_embed=False,
40
- use_nsf=False,
41
- sample_rate=22050,
42
- **kwargs
43
- ):
44
- """Initialize MelGANGenerator module.
45
-
46
- Args:
47
- in_channels (int): Number of input channels.
48
- out_channels (int): Number of output channels.
49
- kernel_size (int): Kernel size of initial and final conv layer.
50
- channels (int): Initial number of channels for conv layer.
51
- bias (bool): Whether to add bias parameter in convolution layers.
52
- upsample_scales (list): List of upsampling scales.
53
- stack_kernel_size (int): Kernel size of dilated conv layers in residual stack.
54
- stacks (int): Number of stacks in a single residual stack.
55
- nonlinear_activation (str): Activation function module name.
56
- nonlinear_activation_params (dict): Hyperparameters for activation function.
57
- pad (str): Padding function module name before dilated convolution layer.
58
- pad_params (dict): Hyperparameters for padding function.
59
- use_final_nonlinear_activation (torch.nn.Module): Activation function for the final layer.
60
- use_weight_norm (bool): Whether to use weight norm.
61
- If set to true, it will be applied to all of the conv layers.
62
- use_causal_conv (bool): Whether to use causal convolution.
63
-
64
- """
65
- super(MelGANGenerator, self).__init__()
66
-
67
- # check hyper parameters is valid
68
- assert channels >= np.prod(upsample_scales)
69
- assert channels % (2 ** len(upsample_scales)) == 0
70
- if not use_causal_conv:
71
- assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
72
-
73
- # add initial layer
74
- layers = []
75
- if not use_causal_conv:
76
- layers += [
77
- getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
78
- torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias),
79
- ]
80
- else:
81
- layers += [
82
- CausalConv1d(in_channels, channels, kernel_size,
83
- bias=bias, pad=pad, pad_params=pad_params),
84
- ]
85
-
86
- self.use_pitch_embed = use_pitch_embed
87
- if use_pitch_embed:
88
- self.pitch_embed = nn.Embedding(300, in_channels, 0)
89
- self.c_proj = nn.Conv1d(2 * in_channels, in_channels, 1)
90
-
91
- for i, upsample_scale in enumerate(upsample_scales):
92
- # add upsampling layer
93
- layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
94
- if not use_causal_conv:
95
- layers += [
96
- torch.nn.ConvTranspose1d(
97
- channels // (2 ** i),
98
- channels // (2 ** (i + 1)),
99
- upsample_scale * 2,
100
- stride=upsample_scale,
101
- padding=upsample_scale // 2 + upsample_scale % 2,
102
- output_padding=upsample_scale % 2,
103
- bias=bias,
104
- )
105
- ]
106
- else:
107
- layers += [
108
- CausalConvTranspose1d(
109
- channels // (2 ** i),
110
- channels // (2 ** (i + 1)),
111
- upsample_scale * 2,
112
- stride=upsample_scale,
113
- bias=bias,
114
- )
115
- ]
116
-
117
- # add residual stack
118
- for j in range(stacks):
119
- layers += [
120
- ResidualStack(
121
- kernel_size=stack_kernel_size,
122
- channels=channels // (2 ** (i + 1)),
123
- dilation=stack_kernel_size ** j,
124
- bias=bias,
125
- nonlinear_activation=nonlinear_activation,
126
- nonlinear_activation_params=nonlinear_activation_params,
127
- pad=pad,
128
- pad_params=pad_params,
129
- use_causal_conv=use_causal_conv,
130
- )
131
- ]
132
- self.use_nsf = use_nsf
133
- if use_nsf:
134
- self.harmonic_num = 8
135
- hop_size = np.prod(upsample_scales)
136
- self.f0_upsamp = torch.nn.Upsample(scale_factor=hop_size)
137
- # self.m_source = SourceModuleHnNSF(sampling_rate=sample_rate, harmonic_num=self.harmonic_num)
138
- self.m_source = SourceModuleCycNoise_v1(sample_rate, 0.003)
139
- self.nsf_conv = nn.Sequential(nn.Conv1d(1, channels // (2 ** (i + 1)), 1), torch.nn.Tanh())
140
-
141
- # define the model as a single function
142
- self.melgan_body = torch.nn.Sequential(*layers)
143
- layers = []
144
- # add final layer
145
- layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
146
- if not use_causal_conv:
147
- layers += [
148
- getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
149
- torch.nn.Conv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias),
150
- ]
151
- else:
152
- layers += [
153
- CausalConv1d(channels // (2 ** (i + 1)), out_channels, kernel_size,
154
- bias=bias, pad=pad, pad_params=pad_params),
155
- ]
156
- if use_final_nonlinear_activation:
157
- layers += [torch.nn.Tanh()]
158
-
159
- # define the model as a single function
160
- self.melgan_final = torch.nn.Sequential(*layers)
161
-
162
- # apply weight norm
163
- if use_weight_norm:
164
- self.apply_weight_norm()
165
-
166
- # reset parameters
167
- self.reset_parameters()
168
-
169
- def forward(self, c, f0=None, pitch=None):
170
- """Calculate forward propagation.
171
-
172
- Args:
173
- c (Tensor): Input tensor (B, channels, T).
174
-
175
- Returns:
176
- Tensor: Output tensor (B, 1, T ** prod(upsample_scales)).
177
-
178
- """
179
- if self.use_pitch_embed:
180
- c = self.c_proj(torch.cat([c, self.pitch_embed(pitch).transpose(1, 2)], 1))
181
- x = self.melgan_body(c)
182
- if self.use_nsf:
183
- f0_upsample = self.f0_upsamp(f0[:, None, :])
184
- f0_upsample = self.nsf_conv(f0_upsample)
185
- x = x + f0_upsample
186
- x = self.melgan_final(x)
187
- return x
188
-
189
- def remove_weight_norm(self):
190
- """Remove weight normalization module from all of the layers."""
191
- def _remove_weight_norm(m):
192
- try:
193
- logging.debug(f"Weight norm is removed from {m}.")
194
- torch.nn.utils.remove_weight_norm(m)
195
- except ValueError: # this module didn't have weight norm
196
- return
197
-
198
- self.apply(_remove_weight_norm)
199
-
200
- def apply_weight_norm(self):
201
- """Apply weight normalization module from all of the layers."""
202
- def _apply_weight_norm(m):
203
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
204
- torch.nn.utils.weight_norm(m)
205
- logging.debug(f"Weight norm is applied to {m}.")
206
-
207
- self.apply(_apply_weight_norm)
208
-
209
- def reset_parameters(self):
210
- """Reset parameters.
211
-
212
- This initialization follows official implementation manner.
213
- https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py
214
-
215
- """
216
- def _reset_parameters(m):
217
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
218
- m.weight.data.normal_(0.0, 0.02)
219
- logging.debug(f"Reset parameters in {m}.")
220
-
221
- self.apply(_reset_parameters)
222
-
223
-
224
- class MelGANDiscriminator(torch.nn.Module):
225
- """MelGAN discriminator module."""
226
-
227
- def __init__(self,
228
- in_channels=1,
229
- out_channels=1,
230
- kernel_sizes=[5, 3],
231
- channels=16,
232
- max_downsample_channels=1024,
233
- bias=True,
234
- downsample_scales=[4, 4, 4, 4],
235
- nonlinear_activation="LeakyReLU",
236
- nonlinear_activation_params={"negative_slope": 0.2},
237
- pad="ReflectionPad1d",
238
- pad_params={},
239
- ):
240
- """Initilize MelGAN discriminator module.
241
-
242
- Args:
243
- in_channels (int): Number of input channels.
244
- out_channels (int): Number of output channels.
245
- kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
246
- and the first and the second kernel sizes will be used for the last two layers.
247
- For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15,
248
- the last two layers' kernel size will be 5 and 3, respectively.
249
- channels (int): Initial number of channels for conv layer.
250
- max_downsample_channels (int): Maximum number of channels for downsampling layers.
251
- bias (bool): Whether to add bias parameter in convolution layers.
252
- downsample_scales (list): List of downsampling scales.
253
- nonlinear_activation (str): Activation function module name.
254
- nonlinear_activation_params (dict): Hyperparameters for activation function.
255
- pad (str): Padding function module name before dilated convolution layer.
256
- pad_params (dict): Hyperparameters for padding function.
257
-
258
- """
259
- super(MelGANDiscriminator, self).__init__()
260
- self.layers = torch.nn.ModuleList()
261
-
262
- # check kernel size is valid
263
- assert len(kernel_sizes) == 2
264
- assert kernel_sizes[0] % 2 == 1
265
- assert kernel_sizes[1] % 2 == 1
266
-
267
- # add first layer
268
- self.layers += [
269
- torch.nn.Sequential(
270
- getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params),
271
- torch.nn.Conv1d(in_channels, channels, np.prod(kernel_sizes), bias=bias),
272
- getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
273
- )
274
- ]
275
-
276
- # add downsample layers
277
- in_chs = channels
278
- for downsample_scale in downsample_scales:
279
- out_chs = min(in_chs * downsample_scale, max_downsample_channels)
280
- self.layers += [
281
- torch.nn.Sequential(
282
- torch.nn.Conv1d(
283
- in_chs, out_chs,
284
- kernel_size=downsample_scale * 10 + 1,
285
- stride=downsample_scale,
286
- padding=downsample_scale * 5,
287
- groups=in_chs // 4,
288
- bias=bias,
289
- ),
290
- getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
291
- )
292
- ]
293
- in_chs = out_chs
294
-
295
- # add final layers
296
- out_chs = min(in_chs * 2, max_downsample_channels)
297
- self.layers += [
298
- torch.nn.Sequential(
299
- torch.nn.Conv1d(
300
- in_chs, out_chs, kernel_sizes[0],
301
- padding=(kernel_sizes[0] - 1) // 2,
302
- bias=bias,
303
- ),
304
- getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
305
- )
306
- ]
307
- self.layers += [
308
- torch.nn.Conv1d(
309
- out_chs, out_channels, kernel_sizes[1],
310
- padding=(kernel_sizes[1] - 1) // 2,
311
- bias=bias,
312
- ),
313
- ]
314
-
315
- def forward(self, x):
316
- """Calculate forward propagation.
317
-
318
- Args:
319
- x (Tensor): Input noise signal (B, 1, T).
320
-
321
- Returns:
322
- List: List of output tensors of each layer.
323
-
324
- """
325
- outs = []
326
- for f in self.layers:
327
- x = f(x)
328
- outs += [x]
329
-
330
- return outs
331
-
332
-
333
- class MelGANMultiScaleDiscriminator(torch.nn.Module):
334
- """MelGAN multi-scale discriminator module."""
335
-
336
- def __init__(self,
337
- in_channels=1,
338
- out_channels=1,
339
- scales=3,
340
- downsample_pooling="AvgPool1d",
341
- # follow the official implementation setting
342
- downsample_pooling_params={
343
- "kernel_size": 4,
344
- "stride": 2,
345
- "padding": 1,
346
- "count_include_pad": False,
347
- },
348
- kernel_sizes=[5, 3],
349
- channels=16,
350
- max_downsample_channels=1024,
351
- bias=True,
352
- downsample_scales=[4, 4, 4, 4],
353
- nonlinear_activation="LeakyReLU",
354
- nonlinear_activation_params={"negative_slope": 0.2},
355
- pad="ReflectionPad1d",
356
- pad_params={},
357
- use_weight_norm=True,
358
- **kwargs
359
- ):
360
- """Initilize MelGAN multi-scale discriminator module.
361
-
362
- Args:
363
- in_channels (int): Number of input channels.
364
- out_channels (int): Number of output channels.
365
- downsample_pooling (str): Pooling module name for downsampling of the inputs.
366
- downsample_pooling_params (dict): Parameters for the above pooling module.
367
- kernel_sizes (list): List of two kernel sizes. The sum will be used for the first conv layer,
368
- and the first and the second kernel sizes will be used for the last two layers.
369
- channels (int): Initial number of channels for conv layer.
370
- max_downsample_channels (int): Maximum number of channels for downsampling layers.
371
- bias (bool): Whether to add bias parameter in convolution layers.
372
- downsample_scales (list): List of downsampling scales.
373
- nonlinear_activation (str): Activation function module name.
374
- nonlinear_activation_params (dict): Hyperparameters for activation function.
375
- pad (str): Padding function module name before dilated convolution layer.
376
- pad_params (dict): Hyperparameters for padding function.
377
- use_causal_conv (bool): Whether to use causal convolution.
378
-
379
- """
380
- super(MelGANMultiScaleDiscriminator, self).__init__()
381
- self.discriminators = torch.nn.ModuleList()
382
-
383
- # add discriminators
384
- for _ in range(scales):
385
- self.discriminators += [
386
- MelGANDiscriminator(
387
- in_channels=in_channels,
388
- out_channels=out_channels,
389
- kernel_sizes=kernel_sizes,
390
- channels=channels,
391
- max_downsample_channels=max_downsample_channels,
392
- bias=bias,
393
- downsample_scales=downsample_scales,
394
- nonlinear_activation=nonlinear_activation,
395
- nonlinear_activation_params=nonlinear_activation_params,
396
- pad=pad,
397
- pad_params=pad_params,
398
- )
399
- ]
400
- self.pooling = getattr(torch.nn, downsample_pooling)(**downsample_pooling_params)
401
-
402
- # apply weight norm
403
- if use_weight_norm:
404
- self.apply_weight_norm()
405
-
406
- # reset parameters
407
- self.reset_parameters()
408
-
409
- def forward(self, x):
410
- """Calculate forward propagation.
411
-
412
- Args:
413
- x (Tensor): Input noise signal (B, 1, T).
414
-
415
- Returns:
416
- List: List of list of each discriminator outputs, which consists of each layer output tensors.
417
-
418
- """
419
- outs = []
420
- for f in self.discriminators:
421
- outs += [f(x)]
422
- x = self.pooling(x)
423
-
424
- return outs
425
-
426
- def remove_weight_norm(self):
427
- """Remove weight normalization module from all of the layers."""
428
- def _remove_weight_norm(m):
429
- try:
430
- logging.debug(f"Weight norm is removed from {m}.")
431
- torch.nn.utils.remove_weight_norm(m)
432
- except ValueError: # this module didn't have weight norm
433
- return
434
-
435
- self.apply(_remove_weight_norm)
436
-
437
- def apply_weight_norm(self):
438
- """Apply weight normalization module from all of the layers."""
439
- def _apply_weight_norm(m):
440
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
441
- torch.nn.utils.weight_norm(m)
442
- logging.debug(f"Weight norm is applied to {m}.")
443
-
444
- self.apply(_apply_weight_norm)
445
-
446
- def reset_parameters(self):
447
- """Reset parameters.
448
-
449
- This initialization follows official implementation manner.
450
- https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py
451
-
452
- """
453
- def _reset_parameters(m):
454
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
455
- m.weight.data.normal_(0.0, 0.02)
456
- logging.debug(f"Reset parameters in {m}.")
457
-
458
- self.apply(_reset_parameters)
 
 
 
 
 
 
 
 
spaces/AIWaves/SOP_Generation-single/Environment/base_environment.py DELETED
@@ -1,177 +0,0 @@
1
- from utils import get_relevant_history, get_embedding
2
- import torch
3
- from LLM.base_LLM import *
4
- from Memory import Memory
5
- from Prompt import *
6
- import json
7
- class Environment:
8
- """
9
- The place where the agent activities, responsible for storing some shared memories
10
- """
11
- def __init__(self, config) -> None:
12
- self.shared_memory = {"long_term_memory": [], "short_term_memory": None}
13
- self.agents = None
14
-
15
- self.summary_system_prompt = {}
16
- self.summary_last_prompt = {}
17
- self.environment_prompt = {}
18
- self.environment_type = config["environment_type"] if "environment_type" in config else "cooperative"
19
- self.current_chat_history_idx = 0
20
- self.LLMs = {}
21
-
22
- # 初始化每个state 的summary 方法
23
- # Initialize the summary method for each state
24
- for state_name, state_dict in config["states"].items():
25
- if state_name != "end_state":
26
- self.summary_system_prompt[state_name] = (
27
- state_dict["summary_system_prompt"]
28
- if "summary_system_prompt" in state_dict
29
- else eval(Default_environment_summary_system_prompt)
30
- )
31
-
32
- self.summary_last_prompt[state_name] = (
33
- state_dict["summary_last_prompt"]
34
- if "summary_last_prompt" in state_dict
35
- else eval(Default_environment_summary_last_prompt)
36
- )
37
-
38
- self.environment_prompt[state_name] = (
39
- state_dict["environment_prompt"]
40
- if "environment_prompt" in state_dict
41
- else " "
42
- )
43
- self.LLMs[state_name] = init_LLM("logs"+os.sep+f"{state_name}",**state_dict)
44
- self.roles_to_names = None
45
- self.names_to_roles = None
46
-
47
- @classmethod
48
- def from_config(cls, config_path):
49
- with open(config_path) as f:
50
- config = json.load(f)
51
- return cls(config)
52
-
53
- def summary(self, current_state):
54
- """
55
- Summarize the situation in the current environment every once in a while
56
- """
57
- MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
58
- current_state_name = current_state.name
59
-
60
- query = self.shared_memory["long_term_memory"][-1].content
61
- if len(self.shared_memory["long_term_memory"])>1:
62
- relevant_history = get_relevant_history(
63
- query,
64
- self.shared_memory["long_term_memory"][:-1],
65
- self.shared_memory["chat_embeddings"][:-1],
66
- )
67
-
68
- relevant_history = Memory.get_chat_history(relevant_history)
69
- else:
70
- relevant_history = ""
71
- chat_history = Memory.get_chat_history(
72
- self.shared_memory["long_term_memory"][-MAX_CHAT_HISTORY + 1 :]
73
- )
74
- summary = self.shared_memory["short_term_memory"]
75
-
76
-
77
- # system prompt = environment prompt + current memory + system prompt
78
- # current_memory = summary + chat history + relevant history
79
- current_memory = eval(Environment_summary_memory)
80
- environment_prompt = self.environment_prompt[current_state_name]
81
- summary_system_prompt = self.summary_system_prompt[current_state_name]
82
-
83
- environment_summary_system_prompt = eval(Environment_summary_system_prompt)
84
- response = self.LLMs[current_state_name].get_response(None, environment_summary_system_prompt, stream=False)
85
- return response
86
-
87
- def update_memory(self, memory, current_state):
88
- """
89
- update chat embbedings and long term memory,short term memory,agents long term memory
90
- """
91
- MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
92
- self.shared_memory["long_term_memory"].append(memory)
93
- current_embedding = get_embedding(memory.content)
94
- if "chat_embeddings" not in self.shared_memory:
95
- self.shared_memory["chat_embeddings"] = current_embedding
96
- else:
97
- self.shared_memory["chat_embeddings"] = torch.cat(
98
- [self.shared_memory["chat_embeddings"], current_embedding], dim=0
99
- )
100
- if len(self.shared_memory["long_term_memory"]) % MAX_CHAT_HISTORY == 0:
101
- summary = self.summary(current_state)
102
- self.shared_memory["short_term_memory"] = summary
103
-
104
- self.agents[memory.send_name].update_memory(memory)
105
-
106
-
107
- def _get_agent_last_conversation_idx(self,agent,current_long_term_memory):
108
- last_conversation_idx = -1
109
- for i, history in enumerate(current_long_term_memory):
110
- if history.send_name == agent.name:
111
- last_conversation_idx = i
112
- return last_conversation_idx
113
-
114
-
115
- def _get_agent_new_memory(self,agent,current_long_term_memory):
116
- # get new conversation
117
- last_conversation_idx = self._get_agent_last_conversation_idx(agent,current_long_term_memory)
118
-
119
- if last_conversation_idx == -1:
120
- new_conversation =current_long_term_memory
121
- elif (
122
- last_conversation_idx
123
- == len(current_long_term_memory) - 1
124
- ):
125
- new_conversation = []
126
- else:
127
- new_conversation = current_long_term_memory[
128
- last_conversation_idx + 1 :
129
- ]
130
- MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
131
- if len(new_conversation) > 2 * MAX_CHAT_HISTORY:
132
- new_conversation = new_conversation[-2*MAX_CHAT_HISTORY+1:]
133
-
134
- # get chat history from new conversation
135
- return Memory.get_chat_history(new_conversation)
136
-
137
-
138
- def _observe(self,agent):
139
- MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
140
- current_state = agent.current_state
141
- current_role = agent.state_roles[current_state.name]
142
- current_component_dict = current_state.components[current_role]
143
-
144
- # cooperative:Sharing information between different states ; competive: No information is shared between different states
145
- current_chat_history_idx = self.current_chat_history_idx if self.environment_type == "competive" else 0
146
- current_long_term_memory = self.shared_memory["long_term_memory"][current_chat_history_idx:]
147
- current_chat_embbedings = self.shared_memory["chat_embeddings"][current_chat_history_idx:]
148
-
149
- if len(current_long_term_memory)>2*MAX_CHAT_HISTORY:
150
- current_long_term_memory = current_long_term_memory[-2*MAX_CHAT_HISTORY+1:]
151
- current_chat_embbedings = current_chat_embbedings[-2*MAX_CHAT_HISTORY+1:]
152
- # relevant_memory
153
- query = current_long_term_memory[-1].content
154
- if len(current_long_term_memory)>1:
155
- relevant_memory = get_relevant_history(
156
- query,
157
- current_long_term_memory[:-2],
158
- current_chat_embbedings[:-2],
159
- )
160
- relevant_memory = Memory.get_chat_history(relevant_memory,agent.name)
161
- else:
162
- relevant_memory = ""
163
-
164
- relevant_memory = eval(Agent_observe_relevant_memory)
165
- agent.relevant_memory = relevant_memory
166
-
167
-
168
- # get chat history from new conversation
169
- conversations = self._get_agent_new_memory(agent,current_long_term_memory)
170
-
171
- # memory = relevant_memory + summary + history + query
172
- query = current_long_term_memory[-1]
173
- current_memory = eval(Agent_observe_memory)
174
-
175
- return {"role": "user", "content": current_memory}
176
-
177
-
 
 
 
spaces/Abhilashvj/planogram-compliance/utils/aws/userdata.sh DELETED
@@ -1,27 +0,0 @@
1
- #!/bin/bash
2
- # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3
- # This script will run only once on first instance start (for a re-start script see mime.sh)
4
- # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5
- # Use >300 GB SSD
6
-
7
- cd home/ubuntu
8
- if [ ! -d yolov5 ]; then
9
- echo "Running first-time script." # install dependencies, download COCO, pull Docker
10
- git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11
- cd yolov5
12
- bash data/scripts/get_coco.sh && echo "COCO done." &
13
- sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14
- python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15
- wait && echo "All tasks done." # finish background tasks
16
- else
17
- echo "Running re-start script." # resume interrupted runs
18
- i=0
19
- list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20
- while IFS= read -r id; do
21
- ((i++))
22
- echo "restarting container $i: $id"
23
- sudo docker start $id
24
- # sudo docker exec -it $id python train.py --resume # single-GPU
25
- sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26
- done <<<"$list"
27
- fi
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/methods/ColorPicker.js DELETED
@@ -1,101 +0,0 @@
1
- import Sizer from '../../../sizer/Sizer.js';
2
- import ColorPicker from '../../colorpicker/ColorPicker.js';
3
- import ColorComponents from '../../colorcomponents/ColorComponents.js';
4
- import TouchEventStop from '../../../toucheventstop/TouchEventStop.js';
5
-
6
- const GetValue = Phaser.Utils.Objects.GetValue;
7
-
8
- class ColorPickerPanel extends Sizer {
9
- constructor(scene, config) {
10
- if (config === undefined) {
11
- config = {};
12
- }
13
-
14
- config.orientation = 1;
15
- super(scene, config);
16
- this.type = 'rexColorInput.ColorPickerPanel';
17
-
18
- // Add elements
19
- var background = GetValue(config, 'background', undefined);
20
-
21
- var colorPicker = new ColorPicker(scene, {
22
- hPalette: config.hPalette || {},
23
- svPalette: config.svPalette || {},
24
- space: {
25
- item: GetValue(config, 'space.hPalette', 8)
26
- }
27
- });
28
- scene.add.existing(colorPicker);
29
-
30
- var colorComponents;
31
- if (config.colorComponents) {
32
- colorComponents = new ColorComponents(scene, config.colorComponents);
33
- scene.add.existing(colorComponents);
34
- }
35
-
36
- if (background) {
37
- this.addBackground(background);
38
- var touchEventStop = new TouchEventStop(background, {
39
- stopAllLevels: false,
40
- });
41
- }
42
-
43
- this.add(
44
- colorPicker,
45
- { proportion: 1, expand: true }
46
- );
47
-
48
- if (colorComponents) {
49
- this.add(
50
- colorComponents,
51
- { proportion: 0, expand: true }
52
- );
53
- }
54
-
55
- this.addChildrenMap('background', background);
56
- this.addChildrenMap('colorPicker', colorPicker);
57
- this.addChildrenMap('colorComponents', colorComponents);
58
-
59
- colorPicker.on('valuechange', function (value) {
60
- this.setValue(value);
61
- }, this)
62
-
63
- if (colorComponents) {
64
- colorComponents.on('valuechange', function (value) {
65
- this.setValue(value);
66
- }, this)
67
- }
68
-
69
- this.setValue(GetValue(config, 'value', 0xffffff));
70
- }
71
-
72
- get value() {
73
- return this._value;
74
- }
75
-
76
- set value(value) {
77
- if (this._value === value) {
78
- return;
79
- }
80
-
81
- this._value = value;
82
-
83
- var colorPicker = this.childrenMap.colorPicker;
84
- colorPicker.setValue(value);
85
-
86
- var colorComponents = this.childrenMap.colorComponents;
87
- if (colorComponents) {
88
- colorComponents.setValue(value);
89
- }
90
-
91
- this.emit('valuechange', value);
92
- }
93
-
94
- setValue(value) {
95
- this.value = value;
96
- return this;
97
- }
98
-
99
- }
100
-
101
- export default ColorPickerPanel;
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/CloseListPanel.js DELETED
@@ -1,11 +0,0 @@
1
- var CloseListPanel = function () {
2
- if (!this.dropDownBehavior) {
3
- return this;
4
- }
5
-
6
- this.dropDownBehavior.requestClose();
7
-
8
- return this;
9
- }
10
-
11
- export default CloseListPanel;
 
 
 
spaces/AkitoP/umamusume_bert_vits2/app0.py DELETED
@@ -1,344 +0,0 @@
1
- # flake8: noqa: E402
2
-
3
- import sys, os
4
- import logging
5
- import os
6
- import time
7
- import numpy as np # 假设你使用NumPy来处理音频数据
8
- import shutil # 用于删除文件夹和文件
9
- from scipy.io import wavfile
10
-
11
- logging.getLogger("numba").setLevel(logging.WARNING)
12
- logging.getLogger("markdown_it").setLevel(logging.WARNING)
13
- logging.getLogger("urllib3").setLevel(logging.WARNING)
14
- logging.getLogger("matplotlib").setLevel(logging.WARNING)
15
-
16
- logging.basicConfig(
17
- level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
18
- )
19
-
20
- logger = logging.getLogger(__name__)
21
-
22
- import torch
23
- import argparse
24
- import commons
25
- import utils
26
- from models import SynthesizerTrn
27
- from text.symbols import symbols
28
- from text import cleaned_text_to_sequence, get_bert
29
- from text.cleaner import clean_text
30
- import gradio as gr
31
- import webbrowser
32
- import numpy as np
33
-
34
- net_g = None
35
-
36
- if sys.platform == "darwin" and torch.backends.mps.is_available():
37
- device = "mps"
38
- os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
39
- else:
40
- device = "cuda"
41
-
42
-
43
- def get_text(text, language_str, hps):
44
- norm_text, phone, tone, word2ph = clean_text(text, language_str)
45
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
46
-
47
- if hps.data.add_blank:
48
- phone = commons.intersperse(phone, 0)
49
- tone = commons.intersperse(tone, 0)
50
- language = commons.intersperse(language, 0)
51
- for i in range(len(word2ph)):
52
- word2ph[i] = word2ph[i] * 2
53
- word2ph[0] += 1
54
- bert = get_bert(norm_text, word2ph, language_str, device)
55
- del word2ph
56
- assert bert.shape[-1] == len(phone), phone
57
-
58
- if language_str == "ZH":
59
- bert = bert
60
- ja_bert = torch.zeros(768, len(phone))
61
- elif language_str == "JP":
62
- ja_bert = bert
63
- bert = torch.zeros(1024, len(phone))
64
- else:
65
- bert = torch.zeros(1024, len(phone))
66
- ja_bert = torch.zeros(768, len(phone))
67
-
68
- assert bert.shape[-1] == len(
69
- phone
70
- ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
71
-
72
- phone = torch.LongTensor(phone)
73
- tone = torch.LongTensor(tone)
74
- language = torch.LongTensor(language)
75
- return bert, ja_bert, phone, tone, language
76
-
77
-
78
- def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
79
- global net_g
80
- bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
81
- with torch.no_grad():
82
- x_tst = phones.to(device).unsqueeze(0)
83
- tones = tones.to(device).unsqueeze(0)
84
- lang_ids = lang_ids.to(device).unsqueeze(0)
85
- bert = bert.to(device).unsqueeze(0)
86
- ja_bert = ja_bert.to(device).unsqueeze(0)
87
- x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
88
- #print(x_tst.type(), tones.type(), lang_ids.type(), bert.type(), ja_bert.type(), x_tst_lengths.type())
89
- del phones
90
- speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
91
- audio = (
92
- net_g.infer(
93
- x_tst,
94
- x_tst_lengths,
95
- speakers,
96
- tones,
97
- lang_ids,
98
- bert,
99
- ja_bert,
100
- sdp_ratio=sdp_ratio,
101
- noise_scale=noise_scale,
102
- noise_scale_w=noise_scale_w,
103
- length_scale=length_scale,
104
- )[0][0, 0]
105
- .data.cpu()
106
- .float()
107
- .numpy()
108
- )
109
- del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
110
- torch.cuda.empty_cache()
111
- return audio
112
-
113
- def infer_2(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
114
- global net_g_2
115
- bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
116
- with torch.no_grad():
117
- x_tst = phones.to(device).unsqueeze(0)
118
- tones = tones.to(device).unsqueeze(0)
119
- lang_ids = lang_ids.to(device).unsqueeze(0)
120
- bert = bert.to(device).unsqueeze(0)
121
- ja_bert = ja_bert.to(device).unsqueeze(0)
122
- x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
123
- #print(x_tst.type(), tones.type(), lang_ids.type(), bert.type(), ja_bert.type(), x_tst_lengths.type())
124
- del phones
125
- speakers = torch.LongTensor([hps_2.data.spk2id[sid]]).to(device)
126
- audio = (
127
- net_g_2.infer(
128
- x_tst,
129
- x_tst_lengths,
130
- speakers,
131
- tones,
132
- lang_ids,
133
- bert,
134
- ja_bert,
135
- sdp_ratio=sdp_ratio,
136
- noise_scale=noise_scale,
137
- noise_scale_w=noise_scale_w,
138
- length_scale=length_scale,
139
- )[0][0, 0]
140
- .data.cpu()
141
- .float()
142
- .numpy()
143
- )
144
- del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
145
- torch.cuda.empty_cache()
146
- return audio
147
-
148
- __LOG__ = "./generation_logs.txt"
149
- def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, language,from_model=0):
150
- # 清空 ./infer_save 文件夹
151
- if os.path.exists('./infer_save'):
152
- shutil.rmtree('./infer_save')
153
- os.makedirs('./infer_save')
154
-
155
- slices = text.split("\n")
156
- slices = [slice for slice in slices if slice.strip() != ""]
157
- audio_list = []
158
- with torch.no_grad():
159
- with open(__LOG__,"a",encoding="UTF-8") as f:
160
- for slice in slices:
161
- assert len(slice) < 150 # 限制输入的文本长度
162
- if from_model == 0:
163
- audio = infer(slice, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker, language=language)
164
- else:
165
- audio = infer_2(slice, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker, language=language)
166
- audio_list.append(audio)
167
-
168
- # 创建唯一的文件名
169
- timestamp = str(int(time.time() * 1000))
170
- audio_file_path = f'./infer_save/audio_{timestamp}.wav'
171
-
172
- # 保存音频数据到.wav文件
173
- wavfile.write(audio_file_path, hps.data.sampling_rate, audio)
174
-
175
- silence = np.zeros(int(hps.data.sampling_rate/2), dtype=np.int16) # 生成半秒的静音
176
- audio_list.append(silence) # 将静音添加到列表中
177
-
178
- f.write(f"{slice} | {speaker}\n")
179
- print(f"{slice} | {speaker}")
180
-
181
- audio_concat = np.concatenate(audio_list)
182
- return "Success", (hps.data.sampling_rate, audio_concat)
183
- def tts_fn_2(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, language,from_model=1):
184
- return tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, language,from_model)
185
-
186
- if __name__ == "__main__":
187
- parser = argparse.ArgumentParser()
188
- parser.add_argument(
189
- "-m", "--model", default="./logs/natuki/G_72000.pth", help="path of your model"
190
- )
191
- parser.add_argument(
192
- "-c",
193
- "--config",
194
- default="./configs/config.json",
195
- help="path of your config file",
196
- )
197
- parser.add_argument(
198
- "--share", default=False, help="make link public", action="store_true"
199
- )
200
- parser.add_argument(
201
- "-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log"
202
- )
203
-
204
- args = parser.parse_args()
205
- if args.debug:
206
- logger.info("Enable DEBUG-LEVEL log")
207
- logging.basicConfig(level=logging.DEBUG)
208
- hps = utils.get_hparams_from_file("./logs/digital/config.json")
209
- hps_2 = utils.get_hparams_from_file("./logs/fukukitaru/config.json")
210
-
211
- device = (
212
- "cuda:0"
213
- if torch.cuda.is_available()
214
- else (
215
- "mps"
216
- if sys.platform == "darwin" and torch.backends.mps.is_available()
217
- else "cpu"
218
- )
219
- )
220
- net_g = SynthesizerTrn(
221
- len(symbols),
222
- hps.data.filter_length // 2 + 1,
223
- hps.train.segment_size // hps.data.hop_length,
224
- n_speakers=hps.data.n_speakers,
225
- **hps.model,
226
- ).to(device)
227
- _ = net_g.eval()
228
-
229
- net_g_2 = SynthesizerTrn(
230
- len(symbols),
231
- hps.data.filter_length // 2 + 1,
232
- hps.train.segment_size // hps.data.hop_length,
233
- n_speakers=hps.data.n_speakers,
234
- **hps.model,
235
- ).to(device)
236
-
237
- _ = utils.load_checkpoint("./logs/digital/G_10500.pth", net_g, None, skip_optimizer=True)
238
- _ = utils.load_checkpoint("./logs/fukukitaru/G_10000.pth", net_g_2, None, skip_optimizer=True)
239
-
240
- speaker_ids = hps.data.spk2id
241
- speakers = list(speaker_ids.keys())
242
- speaker_ids_2 = hps_2.data.spk2id
243
- speakers_2 = list(speaker_ids_2.keys())
244
-
245
-
246
- languages = ["ZH", "JP"]
247
- with gr.Blocks() as app:
248
- with gr.Tab(label="umamusume"):
249
- with gr.Row():
250
- with gr.Column():
251
- text = gr.TextArea(
252
- label="Text",
253
- placeholder="Input Text Here",
254
- value="はりきっていこう!",
255
- )
256
- speaker = gr.Dropdown(
257
- choices=speakers, value=speakers[0], label="Speaker"
258
- )
259
- sdp_ratio = gr.Slider(
260
- minimum=0, maximum=1, value=0.2, step=0.1, label="SDP Ratio"
261
- )
262
- noise_scale = gr.Slider(
263
- minimum=0.1, maximum=2, value=0.6, step=0.1, label="Noise Scale"
264
- )
265
- noise_scale_w = gr.Slider(
266
- minimum=0.1, maximum=2, value=0.8, step=0.1, label="Noise Scale W"
267
- )
268
- length_scale = gr.Slider(
269
- minimum=0.1, maximum=2, value=1, step=0.1, label="Length Scale"
270
- )
271
- language = gr.Dropdown(
272
- choices=languages, value=languages[1], label="Language"
273
- )
274
- btn = gr.Button("Generate!", variant="primary")
275
- with gr.Column():
276
- text_output = gr.Textbox(label="Message")
277
- audio_output = gr.Audio(label="Output Audio")
278
- gr.Markdown("# 赛马娘 Bert-VITS2 语音合成\n"
279
- "Project page:[GitHub](https://github.com/fishaudio/Bert-VITS2)\n"
280
- "- 本项目在日语方面有所欠缺,特别是音调的设计上,需要帮助。\n"
281
- "- このプロジェクトは、日本語の方面で不足しています。特に、音調の設計に関して助けが欲しいです。")
282
-
283
- btn.click(
284
- tts_fn,
285
- inputs=[
286
- text,
287
- speaker,
288
- sdp_ratio,
289
- noise_scale,
290
- noise_scale_w,
291
- length_scale,
292
- language,
293
- ],
294
- outputs=[text_output, audio_output],
295
- )
296
- with gr.Tab(label="natuki"):
297
- with gr.Row():
298
- with gr.Column():
299
- text2 = gr.TextArea(
300
- label="Text",
301
- placeholder="Input Text Here",
302
- value="はりきっていこう!",
303
- )
304
- speaker2 = gr.Dropdown(
305
- choices=speakers_2, value=speakers_2[0], label="Speaker"
306
- )
307
- sdp_ratio2 = gr.Slider(
308
- minimum=0, maximum=1, value=0.2, step=0.1, label="SDP Ratio"
309
- )
310
- noise_scale2 = gr.Slider(
311
- minimum=0.1, maximum=2, value=0.6, step=0.1, label="Noise Scale"
312
- )
313
- noise_scale_w2 = gr.Slider(
314
- minimum=0.1, maximum=2, value=0.8, step=0.1, label="Noise Scale W"
315
- )
316
- length_scale2 = gr.Slider(
317
- minimum=0.1, maximum=2, value=1, step=0.1, label="Length Scale"
318
- )
319
- language2 = gr.Dropdown(
320
- choices=languages, value=languages[1], label="Language"
321
- )
322
- btn2 = gr.Button("Generate!", variant="primary")
323
- with gr.Column():
324
- text_output2 = gr.Textbox(label="Message")
325
- audio_output2 = gr.Audio(label="Output Audio")
326
- gr.Markdown("# 赛马娘 Bert-VITS2 语音合成\n"
327
- "Project page:[GitHub](https://github.com/fishaudio/Bert-VITS2)\n"
328
- "- 本项目在日语方面有所欠缺,特别是音调的设计上,需要帮助。\n"
329
- "- このプロジェクトは、日本語の方面で不足しています。特に、音調の設計に関して助けが欲しいです。")
330
-
331
- btn2.click(
332
- tts_fn_2,
333
- inputs=[
334
- text2,
335
- speaker2,
336
- sdp_ratio2,
337
- noise_scale2,
338
- noise_scale_w2,
339
- length_scale2,
340
- language2,
341
- ],
342
- outputs=[text_output2, audio_output2],
343
- )
344
- app.launch(server_name="0.0.0.0")
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/training/coaches/base_coach.py DELETED
@@ -1,158 +0,0 @@
1
- import abc
2
- import os
3
- import pickle
4
- from argparse import Namespace
5
- import os.path
6
- from PTI.criteria.localitly_regulizer import Space_Regulizer
7
- import torch
8
- from torchvision import transforms
9
- from lpips import LPIPS
10
- from PTI.training.projectors import w_projector
11
- from PTI.configs import global_config, paths_config, hyperparameters
12
- from PTI.criteria import l2_loss
13
- from PTI.models.e4e.psp import pSp
14
- from PTI.utils.log_utils import log_image_from_w
15
- from PTI.utils.models_utils import toogle_grad, load_old_G
16
-
17
-
18
- class BaseCoach:
19
- def __init__(self, data_loader, use_wandb):
20
-
21
- self.use_wandb = use_wandb
22
- self.data_loader = data_loader
23
- self.w_pivots = {}
24
- self.image_counter = 0
25
-
26
- if hyperparameters.first_inv_type == 'w+':
27
- self.initilize_e4e()
28
-
29
- self.e4e_image_transform = transforms.Compose([
30
- transforms.ToPILImage(),
31
- transforms.Resize((256, 256)),
32
- transforms.ToTensor(),
33
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
34
-
35
- # Initialize loss
36
- self.lpips_loss = LPIPS(net=hyperparameters.lpips_type).to(
37
- global_config.device).eval()
38
-
39
- self.restart_training()
40
-
41
- # Initialize checkpoint dir
42
- self.checkpoint_dir = paths_config.checkpoints_dir
43
- os.makedirs(self.checkpoint_dir, exist_ok=True)
44
-
45
- def restart_training(self):
46
-
47
- # Initialize networks
48
- self.G = load_old_G()
49
- toogle_grad(self.G, True)
50
-
51
- self.original_G = load_old_G()
52
-
53
- self.space_regulizer = Space_Regulizer(
54
- self.original_G, self.lpips_loss)
55
- self.optimizer = self.configure_optimizers()
56
-
57
- def get_inversion(self, w_path_dir, image_name, image):
58
- embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
59
- os.makedirs(embedding_dir, exist_ok=True)
60
-
61
- w_pivot = None
62
- if hyperparameters.use_last_w_pivots:
63
- w_pivot = self.load_inversions(w_path_dir, image_name)
64
-
65
- if not hyperparameters.use_last_w_pivots or w_pivot is None:
66
- w_pivot = self.calc_inversions(image, image_name)
67
- torch.save(w_pivot, f'{embedding_dir}/0.pt')
68
-
69
- w_pivot = w_pivot.to(global_config.device)
70
- return w_pivot
71
-
72
- def load_inversions(self, w_path_dir, image_name):
73
- if image_name in self.w_pivots:
74
- return self.w_pivots[image_name]
75
-
76
- if hyperparameters.first_inv_type == 'w+':
77
- w_potential_path = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}/0.pt'
78
- else:
79
- w_potential_path = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}/0.pt'
80
- if not os.path.isfile(w_potential_path):
81
- return None
82
- w = torch.load(w_potential_path).to(global_config.device)
83
- self.w_pivots[image_name] = w
84
- return w
85
-
86
- def calc_inversions(self, image, image_name):
87
- if hyperparameters.first_inv_type == 'w+':
88
- w = self.get_e4e_inversion(image)
89
-
90
- else:
91
- id_image = torch.squeeze(
92
- (image.to(global_config.device) + 1) / 2) * 255
93
- w = w_projector.project(self.G, id_image, device=torch.device(global_config.device), w_avg_samples=600,
94
- num_steps=hyperparameters.first_inv_steps, w_name=image_name,
95
- use_wandb=self.use_wandb)
96
-
97
- return w
98
-
99
- @abc.abstractmethod
100
- def train(self):
101
- pass
102
-
103
- def configure_optimizers(self):
104
- optimizer = torch.optim.Adam(
105
- self.G.parameters(), lr=hyperparameters.pti_learning_rate)
106
-
107
- return optimizer
108
-
109
- def calc_loss(self, generated_images, real_images, log_name, new_G, use_ball_holder, w_batch):
110
- loss = 0.0
111
-
112
- if hyperparameters.pt_l2_lambda > 0:
113
- l2_loss_val = l2_loss.l2_loss(generated_images, real_images)
114
- if self.use_wandb:
115
- wandb.log({f'MSE_loss_val_{log_name}': l2_loss_val.detach(
116
- ).cpu()}, step=global_config.training_step)
117
- loss += l2_loss_val * hyperparameters.pt_l2_lambda
118
- if hyperparameters.pt_lpips_lambda > 0:
119
- loss_lpips = self.lpips_loss(generated_images, real_images)
120
- loss_lpips = torch.squeeze(loss_lpips)
121
- if self.use_wandb:
122
- wandb.log({f'LPIPS_loss_val_{log_name}': loss_lpips.detach(
123
- ).cpu()}, step=global_config.training_step)
124
- loss += loss_lpips * hyperparameters.pt_lpips_lambda
125
-
126
- if use_ball_holder and hyperparameters.use_locality_regularization:
127
- ball_holder_loss_val = self.space_regulizer.space_regulizer_loss(
128
- new_G, w_batch, use_wandb=self.use_wandb)
129
- loss += ball_holder_loss_val
130
-
131
- return loss, l2_loss_val, loss_lpips
132
-
133
- def forward(self, w):
134
- generated_images = self.G.synthesis(
135
- w, noise_mode='const', force_fp32=True)
136
-
137
- return generated_images
138
-
139
- def initilize_e4e(self):
140
- ckpt = torch.load(paths_config.e4e, map_location='cpu')
141
- opts = ckpt['opts']
142
- opts['batch_size'] = hyperparameters.train_batch_size
143
- opts['checkpoint_path'] = paths_config.e4e
144
- opts = Namespace(**opts)
145
- self.e4e_inversion_net = pSp(opts)
146
- self.e4e_inversion_net.eval()
147
- self.e4e_inversion_net = self.e4e_inversion_net.to(
148
- global_config.device)
149
- toogle_grad(self.e4e_inversion_net, False)
150
-
151
- def get_e4e_inversion(self, image):
152
- image = (image + 1) / 2
153
- new_image = self.e4e_image_transform(image[0]).to(global_config.device)
154
- _, w = self.e4e_inversion_net(new_image.unsqueeze(0), randomize_noise=False, return_latents=True, resize=False,
155
- input_code=False)
156
- if self.use_wandb:
157
- log_image_from_w(w, self.G, 'First e4e inversion')
158
- return w
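The `calc_loss` method above sums a pixel-space L2 term, an LPIPS perceptual term, and an optional locality ("ball holder") regularizer, each gated by its `hyperparameters` lambda. A minimal standalone sketch of the same weighting, with placeholder lambda values and an `lpips_fn` callable standing in for `self.lpips_loss`:

```python
import torch
import torch.nn.functional as F

def pti_style_loss(generated, real, lpips_fn, l2_lambda=1.0, lpips_lambda=1.0):
    # Pixel-space term, mirroring l2_loss.l2_loss(generated_images, real_images).
    loss = l2_lambda * F.mse_loss(generated, real)
    # Perceptual term, mirroring self.lpips_loss(generated_images, real_images).
    loss = loss + lpips_lambda * lpips_fn(generated, real).squeeze().mean()
    return loss
```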
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/models_utils.py DELETED
@@ -1,28 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
-
4
- import pickle
5
- import functools
6
- import torch
7
- from pti.pti_configs import paths_config, global_config
8
-
9
-
10
- def toogle_grad(model, flag=True):
11
- for p in model.parameters():
12
- p.requires_grad = flag
13
-
14
-
15
- def load_tuned_G(run_id, type):
16
- new_G_path = f'{paths_config.checkpoints_dir}/model_{run_id}_{type}.pt'
17
- with open(new_G_path, 'rb') as f:
18
- new_G = torch.load(f).to(global_config.device).eval()
19
- new_G = new_G.float()
20
- toogle_grad(new_G, False)
21
- return new_G
22
-
23
-
24
- def load_old_G():
25
- with open(paths_config.stylegan2_ada_shhq, 'rb') as f:
26
- old_G = pickle.load(f)['G_ema'].to(global_config.device).eval()
27
- old_G = old_G.float()
28
- return old_G
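A hedged usage sketch for the two loaders above, assuming the paths in `pti.pti_configs.paths_config` exist and the pickled generator follows the usual StyleGAN2-ADA interface (`z_dim`, `mapping`, `synthesis`); the run id and type are hypothetical:

```python
import torch
from pti.pti_configs import global_config

old_G = load_old_G()                                      # frozen G_ema from the SHHQ pickle
tuned_G = load_tuned_G(run_id="example_run", type="pti")  # fine-tuned checkpoint saved by the coach

z = torch.randn(1, old_G.z_dim, device=global_config.device)
w = old_G.mapping(z, None)
img = old_G.synthesis(w, noise_mode="const")
```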
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/model_editing.md DELETED
@@ -1,35 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Text-to-image model editing
14
-
15
- [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://huggingface.co/papers/2303.08084) is by Hadas Orgad, Bahjat Kawar, and Yonatan Belinkov. The pipeline edits a diffusion model's weights so that its implicit assumptions about a given concept are changed, and the change is expected to carry over to all prompts related to the edited concept.
16
-
17
- The abstract from the paper is:
18
-
19
- *Text-to-image diffusion models often make implicit assumptions about the world when generating images. While some assumptions are useful (e.g., the sky is blue), they can also be outdated, incorrect, or reflective of social biases present in the training data. Thus, there is a need to control these assumptions without requiring explicit user input or costly re-training. In this work, we aim to edit a given implicit assumption in a pre-trained diffusion model. Our Text-to-Image Model Editing method, TIME for short, receives a pair of inputs: a "source" under-specified prompt for which the model makes an implicit assumption (e.g., "a pack of roses"), and a "destination" prompt that describes the same setting, but with a specified desired attribute (e.g., "a pack of blue roses"). TIME then updates the model's cross-attention layers, as these layers assign visual meaning to textual tokens. We edit the projection matrices in these layers such that the source prompt is projected close to the destination prompt. Our method is highly efficient, as it modifies a mere 2.2% of the model's parameters in under one second. To evaluate model editing approaches, we introduce TIMED (TIME Dataset), containing 147 source and destination prompt pairs from various domains. Our experiments (using Stable Diffusion) show that TIME is successful in model editing, generalizes well for related prompts unseen during editing, and imposes minimal effect on unrelated generations.*
20
-
21
- You can find additional information about model editing on the [project page](https://time-diffusion.github.io/), [original codebase](https://github.com/bahjat-kawar/time-diffusion), and try it out in a [demo](https://huggingface.co/spaces/bahjat-kawar/time-diffusion).
22
-
23
- <Tip>
24
-
25
- Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
26
-
27
- </Tip>
28
-
29
- ## StableDiffusionModelEditingPipeline
30
- [[autodoc]] StableDiffusionModelEditingPipeline
31
- - __call__
32
- - all
33
-
34
- ## StableDiffusionPipelineOutput
35
- [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
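A short usage sketch of the pipeline documented above; the `edit_model` call and argument names are taken from recent `diffusers` releases and may differ between versions, so treat this as an approximation:

```python
import torch
from diffusers import StableDiffusionModelEditingPipeline

pipe = StableDiffusionModelEditingPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# Change the implicit assumption so that "a pack of roses" means blue roses.
pipe.edit_model("A pack of roses", "A pack of blue roses")

image = pipe("A pack of roses").images[0]
image.save("edited_roses.png")
```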
 
spaces/Andy1621/uniformer_image_detection/configs/detr/detr_r50_8x2_150e_coco.py DELETED
@@ -1,131 +0,0 @@
1
- _base_ = [
2
- '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
3
- ]
4
- model = dict(
5
- type='DETR',
6
- pretrained='torchvision://resnet50',
7
- backbone=dict(
8
- type='ResNet',
9
- depth=50,
10
- num_stages=4,
11
- out_indices=(3, ),
12
- frozen_stages=1,
13
- norm_cfg=dict(type='BN', requires_grad=False),
14
- norm_eval=True,
15
- style='pytorch'),
16
- bbox_head=dict(
17
- type='TransformerHead',
18
- num_classes=80,
19
- in_channels=2048,
20
- num_fcs=2,
21
- transformer=dict(
22
- type='Transformer',
23
- embed_dims=256,
24
- num_heads=8,
25
- num_encoder_layers=6,
26
- num_decoder_layers=6,
27
- feedforward_channels=2048,
28
- dropout=0.1,
29
- act_cfg=dict(type='ReLU', inplace=True),
30
- norm_cfg=dict(type='LN'),
31
- num_fcs=2,
32
- pre_norm=False,
33
- return_intermediate_dec=True),
34
- positional_encoding=dict(
35
- type='SinePositionalEncoding', num_feats=128, normalize=True),
36
- loss_cls=dict(
37
- type='CrossEntropyLoss',
38
- bg_cls_weight=0.1,
39
- use_sigmoid=False,
40
- loss_weight=1.0,
41
- class_weight=1.0),
42
- loss_bbox=dict(type='L1Loss', loss_weight=5.0),
43
- loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
44
- # training and testing settings
45
- train_cfg=dict(
46
- assigner=dict(
47
- type='HungarianAssigner',
48
- cls_cost=dict(type='ClassificationCost', weight=1.),
49
- reg_cost=dict(type='BBoxL1Cost', weight=5.0),
50
- iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
51
- test_cfg=dict(max_per_img=100))
52
- img_norm_cfg = dict(
53
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
54
- # train_pipeline, NOTE the img_scale and the Pad's size_divisor are different
55
- # from the default setting in mmdet.
56
- train_pipeline = [
57
- dict(type='LoadImageFromFile'),
58
- dict(type='LoadAnnotations', with_bbox=True),
59
- dict(type='RandomFlip', flip_ratio=0.5),
60
- dict(
61
- type='AutoAugment',
62
- policies=[[
63
- dict(
64
- type='Resize',
65
- img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
66
- (608, 1333), (640, 1333), (672, 1333), (704, 1333),
67
- (736, 1333), (768, 1333), (800, 1333)],
68
- multiscale_mode='value',
69
- keep_ratio=True)
70
- ],
71
- [
72
- dict(
73
- type='Resize',
74
- img_scale=[(400, 1333), (500, 1333), (600, 1333)],
75
- multiscale_mode='value',
76
- keep_ratio=True),
77
- dict(
78
- type='RandomCrop',
79
- crop_type='absolute_range',
80
- crop_size=(384, 600),
81
- allow_negative_crop=True),
82
- dict(
83
- type='Resize',
84
- img_scale=[(480, 1333), (512, 1333), (544, 1333),
85
- (576, 1333), (608, 1333), (640, 1333),
86
- (672, 1333), (704, 1333), (736, 1333),
87
- (768, 1333), (800, 1333)],
88
- multiscale_mode='value',
89
- override=True,
90
- keep_ratio=True)
91
- ]]),
92
- dict(type='Normalize', **img_norm_cfg),
93
- dict(type='Pad', size_divisor=1),
94
- dict(type='DefaultFormatBundle'),
95
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
96
- ]
97
- # test_pipeline, NOTE the Pad's size_divisor is different from the default
98
- # setting (size_divisor=32). While there is little effect on the performance
99
- # whether we use the default setting or use size_divisor=1.
100
- test_pipeline = [
101
- dict(type='LoadImageFromFile'),
102
- dict(
103
- type='MultiScaleFlipAug',
104
- img_scale=(1333, 800),
105
- flip=False,
106
- transforms=[
107
- dict(type='Resize', keep_ratio=True),
108
- dict(type='RandomFlip'),
109
- dict(type='Normalize', **img_norm_cfg),
110
- dict(type='Pad', size_divisor=1),
111
- dict(type='ImageToTensor', keys=['img']),
112
- dict(type='Collect', keys=['img'])
113
- ])
114
- ]
115
- data = dict(
116
- samples_per_gpu=2,
117
- workers_per_gpu=2,
118
- train=dict(pipeline=train_pipeline),
119
- val=dict(pipeline=test_pipeline),
120
- test=dict(pipeline=test_pipeline))
121
- # optimizer
122
- optimizer = dict(
123
- type='AdamW',
124
- lr=0.0001,
125
- weight_decay=0.0001,
126
- paramwise_cfg=dict(
127
- custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
128
- optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
129
- # learning policy
130
- lr_config = dict(policy='step', step=[100])
131
- runner = dict(type='EpochBasedRunner', max_epochs=150)
 
spaces/Andy1621/uniformer_image_detection/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py DELETED
@@ -1,16 +0,0 @@
1
- _base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://regnetx_800mf',
4
- backbone=dict(
5
- type='RegNet',
6
- arch='regnetx_800mf',
7
- out_indices=(0, 1, 2, 3),
8
- frozen_stages=1,
9
- norm_cfg=dict(type='BN', requires_grad=True),
10
- norm_eval=True,
11
- style='pytorch'),
12
- neck=dict(
13
- type='FPN',
14
- in_channels=[64, 128, 288, 672],
15
- out_channels=256,
16
- num_outs=5))
 
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/fast_scnn.py DELETED
@@ -1,57 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
3
- model = dict(
4
- type='EncoderDecoder',
5
- backbone=dict(
6
- type='FastSCNN',
7
- downsample_dw_channels=(32, 48),
8
- global_in_channels=64,
9
- global_block_channels=(64, 96, 128),
10
- global_block_strides=(2, 2, 1),
11
- global_out_channels=128,
12
- higher_in_channels=64,
13
- lower_in_channels=128,
14
- fusion_out_channels=128,
15
- out_indices=(0, 1, 2),
16
- norm_cfg=norm_cfg,
17
- align_corners=False),
18
- decode_head=dict(
19
- type='DepthwiseSeparableFCNHead',
20
- in_channels=128,
21
- channels=128,
22
- concat_input=False,
23
- num_classes=19,
24
- in_index=-1,
25
- norm_cfg=norm_cfg,
26
- align_corners=False,
27
- loss_decode=dict(
28
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
29
- auxiliary_head=[
30
- dict(
31
- type='FCNHead',
32
- in_channels=128,
33
- channels=32,
34
- num_convs=1,
35
- num_classes=19,
36
- in_index=-2,
37
- norm_cfg=norm_cfg,
38
- concat_input=False,
39
- align_corners=False,
40
- loss_decode=dict(
41
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
42
- dict(
43
- type='FCNHead',
44
- in_channels=64,
45
- channels=32,
46
- num_convs=1,
47
- num_classes=19,
48
- in_index=-3,
49
- norm_cfg=norm_cfg,
50
- concat_input=False,
51
- align_corners=False,
52
- loss_decode=dict(
53
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
54
- ],
55
- # model training and testing settings
56
- train_cfg=dict(),
57
- test_cfg=dict(mode='whole'))
 
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/utils.py DELETED
@@ -1,189 +0,0 @@
1
- """Utils for monoDepth."""
2
- import sys
3
- import re
4
- import numpy as np
5
- import cv2
6
- import torch
7
-
8
-
9
- def read_pfm(path):
10
- """Read pfm file.
11
-
12
- Args:
13
- path (str): path to file
14
-
15
- Returns:
16
- tuple: (data, scale)
17
- """
18
- with open(path, "rb") as file:
19
-
20
- color = None
21
- width = None
22
- height = None
23
- scale = None
24
- endian = None
25
-
26
- header = file.readline().rstrip()
27
- if header.decode("ascii") == "PF":
28
- color = True
29
- elif header.decode("ascii") == "Pf":
30
- color = False
31
- else:
32
- raise Exception("Not a PFM file: " + path)
33
-
34
- dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
35
- if dim_match:
36
- width, height = list(map(int, dim_match.groups()))
37
- else:
38
- raise Exception("Malformed PFM header.")
39
-
40
- scale = float(file.readline().decode("ascii").rstrip())
41
- if scale < 0:
42
- # little-endian
43
- endian = "<"
44
- scale = -scale
45
- else:
46
- # big-endian
47
- endian = ">"
48
-
49
- data = np.fromfile(file, endian + "f")
50
- shape = (height, width, 3) if color else (height, width)
51
-
52
- data = np.reshape(data, shape)
53
- data = np.flipud(data)
54
-
55
- return data, scale
56
-
57
-
58
- def write_pfm(path, image, scale=1):
59
- """Write pfm file.
60
-
61
- Args:
62
- path (str): path to file
63
- image (array): data
64
- scale (int, optional): Scale. Defaults to 1.
65
- """
66
-
67
- with open(path, "wb") as file:
68
- color = None
69
-
70
- if image.dtype.name != "float32":
71
- raise Exception("Image dtype must be float32.")
72
-
73
- image = np.flipud(image)
74
-
75
- if len(image.shape) == 3 and image.shape[2] == 3: # color image
76
- color = True
77
- elif (
78
- len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
79
- ): # greyscale
80
- color = False
81
- else:
82
- raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
83
-
84
- file.write(("PF\n" if color else "Pf\n").encode())  # encode both branches before writing to the binary file
85
- file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
86
-
87
- endian = image.dtype.byteorder
88
-
89
- if endian == "<" or endian == "=" and sys.byteorder == "little":
90
- scale = -scale
91
-
92
- file.write("%f\n".encode() % scale)
93
-
94
- image.tofile(file)
95
-
96
-
97
- def read_image(path):
98
- """Read image and output RGB image (0-1).
99
-
100
- Args:
101
- path (str): path to file
102
-
103
- Returns:
104
- array: RGB image (0-1)
105
- """
106
- img = cv2.imread(path)
107
-
108
- if img.ndim == 2:
109
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
110
-
111
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
112
-
113
- return img
114
-
115
-
116
- def resize_image(img):
117
- """Resize image and make it fit for network.
118
-
119
- Args:
120
- img (array): image
121
-
122
- Returns:
123
- tensor: data ready for network
124
- """
125
- height_orig = img.shape[0]
126
- width_orig = img.shape[1]
127
-
128
- if width_orig > height_orig:
129
- scale = width_orig / 384
130
- else:
131
- scale = height_orig / 384
132
-
133
- height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
134
- width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
135
-
136
- img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
137
-
138
- img_resized = (
139
- torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
140
- )
141
- img_resized = img_resized.unsqueeze(0)
142
-
143
- return img_resized
144
-
145
-
146
- def resize_depth(depth, width, height):
147
- """Resize depth map and bring to CPU (numpy).
148
-
149
- Args:
150
- depth (tensor): depth
151
- width (int): image width
152
- height (int): image height
153
-
154
- Returns:
155
- array: processed depth
156
- """
157
- depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
158
-
159
- depth_resized = cv2.resize(
160
- depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
161
- )
162
-
163
- return depth_resized
164
-
165
- def write_depth(path, depth, bits=1):
166
- """Write depth map to pfm and png file.
167
-
168
- Args:
169
- path (str): filepath without extension
170
- depth (array): depth
171
- """
172
- write_pfm(path + ".pfm", depth.astype(np.float32))
173
-
174
- depth_min = depth.min()
175
- depth_max = depth.max()
176
-
177
- max_val = (2**(8*bits))-1
178
-
179
- if depth_max - depth_min > np.finfo("float").eps:
180
- out = max_val * (depth - depth_min) / (depth_max - depth_min)
181
- else:
182
- out = np.zeros(depth.shape, dtype=depth.dtype)
183
-
184
- if bits == 1:
185
- cv2.imwrite(path + ".png", out.astype("uint8"))
186
- elif bits == 2:
187
- cv2.imwrite(path + ".png", out.astype("uint16"))
188
-
189
- return
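The helpers above form a small I/O chain for monocular depth estimation. A hedged end-to-end sketch, where `model` is a placeholder for any network that maps the resized tensor to a 1x1xH'xW' depth prediction:

```python
import torch

img = read_image("input.jpg")                    # HxWx3 RGB array in [0, 1]
sample = resize_image(img)                       # 1x3xH'xW' tensor, sides rounded to multiples of 32
with torch.no_grad():
    prediction = model(sample)                   # placeholder network, assumed to return 1x1xH'xW'
depth = resize_depth(prediction, img.shape[1], img.shape[0])   # back to the original resolution
write_depth("output", depth, bits=2)             # writes output.pfm and a 16-bit output.png
```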
 
spaces/AntNikYab/NaturalLanguageProcessing/function/lstm_preprocessing.py DELETED
@@ -1,162 +0,0 @@
1
- import re
2
- import string
3
- import numpy as np
4
- import torch
5
- import torch.nn as nn
6
- from transformers import BertTokenizer, BertModel
7
- from sklearn.linear_model import LogisticRegression
8
- from nltk.stem import SnowballStemmer
9
-
10
- from nltk.corpus import stopwords
11
- import nltk
12
- nltk.download('stopwords')
13
- stop_words = set(stopwords.words('russian'))
14
- stemmer = SnowballStemmer('russian')
15
- sw = stopwords.words('russian')
16
-
17
- tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
18
-
19
- class LSTMClassifier(nn.Module):
20
- def __init__(self, embedding_dim: int, hidden_size:int, embedding: torch.nn.modules.sparse.Embedding) -> None:
21
- super().__init__()
22
-
23
- self.embedding_dim = embedding_dim
24
- self.hidden_size = hidden_size
25
- self.embedding = embedding
26
-
27
- self.lstm = nn.LSTM(
28
- input_size=self.embedding_dim,
29
- hidden_size=self.hidden_size,
30
- batch_first=True
31
- )
32
- self.clf = nn.Linear(self.hidden_size, 1)
33
-
34
- def forward(self, x):
35
- embeddings = self.embedding(x)
36
- _, (h_n, _) = self.lstm(embeddings)
37
- out = self.clf(h_n.squeeze())
38
- return out
39
-
40
-
41
- def data_preprocessing(text: str) -> str:
42
- """preprocessing string: lowercase, removing html-tags, punctuation,
43
- stopwords, digits
44
-
45
- Args:
46
- text (str): input string for preprocessing
47
-
48
- Returns:
49
- str: preprocessed string
50
- """
51
-
52
- text = text.lower()
53
- text = re.sub('<.*?>', '', text) # html tags
54
- text = ''.join([c for c in text if c not in string.punctuation])# Remove punctuation
55
- text = ' '.join([word for word in text.split() if word not in stop_words])
56
- text = [word for word in text.split() if not word.isdigit()]
57
- text = ' '.join(text)
58
- return text
59
-
60
- def get_words_by_freq(sorted_words: list, n: int = 10) -> list:
61
- return list(filter(lambda x: x[1] > n, sorted_words))
62
-
63
- def padding(review_int: list, seq_len: int) -> np.array: # type: ignore
64
- """Make left-sided padding for input list of tokens
65
-
66
- Args:
67
- review_int (list): input list of tokens
68
- seq_len (int): max length of the sequence; if len(review_int[i]) > seq_len it is trimmed, otherwise it is padded with zeros
69
-
70
- Returns:
71
- np.array: padded sequences
72
- """
73
- features = np.zeros((len(review_int), seq_len), dtype = int)
74
- for i, review in enumerate(review_int):
75
- if len(review) <= seq_len:
76
- zeros = list(np.zeros(seq_len - len(review)))
77
- new = zeros + review
78
- else:
79
- new = review[: seq_len]
80
- features[i, :] = np.array(new)
81
-
82
- return features
83
-
84
- def preprocess_single_string(
85
- input_string: str,
86
- seq_len: int,
87
- vocab_to_int: dict,
88
- ) -> torch.tensor:
89
- """Function for all preprocessing steps on a single string
90
-
91
- Args:
92
- input_string (str): input single string for preprocessing
93
- seq_len (int): max length of the sequence; if len(review_int[i]) > seq_len it is trimmed, otherwise it is padded with zeros
94
- vocab_to_int (dict, optional): word corpus {'word' : int index}. Defaults to vocab_to_int.
95
-
96
- Returns:
97
- list: preprocessed string
98
- """
99
-
100
- preprocessed_string = data_preprocessing(input_string)
101
- result_list = []
102
- for word in preprocessed_string.split():
103
- try:
104
- result_list.append(vocab_to_int[word])
105
- except KeyError as e:
106
- print(f'{e}: not in dictionary!')
107
- result_padded = padding([result_list], seq_len)[0]
108
-
109
- return torch.tensor(result_padded)
110
-
111
- def predict_sentence(text: str, model: nn.Module, seq_len: int, vocab_to_int: dict) -> str:
112
- p_str = preprocess_single_string(text, seq_len, vocab_to_int).unsqueeze(0)
113
- model.eval()
114
- pred = model(p_str)
115
- output = pred.sigmoid().round().item()
116
- if output == 0:
117
- return 'Негативный отзыв'
118
- else:
119
- return 'Позитивный отзыв'
120
-
121
- def predict_single_string(text: str,
122
- model: BertModel,
123
- loaded_model: LogisticRegression
124
- ) -> str:
125
-
126
- with torch.no_grad():
127
- encoded_input = tokenizer(text, return_tensors='pt')
128
- output = model(**encoded_input)
129
- vector = output[0][:,0,:]
130
- pred0 = loaded_model.predict_proba(vector)[0][0]
131
- pred1 = loaded_model.predict_proba(vector)[0][1]
132
- if pred0 > pred1:
133
- return 'Негативный отзыв'
134
- else:
135
- return 'Позитивный отзыв'
136
-
137
- def clean(text):
138
-
139
- text = text.lower()
140
- text = re.sub(r'\s+', ' ', text) # collapse runs of whitespace into a single space
141
- text = re.sub(r'\d+', ' ', text) # remove digits
142
- text = text.translate(str.maketrans('', '', string.punctuation)) # remove punctuation
143
- text = re.sub(r'\n+', ' ', text) # remove newline characters
144
-
145
- return text
146
-
147
- def tokin(text):
148
- text = clean(text)
149
- text = ' '.join([stemmer.stem(word) for word in text.split()])
150
- text = ' '.join([word for word in text.split() if word not in sw])
151
- return text
152
-
153
-
154
- def predict_ml_class(text, loaded_vectorizer, loaded_classifier):
155
-
156
- t = tokin(text).split(' ')
157
- new_text_bow = loaded_vectorizer.transform(t)
158
- predicted_label = loaded_classifier.predict(new_text_bow)
159
- if predicted_label == 0:
160
- return 'Негативный отзыв'
161
- else:
162
- return 'Позитивный отзыв'
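A hedged sketch of how the pieces above fit together for the LSTM path; the vocabulary, sequence length, and embedding sizes are placeholders rather than the values used in training:

```python
import torch.nn as nn

vocab_to_int = {'отличный': 1, 'фильм': 2}       # placeholder vocabulary
SEQ_LEN = 32

embedding = nn.Embedding(len(vocab_to_int) + 1, embedding_dim=16, padding_idx=0)
model = LSTMClassifier(embedding_dim=16, hidden_size=32, embedding=embedding)

print(predict_sentence('Отличный фильм!', model, SEQ_LEN, vocab_to_int))
```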
 
spaces/Ariharasudhan/YoloV5/utils/loggers/comet/__init__.py DELETED
@@ -1,508 +0,0 @@
1
- import glob
2
- import json
3
- import logging
4
- import os
5
- import sys
6
- from pathlib import Path
7
-
8
- logger = logging.getLogger(__name__)
9
-
10
- FILE = Path(__file__).resolve()
11
- ROOT = FILE.parents[3] # YOLOv5 root directory
12
- if str(ROOT) not in sys.path:
13
- sys.path.append(str(ROOT)) # add ROOT to PATH
14
-
15
- try:
16
- import comet_ml
17
-
18
- # Project Configuration
19
- config = comet_ml.config.get_config()
20
- COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
21
- except (ModuleNotFoundError, ImportError):
22
- comet_ml = None
23
- COMET_PROJECT_NAME = None
24
-
25
- import PIL
26
- import torch
27
- import torchvision.transforms as T
28
- import yaml
29
-
30
- from utils.dataloaders import img2label_paths
31
- from utils.general import check_dataset, scale_boxes, xywh2xyxy
32
- from utils.metrics import box_iou
33
-
34
- COMET_PREFIX = "comet://"
35
-
36
- COMET_MODE = os.getenv("COMET_MODE", "online")
37
-
38
- # Model Saving Settings
39
- COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
40
-
41
- # Dataset Artifact Settings
42
- COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true"
43
-
44
- # Evaluation Settings
45
- COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true"
46
- COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
47
- COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
48
-
49
- # Confusion Matrix Settings
50
- CONF_THRES = float(os.getenv("CONF_THRES", 0.001))
51
- IOU_THRES = float(os.getenv("IOU_THRES", 0.6))
52
-
53
- # Batch Logging Settings
54
- COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true"
55
- COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1)
56
- COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1)
57
- COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true"
58
-
59
- RANK = int(os.getenv("RANK", -1))
60
-
61
- to_pil = T.ToPILImage()
62
-
63
-
64
- class CometLogger:
65
- """Log metrics, parameters, source code, models and much more
66
- with Comet
67
- """
68
-
69
- def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None:
70
- self.job_type = job_type
71
- self.opt = opt
72
- self.hyp = hyp
73
-
74
- # Comet Flags
75
- self.comet_mode = COMET_MODE
76
-
77
- self.save_model = opt.save_period > -1
78
- self.model_name = COMET_MODEL_NAME
79
-
80
- # Batch Logging Settings
81
- self.log_batch_metrics = COMET_LOG_BATCH_METRICS
82
- self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
83
-
84
- # Dataset Artifact Settings
85
- self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET
86
- self.resume = self.opt.resume
87
-
88
- # Default parameters to pass to Experiment objects
89
- self.default_experiment_kwargs = {
90
- "log_code": False,
91
- "log_env_gpu": True,
92
- "log_env_cpu": True,
93
- "project_name": COMET_PROJECT_NAME,}
94
- self.default_experiment_kwargs.update(experiment_kwargs)
95
- self.experiment = self._get_experiment(self.comet_mode, run_id)
96
-
97
- self.data_dict = self.check_dataset(self.opt.data)
98
- self.class_names = self.data_dict["names"]
99
- self.num_classes = self.data_dict["nc"]
100
-
101
- self.logged_images_count = 0
102
- self.max_images = COMET_MAX_IMAGE_UPLOADS
103
-
104
- if run_id is None:
105
- self.experiment.log_other("Created from", "YOLOv5")
106
- if not isinstance(self.experiment, comet_ml.OfflineExperiment):
107
- workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:]
108
- self.experiment.log_other(
109
- "Run Path",
110
- f"{workspace}/{project_name}/{experiment_id}",
111
- )
112
- self.log_parameters(vars(opt))
113
- self.log_parameters(self.opt.hyp)
114
- self.log_asset_data(
115
- self.opt.hyp,
116
- name="hyperparameters.json",
117
- metadata={"type": "hyp-config-file"},
118
- )
119
- self.log_asset(
120
- f"{self.opt.save_dir}/opt.yaml",
121
- metadata={"type": "opt-config-file"},
122
- )
123
-
124
- self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX
125
-
126
- if hasattr(self.opt, "conf_thres"):
127
- self.conf_thres = self.opt.conf_thres
128
- else:
129
- self.conf_thres = CONF_THRES
130
- if hasattr(self.opt, "iou_thres"):
131
- self.iou_thres = self.opt.iou_thres
132
- else:
133
- self.iou_thres = IOU_THRES
134
-
135
- self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres})
136
-
137
- self.comet_log_predictions = COMET_LOG_PREDICTIONS
138
- if self.opt.bbox_interval == -1:
139
- self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
140
- else:
141
- self.comet_log_prediction_interval = self.opt.bbox_interval
142
-
143
- if self.comet_log_predictions:
144
- self.metadata_dict = {}
145
- self.logged_image_names = []
146
-
147
- self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS
148
-
149
- self.experiment.log_others({
150
- "comet_mode": COMET_MODE,
151
- "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS,
152
- "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS,
153
- "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS,
154
- "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX,
155
- "comet_model_name": COMET_MODEL_NAME,})
156
-
157
- # Check if running the Experiment with the Comet Optimizer
158
- if hasattr(self.opt, "comet_optimizer_id"):
159
- self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id)
160
- self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective)
161
- self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric)
162
- self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp))
163
-
164
- def _get_experiment(self, mode, experiment_id=None):
165
- if mode == "offline":
166
- if experiment_id is not None:
167
- return comet_ml.ExistingOfflineExperiment(
168
- previous_experiment=experiment_id,
169
- **self.default_experiment_kwargs,
170
- )
171
-
172
- return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,)
173
-
174
- else:
175
- try:
176
- if experiment_id is not None:
177
- return comet_ml.ExistingExperiment(
178
- previous_experiment=experiment_id,
179
- **self.default_experiment_kwargs,
180
- )
181
-
182
- return comet_ml.Experiment(**self.default_experiment_kwargs)
183
-
184
- except ValueError:
185
- logger.warning("COMET WARNING: "
186
- "Comet credentials have not been set. "
187
- "Comet will default to offline logging. "
188
- "Please set your credentials to enable online logging.")
189
- return self._get_experiment("offline", experiment_id)
190
-
191
- return
192
-
193
- def log_metrics(self, log_dict, **kwargs):
194
- self.experiment.log_metrics(log_dict, **kwargs)
195
-
196
- def log_parameters(self, log_dict, **kwargs):
197
- self.experiment.log_parameters(log_dict, **kwargs)
198
-
199
- def log_asset(self, asset_path, **kwargs):
200
- self.experiment.log_asset(asset_path, **kwargs)
201
-
202
- def log_asset_data(self, asset, **kwargs):
203
- self.experiment.log_asset_data(asset, **kwargs)
204
-
205
- def log_image(self, img, **kwargs):
206
- self.experiment.log_image(img, **kwargs)
207
-
208
- def log_model(self, path, opt, epoch, fitness_score, best_model=False):
209
- if not self.save_model:
210
- return
211
-
212
- model_metadata = {
213
- "fitness_score": fitness_score[-1],
214
- "epochs_trained": epoch + 1,
215
- "save_period": opt.save_period,
216
- "total_epochs": opt.epochs,}
217
-
218
- model_files = glob.glob(f"{path}/*.pt")
219
- for model_path in model_files:
220
- name = Path(model_path).name
221
-
222
- self.experiment.log_model(
223
- self.model_name,
224
- file_or_folder=model_path,
225
- file_name=name,
226
- metadata=model_metadata,
227
- overwrite=True,
228
- )
229
-
230
- def check_dataset(self, data_file):
231
- with open(data_file) as f:
232
- data_config = yaml.safe_load(f)
233
-
234
- if data_config['path'].startswith(COMET_PREFIX):
235
- path = data_config['path'].replace(COMET_PREFIX, "")
236
- data_dict = self.download_dataset_artifact(path)
237
-
238
- return data_dict
239
-
240
- self.log_asset(self.opt.data, metadata={"type": "data-config-file"})
241
-
242
- return check_dataset(data_file)
243
-
244
- def log_predictions(self, image, labelsn, path, shape, predn):
245
- if self.logged_images_count >= self.max_images:
246
- return
247
- detections = predn[predn[:, 4] > self.conf_thres]
248
- iou = box_iou(labelsn[:, 1:], detections[:, :4])
249
- mask, _ = torch.where(iou > self.iou_thres)
250
- if len(mask) == 0:
251
- return
252
-
253
- filtered_detections = detections[mask]
254
- filtered_labels = labelsn[mask]
255
-
256
- image_id = path.split("/")[-1].split(".")[0]
257
- image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}"
258
- if image_name not in self.logged_image_names:
259
- native_scale_image = PIL.Image.open(path)
260
- self.log_image(native_scale_image, name=image_name)
261
- self.logged_image_names.append(image_name)
262
-
263
- metadata = []
264
- for cls, *xyxy in filtered_labels.tolist():
265
- metadata.append({
266
- "label": f"{self.class_names[int(cls)]}-gt",
267
- "score": 100,
268
- "box": {
269
- "x": xyxy[0],
270
- "y": xyxy[1],
271
- "x2": xyxy[2],
272
- "y2": xyxy[3]},})
273
- for *xyxy, conf, cls in filtered_detections.tolist():
274
- metadata.append({
275
- "label": f"{self.class_names[int(cls)]}",
276
- "score": conf * 100,
277
- "box": {
278
- "x": xyxy[0],
279
- "y": xyxy[1],
280
- "x2": xyxy[2],
281
- "y2": xyxy[3]},})
282
-
283
- self.metadata_dict[image_name] = metadata
284
- self.logged_images_count += 1
285
-
286
- return
287
-
288
- def preprocess_prediction(self, image, labels, shape, pred):
289
- nl, _ = labels.shape[0], pred.shape[0]
290
-
291
- # Predictions
292
- if self.opt.single_cls:
293
- pred[:, 5] = 0
294
-
295
- predn = pred.clone()
296
- scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])
297
-
298
- labelsn = None
299
- if nl:
300
- tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
301
- scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels
302
- labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
303
- scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred
304
-
305
- return predn, labelsn
306
-
307
- def add_assets_to_artifact(self, artifact, path, asset_path, split):
308
- img_paths = sorted(glob.glob(f"{asset_path}/*"))
309
- label_paths = img2label_paths(img_paths)
310
-
311
- for image_file, label_file in zip(img_paths, label_paths):
312
- image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
313
-
314
- try:
315
- artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split})
316
- artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split})
317
- except ValueError as e:
318
- logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
319
- logger.error(f"COMET ERROR: {e}")
320
- continue
321
-
322
- return artifact
323
-
324
- def upload_dataset_artifact(self):
325
- dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset")
326
- path = str((ROOT / Path(self.data_dict["path"])).resolve())
327
-
328
- metadata = self.data_dict.copy()
329
- for key in ["train", "val", "test"]:
330
- split_path = metadata.get(key)
331
- if split_path is not None:
332
- metadata[key] = split_path.replace(path, "")
333
-
334
- artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata)
335
- for key in metadata.keys():
336
- if key in ["train", "val", "test"]:
337
- if isinstance(self.upload_dataset, str) and (key != self.upload_dataset):
338
- continue
339
-
340
- asset_path = self.data_dict.get(key)
341
- if asset_path is not None:
342
- artifact = self.add_assets_to_artifact(artifact, path, asset_path, key)
343
-
344
- self.experiment.log_artifact(artifact)
345
-
346
- return
347
-
348
- def download_dataset_artifact(self, artifact_path):
349
- logged_artifact = self.experiment.get_artifact(artifact_path)
350
- artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
351
- logged_artifact.download(artifact_save_dir)
352
-
353
- metadata = logged_artifact.metadata
354
- data_dict = metadata.copy()
355
- data_dict["path"] = artifact_save_dir
356
-
357
- metadata_names = metadata.get("names")
358
- if type(metadata_names) == dict:
359
- data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()}
360
- elif type(metadata_names) == list:
361
- data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
362
- else:
363
- raise ValueError("Invalid 'names' field in dataset yaml file. Please use a list or dictionary")
364
-
365
- data_dict = self.update_data_paths(data_dict)
366
- return data_dict
367
-
368
- def update_data_paths(self, data_dict):
369
- path = data_dict.get("path", "")
370
-
371
- for split in ["train", "val", "test"]:
372
- if data_dict.get(split):
373
- split_path = data_dict.get(split)
374
- data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [
375
- f"{path}/{x}" for x in split_path])
376
-
377
- return data_dict
378
-
379
- def on_pretrain_routine_end(self, paths):
380
- if self.opt.resume:
381
- return
382
-
383
- for path in paths:
384
- self.log_asset(str(path))
385
-
386
- if self.upload_dataset:
387
- if not self.resume:
388
- self.upload_dataset_artifact()
389
-
390
- return
391
-
392
- def on_train_start(self):
393
- self.log_parameters(self.hyp)
394
-
395
- def on_train_epoch_start(self):
396
- return
397
-
398
- def on_train_epoch_end(self, epoch):
399
- self.experiment.curr_epoch = epoch
400
-
401
- return
402
-
403
- def on_train_batch_start(self):
404
- return
405
-
406
- def on_train_batch_end(self, log_dict, step):
407
- self.experiment.curr_step = step
408
- if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0):
409
- self.log_metrics(log_dict, step=step)
410
-
411
- return
412
-
413
- def on_train_end(self, files, save_dir, last, best, epoch, results):
414
- if self.comet_log_predictions:
415
- curr_epoch = self.experiment.curr_epoch
416
- self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch)
417
-
418
- for f in files:
419
- self.log_asset(f, metadata={"epoch": epoch})
420
- self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch})
421
-
422
- if not self.opt.evolve:
423
- model_path = str(best if best.exists() else last)
424
- name = Path(model_path).name
425
- if self.save_model:
426
- self.experiment.log_model(
427
- self.model_name,
428
- file_or_folder=model_path,
429
- file_name=name,
430
- overwrite=True,
431
- )
432
-
433
- # Check if running Experiment with Comet Optimizer
434
- if hasattr(self.opt, 'comet_optimizer_id'):
435
- metric = results.get(self.opt.comet_optimizer_metric)
436
- self.experiment.log_other('optimizer_metric_value', metric)
437
-
438
- self.finish_run()
439
-
440
- def on_val_start(self):
441
- return
442
-
443
- def on_val_batch_start(self):
444
- return
445
-
446
- def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):
447
- if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)):
448
- return
449
-
450
- for si, pred in enumerate(outputs):
451
- if len(pred) == 0:
452
- continue
453
-
454
- image = images[si]
455
- labels = targets[targets[:, 0] == si, 1:]
456
- shape = shapes[si]
457
- path = paths[si]
458
- predn, labelsn = self.preprocess_prediction(image, labels, shape, pred)
459
- if labelsn is not None:
460
- self.log_predictions(image, labelsn, path, shape, predn)
461
-
462
- return
463
-
464
- def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
465
- if self.comet_log_per_class_metrics:
466
- if self.num_classes > 1:
467
- for i, c in enumerate(ap_class):
468
- class_name = self.class_names[c]
469
- self.experiment.log_metrics(
470
- {
471
- '[email protected]': ap50[i],
472
- '[email protected]:.95': ap[i],
473
- 'precision': p[i],
474
- 'recall': r[i],
475
- 'f1': f1[i],
476
- 'true_positives': tp[i],
477
- 'false_positives': fp[i],
478
- 'support': nt[c]},
479
- prefix=class_name)
480
-
481
- if self.comet_log_confusion_matrix:
482
- epoch = self.experiment.curr_epoch
483
- class_names = list(self.class_names.values())
484
- class_names.append("background")
485
- num_classes = len(class_names)
486
-
487
- self.experiment.log_confusion_matrix(
488
- matrix=confusion_matrix.matrix,
489
- max_categories=num_classes,
490
- labels=class_names,
491
- epoch=epoch,
492
- column_label='Actual Category',
493
- row_label='Predicted Category',
494
- file_name=f"confusion-matrix-epoch-{epoch}.json",
495
- )
496
-
497
- def on_fit_epoch_end(self, result, epoch):
498
- self.log_metrics(result, epoch=epoch)
499
-
500
- def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
501
- if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
502
- self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
503
-
504
- def on_params_update(self, params):
505
- self.log_parameters(params)
506
-
507
- def finish_run(self):
508
- self.experiment.end()
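The logger above is driven almost entirely by environment variables that are read once when the module is imported; a hedged sketch of configuring them before launching YOLOv5 training (the values shown are examples, not requirements):

```python
import os

# Must be set before utils/loggers/comet/__init__.py is imported.
os.environ["COMET_MODE"] = "online"
os.environ["COMET_PROJECT_NAME"] = "yolov5"
os.environ["COMET_LOG_CONFUSION_MATRIX"] = "true"
os.environ["COMET_LOG_PER_CLASS_METRICS"] = "false"
os.environ["COMET_MAX_IMAGE_UPLOADS"] = "50"

# A normal training run then picks these up, e.g.
#   python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt
```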
 
spaces/Arsenii2023/Demo1/demo1.py DELETED
@@ -1,73 +0,0 @@
1
- #Author: Arsenii Kostenko
2
- import numpy as np
3
- from sklearn.linear_model import LinearRegression, LogisticRegression
4
- import gradio as gr
5
-
6
- # Training data for the models
7
- x_train = np.array([[0, 0], [1, 1], [2, 2]])
8
- y_train = np.array([0, 1, 2])
9
-
10
- # Fit the models
11
- linear_model = LinearRegression()
12
- linear_model.fit(x_train, y_train)
13
-
14
- logistic_model = LogisticRegression()
15
- logistic_model.fit(x_train, y_train)
16
-
17
- # Prediction function for the linear regression model
18
- def predict_linear(x, y):
19
- # Parse the input strings into nested lists
20
- x_nested_list = [list(map(int, sublist.split(","))) for sublist in x.split(";")]
21
- y_nested_list = [list(map(int, sublist.split(","))) for sublist in y.split(";")]
22
-
23
- # Convert the nested lists into numpy arrays
24
- x_array = np.array(x_nested_list)
25
- y_array = np.array(y_nested_list)
26
-
27
- # Check that the inputs have matching shapes
28
- if x_array.shape != y_array.shape:
29
- return "Ошибка: x и y должны иметь одинаковую размерность"
30
-
31
- # Predict with the linear regression model
32
- predictions = linear_model.predict(x_array)
33
-
34
- return predictions
35
-
36
- # Prediction function for the logistic regression model
37
- def predict_logistic(x, y):
38
- # Parse the input strings into nested lists
39
- x_nested_list = [list(map(int, sublist.split(","))) for sublist in x.split(";")]
40
- y_nested_list = [list(map(int, sublist.split(","))) for sublist in y.split(";")]
41
-
42
- # Convert the nested lists into numpy arrays
43
- x_array = np.array(x_nested_list)
44
- y_array = np.array(y_nested_list)
45
-
46
- # Check that the inputs have matching shapes
47
- if x_array.shape != y_array.shape:
48
- return "Ошибка: x и y должны иметь одинаковую размерность"
49
-
50
- # Predict with the logistic regression model
51
- predictions = logistic_model.predict(x_array)
52
-
53
- return predictions
54
-
55
- # Gradio interface for the linear regression model
56
- interface_linear = gr.Interface(
57
- fn=predict_linear,
58
- inputs=["text", "text"],
59
- outputs="text",
60
- title="Линейная регрессия"
61
- )
62
-
63
- # Gradio interface for the logistic regression model
64
- interface_logistic = gr.Interface(
65
- fn=predict_logistic,
66
- inputs=["text", "text"],
67
- outputs="text",
68
- title="Логистическая регрессия"
69
- )
70
-
71
- # Launch both interfaces
72
- interface_linear.launch(debug=True)
73
- interface_logistic.launch(debug=True)
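For illustration, the two text boxes expect semicolon-separated rows of comma-separated integers; calling the prediction functions above directly with such strings looks like this (outputs are approximate, since the toy models are fit on three points):

```python
print(predict_linear("0,0;1,1;2,2", "0,0;1,1;2,2"))     # roughly [0. 1. 2.]
print(predict_logistic("0,0;2,2", "0,0;2,2"))           # e.g. [0 2]
```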
 
spaces/Awesimo/jojogan/e4e/criteria/w_norm.py DELETED
@@ -1,14 +0,0 @@
1
- import torch
2
- from torch import nn
3
-
4
-
5
- class WNormLoss(nn.Module):
6
-
7
- def __init__(self, start_from_latent_avg=True):
8
- super(WNormLoss, self).__init__()
9
- self.start_from_latent_avg = start_from_latent_avg
10
-
11
- def forward(self, latent, latent_avg=None):
12
- if self.start_from_latent_avg:
13
- latent = latent - latent_avg
14
- return torch.sum(latent.norm(2, dim=(1, 2))) / latent.shape[0]
 
spaces/Bart92/RVC_HF/go-applio.bat DELETED
@@ -1,92 +0,0 @@
1
- @echo off
2
- setlocal
3
- title Start Applio
4
-
5
- :::
6
- ::: _ _
7
- ::: /\ | (_)
8
- ::: / \ _ __ _ __ | |_ ___
9
- ::: / /\ \ | '_ \| '_ \| | |/ _ \
10
- ::: / ____ \| |_) | |_) | | | (_) |
11
- ::: /_/ \_\ .__/| .__/|_|_|\___/
12
- ::: | | | |
13
- ::: |_| |_|
14
- :::
15
- :::
16
-
17
- :menu
18
- for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A
19
-
20
- echo [1] Start Applio
21
- echo [2] Start Applio (DML)
22
- echo [3] Start Realtime GUI (DML)
23
- echo [4] Start Realtime GUI (V0)
24
- echo [5] Start Realtime GUI (V1)
25
- echo.
26
-
27
- set /p choice=Select an option:
28
- set choice=%choice: =%
29
-
30
- cls
31
- echo WARNING: It's recommended to disable antivirus or firewall, as errors might occur when starting the ssl.
32
- pause
33
-
34
- if "%choice%"=="1" (
35
- cls
36
- echo WARNING: At this point, it's recommended to disable antivirus or firewall, as errors might occur when downloading pretrained models.
37
- pause>null
38
- echo Starting Applio...
39
- echo.
40
- runtime\python.exe infer-web.py --pycmd runtime\python.exe --port 7897
41
- pause
42
- cls
43
- goto menu
44
- )
45
-
46
- if "%choice%"=="2" (
47
- cls
48
- echo Starting Applio ^(DML^)...
49
- echo.
50
- runtime\python.exe infer-web.py --pycmd runtime\python.exe --port 7897 --dml
51
- pause
52
- cls
53
- goto menu
54
- )
55
-
56
- if "%choice%"=="3" (
57
- cls
58
- echo Starting Realtime GUI ^(DML^)...
59
- echo.
60
- runtime\python.exe gui_v1.py --pycmd runtime\python.exe --dml
61
- pause
62
- cls
63
- goto menu
64
- )
65
-
66
- if "%choice%"=="4" (
67
- cls
68
- echo Starting Realtime GUI ^(V0^)...
69
- echo.
70
- runtime\python.exe gui_v0.py
71
- pause
72
- cls
73
- goto menu
74
- )
75
-
76
- if "%choice%"=="5" (
77
- cls
78
- echo Starting Realtime GUI ^(V1^)...
79
- echo.
80
- runtime\python.exe gui_v1.py
81
- pause
82
- cls
83
- goto menu
84
- )
85
-
86
- cls
87
- echo Invalid option. Please enter a number from 1 to 5.
88
- echo.
89
- echo Press 'Enter' to access the main menu...
90
- pause>nul
91
- cls
92
- goto menu
 
spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/constants.py DELETED
@@ -1,30 +0,0 @@
1
- # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- import s3transfer
14
-
15
- KB = 1024
16
- MB = KB * KB
17
- GB = MB * KB
18
-
19
- ALLOWED_DOWNLOAD_ARGS = [
20
- 'ChecksumMode',
21
- 'VersionId',
22
- 'SSECustomerAlgorithm',
23
- 'SSECustomerKey',
24
- 'SSECustomerKeyMD5',
25
- 'RequestPayer',
26
- 'ExpectedBucketOwner',
27
- ]
28
-
29
- USER_AGENT = 's3transfer/%s' % s3transfer.__version__
30
- PROCESS_USER_AGENT = '%s processpool' % USER_AGENT
 
spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Summarize biomedical papers in a long, detailed synopsis or extreme, TLDR summary
3
- emoji: 🧬📃🗜
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.0.4
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
 
spaces/BwayKC/darkstorm2150-Protogen_v2.2_Official_Release/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/darkstorm2150/Protogen_v2.2_Official_Release").launch()
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/samplers/distributed_sampler.py DELETED
@@ -1,199 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import itertools
3
- import math
4
- from collections import defaultdict
5
- from typing import Optional
6
- import torch
7
- from torch.utils.data.sampler import Sampler
8
-
9
- from detectron2.utils import comm
10
-
11
-
12
- class TrainingSampler(Sampler):
13
- """
14
- In training, we only care about the "infinite stream" of training data.
15
- So this sampler produces an infinite stream of indices and
16
- all workers cooperate to correctly shuffle the indices and sample different indices.
17
-
18
- The samplers in each worker effectively produces `indices[worker_id::num_workers]`
19
- where `indices` is an infinite stream of indices consisting of
20
- `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
21
- or `range(size) + range(size) + ...` (if shuffle is False)
22
- """
23
-
24
- def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):
25
- """
26
- Args:
27
- size (int): the total number of data of the underlying dataset to sample from
28
- shuffle (bool): whether to shuffle the indices or not
29
- seed (int): the initial seed of the shuffle. Must be the same
30
- across all workers. If None, will use a random seed shared
31
- among workers (require synchronization among all workers).
32
- """
33
- self._size = size
34
- assert size > 0
35
- self._shuffle = shuffle
36
- if seed is None:
37
- seed = comm.shared_random_seed()
38
- self._seed = int(seed)
39
-
40
- self._rank = comm.get_rank()
41
- self._world_size = comm.get_world_size()
42
-
43
- def __iter__(self):
44
- start = self._rank
45
- yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
46
-
47
- def _infinite_indices(self):
48
- g = torch.Generator()
49
- g.manual_seed(self._seed)
50
- while True:
51
- if self._shuffle:
52
- yield from torch.randperm(self._size, generator=g)
53
- else:
54
- yield from torch.arange(self._size)
55
-
56
-
57
- class RepeatFactorTrainingSampler(Sampler):
58
- """
59
- Similar to TrainingSampler, but suitable for training on class imbalanced datasets
60
- like LVIS. In each epoch, an image may appear multiple times based on its "repeat
61
- factor". The repeat factor for an image is a function of the frequency the rarest
62
- category labeled in that image. The "frequency of category c" in [0, 1] is defined
63
- as the fraction of images in the training set (without repeats) in which category c
64
- appears.
65
-
66
- See https://arxiv.org/abs/1908.03195 (>= v2) Appendix B.2.
67
- """
68
-
69
- def __init__(self, dataset_dicts, repeat_thresh, shuffle=True, seed=None):
70
- """
71
- Args:
72
- dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
73
- repeat_thresh (float): frequency threshold below which data is repeated.
74
- shuffle (bool): whether to shuffle the indices or not
75
- seed (int): the initial seed of the shuffle. Must be the same
76
- across all workers. If None, will use a random seed shared
77
- among workers (require synchronization among all workers).
78
- """
79
- self._shuffle = shuffle
80
- if seed is None:
81
- seed = comm.shared_random_seed()
82
- self._seed = int(seed)
83
-
84
- self._rank = comm.get_rank()
85
- self._world_size = comm.get_world_size()
86
-
87
- # Get fractional repeat factors and split into whole number (_int_part)
88
- # and fractional (_frac_part) parts.
89
- rep_factors = self._get_repeat_factors(dataset_dicts, repeat_thresh)
90
- self._int_part = torch.trunc(rep_factors)
91
- self._frac_part = rep_factors - self._int_part
92
-
93
- def _get_repeat_factors(self, dataset_dicts, repeat_thresh):
94
- """
95
- Compute (fractional) per-image repeat factors.
96
-
97
- Args:
98
- See __init__.
99
-
100
- Returns:
101
- torch.Tensor: the i-th element is the repeat factor for the dataset image
102
- at index i.
103
- """
104
- # 1. For each category c, compute the fraction of images that contain it: f(c)
105
- category_freq = defaultdict(int)
106
- for dataset_dict in dataset_dicts: # For each image (without repeats)
107
- cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
108
- for cat_id in cat_ids:
109
- category_freq[cat_id] += 1
110
- num_images = len(dataset_dicts)
111
- for k, v in category_freq.items():
112
- category_freq[k] = v / num_images
113
-
114
- # 2. For each category c, compute the category-level repeat factor:
115
- # r(c) = max(1, sqrt(t / f(c)))
116
- category_rep = {
117
- cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
118
- for cat_id, cat_freq in category_freq.items()
119
- }
120
-
121
- # 3. For each image I, compute the image-level repeat factor:
122
- # r(I) = max_{c in I} r(c)
123
- rep_factors = []
124
- for dataset_dict in dataset_dicts:
125
- cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
126
- rep_factor = max({category_rep[cat_id] for cat_id in cat_ids})
127
- rep_factors.append(rep_factor)
128
-
129
- return torch.tensor(rep_factors, dtype=torch.float32)
130
-
131
- def _get_epoch_indices(self, generator):
132
- """
133
- Create a list of dataset indices (with repeats) to use for one epoch.
134
-
135
- Args:
136
- generator (torch.Generator): pseudo random number generator used for
137
- stochastic rounding.
138
-
139
- Returns:
140
- torch.Tensor: list of dataset indices to use in one epoch. Each index
141
- is repeated based on its calculated repeat factor.
142
- """
143
- # Since repeat factors are fractional, we use stochastic rounding so
144
- # that the target repeat factor is achieved in expectation over the
145
- # course of training
146
- rands = torch.rand(len(self._frac_part), generator=generator)
147
- rep_factors = self._int_part + (rands < self._frac_part).float()
148
- # Construct a list of indices in which we repeat images as specified
149
- indices = []
150
- for dataset_index, rep_factor in enumerate(rep_factors):
151
- indices.extend([dataset_index] * int(rep_factor.item()))
152
- return torch.tensor(indices, dtype=torch.int64)
153
-
154
- def __iter__(self):
155
- start = self._rank
156
- yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
157
-
158
- def _infinite_indices(self):
159
- g = torch.Generator()
160
- g.manual_seed(self._seed)
161
- while True:
162
- # Sample indices with repeats determined by stochastic rounding; each
163
- # "epoch" may have a slightly different size due to the rounding.
164
- indices = self._get_epoch_indices(g)
165
- if self._shuffle:
166
- randperm = torch.randperm(len(indices), generator=g)
167
- yield from indices[randperm]
168
- else:
169
- yield from indices
170
-
171
-
172
- class InferenceSampler(Sampler):
173
- """
174
- Produce indices for inference.
175
- Inference needs to run on the __exact__ set of samples,
176
- therefore when the total number of samples is not divisible by the number of workers,
177
- this sampler produces different number of samples on different workers.
178
- """
179
-
180
- def __init__(self, size: int):
181
- """
182
- Args:
183
- size (int): the total number of data of the underlying dataset to sample from
184
- """
185
- self._size = size
186
- assert size > 0
187
- self._rank = comm.get_rank()
188
- self._world_size = comm.get_world_size()
189
-
190
- shard_size = (self._size - 1) // self._world_size + 1
191
- begin = shard_size * self._rank
192
- end = min(shard_size * (self._rank + 1), self._size)
193
- self._local_indices = range(begin, end)
194
-
195
- def __iter__(self):
196
- yield from self._local_indices
197
-
198
- def __len__(self):
199
- return len(self._local_indices)
spaces/CVPR/LIVE/pybind11/tests/test_callbacks.cpp DELETED
@@ -1,168 +0,0 @@
1
- /*
2
- tests/test_callbacks.cpp -- callbacks
3
-
4
- Copyright (c) 2016 Wenzel Jakob <[email protected]>
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #include "pybind11_tests.h"
11
- #include "constructor_stats.h"
12
- #include <pybind11/functional.h>
13
- #include <thread>
14
-
15
-
16
- int dummy_function(int i) { return i + 1; }
17
-
18
- TEST_SUBMODULE(callbacks, m) {
19
- // test_callbacks, test_function_signatures
20
- m.def("test_callback1", [](py::object func) { return func(); });
21
- m.def("test_callback2", [](py::object func) { return func("Hello", 'x', true, 5); });
22
- m.def("test_callback3", [](const std::function<int(int)> &func) {
23
- return "func(43) = " + std::to_string(func(43)); });
24
- m.def("test_callback4", []() -> std::function<int(int)> { return [](int i) { return i+1; }; });
25
- m.def("test_callback5", []() {
26
- return py::cpp_function([](int i) { return i+1; }, py::arg("number"));
27
- });
28
-
29
- // test_keyword_args_and_generalized_unpacking
30
- m.def("test_tuple_unpacking", [](py::function f) {
31
- auto t1 = py::make_tuple(2, 3);
32
- auto t2 = py::make_tuple(5, 6);
33
- return f("positional", 1, *t1, 4, *t2);
34
- });
35
-
36
- m.def("test_dict_unpacking", [](py::function f) {
37
- auto d1 = py::dict("key"_a="value", "a"_a=1);
38
- auto d2 = py::dict();
39
- auto d3 = py::dict("b"_a=2);
40
- return f("positional", 1, **d1, **d2, **d3);
41
- });
42
-
43
- m.def("test_keyword_args", [](py::function f) {
44
- return f("x"_a=10, "y"_a=20);
45
- });
46
-
47
- m.def("test_unpacking_and_keywords1", [](py::function f) {
48
- auto args = py::make_tuple(2);
49
- auto kwargs = py::dict("d"_a=4);
50
- return f(1, *args, "c"_a=3, **kwargs);
51
- });
52
-
53
- m.def("test_unpacking_and_keywords2", [](py::function f) {
54
- auto kwargs1 = py::dict("a"_a=1);
55
- auto kwargs2 = py::dict("c"_a=3, "d"_a=4);
56
- return f("positional", *py::make_tuple(1), 2, *py::make_tuple(3, 4), 5,
57
- "key"_a="value", **kwargs1, "b"_a=2, **kwargs2, "e"_a=5);
58
- });
59
-
60
- m.def("test_unpacking_error1", [](py::function f) {
61
- auto kwargs = py::dict("x"_a=3);
62
- return f("x"_a=1, "y"_a=2, **kwargs); // duplicate ** after keyword
63
- });
64
-
65
- m.def("test_unpacking_error2", [](py::function f) {
66
- auto kwargs = py::dict("x"_a=3);
67
- return f(**kwargs, "x"_a=1); // duplicate keyword after **
68
- });
69
-
70
- m.def("test_arg_conversion_error1", [](py::function f) {
71
- f(234, UnregisteredType(), "kw"_a=567);
72
- });
73
-
74
- m.def("test_arg_conversion_error2", [](py::function f) {
75
- f(234, "expected_name"_a=UnregisteredType(), "kw"_a=567);
76
- });
77
-
78
- // test_lambda_closure_cleanup
79
- struct Payload {
80
- Payload() { print_default_created(this); }
81
- ~Payload() { print_destroyed(this); }
82
- Payload(const Payload &) { print_copy_created(this); }
83
- Payload(Payload &&) { print_move_created(this); }
84
- };
85
- // Export the payload constructor statistics for testing purposes:
86
- m.def("payload_cstats", &ConstructorStats::get<Payload>);
87
- /* Test cleanup of lambda closure */
88
- m.def("test_cleanup", []() -> std::function<void(void)> {
89
- Payload p;
90
-
91
- return [p]() {
92
- /* p should be cleaned up when the returned function is garbage collected */
93
- (void) p;
94
- };
95
- });
96
-
97
- // test_cpp_function_roundtrip
98
- /* Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer */
99
- m.def("dummy_function", &dummy_function);
100
- m.def("dummy_function2", [](int i, int j) { return i + j; });
101
- m.def("roundtrip", [](std::function<int(int)> f, bool expect_none = false) {
102
- if (expect_none && f)
103
- throw std::runtime_error("Expected None to be converted to empty std::function");
104
- return f;
105
- }, py::arg("f"), py::arg("expect_none")=false);
106
- m.def("test_dummy_function", [](const std::function<int(int)> &f) -> std::string {
107
- using fn_type = int (*)(int);
108
- auto result = f.target<fn_type>();
109
- if (!result) {
110
- auto r = f(1);
111
- return "can't convert to function pointer: eval(1) = " + std::to_string(r);
112
- } else if (*result == dummy_function) {
113
- auto r = (*result)(1);
114
- return "matches dummy_function: eval(1) = " + std::to_string(r);
115
- } else {
116
- return "argument does NOT match dummy_function. This should never happen!";
117
- }
118
- });
119
-
120
- class AbstractBase { public: virtual unsigned int func() = 0; };
121
- m.def("func_accepting_func_accepting_base", [](std::function<double(AbstractBase&)>) { });
122
-
123
- struct MovableObject {
124
- bool valid = true;
125
-
126
- MovableObject() = default;
127
- MovableObject(const MovableObject &) = default;
128
- MovableObject &operator=(const MovableObject &) = default;
129
- MovableObject(MovableObject &&o) : valid(o.valid) { o.valid = false; }
130
- MovableObject &operator=(MovableObject &&o) {
131
- valid = o.valid;
132
- o.valid = false;
133
- return *this;
134
- }
135
- };
136
- py::class_<MovableObject>(m, "MovableObject");
137
-
138
- // test_movable_object
139
- m.def("callback_with_movable", [](std::function<void(MovableObject &)> f) {
140
- auto x = MovableObject();
141
- f(x); // lvalue reference shouldn't move out object
142
- return x.valid; // must still return `true`
143
- });
144
-
145
- // test_bound_method_callback
146
- struct CppBoundMethodTest {};
147
- py::class_<CppBoundMethodTest>(m, "CppBoundMethodTest")
148
- .def(py::init<>())
149
- .def("triple", [](CppBoundMethodTest &, int val) { return 3 * val; });
150
-
151
- // test async Python callbacks
152
- using callback_f = std::function<void(int)>;
153
- m.def("test_async_callback", [](callback_f f, py::list work) {
154
- // make detached thread that calls `f` with piece of work after a little delay
155
- auto start_f = [f](int j) {
156
- auto invoke_f = [f, j] {
157
- std::this_thread::sleep_for(std::chrono::milliseconds(50));
158
- f(j);
159
- };
160
- auto t = std::thread(std::move(invoke_f));
161
- t.detach();
162
- };
163
-
164
- // spawn worker threads
165
- for (auto i : work)
166
- start_f(py::cast<int>(i));
167
- });
168
- }
spaces/CVPR/LIVE/thrust/testing/unittest/special_types.h DELETED
@@ -1,184 +0,0 @@
1
- #pragma once
2
-
3
- #include <iostream>
4
- #include <thrust/execution_policy.h>
5
-
6
- template <typename T, unsigned int N>
7
- struct FixedVector
8
- {
9
- T data[N];
10
-
11
- __host__ __device__
12
- FixedVector()
13
- {
14
- for(unsigned int i = 0; i < N; i++)
15
- data[i] = T();
16
- }
17
-
18
- __host__ __device__
19
- FixedVector(T init)
20
- {
21
- for(unsigned int i = 0; i < N; i++)
22
- data[i] = init;
23
- }
24
-
25
- __host__ __device__
26
- FixedVector operator+(const FixedVector& bs) const
27
- {
28
- FixedVector output;
29
- for(unsigned int i = 0; i < N; i++)
30
- output.data[i] = data[i] + bs.data[i];
31
- return output;
32
- }
33
-
34
- __host__ __device__
35
- bool operator<(const FixedVector& bs) const
36
- {
37
- for(unsigned int i = 0; i < N; i++)
38
- {
39
- if(data[i] < bs.data[i])
40
- return true;
41
- else if(bs.data[i] < data[i])
42
- return false;
43
- }
44
- return false;
45
- }
46
-
47
- __host__ __device__
48
- bool operator==(const FixedVector& bs) const
49
- {
50
- for(unsigned int i = 0; i < N; i++)
51
- {
52
- if(!(data[i] == bs.data[i]))
53
- return false;
54
- }
55
- return true;
56
- }
57
- };
58
-
59
- template<typename Key, typename Value>
60
- struct key_value
61
- {
62
- typedef Key key_type;
63
- typedef Value value_type;
64
-
65
- __host__ __device__
66
- key_value(void)
67
- : key(), value()
68
- {}
69
-
70
- __host__ __device__
71
- key_value(key_type k, value_type v)
72
- : key(k), value(v)
73
- {}
74
-
75
- __host__ __device__
76
- bool operator<(const key_value &rhs) const
77
- {
78
- return key < rhs.key;
79
- }
80
-
81
- __host__ __device__
82
- bool operator>(const key_value &rhs) const
83
- {
84
- return key > rhs.key;
85
- }
86
-
87
- __host__ __device__
88
- bool operator==(const key_value &rhs) const
89
- {
90
- return key == rhs.key && value == rhs.value;
91
- }
92
-
93
- __host__ __device__
94
- bool operator!=(const key_value &rhs) const
95
- {
96
- return !operator==(rhs);
97
- }
98
-
99
- friend std::ostream &operator<<(std::ostream &os, const key_value &kv)
100
- {
101
- return os << "(" << kv.key << ", " << kv.value << ")";
102
- }
103
-
104
- key_type key;
105
- value_type value;
106
- };
107
-
108
- struct user_swappable
109
- {
110
- inline __host__ __device__
111
- user_swappable(bool swapped = false)
112
- : was_swapped(swapped)
113
- {}
114
-
115
- bool was_swapped;
116
- };
117
-
118
- inline __host__ __device__
119
- bool operator==(const user_swappable &x, const user_swappable &y)
120
- {
121
- return x.was_swapped == y.was_swapped;
122
- }
123
-
124
- inline __host__ __device__
125
- void swap(user_swappable &x, user_swappable &y)
126
- {
127
- x.was_swapped = true;
128
- y.was_swapped = false;
129
- }
130
-
131
- class my_system : public thrust::device_execution_policy<my_system>
132
- {
133
- public:
134
- my_system(int)
135
- : correctly_dispatched(false),
136
- num_copies(0)
137
- {}
138
-
139
- my_system(const my_system &other)
140
- : correctly_dispatched(false),
141
- num_copies(other.num_copies + 1)
142
- {}
143
-
144
- void validate_dispatch()
145
- {
146
- correctly_dispatched = (num_copies == 0);
147
- }
148
-
149
- bool is_valid()
150
- {
151
- return correctly_dispatched;
152
- }
153
-
154
- private:
155
- bool correctly_dispatched;
156
-
157
- // count the number of copies so that we can validate
158
- // that dispatch does not introduce any
159
- unsigned int num_copies;
160
-
161
-
162
- // disallow default construction
163
- my_system();
164
- };
165
-
166
- struct my_tag : thrust::device_execution_policy<my_tag> {};
167
-
168
- namespace unittest
169
- {
170
-
171
-
172
- using thrust::detail::int8_t;
173
- using thrust::detail::int16_t;
174
- using thrust::detail::int32_t;
175
- using thrust::detail::int64_t;
176
-
177
- using thrust::detail::uint8_t;
178
- using thrust::detail::uint16_t;
179
- using thrust::detail::uint32_t;
180
- using thrust::detail::uint64_t;
181
-
182
-
183
- }
184
-
spaces/CVPR/LIVE/thrust/thrust/device_reference.h DELETED
@@ -1,983 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file device_reference.h
19
- * \brief A reference to a variable which resides in the "device" system's memory space
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/device_ptr.h>
26
- #include <thrust/detail/type_traits.h>
27
- #include <thrust/detail/reference.h>
28
-
29
- namespace thrust
30
- {
31
-
32
- /*! \addtogroup memory_management_classes Memory Management Classes
33
- * \ingroup memory_management
34
- * \{
35
- */
36
-
37
- /*! \p device_reference acts as a reference-like object to an object stored in device memory.
38
- * \p device_reference is not intended to be used directly; rather, this type
39
- * is the result of dereferencing a \p device_ptr. Similarly, taking the address of
40
- * a \p device_reference yields a \p device_ptr.
41
- *
42
- * \p device_reference may often be used from host code in place of operations defined on
43
- * its associated \c value_type. For example, when \p device_reference refers to an
44
- * arithmetic type, arithmetic operations on it are legal:
45
- *
46
- * \code
47
- * #include <thrust/device_vector.h>
48
- *
49
- * int main(void)
50
- * {
51
- * thrust::device_vector<int> vec(1, 13);
52
- *
53
- * thrust::device_reference<int> ref_to_thirteen = vec[0];
54
- *
55
- * int x = ref_to_thirteen + 1;
56
- *
57
- * // x is 14
58
- *
59
- * return 0;
60
- * }
61
- * \endcode
62
- *
63
- * Similarly, we can print the value of \c ref_to_thirteen in the above code by using an
64
- * \c iostream:
65
- *
66
- * \code
67
- * #include <thrust/device_vector.h>
68
- * #include <iostream>
69
- *
70
- * int main(void)
71
- * {
72
- * thrust::device_vector<int> vec(1, 13);
73
- *
74
- * thrust::device_reference<int> ref_to_thirteen = vec[0];
75
- *
76
- * std::cout << ref_to_thirteen << std::endl;
77
- *
78
- * // 13 is printed
79
- *
80
- * return 0;
81
- * }
82
- * \endcode
83
- *
84
- * Of course, we needn't explicitly create a \p device_reference in the previous
85
- * example, because one is returned by \p device_vector's bracket operator. A more natural
86
- * way to print the value of a \p device_vector element might be:
87
- *
88
- * \code
89
- * #include <thrust/device_vector.h>
90
- * #include <iostream>
91
- *
92
- * int main(void)
93
- * {
94
- * thrust::device_vector<int> vec(1, 13);
95
- *
96
- * std::cout << vec[0] << std::endl;
97
- *
98
- * // 13 is printed
99
- *
100
- * return 0;
101
- * }
102
- * \endcode
103
- *
104
- * These kinds of operations should be used sparingly in performance-critical code, because
105
- * they imply a potentially expensive copy between host and device space.
106
- *
107
- * Some operations which are possible with regular objects are impossible with their
108
- * corresponding \p device_reference objects due to the requirements of the C++ language. For
109
- * example, because the member access operator cannot be overloaded, member variables and functions
110
- * of a referent object cannot be directly accessed through its \p device_reference.
111
- *
112
- * The following code, which generates a compiler error, illustrates:
113
- *
114
- * \code
115
- * #include <thrust/device_vector.h>
116
- *
117
- * struct foo
118
- * {
119
- * int x;
120
- * };
121
- *
122
- * int main(void)
123
- * {
124
- * thrust::device_vector<foo> foo_vec(1);
125
- *
126
- * thrust::device_reference<foo> foo_ref = foo_vec[0];
127
- *
128
- * foo_ref.x = 13; // ERROR: x cannot be accessed through foo_ref
129
- *
130
- * return 0;
131
- * }
132
- * \endcode
133
- *
134
- * Instead, a host space copy must be created to access \c foo's \c x member:
135
- *
136
- * \code
137
- * #include <thrust/device_vector.h>
138
- *
139
- * struct foo
140
- * {
141
- * int x;
142
- * };
143
- *
144
- * int main(void)
145
- * {
146
- * thrust::device_vector<foo> foo_vec(1);
147
- *
148
- * // create a local host-side foo object
149
- * foo host_foo;
150
- * host_foo.x = 13;
151
- *
152
- * thrust::device_reference<foo> foo_ref = foo_vec[0];
153
- *
154
- * foo_ref = host_foo;
155
- *
156
- * // foo_ref's x member is 13
157
- *
158
- * return 0;
159
- * }
160
- * \endcode
161
- *
162
- * Another common case where a \p device_reference cannot directly be used in place of
163
- * its referent object occurs when passing them as parameters to functions like \c printf
164
- * which have varargs parameters. Because varargs parameters must be Plain Old Data, a
165
- * \p device_reference to a POD type requires a cast when passed to \c printf:
166
- *
167
- * \code
168
- * #include <stdio.h>
169
- * #include <thrust/device_vector.h>
170
- *
171
- * int main(void)
172
- * {
173
- * thrust::device_vector<int> vec(1,13);
174
- *
175
- * // vec[0] must be cast to int when passing to printf
176
- * printf("%d\n", (int) vec[0]);
177
- *
178
- * return 0;
179
- * }
180
- * \endcode
181
- *
182
- * \see device_ptr
183
- * \see device_vector
184
- */
185
- template<typename T>
186
- class device_reference
187
- : public thrust::reference<
188
- T,
189
- thrust::device_ptr<T>,
190
- thrust::device_reference<T>
191
- >
192
- {
193
- private:
194
- typedef thrust::reference<
195
- T,
196
- thrust::device_ptr<T>,
197
- thrust::device_reference<T>
198
- > super_t;
199
-
200
- public:
201
- /*! The type of the value referenced by this type of \p device_reference.
202
- */
203
- typedef typename super_t::value_type value_type;
204
-
205
- /*! The type of the expression <tt>&ref</tt>, where <tt>ref</tt> is a \p device_reference.
206
- */
207
- typedef typename super_t::pointer pointer;
208
-
209
- /*! This copy constructor accepts a const reference to another
210
- * \p device_reference. After this \p device_reference is constructed,
211
- * it shall refer to the same object as \p other.
212
- *
213
- * \param other A \p device_reference to copy from.
214
- *
215
- * The following code snippet demonstrates the semantics of this
216
- * copy constructor.
217
- *
218
- * \code
219
- * #include <thrust/device_vector.h>
220
- * #include <assert.h>
221
- * ...
222
- * thrust::device_vector<int> v(1,0);
223
- * thrust::device_reference<int> ref = v[0];
224
- *
225
- * // ref equals the object at v[0]
226
- * assert(ref == v[0]);
227
- *
228
- * // the address of ref equals the address of v[0]
229
- * assert(&ref == &v[0]);
230
- *
231
- * // modifying v[0] modifies ref
232
- * v[0] = 13;
233
- * assert(ref == 13);
234
- * \endcode
235
- *
236
- * \note This constructor is templated primarily to allow initialization of
237
- * <tt>device_reference<const T></tt> from <tt>device_reference<T></tt>.
238
- */
239
- template<typename OtherT>
240
- __host__ __device__
241
- device_reference(const device_reference<OtherT> &other,
242
- typename thrust::detail::enable_if_convertible<
243
- typename device_reference<OtherT>::pointer,
244
- pointer
245
- >::type * = 0)
246
- : super_t(other)
247
- {}
248
-
249
- /*! This copy constructor initializes this \p device_reference
250
- * to refer to an object pointed to by the given \p device_ptr. After
251
- * this \p device_reference is constructed, it shall refer to the
252
- * object pointed to by \p ptr.
253
- *
254
- * \param ptr A \p device_ptr to copy from.
255
- *
256
- * The following code snippet demonstrates the semantic of this
257
- * copy constructor.
258
- *
259
- * \code
260
- * #include <thrust/device_vector.h>
261
- * #include <assert.h>
262
- * ...
263
- * thrust::device_vector<int> v(1,0);
264
- * thrust::device_ptr<int> ptr = &v[0];
265
- * thrust::device_reference<int> ref(ptr);
266
- *
267
- * // ref equals the object pointed to by ptr
268
- * assert(ref == *ptr);
269
- *
270
- * // the address of ref equals ptr
271
- * assert(&ref == ptr);
272
- *
273
- * // modifying *ptr modifies ref
274
- * *ptr = 13;
275
- * assert(ref == 13);
276
- * \endcode
277
- */
278
- __host__ __device__
279
- explicit device_reference(const pointer &ptr)
280
- : super_t(ptr)
281
- {}
282
-
283
- /*! This assignment operator assigns the value of the object referenced by
284
- * the given \p device_reference to the object referenced by this
285
- * \p device_reference.
286
- *
287
- * \param other The \p device_reference to assign from.
288
- * \return <tt>*this</tt>
289
- */
290
- template<typename OtherT>
291
- __host__ __device__
292
- device_reference &operator=(const device_reference<OtherT> &other);
293
-
294
- /*! Assignment operator assigns the value of the given value to the
295
- * value referenced by this \p device_reference.
296
- *
297
- * \param x The value to assign from.
298
- * \return <tt>*this</tt>
299
- */
300
- __host__ __device__
301
- device_reference &operator=(const value_type &x);
302
-
303
- // declare these members for the purpose of Doxygenating them
304
- // they actually exist in a derived-from class
305
- #if 0
306
- /*! Address-of operator returns a \p device_ptr pointing to the object
307
- * referenced by this \p device_reference. It does not return the
308
- * address of this \p device_reference.
309
- *
310
- * \return A \p device_ptr pointing to the object this
311
- * \p device_reference references.
312
- */
313
- __host__ __device__
314
- pointer operator&(void) const;
315
-
316
- /*! Conversion operator converts this \p device_reference to T
317
- * by returning a copy of the object referenced by this
318
- * \p device_reference.
319
- *
320
- * \return A copy of the object referenced by this \p device_reference.
321
- */
322
- __host__ __device__
323
- operator value_type (void) const;
324
-
325
- /*! swaps the value this \p device_reference references with another.
326
- * \p other The other \p device_reference with which to swap.
327
- */
328
- __host__ __device__
329
- void swap(device_reference &other);
330
-
331
- /*! Prefix increment operator increments the object referenced by this
332
- * \p device_reference.
333
- *
334
- * \return <tt>*this</tt>
335
- *
336
- * The following code snippet demonstrates the semantics of
337
- * \p device_reference's prefix increment operator.
338
- *
339
- * \code
340
- * #include <thrust/device_vector.h>
341
- * #include <assert.h>
342
- * ...
343
- * thrust::device_vector<int> v(1,0);
344
- * thrust::device_ptr<int> ptr = &v[0];
345
- * thrust::device_reference<int> ref(ptr);
346
- *
347
- * // ref equals 0
348
- * assert(ref == 0);
349
- *
350
- * // the object pointed to by ptr equals 1
351
- * assert(*ptr == 1);
352
- *
353
- * // v[0] equals 1
354
- * assert(v[0] == 1);
355
- *
356
- * // increment ref
357
- * ++ref;
358
- *
359
- * // ref equals 1
360
- * assert(ref == 1);
361
- *
362
- * // the object pointed to by ptr equals 1
363
- * assert(*ptr == 1);
364
- *
365
- * // v[0] equals 1
366
- * assert(v[0] == 1);
367
- * \endcode
368
- *
369
- * \note The increment executes as if it were executed on the host.
370
- * This may change in a later version.
371
- */
372
- device_reference &operator++(void);
373
-
374
- /*! Postfix increment operator copies the object referenced by this
375
- * \p device_reference, increments the object referenced by this
376
- * \p device_reference, and returns the copy.
377
- *
378
- * \return A copy of the object referenced by this \p device_reference
379
- * before being incremented.
380
- *
381
- * The following code snippet demonstrates the semantics of
382
- * \p device_reference's postfix increment operator.
383
- *
384
- * \code
385
- * #include <thrust/device_vector.h>
386
- * #include <assert.h>
387
- * ...
388
- * thrust::device_vector<int> v(1,0);
389
- * thrust::device_ptr<int> ptr = &v[0];
390
- * thrust::device_reference<int> ref(ptr);
391
- *
392
- * // ref equals 0
393
- * assert(ref == 0);
394
- *
395
- * // the object pointed to by ptr equals 0
396
- * assert(*ptr == 0);
397
- *
398
- * // v[0] equals 0
399
- * assert(v[0] == 0);
400
- *
401
- * // increment ref
402
- * int x = ref++;
403
- *
404
- * // x equals 0
405
- * assert(x == 0)
406
- *
407
- * // ref equals 1
408
- * assert(ref == 1);
409
- *
410
- * // the object pointed to by ptr equals 1
411
- * assert(*ptr == 1);
412
- *
413
- * // v[0] equals 1
414
- * assert(v[0] == 1);
415
- * \endcode
416
- *
417
- * \note The increment executes as if it were executed on the host.
418
- * This may change in a later version.
419
- */
420
- value_type operator++(int);
421
-
422
- /*! Addition assignment operator add-assigns the object referenced by this
423
- * \p device_reference and returns this \p device_reference.
424
- *
425
- * \param rhs The right hand side of the add-assignment.
426
- * \return <tt>*this</tt>.
427
- *
428
- * The following code snippet demonstrates the semantics of
429
- * \p device_reference's addition assignment operator.
430
- *
431
- * \code
432
- * #include <thrust/device_vector.h>
433
- * #include <assert.h>
434
- * ...
435
- * thrust::device_vector<int> v(1,0);
436
- * thrust::device_ptr<int> ptr = &v[0];
437
- * thrust::device_reference<int> ref(ptr);
438
- *
439
- * // ref equals 0
440
- * assert(ref == 0);
441
- *
442
- * // the object pointed to by ptr equals 0
443
- * assert(*ptr == 0);
444
- *
445
- * // v[0] equals 0
446
- * assert(v[0] == 0);
447
- *
448
- * // add-assign ref
449
- * ref += 5;
450
- *
451
- * // ref equals 5
452
- * assert(ref == 5);
453
- *
454
- * // the object pointed to by ptr equals 5
455
- * assert(*ptr == 5);
456
- *
457
- * // v[0] equals 5
458
- * assert(v[0] == 5);
459
- * \endcode
460
- *
461
- * \note The add-assignment executes as if it were executed on the host.
462
- * This may change in a later version.
463
- */
464
- device_reference &operator+=(const T &rhs);
465
-
466
- /*! Prefix decrement operator decrements the object referenced by this
467
- * \p device_reference.
468
- *
469
- * \return <tt>*this</tt>
470
- *
471
- * The following code snippet demonstrates the semantics of
472
- * \p device_reference's prefix decrement operator.
473
- *
474
- * \code
475
- * #include <thrust/device_vector.h>
476
- * #include <assert.h>
477
- * ...
478
- * thrust::device_vector<int> v(1,0);
479
- * thrust::device_ptr<int> ptr = &v[0];
480
- * thrust::device_reference<int> ref(ptr);
481
- *
482
- * // ref equals 0
483
- * assert(ref == 0);
484
- *
485
- * // the object pointed to by ptr equals 0
486
- * assert(*ptr == 0);
487
- *
488
- * // v[0] equals 0
489
- * assert(v[0] == 0);
490
- *
491
- * // decrement ref
492
- * --ref;
493
- *
494
- * // ref equals -1
495
- * assert(ref == -1);
496
- *
497
- * // the object pointed to by ptr equals -1
498
- * assert(*ptr == -1);
499
- *
500
- * // v[0] equals -1
501
- * assert(v[0] == -1);
502
- * \endcode
503
- *
504
- * \note The decrement executes as if it were executed on the host.
505
- * This may change in a later version.
506
- */
507
- device_reference &operator--(void);
508
-
509
- /*! Postfix decrement operator copies the object referenced by this
510
- * \p device_reference, decrements the object referenced by this
511
- * \p device_reference, and returns the copy.
512
- *
513
- * \return A copy of the object referenced by this \p device_reference
514
- * before being decremented.
515
- *
516
- * The following code snippet demonstrates the semantics of
517
- * \p device_reference's postfix decrement operator.
518
- *
519
- * \code
520
- * #include <thrust/device_vector.h>
521
- * #include <assert.h>
522
- * ...
523
- * thrust::device_vector<int> v(1,0);
524
- * thrust::device_ptr<int> ptr = &v[0];
525
- * thrust::device_reference<int> ref(ptr);
526
- *
527
- * // ref equals 0
528
- * assert(ref == 0);
529
- *
530
- * // the object pointed to by ptr equals 0
531
- * assert(*ptr == 0);
532
- *
533
- * // v[0] equals 0
534
- * assert(v[0] == 0);
535
- *
536
- * // decrement ref
537
- * int x = ref--;
538
- *
539
- * // x equals 0
540
- * assert(x == 0)
541
- *
542
- * // ref equals -1
543
- * assert(ref == -1);
544
- *
545
- * // the object pointed to by ptr equals -1
546
- * assert(*ptr == -1);
547
- *
548
- * // v[0] equals -1
549
- * assert(v[0] == -1);
550
- * \endcode
551
- *
552
- * \note The decrement executes as if it were executed on the host.
553
- * This may change in a later version.
554
- */
555
- value_type operator--(int);
556
-
557
- /*! Subtraction assignment operator subtract-assigns the object referenced by this
558
- * \p device_reference and returns this \p device_reference.
559
- *
560
- * \param rhs The right hand side of the subtraction-assignment.
561
- * \return <tt>*this</tt>.
562
- *
563
- * The following code snippet demonstrates the semantics of
564
- * \p device_reference's addition assignment operator.
565
- *
566
- * \code
567
- * #include <thrust/device_vector.h>
568
- * #include <assert.h>
569
- * ...
570
- * thrust::device_vector<int> v(1,0);
571
- * thrust::device_ptr<int> ptr = &v[0];
572
- * thrust::device_reference<int> ref(ptr);
573
- *
574
- * // ref equals 0
575
- * assert(ref == 0);
576
- *
577
- * // the object pointed to by ptr equals 0
578
- * assert(*ptr == 0);
579
- *
580
- * // v[0] equals 0
581
- * assert(v[0] == 0);
582
- *
583
- * // subtract-assign ref
584
- * ref -= 5;
585
- *
586
- * // ref equals -5
587
- * assert(ref == -5);
588
- *
589
- * // the object pointed to by ptr equals -5
590
- * assert(*ptr == -5);
591
- *
592
- * // v[0] equals -5
593
- * assert(v[0] == -5);
594
- * \endcode
595
- *
596
- * \note The subtract-assignment executes as if it were executed on the host.
597
- * This may change in a later version.
598
- */
599
- device_reference &operator-=(const T &rhs);
600
-
601
- /*! Multiplication assignment operator multiply-assigns the object referenced by this
602
- * \p device_reference and returns this \p device_reference.
603
- *
604
- * \param rhs The right hand side of the multiply-assignment.
605
- * \return <tt>*this</tt>.
606
- *
607
- * The following code snippet demonstrates the semantics of
608
- * \p device_reference's multiply assignment operator.
609
- *
610
- * \code
611
- * #include <thrust/device_vector.h>
612
- * #include <assert.h>
613
- * ...
614
- * thrust::device_vector<int> v(1,1);
615
- * thrust::device_ptr<int> ptr = &v[0];
616
- * thrust::device_reference<int> ref(ptr);
617
- *
618
- * // ref equals 1
619
- * assert(ref == 1);
620
- *
621
- * // the object pointed to by ptr equals 1
622
- * assert(*ptr == 1);
623
- *
624
- * // v[0] equals 1
625
- * assert(v[0] == 1);
626
- *
627
- * // multiply-assign ref
628
- * ref *= 5;
629
- *
630
- * // ref equals 5
631
- * assert(ref == 5);
632
- *
633
- * // the object pointed to by ptr equals 5
634
- * assert(*ptr == 5);
635
- *
636
- * // v[0] equals 5
637
- * assert(v[0] == 5);
638
- * \endcode
639
- *
640
- * \note The multiply-assignment executes as if it were executed on the host.
641
- * This may change in a later version.
642
- */
643
- device_reference &operator*=(const T &rhs);
644
-
645
- /*! Division assignment operator divide-assigns the object referenced by this
646
- * \p device_reference and returns this \p device_reference.
647
- *
648
- * \param rhs The right hand side of the divide-assignment.
649
- * \return <tt>*this</tt>.
650
- *
651
- * The following code snippet demonstrates the semantics of
652
- * \p device_reference's divide assignment operator.
653
- *
654
- * \code
655
- * #include <thrust/device_vector.h>
656
- * #include <assert.h>
657
- * ...
658
- * thrust::device_vector<int> v(1,5);
659
- * thrust::device_ptr<int> ptr = &v[0];
660
- * thrust::device_reference<int> ref(ptr);
661
- *
662
- * // ref equals 5
663
- * assert(ref == 5);
664
- *
665
- * // the object pointed to by ptr equals 5
666
- * assert(*ptr == 5);
667
- *
668
- * // v[0] equals 5
669
- * assert(v[0] == 5);
670
- *
671
- * // divide-assign ref
672
- * ref /= 5;
673
- *
674
- * // ref equals 1
675
- * assert(ref == 1);
676
- *
677
- * // the object pointed to by ptr equals 1
678
- * assert(*ptr == 1);
679
- *
680
- * // v[0] equals 1
681
- * assert(v[0] == 1);
682
- * \endcode
683
- *
684
- * \note The divide-assignment executes as if it were executed on the host.
685
- * This may change in a later version.
686
- */
687
- device_reference &operator/=(const T &rhs);
688
-
689
- /*! Modulus assignment operator modulus-assigns the object referenced by this
690
- * \p device_reference and returns this \p device_reference.
691
- *
692
- * \param rhs The right hand side of the modulus-assignment.
693
- * \return <tt>*this</tt>.
694
- *
695
- * The following code snippet demonstrates the semantics of
696
- * \p device_reference's modulus assignment operator.
697
- *
698
- * \code
699
- * #include <thrust/device_vector.h>
700
- * #include <assert.h>
701
- * ...
702
- * thrust::device_vector<int> v(1,5);
703
- * thrust::device_ptr<int> ptr = &v[0];
704
- * thrust::device_reference<int> ref(ptr);
705
- *
706
- * // ref equals 5
707
- * assert(ref == 5);
708
- *
709
- * // the object pointed to by ptr equals 5
710
- * assert(*ptr == 5);
711
- *
712
- * // v[0] equals 5
713
- * assert(v[0] == 5);
714
- *
715
- * // modulus-assign ref
716
- * ref %= 5;
717
- *
718
- * // ref equals 0
719
- * assert(ref == 0);
720
- *
721
- * // the object pointed to by ptr equals 0
722
- * assert(*ptr == 0);
723
- *
724
- * // v[0] equals 0
725
- * assert(v[0] == 0);
726
- * \endcode
727
- *
728
- * \note The modulus-assignment executes as if it were executed on the host.
729
- * This may change in a later version.
730
- */
731
- device_reference &operator%=(const T &rhs);
732
-
733
- /*! Bitwise left shift assignment operator left shift-assigns the object referenced by this
734
- * \p device_reference and returns this \p device_reference.
735
- *
736
- * \param rhs The right hand side of the left shift-assignment.
737
- * \return <tt>*this</tt>.
738
- *
739
- * The following code snippet demonstrates the semantics of
740
- * \p device_reference's left shift assignment operator.
741
- *
742
- * \code
743
- * #include <thrust/device_vector.h>
744
- * #include <assert.h>
745
- * ...
746
- * thrust::device_vector<int> v(1,1);
747
- * thrust::device_ptr<int> ptr = &v[0];
748
- * thrust::device_reference<int> ref(ptr);
749
- *
750
- * // ref equals 1
751
- * assert(ref == 1);
752
- *
753
- * // the object pointed to by ptr equals 1
754
- * assert(*ptr == 1);
755
- *
756
- * // v[0] equals 1
757
- * assert(v[0] == 1);
758
- *
759
- * // left shift-assign ref
760
- * ref <<= 1;
761
- *
762
- * // ref equals 2
763
- * assert(ref == 2);
764
- *
765
- * // the object pointed to by ptr equals 2
766
- * assert(*ptr == 2);
767
- *
768
- * // v[0] equals 2
769
- * assert(v[0] == 2);
770
- * \endcode
771
- *
772
- * \note The left shift-assignment executes as if it were executed on the host.
773
- * This may change in a later version.
774
- */
775
- device_reference &operator<<=(const T &rhs);
776
-
777
- /*! Bitwise right shift assignment operator right shift-assigns the object referenced by this
778
- * \p device_reference and returns this \p device_reference.
779
- *
780
- * \param rhs The right hand side of the right shift-assignment.
781
- * \return <tt>*this</tt>.
782
- *
783
- * The following code snippet demonstrates the semantics of
784
- * \p device_reference's right shift assignment operator.
785
- *
786
- * \code
787
- * #include <thrust/device_vector.h>
788
- * #include <assert.h>
789
- * ...
790
- * thrust::device_vector<int> v(1,2);
791
- * thrust::device_ptr<int> ptr = &v[0];
792
- * thrust::device_reference<int> ref(ptr);
793
- *
794
- * // ref equals 2
795
- * assert(ref == 2);
796
- *
797
- * // the object pointed to by ptr equals 2
798
- * assert(*ptr == 2);
799
- *
800
- * // v[0] equals 2
801
- * assert(v[0] == 2);
802
- *
803
- * // right shift-assign ref
804
- * ref >>= 1;
805
- *
806
- * // ref equals 1
807
- * assert(ref == 1);
808
- *
809
- * // the object pointed to by ptr equals 1
810
- * assert(*ptr == 1);
811
- *
812
- * // v[0] equals 1
813
- * assert(v[0] == 1);
814
- * \endcode
815
- *
816
- * \note The right shift-assignment executes as if it were executed on the host.
817
- * This may change in a later version.
818
- */
819
- device_reference &operator>>=(const T &rhs);
820
-
821
- /*! Bitwise AND assignment operator AND-assigns the object referenced by this
822
- * \p device_reference and returns this \p device_reference.
823
- *
824
- * \param rhs The right hand side of the AND-assignment.
825
- * \return <tt>*this</tt>.
826
- *
827
- * The following code snippet demonstrates the semantics of
828
- * \p device_reference's AND assignment operator.
829
- *
830
- * \code
831
- * #include <thrust/device_vector.h>
832
- * #include <assert.h>
833
- * ...
834
- * thrust::device_vector<int> v(1,1);
835
- * thrust::device_ptr<int> ptr = &v[0];
836
- * thrust::device_reference<int> ref(ptr);
837
- *
838
- * // ref equals 1
839
- * assert(ref == 1);
840
- *
841
- * // the object pointed to by ptr equals 1
842
- * assert(*ptr == 1);
843
- *
844
- * // v[0] equals 1
845
- * assert(v[0] == 1);
846
- *
847
- * // right AND-assign ref
848
- * ref &= 0;
849
- *
850
- * // ref equals 0
851
- * assert(ref == 0);
852
- *
853
- * // the object pointed to by ptr equals 0
854
- * assert(*ptr == 0);
855
- *
856
- * // v[0] equals 0
857
- * assert(v[0] == 0);
858
- * \endcode
859
- *
860
- * \note The AND-assignment executes as if it were executed on the host.
861
- * This may change in a later version.
862
- */
863
- device_reference &operator&=(const T &rhs);
864
-
865
- /*! Bitwise OR assignment operator OR-assigns the object referenced by this
866
- * \p device_reference and returns this \p device_reference.
867
- *
868
- * \param rhs The right hand side of the OR-assignment.
869
- * \return <tt>*this</tt>.
870
- *
871
- * The following code snippet demonstrates the semantics of
872
- * \p device_reference's OR assignment operator.
873
- *
874
- * \code
875
- * #include <thrust/device_vector.h>
876
- * #include <assert.h>
877
- * ...
878
- * thrust::device_vector<int> v(1,0);
879
- * thrust::device_ptr<int> ptr = &v[0];
880
- * thrust::device_reference<int> ref(ptr);
881
- *
882
- * // ref equals 0
883
- * assert(ref == 0);
884
- *
885
- * // the object pointed to by ptr equals 0
886
- * assert(*ptr == 0);
887
- *
888
- * // v[0] equals 0
889
- * assert(v[0] == 0);
890
- *
891
- * // right OR-assign ref
892
- * ref |= 1;
893
- *
894
- * // ref equals 1
895
- * assert(ref == 1);
896
- *
897
- * // the object pointed to by ptr equals 1
898
- * assert(*ptr == 1);
899
- *
900
- * // v[0] equals 1
901
- * assert(v[0] == 1);
902
- * \endcode
903
- *
904
- * \note The OR-assignment executes as if it were executed on the host.
905
- * This may change in a later version.
906
- */
907
- device_reference &operator|=(const T &rhs);
908
-
909
- /*! Bitwise XOR assignment operator XOR-assigns the object referenced by this
910
- * \p device_reference and returns this \p device_reference.
911
- *
912
- * \param rhs The right hand side of the XOR-assignment.
913
- * \return <tt>*this</tt>.
914
- *
915
- * The following code snippet demonstrates the semantics of
916
- * \p device_reference's XOR assignment operator.
917
- *
918
- * \code
919
- * #include <thrust/device_vector.h>
920
- * #include <assert.h>
921
- * ...
922
- * thrust::device_vector<int> v(1,1);
923
- * thrust::device_ptr<int> ptr = &v[0];
924
- * thrust::device_reference<int> ref(ptr);
925
- *
926
- * // ref equals 1
927
- * assert(ref == 1);
928
- *
929
- * // the object pointed to by ptr equals 1
930
- * assert(*ptr == 1);
931
- *
932
- * // v[0] equals 1
933
- * assert(v[0] == 1);
934
- *
935
- * // right XOR-assign ref
936
- * ref ^= 1;
937
- *
938
- * // ref equals 0
939
- * assert(ref == 0);
940
- *
941
- * // the object pointed to by ptr equals 0
942
- * assert(*ptr == 0);
943
- *
944
- * // v[0] equals 0
945
- * assert(v[0] == 0);
946
- * \endcode
947
- *
948
- * \note The XOR-assignment executes as if it were executed on the host.
949
- * This may change in a later version.
950
- */
951
- device_reference &operator^=(const T &rhs);
952
- #endif // end doxygen-only members
953
- }; // end device_reference
954
-
955
- /*! swaps the value of one \p device_reference with another.
956
- * \p x The first \p device_reference of interest.
957
- * \p y The second \p device_reference of interest.
958
- */
959
- template<typename T>
960
- __host__ __device__
961
- void swap(device_reference<T> x, device_reference<T> y);
962
-
963
- // declare these methods for the purpose of Doxygenating them
964
- // they actually are defined for a derived-from class
965
- #if 0
966
- /*! Writes to an output stream the value of a \p device_reference.
967
- *
968
- * \param os The output stream.
969
- * \param y The \p device_reference to output.
970
- * \return os.
971
- */
972
- template<typename T, typename charT, typename traits>
973
- std::basic_ostream<charT, traits> &
974
- operator<<(std::basic_ostream<charT, traits> &os, const device_reference<T> &y);
975
- #endif
976
-
977
- /*! \}
978
- */
979
-
980
- } // end thrust
981
-
982
- #include <thrust/detail/device_reference.inl>
983
-
spaces/CVPR/Object-Detection-With-DETR-and-YOLOS/app.py DELETED
@@ -1,153 +0,0 @@
1
- import io
2
- import gradio as gr
3
- import matplotlib.pyplot as plt
4
- import requests, validators
5
- import torch
6
- import pathlib
7
- from PIL import Image
8
- from transformers import AutoFeatureExtractor, DetrForObjectDetection, YolosForObjectDetection
9
-
10
- import os
11
-
12
- # colors for visualization
13
- COLORS = [
14
- [0.000, 0.447, 0.741],
15
- [0.850, 0.325, 0.098],
16
- [0.929, 0.694, 0.125],
17
- [0.494, 0.184, 0.556],
18
- [0.466, 0.674, 0.188],
19
- [0.301, 0.745, 0.933]
20
- ]
21
-
22
- def make_prediction(img, feature_extractor, model):
23
- inputs = feature_extractor(img, return_tensors="pt")
24
- outputs = model(**inputs)
25
- img_size = torch.tensor([tuple(reversed(img.size))])
26
- processed_outputs = feature_extractor.post_process(outputs, img_size)
27
- return processed_outputs[0]
28
-
29
- def fig2img(fig):
30
- buf = io.BytesIO()
31
- fig.savefig(buf)
32
- buf.seek(0)
33
- img = Image.open(buf)
34
- return img
35
-
36
-
37
- def visualize_prediction(pil_img, output_dict, threshold=0.7, id2label=None):
38
- keep = output_dict["scores"] > threshold
39
- boxes = output_dict["boxes"][keep].tolist()
40
- scores = output_dict["scores"][keep].tolist()
41
- labels = output_dict["labels"][keep].tolist()
42
- if id2label is not None:
43
- labels = [id2label[x] for x in labels]
44
-
45
- plt.figure(figsize=(16, 10))
46
- plt.imshow(pil_img)
47
- ax = plt.gca()
48
- colors = COLORS * 100
49
- for score, (xmin, ymin, xmax, ymax), label, color in zip(scores, boxes, labels, colors):
50
- ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=color, linewidth=3))
51
- ax.text(xmin, ymin, f"{label}: {score:0.2f}", fontsize=15, bbox=dict(facecolor="yellow", alpha=0.5))
52
- plt.axis("off")
53
- return fig2img(plt.gcf())
54
-
55
- def detect_objects(model_name,url_input,image_input,threshold):
56
-
57
- #Extract model and feature extractor
58
- feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
59
-
60
- if 'detr' in model_name:
61
-
62
- model = DetrForObjectDetection.from_pretrained(model_name)
63
-
64
- elif 'yolos' in model_name:
65
-
66
- model = YolosForObjectDetection.from_pretrained(model_name)
67
-
68
- if validators.url(url_input):
69
- image = Image.open(requests.get(url_input, stream=True).raw)
70
-
71
- elif image_input:
72
- image = image_input
73
-
74
- #Make prediction
75
- processed_outputs = make_prediction(image, feature_extractor, model)
76
-
77
- #Visualize prediction
78
- viz_img = visualize_prediction(image, processed_outputs, threshold, model.config.id2label)
79
-
80
- return viz_img
81
-
82
- def set_example_image(example: list) -> dict:
83
- return gr.Image.update(value=example[0])
84
-
85
- def set_example_url(example: list) -> dict:
86
- return gr.Textbox.update(value=example[0])
87
-
88
-
89
- title = """<h1 id="title">Object Detection App with DETR and YOLOS</h1>"""
90
-
91
- description = """
92
- Links to HuggingFace Models:
93
- - [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50)
94
- - [facebook/detr-resnet-101](https://huggingface.co/facebook/detr-resnet-101)
95
- - [hustvl/yolos-small](https://huggingface.co/hustvl/yolos-small)
96
- - [hustvl/yolos-tiny](https://huggingface.co/hustvl/yolos-tiny)
97
- """
98
-
99
- models = ["facebook/detr-resnet-50","facebook/detr-resnet-101",'hustvl/yolos-small','hustvl/yolos-tiny']
100
- urls = ["https://c8.alamy.com/comp/J2AB4K/the-new-york-stock-exchange-on-the-wall-street-in-new-york-J2AB4K.jpg"]
101
-
102
- twitter_link = """
103
- [![](https://img.shields.io/twitter/follow/nickmuchi?label=@nickmuchi&style=social)](https://twitter.com/nickmuchi)
104
- """
105
-
106
- css = '''
107
- h1#title {
108
- text-align: center;
109
- }
110
- '''
111
- demo = gr.Blocks(css=css)
112
-
113
- with demo:
114
- gr.Markdown(title)
115
- gr.Markdown(description)
116
- gr.Markdown(twitter_link)
117
- options = gr.Dropdown(choices=models,label='Select Object Detection Model',show_label=True)
118
- slider_input = gr.Slider(minimum=0.2,maximum=1,value=0.7,label='Prediction Threshold')
119
-
120
- with gr.Tabs():
121
- with gr.TabItem('Image URL'):
122
- with gr.Row():
123
- url_input = gr.Textbox(lines=2,label='Enter valid image URL here..')
124
- img_output_from_url = gr.Image(shape=(650,650))
125
-
126
- with gr.Row():
127
- example_url = gr.Dataset(components=[url_input],samples=[[str(url)] for url in urls])
128
-
129
- url_but = gr.Button('Detect')
130
-
131
- with gr.TabItem('Image Upload'):
132
- with gr.Row():
133
- img_input = gr.Image(type='pil')
134
- img_output_from_upload= gr.Image(shape=(650,650))
135
-
136
- with gr.Row():
137
- example_images = gr.Dataset(components=[img_input],
138
- samples=[[path.as_posix()]
139
- for path in sorted(pathlib.Path('images').rglob('*.JPG'))])
140
-
141
- img_but = gr.Button('Detect')
142
-
143
-
144
- url_but.click(detect_objects,inputs=[options,url_input,img_input,slider_input],outputs=img_output_from_url,queue=True)
145
- img_but.click(detect_objects,inputs=[options,url_input,img_input,slider_input],outputs=img_output_from_upload,queue=True)
146
- example_images.click(fn=set_example_image,inputs=[example_images],outputs=[img_input])
147
- example_url.click(fn=set_example_url,inputs=[example_url],outputs=[url_input])
148
-
149
-
150
- gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=nickmuchi-object-detection-with-detr-and-yolos)")
151
-
152
-
153
- demo.launch(enable_queue=True)
spaces/CVPR/WALT/mmdet/core/bbox/samplers/sampling_result.py DELETED
@@ -1,152 +0,0 @@
1
- import torch
2
-
3
- from mmdet.utils import util_mixins
4
-
5
-
6
- class SamplingResult(util_mixins.NiceRepr):
7
- """Bbox sampling result.
8
-
9
- Example:
10
- >>> # xdoctest: +IGNORE_WANT
11
- >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
12
- >>> self = SamplingResult.random(rng=10)
13
- >>> print(f'self = {self}')
14
- self = <SamplingResult({
15
- 'neg_bboxes': torch.Size([12, 4]),
16
- 'neg_inds': tensor([ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
17
- 'num_gts': 4,
18
- 'pos_assigned_gt_inds': tensor([], dtype=torch.int64),
19
- 'pos_bboxes': torch.Size([0, 4]),
20
- 'pos_inds': tensor([], dtype=torch.int64),
21
- 'pos_is_gt': tensor([], dtype=torch.uint8)
22
- })>
23
- """
24
-
25
- def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
26
- gt_flags):
27
- self.pos_inds = pos_inds
28
- self.neg_inds = neg_inds
29
- self.pos_bboxes = bboxes[pos_inds]
30
- self.neg_bboxes = bboxes[neg_inds]
31
- self.pos_is_gt = gt_flags[pos_inds]
32
-
33
- self.num_gts = gt_bboxes.shape[0]
34
- self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
35
-
36
- if gt_bboxes.numel() == 0:
37
- # hack for index error case
38
- assert self.pos_assigned_gt_inds.numel() == 0
39
- self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
40
- else:
41
- if len(gt_bboxes.shape) < 2:
42
- gt_bboxes = gt_bboxes.view(-1, 4)
43
-
44
- self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
45
-
46
- if assign_result.labels is not None:
47
- self.pos_gt_labels = assign_result.labels[pos_inds]
48
- else:
49
- self.pos_gt_labels = None
50
-
51
- @property
52
- def bboxes(self):
53
- """torch.Tensor: concatenated positive and negative boxes"""
54
- return torch.cat([self.pos_bboxes, self.neg_bboxes])
55
-
56
- def to(self, device):
57
- """Change the device of the data inplace.
58
-
59
- Example:
60
- >>> self = SamplingResult.random()
61
- >>> print(f'self = {self.to(None)}')
62
- >>> # xdoctest: +REQUIRES(--gpu)
63
- >>> print(f'self = {self.to(0)}')
64
- """
65
- _dict = self.__dict__
66
- for key, value in _dict.items():
67
- if isinstance(value, torch.Tensor):
68
- _dict[key] = value.to(device)
69
- return self
70
-
71
- def __nice__(self):
72
- data = self.info.copy()
73
- data['pos_bboxes'] = data.pop('pos_bboxes').shape
74
- data['neg_bboxes'] = data.pop('neg_bboxes').shape
75
- parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
76
- body = ' ' + ',\n '.join(parts)
77
- return '{\n' + body + '\n}'
78
-
79
- @property
80
- def info(self):
81
- """Returns a dictionary of info about the object."""
82
- return {
83
- 'pos_inds': self.pos_inds,
84
- 'neg_inds': self.neg_inds,
85
- 'pos_bboxes': self.pos_bboxes,
86
- 'neg_bboxes': self.neg_bboxes,
87
- 'pos_is_gt': self.pos_is_gt,
88
- 'num_gts': self.num_gts,
89
- 'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
90
- }
91
-
92
- @classmethod
93
- def random(cls, rng=None, **kwargs):
94
- """
95
- Args:
96
- rng (None | int | numpy.random.RandomState): seed or state.
97
- kwargs (keyword arguments):
98
- - num_preds: number of predicted boxes
99
- - num_gts: number of true boxes
100
- p_ignore (float): probability of a predicted box assigned to \
101
- an ignored truth.
102
- - p_assigned (float): probability of a predicted box not being \
103
- assigned.
104
- - p_use_label (float | bool): with labels or not.
105
-
106
- Returns:
107
- :obj:`SamplingResult`: Randomly generated sampling result.
108
-
109
- Example:
110
- >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
111
- >>> self = SamplingResult.random()
112
- >>> print(self.__dict__)
113
- """
114
- from mmdet.core.bbox.samplers.random_sampler import RandomSampler
115
- from mmdet.core.bbox.assigners.assign_result import AssignResult
116
- from mmdet.core.bbox import demodata
117
- rng = demodata.ensure_rng(rng)
118
-
119
- # make probabalistic?
120
- num = 32
121
- pos_fraction = 0.5
122
- neg_pos_ub = -1
123
-
124
- assign_result = AssignResult.random(rng=rng, **kwargs)
125
-
126
- # Note we could just compute an assignment
127
- bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
128
- gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)
129
-
130
- if rng.rand() > 0.2:
131
- # sometimes algorithms squeeze their data, be robust to that
132
- gt_bboxes = gt_bboxes.squeeze()
133
- bboxes = bboxes.squeeze()
134
-
135
- if assign_result.labels is None:
136
- gt_labels = None
137
- else:
138
- gt_labels = None # todo
139
-
140
- if gt_labels is None:
141
- add_gt_as_proposals = False
142
- else:
143
- add_gt_as_proposals = True # make probabilistic?
144
-
145
- sampler = RandomSampler(
146
- num,
147
- pos_fraction,
148
- neg_pos_ub=neg_pos_ub,
149
- add_gt_as_proposals=add_gt_as_proposals,
150
- rng=rng)
151
- self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
152
- return self
spaces/CVPR/flava-multimodal-zero-shot/app.py DELETED
@@ -1,131 +0,0 @@
- import numpy as np
- import gradio as gr
- import torch
-
- from transformers import BertTokenizer, FlavaForPreTraining, FlavaModel, FlavaFeatureExtractor, FlavaProcessor
- from PIL import Image
-
-
- demo = gr.Blocks()
-
- tokenizer = BertTokenizer.from_pretrained("facebook/flava-full")
- flava_pt = FlavaForPreTraining.from_pretrained("facebook/flava-full")
- flava = FlavaModel.from_pretrained("facebook/flava-full")
- processor = FlavaProcessor.from_pretrained("facebook/flava-full")
- fe = FlavaFeatureExtractor.from_pretrained("facebook/flava-full")
-
-
- PREDICTION_ATTR = "mlm_logits"
-
- def zero_shot_text(text, options):
-     options = [option.strip() for option in options.split(";")]
-     option_indices = tokenizer.convert_tokens_to_ids(options)
-     tokens = tokenizer([text], return_tensors="pt")
-     mask_ids = tokens["input_ids"][0] == 103
-     with torch.no_grad():
-         output = flava_pt(**tokens)
-
-     text_logits = getattr(output, PREDICTION_ATTR)
-     probs = text_logits[0, mask_ids, option_indices].view(-1, len(option_indices)).mean(dim=0)
-     probs = torch.nn.functional.softmax(probs, dim=-1)
-     return {label: probs[idx].item() for idx, label in enumerate(options)}
-
-
- def zero_shot_image(image, options):
-     PIL_image = Image.fromarray(np.uint8(image)).convert("RGB")
-     labels = [label.strip() for label in options.split(";")]
-     image_input = fe([PIL_image], return_tensors="pt")
-     text_inputs = tokenizer(
-         labels, padding="max_length", return_tensors="pt"
-     )
-
-     image_embeddings = flava.get_image_features(**image_input)[:, 0, :]
-     text_embeddings = flava.get_text_features(**text_inputs)[:, 0, :]
-     similarities = list(
-         torch.nn.functional.softmax(
-             (text_embeddings @ image_embeddings.T).squeeze(0), dim=0
-         )
-     )
-     return {label: similarities[idx].item() for idx, label in enumerate(labels)}
-
- def zero_shot_multimodal(image, text, options):
-     options = [option.strip() for option in options.split(";")]
-     option_indices = tokenizer.convert_tokens_to_ids(options)
-     tokens = processor([image], [text], return_tensors="pt", return_codebook_pixels=True, return_image_mask=True)
-
-     mask_ids = tokens["input_ids"][0] == 103
-     tokens["bool_masked_pos"] = torch.ones_like(tokens["bool_masked_pos"])
-
-     with torch.no_grad():
-         output = flava_pt(**tokens)
-
-     text_logits = getattr(output, "mmm_text_logits")
-     probs = text_logits[0, mask_ids, option_indices].view(-1, len(option_indices)).mean(dim=0)
-     probs = torch.nn.functional.softmax(probs, dim=-1)
-     return {label: probs[idx].item() for idx, label in enumerate(options)}
-
- with demo:
-     gr.Markdown(
-         """
- # Zero-Shot image, text or multimodal classification using the same FLAVA model
-
- Click on one of the examples provided to load them into the UI and "Classify".
-
- - For image classification, provide class options to be ranked separated by `;`.
- - For text and multimodal classification, provide your 1) prompt with the word you want to be filled in as `[MASK]`, and 2) possible options to be ranked separated by `;`.
- """
-     )
-     with gr.Tabs():
-         with gr.TabItem("Zero-Shot Image Classification"):
-             with gr.Row():
-                 with gr.Column():
-                     image_input = gr.Image()
-                     text_options_i = gr.Textbox(label="Classes (separated by ;)")
-                     image_button = gr.Button("Classify")
-                     image_dataset = gr.Dataset(
-                         components=[image_input, text_options_i],
-                         samples=[
-                             ["cows.jpg", "a cow; two cows in a green field; a cow in a green field"],
-                             ["sofa.jpg", "a room with red sofa; a red room with sofa; ladder in a room"]
-                         ]
-                     )
-
-                 labels_image = gr.Label(label="Probabilities")
-         with gr.TabItem("Zero-Shot Text Classification"):
-             with gr.Row():
-                 with gr.Column():
-                     text_input = gr.Textbox(label="Prompt")
-                     text_options = gr.Textbox(label="Label options (separate by ;)")
-                     text_button = gr.Button("Classify")
-                     text_dataset = gr.Dataset(
-                         components=[text_input, text_options],
-                         samples=[
-                             ["by far the worst movie of the year. This was [MASK]", "negative; positive"],
-                             ["Lord Voldemort -- in the films; born Tom Marvolo Riddle) is a fictional character and the main antagonist in J.K. Rowling's series of Harry Potter novels. Voldemort first appeared in Harry Potter and the Philosopher's Stone, which was released in 1997. Voldemort appears either in person or in flashbacks in each book and its film adaptation in the series, except the third, Harry Potter and the Prisoner of Azkaban, where he is only mentioned. Question: are tom riddle and lord voldemort the same person? Answer: [MASK]", "no; yes"],
-                         ]
-                     )
-                 labels_text = gr.Label(label="Probabilities")
-         with gr.TabItem("Zero-Shot MultiModal Classification"):
-             with gr.Row():
-                 with gr.Column():
-                     image_input_mm = gr.Image()
-                     text_input_mm = gr.Textbox(label="Prompt")
-                     text_options_mm = gr.Textbox(label="Options (separate by ;)")
-                     multimodal_button = gr.Button("Classify")
-                     multimodal_dataset = gr.Dataset(
-                         components=[image_input_mm, text_input_mm],
-                         samples=[
-                             ["cows.jpg", "What animals are in the field? They are [MASK].", "cows; lions; sheep; monkeys"],
-                             ["sofa.jpg", "What furniture is in the room? It is [MASK].", "sofa; ladder; bucket"]
-                         ]
-                     )
-                 labels_multimodal = gr.Label(label="Probabilities")
-
-     text_button.click(zero_shot_text, inputs=[text_input, text_options], outputs=labels_text)
-     image_button.click(zero_shot_image, inputs=[image_input, text_options_i], outputs=labels_image)
-     multimodal_button.click(zero_shot_multimodal, inputs=[image_input_mm, text_input_mm, text_options_mm], outputs=labels_multimodal)
-     text_dataset.click(lambda a: a, inputs=[text_dataset], outputs=[text_input, text_options])
-     image_dataset.click(lambda a: a, inputs=[image_dataset], outputs=[image_input, text_options_i])
-     multimodal_dataset.click(lambda a: a, inputs=[multimodal_dataset], outputs=[image_input_mm, text_input_mm, text_options_mm])
-
- demo.launch()
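
The deleted app scores each `[MASK]` position against the `;`-separated options through the FLAVA MLM head. A minimal sketch of calling its own zero_shot_text helper outside Gradio, reusing one of the prompts bundled in the Space (nothing here is part of the original file; the options must be single tokens in the BERT vocabulary for convert_tokens_to_ids to resolve them):

# Hedged usage sketch, not part of the original app.py.
if __name__ == "__main__":
    scores = zero_shot_text(
        "by far the worst movie of the year. This was [MASK]",
        "negative; positive",
    )
    print(scores)  # dict mapping each option to its softmax probability
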
 
 
spaces/CVPR/lama-example/bin/calc_dataset_stats.py DELETED
@@ -1,88 +0,0 @@
- #!/usr/bin/env python3
-
- import os
-
- import numpy as np
- import tqdm
- from scipy.ndimage.morphology import distance_transform_edt
-
- from saicinpainting.evaluation.data import InpaintingDataset
- from saicinpainting.evaluation.vis import save_item_for_vis
-
-
- def main(args):
-     dataset = InpaintingDataset(args.datadir, img_suffix='.png')
-
-     area_bins = np.linspace(0, 1, args.area_bins + 1)
-
-     heights = []
-     widths = []
-     image_areas = []
-     hole_areas = []
-     hole_area_percents = []
-     known_pixel_distances = []
-
-     area_bins_count = np.zeros(args.area_bins)
-     area_bin_titles = [f'{area_bins[i] * 100:.0f}-{area_bins[i + 1] * 100:.0f}' for i in range(args.area_bins)]
-
-     bin2i = [[] for _ in range(args.area_bins)]
-
-     for i, item in enumerate(tqdm.tqdm(dataset)):
-         h, w = item['image'].shape[1:]
-         heights.append(h)
-         widths.append(w)
-         full_area = h * w
-         image_areas.append(full_area)
-         bin_mask = item['mask'] > 0.5
-         hole_area = bin_mask.sum()
-         hole_areas.append(hole_area)
-         hole_percent = hole_area / full_area
-         hole_area_percents.append(hole_percent)
-         bin_i = np.clip(np.searchsorted(area_bins, hole_percent) - 1, 0, len(area_bins_count) - 1)
-         area_bins_count[bin_i] += 1
-         bin2i[bin_i].append(i)
-
-         cur_dist = distance_transform_edt(bin_mask)
-         cur_dist_inside_mask = cur_dist[bin_mask]
-         known_pixel_distances.append(cur_dist_inside_mask.mean())
-
-     os.makedirs(args.outdir, exist_ok=True)
-     with open(os.path.join(args.outdir, 'summary.txt'), 'w') as f:
-         f.write(f'''Location: {args.datadir}
-
- Number of samples: {len(dataset)}
-
- Image height: min {min(heights):5d} max {max(heights):5d} mean {np.mean(heights):.2f}
- Image width: min {min(widths):5d} max {max(widths):5d} mean {np.mean(widths):.2f}
- Image area: min {min(image_areas):7d} max {max(image_areas):7d} mean {np.mean(image_areas):.2f}
- Hole area: min {min(hole_areas):7d} max {max(hole_areas):7d} mean {np.mean(hole_areas):.2f}
- Hole area %: min {min(hole_area_percents) * 100:2.2f} max {max(hole_area_percents) * 100:2.2f} mean {np.mean(hole_area_percents) * 100:2.2f}
- Dist 2known: min {min(known_pixel_distances):2.2f} max {max(known_pixel_distances):2.2f} mean {np.mean(known_pixel_distances):2.2f} median {np.median(known_pixel_distances):2.2f}
-
- Stats by hole area %:
- ''')
-         for bin_i in range(args.area_bins):
-             f.write(f'{area_bin_titles[bin_i]}%: '
-                     f'samples number {area_bins_count[bin_i]}, '
-                     f'{area_bins_count[bin_i] / len(dataset) * 100:.1f}%\n')
-
-     for bin_i in range(args.area_bins):
-         bindir = os.path.join(args.outdir, 'samples', area_bin_titles[bin_i])
-         os.makedirs(bindir, exist_ok=True)
-         bin_idx = bin2i[bin_i]
-         for sample_i in np.random.choice(bin_idx, size=min(len(bin_idx), args.samples_n), replace=False):
-             save_item_for_vis(dataset[sample_i], os.path.join(bindir, f'{sample_i}.png'))
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('datadir', type=str,
-                          help='Path to folder with images and masks (output of gen_mask_dataset.py)')
-     aparser.add_argument('outdir', type=str, help='Where to put results')
-     aparser.add_argument('--samples-n', type=int, default=10,
-                          help='Number of sample images with masks to copy for visualization for each area bin')
-     aparser.add_argument('--area-bins', type=int, default=10, help='How many area bins to have')
-
-     main(aparser.parse_args())
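
The script is driven entirely by the argparse block above. A minimal sketch of invoking main programmatically with the same fields argparse would produce (the two directory paths are placeholders, not taken from the original repository):

# Hedged usage sketch, not part of the original script.
import argparse

args = argparse.Namespace(
    datadir="/path/to/masked/dataset",  # placeholder: images + masks from gen_mask_dataset.py
    outdir="/path/to/stats/output",     # placeholder: where summary.txt and samples/ are written
    samples_n=10,
    area_bins=10,
)
main(args)  # collects size/hole statistics and saves per-bin sample visualizations
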
 
 
spaces/Cpp4App/Cpp4App/CDM/run_single.py DELETED
@@ -1,212 +0,0 @@
- from os.path import join as pjoin
- import cv2
- import os
- import shutil
- import time
- import json
- import CDM.detect_compo.ip_region_proposal as ip
- import CDM.detect_classify.classification as clf
- import pandas as pd
- import openai
-
- def summarize_segment(segment):
-     openai.api_key = os.environ.get('openai_key')
-
-     prompt = f"Shorten this paragraph: \"{str(segment)}\"."
-
-     response = openai.ChatCompletion.create(
-         # engine="text-davinci-002",
-         model="gpt-3.5-turbo",
-         messages=[
-             # {"role": "system", "content": "You are a helpful assistant."},
-             {"role": "user", "content": prompt}
-         ],
-         max_tokens=400,
-         n=1,
-         stop=None,
-         temperature=0,
-     )
-
-     shortened_segment = response.choices[0].message['content']
-
-     return shortened_segment
-
- def resize_height_by_longest_edge(img_path, resize_length=800):
-     org = cv2.imread(img_path)
-     height, width = org.shape[:2]
-     if height > width:
-         return resize_length
-     else:
-         return int(resize_length * (height / width))
-
- def run_single_img(input_img, output_root, segment_root):
-     # input_img_root = "./input_examples/"
-     # output_root = "./result_classification"
-     # segment_root = '../scrutinizing_alexa/txt'
-
-     if os.path.exists(output_root):
-         shutil.rmtree(output_root)
-     os.makedirs(output_root)
-
-     # image_list = os.listdir(input_img_root)
-     #
-     # input_imgs = [input_img_root + image_name for image_name in image_list]
-
-     key_params = {'min-grad': 4, 'ffl-block': 5, 'min-ele-area': 50, 'merge-contained-ele': True,
-                   'max-word-inline-gap': 10, 'max-line-ingraph-gap': 4, 'remove-top-bar': False}
-
-     is_ip = True
-     is_clf = False
-     is_ocr = True
-     is_merge = True
-     is_classification = True
-
-     # # Load deep learning models in advance
-     # compo_classifier = None
-     # if is_ip and is_clf:
-     #     compo_classifier = {}
-     #     from cnn.CNN import CNN
-     #     # compo_classifier['Image'] = CNN('Image')
-     #     compo_classifier['Elements'] = CNN('Elements')
-     #     # compo_classifier['Noise'] = CNN('Noise')
-     # ocr_model = None
-     if is_ocr:
-         import CDM.detect_text.text_detection as text
-
-     # set the range of target inputs' indices
-     # num = 0
-     # start_index = 30800  # 61728
-     # end_index = 100000
-
-     img_time_cost_all = []
-     ocr_time_cost_all = []
-     ic_time_cost_all = []
-     ts_time_cost_all = []
-     cd_time_cost_all = []
-
-     resize_by_height = 800
-     # for input_img in input_imgs:
-
-     output_data = pd.DataFrame(columns=['screenshot', 'id', 'label', 'index', 'text', 'sentences'])
-
-     this_img_start_time = time.process_time()
-
-     resized_height = resize_height_by_longest_edge(input_img, resize_by_height)
-     index = input_img.split('/')[-1][:-4]
-
-     # if index != "1-1" and index != "1-2":
-     #     continue
-
-     if is_ocr:
-         os.makedirs(pjoin(output_root, 'ocr'), exist_ok=True)
-         this_ocr_time_cost = text.text_detection(input_img, output_root, show=False, method='google')  # pytesseract
-         ocr_time_cost_all.append(this_ocr_time_cost)
-
-     if is_ip:
-         os.makedirs(pjoin(output_root, 'ip'), exist_ok=True)
-         this_cd_time_cost = ip.compo_detection(input_img, output_root, key_params,
-                                                resize_by_height=resized_height, show=False)
-         cd_time_cost_all.append(this_cd_time_cost)
-
-     if is_merge:
-         import CDM.detect_merge.merge as merge
-
-         os.makedirs(pjoin(output_root, 'merge'), exist_ok=True)
-         compo_path = pjoin(output_root, 'ip', str(index) + '.json')
-         ocr_path = pjoin(output_root, 'ocr', str(index) + '.json')
-         board_merge, components_merge = merge.merge(input_img, compo_path, ocr_path, pjoin(output_root, 'merge'),
-                                                     is_remove_top_bar=key_params['remove-top-bar'], show=False)
-         # ic_time_cost_all.append(this_ic_time_cost)
-         # ts_time_cost_all.append(this_ts_time_cost)
-
-     if is_classification:
-         os.makedirs(pjoin(output_root, 'classification'), exist_ok=True)
-         merge_path = pjoin(output_root, 'merge', str(index) + '.json')
-         merge_json = json.load(open(merge_path, 'r'))
-         os.makedirs(pjoin(output_root, 'classification', 'GUI'), exist_ok=True)
-         this_time_cost_ic, this_time_cost_ts, output_data, output_board = clf.compo_classification(input_img, output_root,
-                                                                                                    segment_root, merge_json,
-                                                                                                    output_data,
-                                                                                                    resize_by_height=resize_by_height, clf_model="ViT")
-
-         ic_time_cost_all.append(this_time_cost_ic)
-         ts_time_cost_all.append(this_time_cost_ts)
-
-     this_img_time_cost = time.process_time() - this_img_start_time
-     img_time_cost_all.append(this_img_time_cost)
-     print("time cost for this image: %2.2f s" % this_img_time_cost)
-
-     if os.path.isfile(output_root + '/output.csv'):
-         output_data.to_csv(output_root + '/output.csv', index=False, mode='a', header=False)
-     else:
-         output_data.to_csv(output_root + '/output.csv', index=False, mode='w')
-
-     # avg_ocr_time_cost = sum(ocr_time_cost_all) / len(ocr_time_cost_all)
-     # avg_cd_time_cost = sum(cd_time_cost_all) / len(cd_time_cost_all)
-     # avg_ic_time_cost = sum(ic_time_cost_all) / len(ic_time_cost_all)
-     # avg_ts_time_cost = sum(ts_time_cost_all) / len(ts_time_cost_all)
-     # avg_time_cost = sum(img_time_cost_all) / len(img_time_cost_all)
-     # print("average text extraction time cost for this app: %2.2f s" % avg_ocr_time_cost)
-     # print("average widget detection time cost for this app: %2.2f s" % avg_cd_time_cost)
-     # print("average icon classification time cost for this app: %2.2f s" % avg_ic_time_cost)
-     # print("average text selection processing time cost for this app: %2.2f s" % avg_ts_time_cost)
-     # print("average screenshot processing time cost for this app: %2.2f s" % avg_time_cost)
-
-     short_output_data = output_data[['id', 'label', 'text']].copy()
-     short_output_data = short_output_data.rename(columns={'text': 'segment'})
-
-     # summarize segments:
-
-     # original_output_data = short_output_data.copy()
-     # retries = 3
-     # for index in range(1, len(short_output_data)):
-     #     seg = short_output_data.loc[index, 'segment']
-     #     for i in range(retries):
-     #         try:
-     #             shortened_seg = summarize_segment(seg)
-     #             break
-     #         except openai.error.RateLimitError as e:
-     #             if "overloaded" in str(e):
-     #                 # Exponential backoff with jitter
-     #                 sleep_time = 2 * (2 ** i) + 0.1
-     #                 time.sleep(sleep_time)
-     #         except Exception as e:
-     #             # If you wish, you can print or log the exception details here without raising it
-     #             print(e)
-     #     else:
-     #         # This part will be executed if the for loop doesn't hit 'break'
-     #         shortened_seg = seg
-     #
-     #     short_output_data.loc[index, 'segment'] = shortened_seg
-
-     original_output = []
-     retries = 3
-     summarized_data = []  # List to hold summarized rows
-     for index, row in short_output_data.iterrows():
-         seg = row['segment']
-         for i in range(retries):
-             try:
-                 shortened_seg = summarize_segment(seg)
-                 break
-             except openai.error.RateLimitError as e:
-                 if "overloaded" in str(e):
-
-                     sleep_time = 2 * (2 ** i) + 0.1
-                     # sleep_time = 3
-                     time.sleep(sleep_time)
-             except Exception as e:
-                 # If you wish, you can print or log the exception details here without raising it
-                 print(e)
-         else:
-             # This part will be executed if the for loop doesn't hit 'break'
-             shortened_seg = seg
-
-         summarized_data.append({'id': row['id'], 'label': row['label'], 'segment': shortened_seg})
-         original_output.append({'id': row['id'], 'label': row['label'], 'segment': seg[0].upper() + seg[1:]})
-
-     summarized_output_data = pd.DataFrame(summarized_data)
-     original_output_data = pd.DataFrame(original_output)
-
-     return output_board, summarized_output_data, original_output_data
-
-
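
A minimal sketch of driving run_single_img for one screenshot (the three paths are placeholders taken from the commented defaults, and the openai_key environment variable must be set because summarize_segment reads it):

# Hedged usage sketch, not part of the original module.
board, summarized_df, original_df = run_single_img(
    "input_examples/1-1.png",       # placeholder screenshot path
    "./result_classification",      # output_root: wiped and recreated on every call
    "../scrutinizing_alexa/txt",    # placeholder segment_root with privacy-policy text
)
summarized_df.to_csv("summarized_segments.csv", index=False)
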
 
 
spaces/Cvandi/remake/realesrgan/__init__.py DELETED
@@ -1,6 +0,0 @@
- # flake8: noqa
- from .archs import *
- from .data import *
- from .models import *
- from .utils import *
- #from .version import *
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cu2qu/errors.py DELETED
@@ -1,77 +0,0 @@
- # Copyright 2016 Google Inc. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-
- class Error(Exception):
-     """Base Cu2Qu exception class for all other errors."""
-
-
- class ApproxNotFoundError(Error):
-     def __init__(self, curve):
-         message = "no approximation found: %s" % curve
-         super().__init__(message)
-         self.curve = curve
-
-
- class UnequalZipLengthsError(Error):
-     pass
-
-
- class IncompatibleGlyphsError(Error):
-     def __init__(self, glyphs):
-         assert len(glyphs) > 1
-         self.glyphs = glyphs
-         names = set(repr(g.name) for g in glyphs)
-         if len(names) > 1:
-             self.combined_name = "{%s}" % ", ".join(sorted(names))
-         else:
-             self.combined_name = names.pop()
-
-     def __repr__(self):
-         return "<%s %s>" % (type(self).__name__, self.combined_name)
-
-
- class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
-     def __str__(self):
-         return "Glyphs named %s have different number of segments" % (
-             self.combined_name
-         )
-
-
- class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
-     def __init__(self, glyphs, segments):
-         IncompatibleGlyphsError.__init__(self, glyphs)
-         self.segments = segments
-
-     def __str__(self):
-         lines = []
-         ndigits = len(str(max(self.segments)))
-         for i, tags in sorted(self.segments.items()):
-             lines.append(
-                 "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
-             )
-         return "Glyphs named %s have incompatible segment types:\n  %s" % (
-             self.combined_name,
-             "\n  ".join(lines),
-         )
-
-
- class IncompatibleFontsError(Error):
-     def __init__(self, glyph_errors):
-         self.glyph_errors = glyph_errors
-
-     def __str__(self):
-         return "fonts contains incompatible glyphs: %s" % (
-             ", ".join(repr(g) for g in sorted(self.glyph_errors.keys()))
-         )
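
Every specific exception above derives from the single Error base class, so callers can catch the whole family with one handler. A minimal sketch using only names defined in this file (the curve tuple is an illustrative value, not from the original code):

# Hedged sketch, not part of the original errors.py.
try:
    raise ApproxNotFoundError(curve=((0, 0), (1, 1), (2, 1), (3, 0)))
except Error as exc:  # also catches IncompatibleGlyphsError and its subclasses
    print(type(exc).__name__, exc.curve)
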
 
 
spaces/Detomo/Image-Classification/app.py DELETED
@@ -1,81 +0,0 @@
- import torch
- import torch.nn.functional as F
- from torch import optim
- from torch.nn import Module
- from torchvision import models, transforms
- from torchvision.datasets import ImageFolder
- from PIL import Image
- import numpy as np
- import onnxruntime
- import gradio as gr
- import json
-
-
- def get_image(x):
-     return x.split(', ')[0]
-
-
- def to_numpy(tensor):
-     return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
-
-
- # Transform image to ToTensor
- def transform_image(myarray):
-     transform = transforms.Compose([
-         transforms.Resize(224),
-         transforms.CenterCrop(224),
-         transforms.ToTensor(),
-         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
-     ])
-     image = Image.fromarray(np.uint8(myarray)).convert('RGB')
-     image = transform(image).unsqueeze(0)
-     return image
-
-
- f = open('imagenet_label.json',)
- label_map = json.load(f)
- f.close()
-
- # Load list of images for similarity
- sub_test_list = open('img_list.txt', 'r')
- sub_test_list = [i.strip() for i in sub_test_list]
-
- # Load images embedding for similarity
- embeddings = torch.load('embeddings.pt')
-
- # Configure
- options = onnxruntime.SessionOptions()
- options.intra_op_num_threads = 8
- options.inter_op_num_threads = 8
-
- # Load model
- PATH = 'model_onnx.onnx'
- ort_session = onnxruntime.InferenceSession(PATH, sess_options=options)
- input_name = ort_session.get_inputs()[0].name
-
-
- # predict multi-level classification
- def get_classification(img):
-
-     image_tensor = transform_image(img)
-     ort_inputs = {input_name: to_numpy(image_tensor)}
-     x = ort_session.run(None, ort_inputs)
-     predictions = torch.topk(torch.from_numpy(x[0]), k=5).indices.squeeze(0).tolist()
-
-     result = {}
-     for i in predictions:
-         label = label_map[str(i)]
-         prob = x[0][0, i].item()
-         result[label] = prob
-     return result
-
-
- iface = gr.Interface(
-     get_classification,
-     gr.inputs.Image(shape=(200, 200)),
-     outputs="label",
-     title = 'Universal Image Classification',
-     description = "Imagenet classification from Mobilenetv3 converted to ONNX runtime",
-     article = "Author: <a href=\"https://huggingface.co/vumichien\">Vu Minh Chien</a>.",
- )
- iface.launch()
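
A minimal sketch of calling the ONNX classifier directly, bypassing the Gradio interface (the image path is a placeholder; get_classification expects an HxWx3 uint8 array, as Gradio would pass from gr.inputs.Image):

# Hedged usage sketch, not part of the original app.py.
img = np.array(Image.open("example.jpg").convert("RGB"))  # placeholder path
top5 = get_classification(img)
for label, score in top5.items():
    print(f"{label}: {score:.3f}")  # top-5 labels with their raw model scores
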
 
 
spaces/EdBianchi/ThemeParksAccidents_RDF-SPARQL/app.py DELETED
@@ -1,297 +0,0 @@
- # IMPORTING TOOLS
- import streamlit as st
- from rdflib import Graph, Literal
- from rdflib.plugins.sparql import prepareQuery
- import pandas as pd
- import plotly.express as px
- import numpy as np
-
- # SET PAGE SETTINGS
- st.set_page_config(page_title='Amusement Accidents', layout="centered")
-
-
- # METHOD TO LOAD THE RDF
- @st.cache(persist=True)
- def importRDF(filename, format):
-     graph = Graph().parse(filename, format)
-     return graph
-
- # IMPORTING THE RDF
- with st.spinner('Loading all the stuffs...'):
-     graph = importRDF("rdf-dataset.ttl", "ttl")
-
- # METHOD TO CONVERT THE QUERY RESULT INTO A DATAFRAME
- def sparql_results_to_df(results):
-     return pd.DataFrame(
-         data=([None if x is None else x.toPython() for x in row] for row in results),
-         columns=[str(x) for x in results.vars],
-     )
-
- # METHOD TO EXECUTE A GENERIC QUERY
- def computeQuery(query, executor):
-     result = executor.query(query)
-     res_df = sparql_results_to_df(result)
-     return res_df
-
- # METHOD TO EXECUTE A PARAMETRIC QUERY
- def rideAccidentDescription(ride_name, executor):
-     ride_name = Literal(ride_name)
-     query = """
- PREFIX ride_type: <http://example.org/ride_type#>
- PREFIX acc: <http://example.org/accident#>
- PREFIX ride: <http://example.org/ride#>
- SELECT (?manuf AS ?Manufacturer) (?description AS ?Accident_Description)
- WHERE {
- ?instance acc:description ?description ;
- acc:ref-ride_id ?ride_id .
- ?ride_id ride:name ?name ;
- ride:manufacturer ?manuf .
- FILTER (?name = ?ride_name)
- }
- """
-     prep_query = prepareQuery(query)
-     r = executor.query(prep_query, initBindings={'ride_name': ride_name})
-     return sparql_results_to_df(r), query
-
- # PROCESSING & DISPLAY
- def display():
-     with st.container():
-         st.write("#### What are the months with the highest number of accidents?")
-         res = computeQuery(query_5, graph)
-         fig = px.bar(res, x="mon", y="count", color="count", labels={"mon":"Month", "count":"Num. of Accidents"}, text_auto="True")
-         fig.update_xaxes(type="category")
-         fig.update_yaxes(showticklabels=False)
-         st.plotly_chart(fig, use_container_width=True)
-         with st.expander("Show query"):
-             st.code(query_5, language="sparql")
-         st.markdown("---")
-
-     with st.container():
-         st.write("#### Which cities and states have recorded the most accidents?")
-         res = computeQuery(query_8, graph)
-         fig = px.treemap(res, path=[px.Constant("U.S"), "state", "city"], values="count", hover_data=["state", "city","count"],
-                          color="count",
-                          color_continuous_scale='tealrose',
-                          color_continuous_midpoint=np.average(res['count'], weights=res['count']))
-         st.plotly_chart(fig, use_container_width=True)
-         with st.expander("Show query"):
-             st.code(query_8, language="sparql")
-         st.markdown("---")
-
-     with st.container():
-         st.write("#### What incidents have occurred on your favorite ride?")
-         ride_names = computeQuery(query_0, graph)
-         option = st.selectbox("Select a Ride", options=ride_names)
-         res, query = rideAccidentDescription(option, graph)
-         res_count = res.count()[0]
-         if (res_count < 3):
-             st.table(res)
-         else:
-             limit = st.slider("Num. of Accidents to Visualize", 1, int(res_count), 2, 1)
-             st.table(res[:limit])
-         with st.expander("Show query"):
-             st.code(query, language="sparql")
-         st.markdown("---")
-
-     with st.container():
-         st.write("#### What Are the Most Common Categories of Accidents?")
-         res = computeQuery(query_4, graph)
-         fig = px.treemap(res, path=[px.Constant("Accident Category"), "category_name"], values="count", hover_data=["category_name","count"])
-         st.plotly_chart(fig, use_container_width=True)
-         with st.expander("Show query"):
-             st.code(query_4, language="sparql")
-         st.markdown("---")
-
-     with st.container():
-         st.write("#### What are the Most Dangerous Ride Categories?")
-         res = computeQuery(query_6, graph)
-         fig = px.pie(res, names="amus_cat_name", values="count", hole=.4)
-         st.plotly_chart(fig, use_container_width=True)
-         with st.expander("Show query"):
-             st.code(query_6, language="sparql")
-         st.markdown("---")
-
-     with st.container():
-         st.write("#### What are the Most Dangerous Ride Types?")
-         res = computeQuery(query_3, graph)
-         fig = px.bar(res, x="type_name", y="count", labels={"type_name":"Ride Type", "count":"Num. of Accidents"}, text_auto=True)
-         fig.update_xaxes(tickangle=45)
-         st.plotly_chart(fig, use_container_width=True)
-         with st.expander("Show query"):
-             st.code(query_3, language="sparql")
-         st.markdown("---")
-
-     with st.container():
-         st.write("#### How many people are generally involved in an accident?")
-         res = computeQuery(query_1, graph)
-         fig = px.bar(res, x="num_inj", y="count", labels={"num_inj":"Injured People", "count":"Num. of Accidents"}, text_auto=True)
-         fig.update_xaxes(type="category")
-         st.plotly_chart(fig, use_container_width=True)
-         with st.expander("Show query"):
-             st.code(query_1, language="sparql")
-         st.markdown("---")
-
-     return None
-
- # ANALYTICAL QUERIES DEFINITION
- # get the names of all the rides
- query_0 = """
- PREFIX ride:<http://example.org/ride#>
-
- SELECT DISTINCT ?name
- WHERE {
- ?ride ride:name ?name .
- }
- """
- # num of accidents per injured people
- query_1 = """
- PREFIX r:<http://example.org/ride#>
- PREFIX a:<http://example.org/accident#>
-
- SELECT ?num_inj (COUNT(?num_inj) AS ?count)
- WHERE {
- ?acc a:num_injured ?num_inj .
- }
- GROUP BY ?num_inj
- ORDER BY (?num_inj)
- """
-
- # manufacturers of the rides subjected to most accidents
- query_2 = """
- PREFIX acc: <http://example.org/accident#>
- PREFIX ride: <http://example.org/ride#>
-
- SELECT ?ride_manuf (COUNT(?ride_manuf) AS ?count)
- WHERE {
- ?instance acc:ref-ride_id ?ride_id .
- ?ride_id ride:manufacturer ?ride_manuf
- }
- GROUP BY ?ride_manuf
- ORDER BY DESC(?count)
- """
-
- # Top n types of rides most subjected to accidents
- query_3 = """
- PREFIX ride_type: <http://example.org/ride_type#>
- PREFIX acc: <http://example.org/accident#>
- PREFIX ride: <http://example.org/ride#>
-
- SELECT ?type_name (COUNT(?type_name) AS ?count)
- WHERE {
- ?instance acc:ref-ride_id ?ride_id .
- ?ride_id ride:ref-ride_type_id ?type_id .
- ?type_id ride_type:type ?type_name .
- }
- GROUP BY ?type_name
- ORDER BY DESC(?count)
- LIMIT 7
- """
-
- # Top 6 categories of rides most subjected to accidents
- query_6 = """
- PREFIX amusement_cat: <http://example.org/amusement_category#>
- PREFIX ride_type: <http://example.org/ride_type#>
- PREFIX acc: <http://example.org/accident#>
- PREFIX ride: <http://example.org/ride#>
-
- SELECT ?amus_cat_name (COUNT(?amus_cat_name) AS ?count)
- WHERE {
- ?instance acc:ref-ride_id ?ride_id .
- ?ride_id ride:ref-ride_type_id ?type_id .
- ?type_id ride_type:ref-amusement_category_id ?amus_cat_id .
- ?amus_cat_id amusement_cat:amusement_category ?amus_cat_name .
- }
- GROUP BY ?amus_cat_name
- ORDER BY DESC(?count)
- LIMIT 6
-
- """
-
- # most common categories of accidents
- query_4 = """
- PREFIX acc_cat: <http://example.org/accident_category#>
- PREFIX acc: <http://example.org/accident#>
-
- SELECT ?category_name (COUNT(?category_name) AS ?count)
- WHERE {
- ?instance acc:ref-accident_category_id ?category_id .
- ?category_id acc_cat:accident_category ?category_name .
- }
- GROUP BY ?category_name
- ORDER BY DESC(?count)
- """
-
- # months with the higher num of accidents
- query_5 = """
- PREFIX acc: <http://example.org/accident#>
-
- SELECT ?mon (COUNT(?mon) AS ?count)
- WHERE {
- ?instance acc:date ?date .
- }
- GROUP BY (month(?date) AS ?mon)
- ORDER BY (?mon)
- """
-
- # cities with the higher num of accidents
- query_8 = """
- PREFIX location: <http://example.org/location#>
- PREFIX acc: <http://example.org/accident#>
-
- SELECT ?city (COUNT(?city) AS ?count) ?state
- WHERE {
- ?instance acc:ref-location_id ?location_id .
- ?location_id location:city ?city ;
- location:state ?state
- }
- GROUP BY ?city
- ORDER BY DESC(?count)
-
- """
-
-
- # TITLE
- st.header("Theme Park Ride Accidents")
- st.markdown("""There are **thousands of amusement parks** around the world that welcome **millions of visitors** each year.
- Children, families, and teenagers are ready to spend days of adrenaline and fun.
- Unfortunately, **accidents sometimes occur**. This raises some questions: **Are amusement parks safe? Which rides are the most accident-prone? What accidents happen most often? At what time of year are accidents most common?**
- Let's try to find out in this **RDF data exploration** using **SPARQL** and **Plotly**.""")
- st.markdown("---")
-
- display()
-
- # WRITE & RUN YOUR OWN QUERY
- st.write("#### Write & Run your Custom Query")
- pers_query = st.text_area('', """
- PREFIX ride:<http://example.org/ride#>
- SELECT ?name
- WHERE {
- ?ride ride:manufacturer "Vekoma" ;
- ride:name ?name
- }
- """, height=200)
- with st.container():
-     try:
-         res = computeQuery(pers_query, graph)
-         st.table(res)
-     except:
-         st.error("Ooops! Check your query syntax...")
- st.markdown("---")
-
- # SIDEBAR
- with st.sidebar:
-     st.write("""
- This App proposes some visualizations about theme park ride accidents.
- The original dataset comes from "Saferparks", an organization that reports and collects data about theme park ride accidents in the US.
- The original dataset covers years from 2010 to 2017 and comes in CSV or Excel format. I used python to split the dataset and convert it into the
- Third Normal Form (3NF) of Database.
- I uploaded the data into a PostgreSQL database and I used the Ontop tool to get the final RDF dataset.
- Queries are expressed in SPARQL, and charts are generated with Plotly Express.
- """)
-     st.markdown("---")
-     st.markdown("## Dataset Resources:")
-     st.markdown("""
- Saferparks Original Dataset: https://ridesdatabase.org/saferparks/data/
-
- Saferparks Dataset Description: https://ridesdatabase.org/wp-content/uploads/2020/02/Saferparks-data-description.pdf
- """)
 
 
spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/nets_new.py DELETED
@@ -1,133 +0,0 @@
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- from . import layers_new
-
-
- class BaseNet(nn.Module):
-     def __init__(
-         self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))
-     ):
-         super(BaseNet, self).__init__()
-         self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1)
-         self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1)
-         self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1)
-         self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1)
-         self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1)
-
-         self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True)
-
-         self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1)
-         self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1)
-         self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1)
-         self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm)
-         self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1)
-
-     def __call__(self, x):
-         e1 = self.enc1(x)
-         e2 = self.enc2(e1)
-         e3 = self.enc3(e2)
-         e4 = self.enc4(e3)
-         e5 = self.enc5(e4)
-
-         h = self.aspp(e5)
-
-         h = self.dec4(h, e4)
-         h = self.dec3(h, e3)
-         h = self.dec2(h, e2)
-         h = torch.cat([h, self.lstm_dec2(h)], dim=1)
-         h = self.dec1(h, e1)
-
-         return h
-
-
- class CascadedNet(nn.Module):
-     def __init__(self, n_fft, nout=32, nout_lstm=128):
-         super(CascadedNet, self).__init__()
-
-         self.max_bin = n_fft // 2
-         self.output_bin = n_fft // 2 + 1
-         self.nin_lstm = self.max_bin // 2
-         self.offset = 64
-
-         self.stg1_low_band_net = nn.Sequential(
-             BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm),
-             layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0),
-         )
-
-         self.stg1_high_band_net = BaseNet(
-             2, nout // 4, self.nin_lstm // 2, nout_lstm // 2
-         )
-
-         self.stg2_low_band_net = nn.Sequential(
-             BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm),
-             layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0),
-         )
-         self.stg2_high_band_net = BaseNet(
-             nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2
-         )
-
-         self.stg3_full_band_net = BaseNet(
-             3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm
-         )
-
-         self.out = nn.Conv2d(nout, 2, 1, bias=False)
-         self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False)
-
-     def forward(self, x):
-         x = x[:, :, : self.max_bin]
-
-         bandw = x.size()[2] // 2
-         l1_in = x[:, :, :bandw]
-         h1_in = x[:, :, bandw:]
-         l1 = self.stg1_low_band_net(l1_in)
-         h1 = self.stg1_high_band_net(h1_in)
-         aux1 = torch.cat([l1, h1], dim=2)
-
-         l2_in = torch.cat([l1_in, l1], dim=1)
-         h2_in = torch.cat([h1_in, h1], dim=1)
-         l2 = self.stg2_low_band_net(l2_in)
-         h2 = self.stg2_high_band_net(h2_in)
-         aux2 = torch.cat([l2, h2], dim=2)
-
-         f3_in = torch.cat([x, aux1, aux2], dim=1)
-         f3 = self.stg3_full_band_net(f3_in)
-
-         mask = torch.sigmoid(self.out(f3))
-         mask = F.pad(
-             input=mask,
-             pad=(0, 0, 0, self.output_bin - mask.size()[2]),
-             mode="replicate",
-         )
-
-         if self.training:
-             aux = torch.cat([aux1, aux2], dim=1)
-             aux = torch.sigmoid(self.aux_out(aux))
-             aux = F.pad(
-                 input=aux,
-                 pad=(0, 0, 0, self.output_bin - aux.size()[2]),
-                 mode="replicate",
-             )
-             return mask, aux
-         else:
-             return mask
-
-     def predict_mask(self, x):
-         mask = self.forward(x)
-
-         if self.offset > 0:
-             mask = mask[:, :, :, self.offset : -self.offset]
-             assert mask.size()[3] > 0
-
-         return mask
-
-     def predict(self, x, aggressiveness=None):
-         mask = self.forward(x)
-         pred_mag = x * mask
-
-         if self.offset > 0:
-             pred_mag = pred_mag[:, :, :, self.offset : -self.offset]
-             assert pred_mag.size()[3] > 0
-
-         return pred_mag
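
A minimal sketch of instantiating the network on a dummy spectrogram batch; the tensor shape is an assumption consistent with the constructor (2 channels, n_fft // 2 + 1 frequency bins, and more than 2 * offset time frames), and it presumes the companion layers_new module is importable:

# Hedged usage sketch, not part of the original nets_new.py.
if __name__ == "__main__":
    net = CascadedNet(n_fft=2048).eval()
    dummy = torch.randn(1, 2, 1025, 256)  # (batch, channels, freq bins, frames) - assumed shape
    with torch.no_grad():
        mask = net.predict_mask(dummy)     # trimmed by `offset` frames on each side
    print(mask.shape)
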