Commit 9599f93
1 Parent(s): 3c068de
Update parquet files (step 125 of 249)

This view is limited to 50 files because it contains too many changes.
- spaces/12Venusssss/text_generator/README.md +0 -12
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/8 12 Cung G [HOT].md +0 -15
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avunu Valliddaru Istapaddaru Full Movie Download Experience the Magic of Telugu Cinema with this Film.md +0 -168
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bmw Scanner 140 Full Version Unlock Version with Software Download.md +0 -133
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Der Herr Der Ringe Die Schlacht Um Mittelerde 2 German Pc Iso.md +0 -86
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (torrent lumion 3.0pro x86) - Experience the power of Lumion the best 3D rendering software.md +0 -96
- spaces/1gistliPinn/ChatGPT4/Examples/Behen Hogi Teri Full Hd Movie 1080p NEW.md +0 -130
- spaces/1gistliPinn/ChatGPT4/Examples/Chota Bheem Jam Jam Jambura Full Movie In Hindi Free Download.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/DOWNLOAD XBLA Unlocker V1.6.rar.rar.md +0 -22
- spaces/1gistliPinn/ChatGPT4/Examples/Danza Kuduro Video 1080p Download Enjoy the Best Quality of the Latin Dance Anthem.md +0 -5
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/lp_main.py +0 -670
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/indexed_datasets.py +0 -71
- spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/griffin_lim.py +0 -85
- spaces/AP123/IllusionDiffusion/illusion_style.py +0 -10
- spaces/AP123/Upside-Down-Diffusion/app.py +0 -382
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov6_s_fast.py +0 -124
- spaces/AchyuthGamer/OpenGPT/client/css/settings.css +0 -44
- spaces/AiBototicus/BucksAI-4/app.py +0 -3
- spaces/Aki004/herta-so-vits/inference/slicer.py +0 -142
- spaces/Alesteba/NeRF_ficus-pxl/transformations.py +0 -47
- spaces/AlexWang/lama/models/ade20k/resnet.py +0 -181
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/training_stats.py +0 -268
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py +0 -272
- spaces/Andy1621/uniformer_image_detection/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py +0 -77
- spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py +0 -45
- spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py +0 -13
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/rpn_test_mixin.py +0 -59
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/README.md +0 -66
- spaces/AnimeStudio/anime-models/app.py +0 -198
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Chat-mode.md +0 -39
- spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/tags.py +0 -487
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py +0 -68
- spaces/AzumaSeren100/XuanShen-Bert-VITS2/preprocess_text.py +0 -65
- spaces/Benson/text-generation/Examples/Bubble Shooter Genies Apk.md +0 -72
- spaces/BernardoOlisan/vqganclip/app.py +0 -392
- spaces/CVMX-jaca-tonos/Spanish-Audio-Transcription-to-Quechua-Translation/app.py +0 -58
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/replace.h +0 -22
- spaces/CVPR/regionclip-demo/detectron2/engine/__init__.py +0 -12
- spaces/CVPR/regionclip-demo/detectron2/structures/keypoints.py +0 -230
- spaces/Chaitanya01/InvestingPlatform/app.py +0 -1297
- spaces/ChihChiu29/mychatbot/main.py +0 -72
- spaces/Cropinky/esrgan/realesrgan/models/realesrnet_model.py +0 -188
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py +0 -73
- spaces/DEEMOSTECH/ChatAvatar/static/css/main.629d4bc9.css +0 -2
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/FpxImagePlugin.py +0 -253
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/setters.py +0 -73
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ce791c16.js +0 -2
- spaces/Dagfinn1962/stablediffusion-members/main.css +0 -61
spaces/12Venusssss/text_generator/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Text Generator
-emoji: 📚
-colorFrom: gray
-colorTo: pink
-sdk: gradio
-sdk_version: 3.11.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1acneusushi/gradio-2dmoleculeeditor/data/8 12 Cung G [HOT].md
DELETED
@@ -1,15 +0,0 @@
-
-<code>
-<h1>What zodiac sign is 8/12? Learn about people born on December 8 among the 12 zodiac signs</h1>
-<p>Do you know which zodiac sign people born on December 8 belong to? Would you like to learn about their personality, interests, love life, career, and destiny? If so, join us in exploring this article to find out what sign 8/12 is and the interesting things about people born on this day.</p>
-<h2>8 12 cung gì</h2><br /><p><b><b>DOWNLOAD</b> ————— <a href="https://byltly.com/2uKwBF">https://byltly.com/2uKwBF</a></b></p><br /><br />
-<h2>What sign is 8/12 among the 12 zodiac signs?</h2>
-<p>According to the method of determining zodiac signs by date of birth, people born on December 8 fall under <b>Sagittarius</b>. Sagittarius is symbolized by an archer drawing a bow. It is one of the three Fire signs, alongside Leo and Aries. Sagittarius is ruled by Jupiter, so its natives are dynamic, idealistic, and adventurous.</p>
-<h2>The personality of people born on December 8</h2>
-<p>People born on December 8 have a very special personality that sets them apart from others. They are generous, optimistic, and have a wonderful sense of humor. They have broad knowledge and love freedom; their life philosophy is to be quick-witted, resourceful, and to enjoy life. They do not fear hardship and are always ready for new challenges. They are people who dare to dream and dare to pursue their dreams.</p>
-<p>However, people born on December 8 also have their weaknesses. They may promise more than they can deliver, are very impatient, and will say anything no matter how undisciplined it is. They can be overconfident and arrogant, refusing to listen to other people's advice. They can also be too freewheeling and irresponsible in certain situations.</p>
-<h2>The interests of people born on December 8</h2>
-<p>People born on December 8 have very diverse and rich interests. They enjoy outdoor activities, travel, and exploring new places and different cultures</p>
-<p></p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avunu Valliddaru Istapaddaru Full Movie Download Experience the Magic of Telugu Cinema with this Film.md
DELETED
@@ -1,168 +0,0 @@
-<br />
-<h1>Avunu Valliddaru Istapaddaru Full Movie Download: A Romantic Comedy by Vamsy</h1>
-<p>If you are looking for a fun-filled movie to watch with your family or friends, you might want to check out <b>Avunu Valliddaru Istapaddaru</b>, a Telugu romantic comedy film written and directed by Vamsy. This movie was released in 2002 and won many awards including the Andhra Pradesh State Nandi Award. It features Ravi Teja, Kalyani, Prasanna, Krishna Bhagavan, Sankaramanchi Parthasarathi, Mallikarjuna Rao, Jeeva, etc. in prominent roles. The film's music is composed by Chakri.</p>
-<p>In this article, we will give you a brief overview of the movie, its plot, its characters, its music, its direction, and its availability for download. We will also answer some frequently asked questions about the movie at the end. So, let's get started!</p>
-<h2>Avunu Valliddaru Istapaddaru Full Movie Download</h2><br /><p><b><b>DOWNLOAD</b> ○ <a href="https://byltly.com/2uKzMi">https://byltly.com/2uKzMi</a></b></p><br /><br />
-<h2>The Plot of Avunu Valliddaru Istapaddaru</h2>
-<p>The plot of <b>Avunu Valliddaru Istapaddaru</b> revolves around two parallel stories that eventually converge into one hilarious climax. The stories are:</p>
-<h3>The Story of Anil and Swathi</h3>
-<p>Anil (Ravi Teja) is a well-educated but unemployed youth who comes to Hyderabad in search of a job. After attending a hundred interviews, he is offered a night watchman job by an employer who is impressed by Anil's honesty and dignity of labour. Anil starts looking for a room to stay in a nearby colony.</p>
-<p>Satyanandam (Jeeva) stays in the colony and takes care of a house of his friend living in America, collecting rent for him. Swathi (Kalyani) stays in that house and works in a software firm. Satyanandam is interested in collecting a second rent for himself and offers the house to Anil on the condition that he can stay there only during the day, without Swathi's knowledge. Anil agrees and moves into the house.</p>
-<p>Anil is impressed by the way the room is decorated and artfully arranged and understands that the lady staying there has a very good taste. The colony is full of comical characters very typical of Vamsy's films. Satyanandam is a miser trying to make money by what ever means he can. His crazy brother-in-law (Krishna Bhagavan) keeps creating trouble for him and others in the colony. The washer-man (Mallikarjuna Rao) sells his crazy ideas to people. Potti Raju (Kondavalasa Lakshmana Rao) keeps making several attempts to start his own business but always ends up in a loss. The interaction of these characters with each other and the humorous situations that arise form the backbone to the movie's story line.</p>
-<p>After a month, Anil accidentally breaks Swathi's porcelain artefact in the room and writes a letter to her apologising for his mistake. Swathi comes across the letter, and learns that someone else has been staying in her room without her knowledge. But she takes a liking for Anil's honesty and lets him stay in her house when she is not there. Both keep communicating through letters and become good friends, gradually falling in love without seeing each other.</p>
-<p>Anil and Swathi also happen to meet in a restaurant when she accidentally accuses him of stealing her purse. They start off as enemies but become good friends without knowing that they are roommates too. One day, Anil discovers that the friend he has been meeting outside is none other than his own roommate Swathi, but does not reveal this to her, wanting to surprise her at the time of marriage.</p>
-<h3>The Story of Anand and Madhavi</h3>
-<p>Anand (Prasanna) is the brother of Swathi's office manager (Banerjee) who takes a liking for her after seeing her photo on his brother's desk. He sends his father (Kota Srinivasa Rao) to her adopted parents (Surya & Saroja) in their village seeking alliance.</p>
-<p>Avunu Valliddaru Ishta Paddaru HD Telugu Movie Online<br />
-Watch Avunu Valliddaru Istapaddaru 2002 Full Movie Free<br />
-Avunu Valliddaru Istapaddaru Mp4 HDRip BR 720p Download<br />
-Avunu Valliddaru Istapaddaru Star Maa Tv Drama Serial<br />
-Avunu Valliddaru Istapaddaru Telugu Romance Movie Voot<br />
-Avunu Valliddaru Istapaddaru Full Movie With English Subtitles<br />
-Avunu Valliddaru Istapaddaru Movie Songs Download<br />
-Avunu Valliddaru Istapaddaru Cast and Crew Details<br />
-Avunu Valliddaru Istapaddaru Movie Review and Rating<br />
-Avunu Valliddaru Istapaddaru Movie Box Office Collection<br />
-Avunu Valliddaru Ishta Paddaru Full Movie Dailymotion<br />
-Avunu Valliddaru Ishta Paddaru Telugu Movie Watch Online Movierulz<br />
-How to Download Avunu Valliddaru Istapaddaru Full Movie<br />
-Avunu Valliddaru Istapaddaru Movie Scenes and Dialogues<br />
-Avunu Valliddaru Istapaddaru Movie Trivia and Facts<br />
-Avunu Valliddaru Ishta Paddaru Full Movie Youtube<br />
-Avunu Valliddaru Ishta Paddaru Telugu Movie Online Hotstar<br />
-Avunu Valliddaru Istapaddaru Full Movie Torrent Download<br />
-Avunu Valliddaru Istapaddaru Movie Awards and Nominations<br />
-Avunu Valliddaru Istapaddaru Movie Behind The Scenes<br />
-Avunu Valliddaru Ishta Paddaru Full Movie Netflix<br />
-Avunu Valliddardu Ishta Paddarau Telugu Movie Online Prime Video<br />
-Avunu Valliddardu Ishta Paddarau Full Movie Magnet Link<br />
-Avunu Valliddardu Ishta Paddarau Movie Making and Bloopers<br />
-Avunu Valliddardu Ishta Paddarau Movie Fan Reactions and Reviews<br />
-Avunu Valliddardu Ishta Paddarau Full Movie Disney+ Hotstar<br />
-Avunu Valliddardu Ishta Paddarau Telugu Movie Online Zee5<br />
-Avunu Valliddardu Ishta Paddarau Full Movie Filmywap Download<br />
-Avunu Valliddardu Ishta Paddarau Movie Analysis and Criticism<br />
-Avunu Valliddardu Ishta Paddarau Movie Inspirations and References<br />
-Avunu Vallidharu Ista Padharu Full Length Telugu Film Online Free <br />
-Watch Online Telugu Film AVI (Avunu Valldharu Ista Padharu) 2002 <br />
-AVI (Avunu Valldharu Ista Padharu) Telugu Film Download HD Quality <br />
-AVI (Avunu Valldharu Ista Padharu) Star Maa TV Serial Episodes <br />
-AVI (Avunu Valldharu Ista Padharu) Telugu Romantic Film Voot Select <br />
-AVI (Avunu Valldharu Ista Padharu) Full Film With Subtitles in Hindi <br />
-AVI (Avunu Valldharu Ista Padharu) Film Songs MP3 Download <br />
-AVI (Avunu Valldharu Ista Padharu) Film Actors and Actresses Names <br />
-AVI (Avunu Valldharu Ista Padharu) Film Critics Review and Score <br />
-AVI (Avunu Valldharu Ista Padharu) Film Gross Income and Budget <br />
-Watch AVIP (Avunu Valldidharu Ista Padhdharu) Full Telugu Film Online <br />
-Stream AVIP (Avunu Valldidharu Ista Padhdharu) 2002 Full Film Free <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Telugu Film HD Download Link <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Star Maa TV Show Online <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Telugu Love Film Voot Premium <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Full Film With Subtitles in Tamil <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Film Audio Songs Free Download <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Film Director and Producer Details <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Film Audience Review and Rating <br />
-AVIP (Avunu Valldidharu Ista Padhdharu) Film Profit and Loss Report</p>
-<p>Madhavi (Kalyani) is Swathi's look-alike who lives in their village with her grandmother (Sri Lakshmi). She is an innocent girl who loves nature and animals. She gets mistaken for Swathi by Anand's father who thinks she is his son's bride-to-be.</p>
-<p>Madhavi falls in love with Anil after seeing his photo on Swathi's letter. She decides to elope with him without knowing his name or address. She reaches Hyderabad with her grandmother's help and finds out that he lives in Satyanandam's house.</p>
-<p>Madhavi tries to marry Anil by pretending to be Swathi while Swathi gets kidnapped by Anand who wants to marry her by force. A series of confusions ensue as Anil tries to escape from Madhavi while looking for Swathi while Anand tries to convince Swathi while avoiding Madhavi.</p>
-<p>The climax reveals that Madhavi is actually Swathi's twin sister who was separated at birth due to an accident. They reunite with their parents after clearing all misunderstandings. Anil marries Swathi while Anand marries Madhavi.</p>
-<h2>The Characters of Avunu Valliddaru Istapaddaru</h2>
-<h2>The Music of Avunu Valliddaru Istapaddaru</h2>
-<p>The music of <b>Avunu Valliddaru Istapaddaru</b> is composed by Chakri, who is known for his catchy tunes and melodious songs. The movie has 11 songs in total, including the background score. The songs are sung by popular singers like S.P. Balasubrahmanyam, Kousalya, Sandeep, Ravi Varma, etc. The lyrics are written by Sai Sriharsha, who has also written dialogues for the movie.</p>
-<p>The songs of the movie are of different genres and themes, ranging from romantic to folk to comedy. The songs are well-received by the audience and have become evergreen hits. Some of the songs are:</p>
-<table>
-<tr>
-<th>Song</th>
-<th>Singer(s)</th>
-<th>Lyricist</th>
-<th>Genre/Theme</th>
-</tr>
-<tr>
-<td>Venello Hai Hai</td>
-<td>Chakri</td>
-<td>Sai Sriharsha</td>
-<td>Romantic/Title song</td>
-</tr>
-<tr>
-<td>Raa Rammani</td>
-<td>S.P. Balasubrahmanyam & Kousalya</td>
-<td>Sai Sriharsha</td>
-<td>Romantic/Duet</td>
-</tr>
-<tr>
-<td>O Neshtma</td>
-<td>S.P. Balasubrahmanyam & Kousalya</td>
-<td>Sai Sriharsha</td>
-<td>Romantic/Sad song</td>
-</tr>
-<tr>
-<td>Nalo Nenu Lene</td>
-<td>Sandeep & Kousalya</td>
-<td>Sai Sriharsha</td>
-<td>Romantic/Letter song</td>
-</tr>
-<tr>
-<td>Pema Gelupu</td>
-<td>Kousalya</td>
-<td>Sai Sriharsha</td>
-<td>Folk/Village song</td>
-</tr>
-<tr>
-<td>Sithakoka Chiluka</td>
-<td>Chakri & Kousalya</td>
-<td>Sai Sriharsha</td>
-<td>Comedy/Item song</td>
-</tr>
-<tr>
-<td>Yemi Ee Bhagyamo</td>
-<td>Kousalya</td>
-<td>Sai Sriharsha</td>
-<td>Romantic/Solo song</td>
-</tr>
-<tr>
-<td>Ennenno Varnalu</td>
-<td>S.P. Balasubrahmanyam & Kousalya</td>
-<td>Sai Sriharsha</td>
-<td>Romantic/Duet song</td>
-<tr>
-<tr>
-<td>Pogadamaku Athiga </td>
-<td>S.P. Balasubrahmanyam & Kousalya </td>
-<td>Sai Sriharsha </td>
-<td>Folk/Wedding song </td>
-<tr>
-<tr>
-<td>Madhi Ninduga </td>
-<td>S.P. Balasubrahmanyam & Kousalya </td>
-<td>Sai Sriharsha </td>
-<td>Romantic/Lullaby song </td>
-<tr>
-<tr>
-<td>Nuziveedu Sonia </td>
-<td>Ravi Varma </td>
-<td>Sai Sriharsha </td>
-<td>Comedy/Parody song </td>
-<tr>
-<table>
-<h2>The Direction of Avunu Valliddaru Istapaddaru</h2>
-<p>The direction of <b>Avunu Valliddaru Istapaddaru</b> is done by Vamsy, who is one of the most acclaimed and versatile filmmakers in Telugu cinema. He is known for his unique style and vision as a writer and director. He has made many cult classics and award-winning movies in different genres such as comedy, thriller, drama, romance, etc.</p>
-<p>Vamsy was inspired by the story of Gooduru Viswanatha Sastry, a renowned Telugu writer and poet. He adapted his story "Nenu Naa Rakshasi" for the movie and added his own touch of humor, romance, drama, and suspense. He also wrote the screenplay and dialogues for the movie.</p>
-<p>Vamsy used his trademark elements such as colorful costumes, natural locations, quirky characters, witty dialogues, and poetic narration in the movie. He also used some innovative techniques such as split-screen, freeze-frame, voice-over, etc. to enhance the storytelling. He blended comedy and romance with a touch of mystery and suspense in the movie.</p>
-<p>Vamsy also extracted the best performances from his actors and technicians. He made Ravi Teja and Kalyani shine in their dual roles as Anil-Swathi and Anand-Madhavi. He also brought out the comic talent of Prasanna, Krishna Bhagavan, Jeeva, Mallikarjuna Rao, etc. He also collaborated with Chakri for the music and K. Rajendra Prasad for the cinematography of the movie.</p>
-<h2>Avunu Valliddaru Istapaddaru Full Movie Download</h2>
-<p>If you are interested in watching <b>Avunu Valliddaru Istapaddaru</b> full movie, you might be wondering where and how to download it. Well, there are several options available for you to download the movie legally and safely. Here are some of them:</p>
-- <b>Voot</b>: Voot is a popular streaming platform that offers a wide range of movies, shows, originals, and live TV channels. You can watch <b>Avunu Valliddaru Istapaddaru</b> full movie online on Voot for free with ads. You can also download the movie on your device and watch it offline. You just need to register on Voot and enjoy the movie. - <b>Disney+ Hotstar</b>: Disney+ Hotstar is another leading streaming service that provides a variety of content across genres and languages. You can watch <b>Avunu Valliddaru Istapaddaru</b> full movie online on Disney+ Hotstar with a subscription. You can also download the movie on your device and watch it offline. You need to have a Disney+ Hotstar VIP or Premium subscription to access the movie. - <b>YouTube</b>: YouTube is the world's largest video-sharing platform that hosts millions of videos on various topics. You can watch <b>Avunu Valliddaru Istapaddaru</b> full movie online on YouTube for free with ads. You can also download the movie on your device and watch it offline. You just need to search for the movie title on YouTube and find a reliable channel that has uploaded the movie. <p>These are some of the legal and safe ways to download <b>Avunu Valliddaru Istapaddaru</b> full movie. However, we advise you to avoid any illegal or pirated websites that claim to offer the movie for free or at a low cost. These websites may harm your device or expose your personal data to hackers. Moreover, downloading or watching movies from such websites is a violation of the copyright law and may result in legal action.</p>
-<h2>Conclusion</h2>
-<p><b>Avunu Valliddaru Istapaddaru</b> is a Telugu romantic comedy film that is written and directed by Vamsy. It is based on the story of Gooduru Viswanatha Sastry and stars Ravi Teja, Kalyani, Prasanna, etc. in the lead roles. The movie has a unique plot that revolves around two parallel stories of love and confusion. The movie also has a great cast of characters who add to the comedy and drama of the movie. The movie also has a melodious music composed by Chakri and a colorful cinematography by K. Rajendra Prasad.</p>
-<p><b>Avunu Valliddaru Istapaddaru</b> is a movie that will make you laugh, cry, and fall in love. It is a movie that will entertain you and touch your heart. It is a movie that you should not miss. If you want to watch or download <b>Avunu Valliddaru Istapaddaru</b> full movie, you can use any of the legal and safe platforms mentioned above. We hope you enjoy watching this movie and have a great time.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about <b>Avunu Valliddaru Istapaddaru</b> full movie:</p>
-- <b>Q: Who is the director of Avunu Valliddaru Istapaddaru?</b>
-- A: Vamsy is the director of Avunu Valliddaru Istapaddaru. He is also the writer and dialogue writer of the movie. - <b>Q: Who are the main actors of Avunu Valliddaru Istapaddaru?</b>
-- A: Ravi Teja, Kalyani, Prasanna are the main actors of Avunu Valliddaru Istapaddaru. They play the roles of Anil, Swathi/Madhavi, and Anand respectively. - <b>Q: What is the genre of Avunu Valliddaru Istapaddaru?</b>
-- A: Avunu Valliddaru Istapaddaru is a romantic comedy film. It has elements of humor, romance, drama, and suspense. - <b>Q: What is the story of Avunu Valliddaru Istapaddaru?</b>
-- A: Avunu Valliddaru Istapaddaru is based on the story of Gooduru Viswanatha Sastry. It revolves around two parallel stories of love and confusion. Anil and Swathi are roommates who fall in love through letters without seeing each other. Anand and Madhavi are look-alikes who get involved in a mistaken identity situation. The movie shows how these four characters sort out their problems and find their true partners. - <b>Q: Where can I watch or download Avunu Valliddaru Istapaddaru full movie?</b>
-- A: You can watch or download Avunu Valliddaru Istapaddaru full movie on any of the legal and safe platforms mentioned above, such as Voot, Disney+ Hotstar, or YouTube. </p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bmw Scanner 140 Full Version Unlock Version with Software Download.md
DELETED
@@ -1,133 +0,0 @@
-
-<h1>Download T-RackS 3 Deluxe Full Crack 11</h1>
-<p>If you want your mixes and mastered files to sound their best, you need the processors in <strong>T-RackS 3 Deluxe</strong>. Simple as that. But what is T-RackS 3 Deluxe and why do you need it? What are the features and benefits of this amazing software? And how can you download T-RackS 3 Deluxe full crack 11 safely and legally? In this article, we will answer all these questions and more. Read on to find out everything you need to know about T-RackS 3 Deluxe.</p>
-<h2>download t racks 3 deluxe full crack 11</h2><br /><p><b><b>DOWNLOAD</b> ⏩ <a href="https://byltly.com/2uKvwU">https://byltly.com/2uKvwU</a></b></p><br /><br />
-<h2>What is T-RackS 3 Deluxe and why you need it</h2>
-<p>T-RackS 3 Deluxe is a suite of professional mixing and mastering tools that can help you achieve stunning sonic results. It consists of <strong>9 processors</strong> based on vintage and modern analog and digital gear, such as compressors, equalizers, limiters, clippers, and more. You can use these processors individually or in combination to shape, enhance, polish, and finalize your audio tracks.</p>
-<p>T-RackS 3 Deluxe was developed by <strong>IK Multimedia</strong>, a leading company in the field of music technology. IK Multimedia has been creating innovative products for musicians, producers, engineers, and hobbyists since 1996. Some of their most popular products include AmpliTube, SampleTank, Miroslav Philharmonik, iRig, iLoud, and of course, T-RackS.</p>
-<p>T-RackS was one of the first software-based mastering solutions on the market. It was launched in <strong>1999</strong> as a standalone application that emulated a classic analog mastering rack. Since then, it has evolved into a powerful plug-in suite that can be used both as a standalone application or as a plug-in within your favorite DAW (Digital Audio Workstation).</p>
-<p>How to get t racks 3 deluxe for free with crack<br />
-T racks 3 deluxe full version download link<br />
-T racks 3 deluxe crack only download<br />
-Download t racks 3 deluxe full crack 11 torrent<br />
-T racks 3 deluxe serial number generator<br />
-T racks 3 deluxe activation code crack<br />
-T racks 3 deluxe keygen download<br />
-T racks 3 deluxe license key crack<br />
-T racks 3 deluxe patch download<br />
-T racks 3 deluxe registration code crack<br />
-T racks 3 deluxe cracked software download<br />
-T racks 3 deluxe full crack 11 free download<br />
-T racks 3 deluxe crack mac download<br />
-T racks 3 deluxe crack windows download<br />
-T racks 3 deluxe full crack 11 zip download<br />
-T racks 3 deluxe full crack 11 rar download<br />
-T racks 3 deluxe full crack 11 iso download<br />
-T racks 3 deluxe full crack 11 exe download<br />
-T racks 3 deluxe full crack 11 setup download<br />
-T racks 3 deluxe full crack 11 installer download<br />
-Download t racks 3 deluxe full crack offline<br />
-Download t racks 3 deluxe full crack online<br />
-Download t racks 3 deluxe full crack no survey<br />
-Download t racks 3 deluxe full crack no password<br />
-Download t racks 3 deluxe full crack no virus<br />
-Download t racks 3 deluxe full crack safe<br />
-Download t racks 3 deluxe full crack legit<br />
-Download t racks 3 deluxe full crack working<br />
-Download t racks 3 deluxe full crack latest version<br />
-Download t racks 3 deluxe full crack updated version<br />
-Download t racks 3 deluxe full crack new version<br />
-Download t racks 3 deluxe full crack old version<br />
-Download t racks 3 deluxe full crack original version<br />
-Download t racks 3 deluxe full crack official version<br />
-Download t racks 3 deluxe full crack premium version<br />
-Download t racks 3 deluxe full crack pro version<br />
-Download t racks 3 deluxe full crack plus version<br />
-Download t racks 3 deluxe full crack ultimate version<br />
-Download t racks 3 deluxe full crack final version<br />
-Download t racks 3 deluxe full crack best version<br />
-Download t racks 3 deluxe full crack modded version<br />
-Download t racks 3 deluxe full crack hacked version<br />
-Download t racks 3 deluxe full crack unlocked version<br />
-Download t racks 3 deluxe full crack cracked apk<br />
-Download t racks 3 deluxe full crack cracked ios<br />
-Download t racks 3 deluxe full crack cracked pc<br />
-Download t racks 3 deluxe full crack cracked mac<br />
-Download t racks 3 deluxe full crack cracked windows<br />
-Download t tracks studio software with rack effects and mastering suite</p>
-<p>The latest version of T-RackS is <strong>T-RackS 5</strong>, which was released in <strong>2017</strong>. It features <strong>38 processors</strong>, including four new ones: Master Match, Dyna-Mu, ONE, and EQual. It also features an improved interface, a resizable window, a comprehensive metering section, an album assembly module, and more.</p>
-<p>However, if you are looking for a more affordable option that still offers great quality and versatility, you might want to check out <strong>T-RackS 3 Deluxe</strong>. This version was released in <strong>2008</strong> and includes <strong>9 processors</strong>, which are more than enough to cover most of your mixing and mastering needs. Plus, you can get it for a fraction of the price of T-RackS 5.</p>
-<p>So why do you need T-RackS 3 Deluxe? Because it can make your recordings sound warm, full, rich, spacious, clear, punchy, loud, balanced, professional, and ready for distribution. Whether you are working on rock, pop, hip-hop, jazz, classical, or any other genre of music, you can use T-RackS 3 Deluxe to bring out the best in your tracks.</p>
-<h2>What are the features and benefits of T-RackS 3 Deluxe</h2>
-<p>T-RackS 3 Deluxe comes with <strong>9 processors</strong>, each one modeled after some of the most iconic analog or digital devices ever created. These processors are:</p>
-<ul>
-<li><strong>Vintage Tube Compressor/Limiter model 670</strong>: An emulation of the legendary Fairchild 670 compressor/limiter that was used on countless classic recordings from The Beatles to Pink Floyd. This processor adds warmth, smoothness, glue, and character to your tracks.</li>
-<li><strong>Opto compressor</strong>: An emulation of the classic optical compressor that was widely used in the 60s and 70s for its smooth and transparent compression. This processor adds consistency and control to your tracks.</li>
-<li><strong>Brickwall limiter</strong>: A modern digital limiter that can make your tracks loud and clear without distorting or clipping. This processor is ideal for maximizing the level of your masters.</li>
-<li><strong>Linear Phase Equalizer</strong>: A high-end digital equalizer that can adjust the frequency spectrum of your tracks without affecting the phase or introducing artifacts. This processor is perfect for surgical and transparent equalization.</li>
-<li><strong>Classic T-RackS Compressor</strong>: A versatile and easy-to-use compressor that can handle any kind of dynamic processing. This processor can add punch, warmth, or smoothness to your tracks.</li>
-<li><strong>Classic T-RackS Multi-band Limiter</strong>: A powerful and flexible multi-band limiter that can split your signal into three bands and apply different limiting settings to each one. This processor can balance the dynamics and frequency response of your tracks.</li>
-<li><strong>Classic T-RackS Clipper</strong>: A simple and effective clipper that can cut off the peaks of your signal and increase the perceived loudness. This processor can add some edge and grit to your tracks.</li>
-<li><strong>Classic T-RackS Equalizer</strong>: A classic analog equalizer that can boost or cut up to six bands of frequencies with different shapes and slopes. This processor can shape the tone and color of your tracks.</li>
-</ul>
-<p>As you can see, T-RackS 3 Deluxe offers a wide range of features and benefits that can improve your mixing and mastering workflow and results. You can use these processors in three ways:</p>
-<ol>
-<li><strong>As individual plug-ins</strong>: You can insert any of these processors on individual tracks or buses in your DAW and tweak them as you like.</li>
-<li><strong>As a plug-in suite</strong>: You can load up to 12 processors in a single plug-in instance and create custom chains and presets. You can also reorder, bypass, solo, or mute any processor with a simple drag-and-drop.</li>
-<li><strong>As a standalone application</strong>: You can launch T-RackS 3 Deluxe as a standalone application and use it as a complete mastering station. You can load multiple audio files, edit them, process them, compare them, export them, and more.</li>
-</ol>
-<p>No matter how you use T-RackS 3 Deluxe, you will get the same high-quality sound and performance. T-RackS 3 Deluxe supports 64-bit Audio Units, VST2, VST3, AAX formats and is compatible with most DAWs and operating systems.</p>
-<h2>How to download T-RackS 3 Deluxe full crack 11 safely and legally</h2>
-<p>Now that you know what T-RackS 3 Deluxe is and what it can do for you, you might be wondering how to download it for free. After all, who doesn't like free stuff? However, before you start searching for T-RackS 3 Deluxe full crack 11 on shady websites or torrent sites, you should be aware of the risks and consequences of doing so.</p>
-<p>Downloading illegal or pirated software is not only unethical but also dangerous. You could end up with:</p>
-<ul>
-<li><strong>Viruses, malware, spyware, ransomware, or other harmful programs</strong>: These could infect your computer, damage your files, steal your personal information, lock your system, or even demand money from you.</li>
-<li><strong>Poor performance, bugs, crashes, errors, or compatibility issues</strong>: These could ruin your mixing and mastering experience, waste your time and effort, or even corrupt your projects.</li>
-<li><strong>Lawsuits, fines, penalties, or jail time</strong>: These could happen if you get caught by the authorities or by the software developers for violating their intellectual property rights.</li>
-</ul>
-<p>Do you really want to risk all that for a free download? We don't think so. That's why we recommend you to download T-RackS 3 Deluxe full crack 11 safely and legally from the official website of IK Multimedia. Here's how:</p>
-<ol>
-<li><strong>Go to <a href="https://www.ikmultimedia.com/products/tr5deluxe/">https://www.ikmultimedia.com/products/tr5deluxe/</a></strong>: This is the product page of T-RackS 5 Deluxe, which includes T-RackS 3 Deluxe as well as four new processors.</li>
-<li><strong>Click on "Buy Now"</strong>: This will take you to the online store where you can choose your preferred payment method and currency.</li>
-<li><strong>Enter the coupon code "TR5DELUXE11"</strong>: This will apply a special discount of <strong>80%</strong> on the regular price of $199.99 USD. You will only pay $39.99 USD for T-RackS 5 Deluxe!</li>
-<li><strong>Complete the checkout process</strong>: This will require you to create an account or log in with an existing one, enter your billing information, review your order details, and confirm your purchase.</li>
-<li><strong>Download T-RackS 5 Deluxe full crack 11</strong>: After completing the purchase, you will receive an email with a download link and an authorization code for T-RackS 5 Deluxe full crack 11. You can also access these from your IK Multimedia user area.</li>
-<li><strong>Install and activate T-RackS 5 Deluxe full crack 11</strong>: Follow the instructions in the email or on the website to install T-RackS 5 Deluxe full crack 11 on your computer. Then launch the Authorization Manager application and enter your authorization code to activate T-RackS 5 Deluxe full crack 11.</li>
-</ol>
-<p>Congratulations! You have successfully downloaded T-RackS 3 Deluxe full crack 11 safely and legally from IK Multimedia's website. You can now enjoy all the features and benefits of this amazing software without any worries or regrets.</p>
-<h2>Conclusion</h2>
-<p>and detail. You can also benefit from the flexible and user-friendly interface, the comprehensive and accurate metering section, and the standalone and plug-in modes. T-RackS 3 Deluxe is a must-have tool for any serious musician, producer, or engineer who wants to take their sound to the next level.</p>
-<h2>How to download T-RackS 3 Deluxe full crack 11 safely and legally</h2>
-<p>Now that you know what T-RackS 3 Deluxe is and what it can do for you, you might be wondering how to download it for free. After all, who doesn't like free stuff? However, before you start searching for T-RackS 3 Deluxe full crack 11 on shady websites or torrent sites, you should be aware of the risks and consequences of doing so.</p>
-<p>Downloading illegal or pirated software is not only unethical but also dangerous. You could end up with:</p>
-<ul>
-<li><strong>Viruses, malware, spyware, ransomware, or other harmful programs</strong>: These could infect your computer, damage your files, steal your personal information, lock your system, or even demand money from you.</li>
-<li><strong>Poor performance, bugs, crashes, errors, or compatibility issues</strong>: These could ruin your mixing and mastering experience, waste your time and effort, or even corrupt your projects.</li>
-<li><strong>Lawsuits, fines, penalties, or jail time</strong>: These could happen if you get caught by the authorities or by the software developers for violating their intellectual property rights.</li>
-</ul>
-<p>Do you really want to risk all that for a free download? We don't think so. That's why we recommend you to download T-RackS 3 Deluxe full crack 11 safely and legally from the official website of IK Multimedia. Here's how:</p>
-<ol>
-<li><strong>Go to <a href="https://www.ikmultimedia.com/products/tr5deluxe/">https://www.ikmultimedia.com/products/tr5deluxe/</a></strong>: This is the product page of T-RackS 5 Deluxe, which includes T-RackS 3 Deluxe as well as four new processors.</li>
-<li><strong>Click on "Buy Now"</strong>: This will take you to the online store where you can choose your preferred payment method and currency.</li>
-<li><strong>Enter the coupon code "TR5DELUXE11"</strong>: This will apply a special discount of <strong>80%</strong> on the regular price of $199.99 USD. You will only pay $39.99 USD for T-RackS 5 Deluxe!</li>
-<li><strong>Complete the checkout process</strong>: This will require you to create an account or log in with an existing one, enter your billing information, review your order details, and confirm your purchase.</li>
-<li><strong>Download T-RackS 5 Deluxe full crack 11</strong>: After completing the purchase, you will receive an email with a download link and an authorization code for T-RackS 5 Deluxe full crack 11. You can also access these from your IK Multimedia user area.</li>
-<li><strong>Install and activate T-RackS 5 Deluxe full crack 11</strong>: Follow the instructions in the email or on the website to install T-RackS 5 Deluxe full crack 11 on your computer. Then launch the Authorization Manager application and enter your authorization code to activate T-RackS 5 Deluxe full crack 11.</li>
-</ol>
-<p>Congratulations! You have successfully downloaded T-RackS 3 Deluxe full crack 11 safely and legally from IK Multimedia's website. You can now enjoy all the features and benefits of this amazing software without any worries or regrets.</p>
-<h2>Conclusion</h2>
-and detail. You can also benefit from the flexible and user-friendly interface, the comprehensive and accurate metering section, and the standalone and plug-in modes. T-RackS 3 Deluxe is a must-have tool for any serious musician, producer, or engineer who wants to take their sound to the next level.</p>
-<p>However, you don't have to pay a fortune to get T-RackS 3 Deluxe. You can download it for free from the official website of IK Multimedia by using a special coupon code that gives you an 80% discount. This way, you can save money and avoid the risks and consequences of downloading illegal or pirated software.</p>
-<p>So what are you waiting for? Download T-RackS 3 Deluxe full crack 11 today and start mixing and mastering like a pro. You won't regret it!</p>
-<h2>FAQs</h2>
-<p>Here are some common questions that you might have about T-RackS 3 Deluxe full crack 11:</p>
-<ol>
-<li><strong>What is the difference between T-RackS 3 Deluxe and T-RackS 5 Deluxe?</strong>: T-RackS 5 Deluxe is the latest version of T-RackS that includes four new processors: Master Match, Dyna-Mu, ONE, and EQual. It also has an improved interface, a resizable window, a comprehensive metering section, an album assembly module, and more. However, T-RackS 3 Deluxe still has all the essential processors that you need for mixing and mastering, and it costs much less than T-RackS 5 Deluxe.</li>
-<li><strong>Can I use T-RackS 3 Deluxe with any DAW?</strong>: Yes, you can use T-RackS 3 Deluxe with any DAW that supports 64-bit Audio Units, VST2, VST3, or AAX formats. You can also use it as a standalone application for mastering multiple audio files.</li>
-<li><strong>How many processors can I use at the same time in T-RackS 3 Deluxe?</strong>: You can use up to 12 processors in a single plug-in instance or standalone application. You can create custom chains and presets by dragging and dropping the processors in any order.</li>
-<li><strong>How can I get more processors for T-RackS 3 Deluxe?</strong>: You can buy more processors from IK Multimedia's online store or from authorized dealers. You can also upgrade to T-RackS 5 MAX v2, which includes all the processors available for T-RackS.</li>
-<li><strong>How can I get technical support for T-RackS 3 Deluxe?</strong>: You can contact IK Multimedia's technical support team via email, phone, or online forum. You can also check their FAQ page or user manual for more information.</li>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Der Herr Der Ringe Die Schlacht Um Mittelerde 2 German Pc Iso.md
DELETED
@@ -1,86 +0,0 @@
-<br />
-<h1>Der Herr Der Ringe Die Schlacht Um Mittelerde 2 German Pc Iso: A Review</h1>
-<p>If you are a fan of J.R.R. Tolkien's epic fantasy saga, The Lord of the Rings, you might be interested in playing a video game that lets you experience the war in Middle-earth. One such game is Der Herr Der Ringe Die Schlacht Um Mittelerde 2, or The Lord of the Rings: The Battle for Middle-Earth II, a real-time strategy game developed by EA Los Angeles and published by Electronic Arts in 2006. In this article, we will review this game and tell you everything you need to know about it.</p>
-<h2>Der Herr Der Ringe Die Schlacht Um Mittelerde 2 German Pc Iso</h2><br /><p><b><b>DOWNLOAD</b> 🗹 <a href="https://byltly.com/2uKwK5">https://byltly.com/2uKwK5</a></b></p><br /><br />
-<h2>Introduction</h2>
-<h3>What is Der Herr Der Ringe Die Schlacht Um Mittelerde 2?</h3>
-<p>Der Herr Der Ringe Die Schlacht Um Mittelerde 2 is a sequel to The Lord of the Rings: The Battle for Middle-Earth, which was released in 2004. The game covers the events of The Lord of the Rings trilogy, as well as some elements from The Hobbit and The Silmarillion. Unlike the first game, which focused on the War of the Ring in southern Middle-earth, this game features a new campaign that explores the War in the North, where elves, dwarves, men, and goblins fight for control over lands such as Rivendell, Erebor, Mirkwood, and Angmar.</p>
-<h3>What are the features of the game?</h3>
-<p>The game has several features that make it an enjoyable and immersive experience for fans of The Lord of the Rings. Some of these features are:</p>
-<ul>
-<li>You can choose from six playable factions: Men of the West (Gondor and Rohan), Elves (Rivendell, Lothlorien, and Mirkwood), Dwarves (Erebor, Iron Hills, and Ered Luin), Isengard (Uruk-hai, Wargs, and Dunlendings), Mordor (Orcs, Trolls, Haradrim, and Easterlings), and Goblins (Goblins, Spiders, Dragons, and Half-trolls).</li>
-<li>You can play two different campaigns: Good or Evil. The Good campaign follows the story of Glorfindel and Gloin as they lead their forces against Sauron's allies in the North. The Evil campaign follows the story of Sauron's lieutenant, the Witch-king of Angmar, as he seeks to destroy Arnor and Rivendell.</li>
-<li>You can create your own custom hero with unique abilities and appearance. You can also use famous heroes from The Lord of the Rings, such as Aragorn, Gandalf, Legolas, Gimli, Frodo, Saruman, Sauron, Gollum, and more.</li>
-<li>You can build your own base with various structures that provide resources, units, upgrades, defenses, and special powers. You can also capture outposts and settlements that grant you additional bonuses.</li>
-<li>You can command your army with simple mouse clicks or use more advanced tactics such as formations, stances, flanking maneuvers, and hero abilities.</li>
-<li>You can win battles by destroying your enemy's base or by capturing strategic points on the map. You can also use powerful spells such as fireballs, lightning strikes, earthquakes, blizzards, summoning allies or enemies.</li>
-<li>You can play online with other players or against AI opponents. You can choose from different modes such as Skirmish (free-for-all or team-based), War of the Ring (a turn-based strategy mode where you conquer territories on a map of Middle-earth), or Custom Scenarios (user-created maps with specific objectives).</li>
-</ul>
-<h3>What are the requirements to play the game?</h3>
-<p>To play this game on your PC, you will need:</p>
-<table>
-<tr><td>Operating System</td><td>Windows XP or later</td></tr>
-<tr><td>Processor</td><td>1.6 GHz or faster</td></tr>
-<tr><td>Memory</td><td>512 MB RAM or more</td></tr>
-<tr><td>Graphics</td><td>64 MB video card with DirectX 9.0c compatible drivers</td></tr>
-<tr><td>Sound</td><td>DirectX 9.0c compatible sound card</td></tr>
-<tr><td>Disk Space</td><td>6 GB or more</td></tr>
-<tr><td>DVD-ROM Drive</td><td>Required for installation</td></tr>
-<tr><td>Internet Connection</td><td>Required for online play</td></tr>
-</table>
-<h2>Gameplay</h2>
-<h3>How to install the game?</h3>
-<p>To install this game on your PC:</p>
-<ol>
-<li>If you have a physical copy of the game on DVD-ROM disc, insert it into your DVD-ROM drive. If you have a digital copy of the game on ISO file, mount it using an image program such as Daemon Tools Lite. If you have neither option available, you can download an ISO file from archive.org. </li>
-<li>Run setup.exe from your DVD-ROM drive or mounted ISO file. Follow the instructions on screen to install the game using serial from 'serial.txt'. Do not start it yet.</li>
-<li>Patch your game to version 1.09v2 by executing 'LotrBfMe-65539-german.exe'. This patch will fix some bugs and improve compatibility with newer systems.</li>
-<li>Create a folder named 'Meine Die Schlacht um Mittelerde™ II-Dateien' with 'Options.ini' as its content. Copy this folder to 'C:\\Users\\%Username%\\AppData\\Roaming\\'. This will prevent some errors when launching or playing the game.</li>
-<li>Run 'BFME2 Patch Switcher' and click on '1.09' with 'v2 Zoomfaktor' & 'FIX MY RESOLUTION'. This will adjust your resolution settings according to your monitor size.</li>
-<li>Eject your DVD-ROM disc or unmount your ISO file. You do not need them anymore to play.</li>
-<li>Start your game by running 'game.dat' from your installation directory.</li>
-</ol>
-<h3>How to start the game?</h3>
-<p>To start playing this game:</p>
-<p>Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Download Vollversion<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Patch 1.06<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Aufstieg Des Hexenkönigs<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Edain Mod<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Windows 10<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Online Spielen<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Cd Key<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Cheats<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Mods<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Tipps Und Tricks<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Komplettlösung<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Systemanforderungen<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Trainer<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 No Cd Crack<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Multiplayer<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Karten<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Sprachausgabe Ändern<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Steam<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Kaufen<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Hd Edition<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Fehler Beim Initialisieren Des Spiels<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Filmsequenzen Überspringen<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Free Download<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Gameplay<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Grafik Verbessern<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Helden Erstellen<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Installieren<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Intro Überspringen<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Kampagne Startet Nicht<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Key Generator<br />
-Der Herr Der Ringe Die Schlacht Um Mittelerde 2 Lan Spielen<br />
-(cont.)<br />
--Der Herr der Ringe die Schlacht um Mittelerde 2 Let's Play </p>
-<ol>
-<li>Select your language from English (default), German (Deutsch), French (Français), Italian (Italiano), Spanish (Español), Dutch (Nederlands), Norwegian (Norsk), Polish (Polski), or Swedish (Svenska).</li>
-<li>Select your profile name or create a new one.</li>
-<li>Select your preferred mode from Single Player or Multiplayer.</li>
-<li>Select your preferred option from Campaigns (Good or Evil), Skirmish (online or offline), War of the Ring (online or offline), Custom Scenarios (online or offline), Options (graphics, sound, controls, etc.), Credits (view who made this game), or Quit Game (exit).</li>
-</ol>
-<h3>How to choose a faction</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (torrent lumion 3.0pro x86) - Experience the power of Lumion the best 3D rendering software.md
DELETED
@@ -1,96 +0,0 @@
-
-<h1>HD Online Player (torrent lumion 3.0pro x86): A Review</h1>
-<p>If you are an architect, designer, or 3D enthusiast, you might have heard of Lumion, a powerful 3D rendering software that can turn your CAD models into stunning videos or images in seconds. But what if you don't have the latest version of Lumion or a powerful computer to run it? In this article, we will review HD Online Player (torrent lumion 3.0pro x86), a torrent file that allows you to download and install Lumion 3.0 Pro on your Windows PC with a 32-bit processor. We will also show you how to use Lumion 3.0 Pro to create amazing renders with realistic environments and effects.</p>
-<h2>What is Lumion and why do you need it?</h2>
-<p>Lumion is a 3D rendering software that can visualize your CAD models in a video or image with real-life environments and striking artistic flair. It is designed for architects, designers, and anyone who wants to showcase their 3D projects in a fast and easy way. Lumion can import your model from Revit, 3ds Max, SketchUp, AutoCAD, Rhino, ArchiCAD, among many other modeling programs, and instantly breathe life into your designs with realistic landscapes and urban context, stylish effects, and thousands of objects and materials from the content library.</p>
-<h2>HD Online Player (torrent lumion 3.0pro x86)</h2><br /><p><b><b>Download File</b> > <a href="https://byltly.com/2uKxke">https://byltly.com/2uKxke</a></b></p><br /><br />
-<p>Lumion can help you communicate your design vision to your clients, colleagues, or audience in a more engaging and convincing way. You can also use Lumion to explore different design options, test different scenarios, and refine your ideas before finalizing them. Lumion can save you time, money, and effort by making 3D rendering a fun and easy process.</p>
-<h2>Lumion features and benefits</h2>
-<p>Lumion has many features and benefits that make it one of the best 3D rendering software in the market. Here are some of them:</p>
-<h3>Volumetric spotlights and omni lights</h3>
-<p>One of the new features in Lumion 12 Pro is the volumetric effect for spotlights and omni lights. This feature allows you to create beams of light that illuminate the dust particles in the air, creating a dramatic and atmospheric effect. You can use this feature to highlight specific areas of your design, such as entrances, windows, or sculptures. You can also adjust the color, intensity, angle, and size of the light beams to suit your needs.</p>
-<h3>Surface decals and realistic materials</h3>
-<p>Another new feature in Lumion 12 Pro is the surface decals option. This option allows you to add stickers, logos, signs, graffiti, or any other image to any surface in your scene. You can use this feature to add details, branding, or personality to your design. You can also adjust the transparency, scale, rotation, and position of the decals to fit them perfectly on the surface.</p>
-<h3>Improved scene-building and workflow</h3>
-<p>Lumion 12 also offers a vastly improved scene-building experience and subtle yet powerful usability and workflow improvements that make Lumion much more intuitive and significantly faster. You can access the vast content library with a simple click and drag action, and easily place objects, materials, effects, and more in your scene. You can also use the new categories and filters to find what you need quickly and efficiently.</p>
-<p>Lumion 12 also introduces a new alignment tool that helps you align objects with other objects or with the terrain. You can use this tool to snap objects to a grid, a line, a surface, or a point. You can also use the new copy/paste tool to duplicate objects across multiple projects. These tools can help you save time and effort when building your scene.</p>
-<h3>Torrent sources and instructions</h3>
-<p>There are many torrent sources that offer Lumion 3.0pro x86 for download, but not all of them are reliable or safe. Some of them may contain viruses, malware, or fake files that can harm your PC or compromise your privacy. Therefore, you need to be careful and choose a reputable and trusted torrent source that has positive feedback from other users.</p>
-<p>One of the possible torrent sources that you can use is CracksHash, which provides a direct download link for Lumion 3.0pro x86 with a fix. You can also use Archive.org, which hosts a copy of Lumion 3.0pro x86 multilingual with a crack. However, please note that we do not endorse or guarantee the quality or safety of these torrent sources, and you should use them at your own risk.</p>
-<p>HD Online Player (lumion 3.0 pro torrent download)<br />
-HD Online Player (torrent lumion 3.0 pro crack)<br />
-HD Online Player (lumion 3.0 pro torrent full version)<br />
-HD Online Player (torrent lumion 3.0 pro free download)<br />
-HD Online Player (lumion 3.0 pro torrent with keygen)<br />
|
25 |
-
HD Online Player (torrent lumion 3.0 pro activation code)<br />
|
26 |
-
HD Online Player (lumion 3.0 pro torrent for windows)<br />
|
27 |
-
HD Online Player (torrent lumion 3.0 pro for mac)<br />
|
28 |
-
HD Online Player (lumion 3.0 pro torrent 32 bit)<br />
|
29 |
-
HD Online Player (torrent lumion 3.0 pro 64 bit)<br />
|
30 |
-
HD Online Player (lumion 3.0 pro torrent latest version)<br />
|
31 |
-
HD Online Player (torrent lumion 3.0 pro serial number)<br />
|
32 |
-
HD Online Player (lumion 3.0 pro torrent license key)<br />
|
33 |
-
HD Online Player (torrent lumion 3.0 pro patch)<br />
|
34 |
-
HD Online Player (lumion 3.0 pro torrent offline installer)<br />
|
35 |
-
HD Online Player (torrent lumion 3.0 pro online installer)<br />
|
36 |
-
HD Online Player (lumion 3.0 pro torrent direct link)<br />
|
37 |
-
HD Online Player (torrent lumion 3.0 pro magnet link)<br />
|
38 |
-
HD Online Player (lumion 3.0 pro torrent iso file)<br />
|
39 |
-
HD Online Player (torrent lumion 3.0 pro rar file)<br />
|
40 |
-
HD Online Player (lumion 3.0 pro torrent zip file)<br />
|
41 |
-
HD Online Player (torrent lumion 3.0 pro setup file)<br />
|
42 |
-
HD Online Player (lumion 3.0 pro torrent portable version)<br />
|
43 |
-
HD Online Player (torrent lumion 3.0 pro standalone version)<br />
|
44 |
-
HD Online Player (lumion 3.0 pro torrent no survey)<br />
|
45 |
-
HD Online Player (torrent lumion 3.0 pro no password)<br />
|
46 |
-
HD Online Player (lumion 3.0 pro torrent no virus)<br />
|
47 |
-
HD Online Player (torrent lumion 3.0 pro safe download)<br />
|
48 |
-
HD Online Player (lumion 3.0 pro torrent fast download)<br />
|
49 |
-
HD Online Player (torrent lumion 3.0 pro high quality download)<br />
|
50 |
-
HD Online Player (lumion 3.0 pro torrent hd download)<br />
|
51 |
-
HD Online Player (torrent lumion 3.0 pro best download)<br />
|
52 |
-
HD Online Player (lumion 3.0 pro torrent review)<br />
|
53 |
-
HD Online Player (torrent lumion 3.0 pro tutorial)<br />
|
54 |
-
HD Online Player (torrent lumion 3.0 pro guide)<br />
|
55 |
-
HD Online Player (torrent lumion 3.0 pro tips and tricks)<br />
|
56 |
-
HD Online Player (torrent lumion 3.0 pro features and benefits)<br />
|
57 |
-
HD Online Player (torrent lumion 3.0 pro comparison and contrast)<br />
|
58 |
-
HD Online Player (torrent lumion 3.0 pro pros and cons)<br />
|
59 |
-
HD Online Player (torrent lumion 3.0 pro alternatives and substitutes)<br />
|
60 |
-
HD Online Player (torrent lumion 3.0 pro competitors and rivals)<br />
|
61 |
-
HD Online Player (torrent lumion 3.0 pro advantages and disadvantages)<br />
|
62 |
-
HD Online Player (torrent lumion 3.0 pro strengths and weaknesses)<br />
|
63 |
-
HD Online Player (torrent lumion 3.0 pro recommendations and suggestions)<br />
|
64 |
-
HD Online Player (torrent lumion 3.0 pro testimonials and feedbacks)<br />
|
65 |
-
HD Online Player (torrent lumion 3.0 pro ratings and rankings)<br />
|
66 |
-
HD Online Player (torrent lumion 3.0 pro awards and achievements)<br />
|
67 |
-
HD Online Player (torrent lumion 3.0 pro updates and upgrades)<br />
|
68 |
-
HD Online Player (torrent lumion 3.0 pro support and assistance)<br />
|
69 |
-
HD Online Player (torrent lumion 3.0 pro FAQs and answers)</p>
|
70 |
-
<p>Here are the general instructions to download and install Lumion 3.0pro x86 from a torrent file:</p>
|
71 |
-
<ol>
<li>Download and install a torrent client, such as uTorrent or BitTorrent, on your PC.</li>
<li>Go to the torrent source website and find the torrent file for Lumion 3.0pro x86. Make sure it has a good number of seeders and leechers, and check the comments for any issues or warnings.</li>
<li>Download the torrent file and open it with your torrent client. Choose a location to save the downloaded files and start the download process.</li>
<li>Wait until the download is complete. You should have a folder with several files, such as Lumion_3_0_Pro_x86.exe, Lumion_3_0_Pro_x86.bin, Lumion_3_0_Pro_x86.crack.zip, etc.</li>
<li>Run the Lumion_3_0_Pro_x86.exe file to start the installation process. Follow the on-screen instructions and accept the terms and conditions. Choose a destination folder for Lumion 3.0 Pro and click Next.</li>
<li>Wait until the installation is complete. Do not run Lumion 3.0 Pro yet.</li>
<li>Extract the Lumion_3_0_Pro_x86.crack.zip file to get the crack file, such as Lumion.exe or Lumion.dll.</li>
<li>Copy and paste the crack file into the installation folder of Lumion 3.0 Pro, replacing the original file.</li>
</ol>
<h2>How to use Lumion 3.0pro x86 to create stunning renders</h2>
|
72 |
-
<p>After you have downloaded and installed Lumion 3.0pro x86, you can start using it to create stunning renders of your 3D models. Lumion is very easy and intuitive to use, and you can follow these simple steps to get started:</p>
|
73 |
-
<h3>Importing your model from CAD software</h3>
|
74 |
-
<p>Lumion can import your model from various CAD software, such as Revit, SketchUp, 3ds Max, AutoCAD, Rhino, ArchiCAD, and more. You can either use the Lumion LiveSync plugin for real-time synchronization with your CAD software, or you can export your model as a Collada (.DAE), SketchUp (.SKP), FBX (.FBX) or DWG (.DWG) file and import it in Lumion.</p>
|
75 |
-
<p>To import your model in Lumion, you need to click on the Import button on the top left corner of the screen, and then browse to the location of your model file. You can also drag and drop your model file into the Lumion window. After you have imported your model, you can move, rotate, scale, or duplicate it using the object placement tools on the bottom right corner of the screen.</p>
|
76 |
-
<h3>Adding environments and effects</h3>
|
77 |
-
<p>Lumion has a vast library of environments and effects that you can use to enhance your render. You can add realistic landscapes and urban context, such as mountains, forests, roads, buildings, cars, people, animals, and more. You can also add stylish effects, such as weather, lighting, shadows, reflections, fog, fire, water, and more.</p>
|
78 |
-
<p>To add environments and effects in Lumion, you need to click on the corresponding buttons on the top right corner of the screen. You can then browse through the categories and subcategories of the content library and drag and drop the items you want into your scene. You can also adjust the settings and parameters of each item using the sliders and buttons on the bottom left corner of the screen.</p>
|
79 |
-
<h3>Exporting your video or image</h3>
|
80 |
-
<p>Lumion can export your render as a video or an image with high quality and resolution. You can choose from various presets and formats for your output file, such as MP4, AVI, JPG, PNG, TGA, etc. You can also customize the frame rate, resolution, quality, aspect ratio, and duration of your output file.</p>
|
81 |
-
<h2>Pros and cons of Lumion 3.0pro x86</h2>
|
82 |
-
<p>Lumion 3.0pro x86 has many advantages and disadvantages that you should consider before using it. Here are some of them:</p>
|
83 |
-
<h3>Pros</h3>
|
84 |
-
<ul>
<li>Lumion 3.0pro x86 is very easy and intuitive to use, and you can learn it in minutes without any prior training or experience.</li>
<li>Lumion 3.0pro x86 has a vast library of realistic and stylish environments, effects, objects, and materials that you can use to enhance your render.</li>
<li>Lumion 3.0pro x86 can import your model from various CAD software and instantly breathe life into your design with real-life context and artistic flair.</li>
<li>Lumion 3.0pro x86 can export your render as a video or an image with high quality and resolution, and you can customize the output settings to suit your needs.</li>
<li>Lumion 3.0pro x86 can save you time, money, and effort by making 3D rendering a fast and easy process.</li>
</ul>
<h3>Cons</h3>
|
85 |
-
<ul>
<li>Lumion 3.0pro x86 is only compatible with Windows operating systems and does not support Mac OS X or Linux.</li>
<li>Lumion 3.0pro x86 requires a powerful PC with a good graphics card, processor, memory, and hard drive space to run smoothly and handle complex projects.</li>
<li>Lumion 3.0pro x86 is not free and has a high price difference between the standard and pro versions. You also need to download it from a torrent file, which may not be reliable or safe.</li>
<li>Lumion 3.0pro x86 may not have the latest features and improvements that are available in the newer versions of Lumion, such as orthographic views, animated phasing, rain streaks, etc.</li>
</ul>
<h2>Conclusion and FAQs</h2>
|
86 |
-
<p>Lumion 3.0pro x86 is a powerful 3D rendering software that can help you visualize your CAD models in a video or image with real-life environments and striking artistic flair. It is very easy and intuitive to use, and it has a vast library of content and effects that you can use to enhance your render. However, it also has some drawbacks, such as compatibility, performance, cost, and realism issues. Therefore, you should weigh the pros and cons of Lumion 3.0pro x86 before using it.</p>
|
87 |
-
<p>Here are some frequently asked questions about Lumion 3.0pro x86:</p>
|
88 |
-
<h3>What is the difference between Lumion 3.0pro x86 and Lumion 12 Pro?</h3>
|
89 |
-
<p>Lumion 3.0pro x86 is an older version of Lumion that was released in 2012. Lumion 12 Pro is the latest version of Lumion that was released in 2021. Lumion 12 Pro has many new features and improvements that are not available in Lumion 3.0pro x86, such as orthographic views, animated phasing, rain streaks, surface decals, volumetric lights, improved scene-building and workflow, and more. Lumion 12 Pro also has a larger and more updated content library than Lumion 3.0pro x86.</p>
|
90 |
-
<h3>How can I get Lumion 3.0pro x86 for free?</h3>
|
91 |
-
<p>Lumion 3.0pro x86 is not free and you need to purchase a license to use it legally. However, some torrent sources offer Lumion 3.0pro x86 for download without a license, but this is not recommended or safe. Downloading Lumion 3.0pro x86 from a torrent file may expose your PC to viruses, malware, or fake files that can harm your PC or compromise your privacy. It may also violate the intellectual property rights of Lumion and cause legal problems.</p>
|
92 |
-
<h3>Can I use Lumion 3.0pro x86 on a Mac?</h3>
|
93 |
-
<p>No. Lumion 3.0pro x86 is only compatible with Windows operating systems and does not support Mac OS X or Linux, so you cannot run it natively on a Mac.</p>
|
94 |
-
<p>This is the end of the article on HD Online Player (torrent lumion 3.0pro x86): A Review. I hope you enjoyed reading it and learned something new. If you have any questions or feedback, please leave them in the comments below. Thank you for your attention and have a great day!</p>
|
95 |
-
<br />
|
96 |
-
<br />
|
spaces/1gistliPinn/ChatGPT4/Examples/Behen Hogi Teri Full Hd Movie 1080p NEW.md
DELETED
@@ -1,130 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Behen Hogi Teri Full HD Movie 1080p - How to Watch or Download It Online</h1>
|
3 |
-
|
4 |
-
<p>Behen Hogi Teri is a 2017 Bollywood romantic comedy film starring Rajkummar Rao and Shruti Haasan. The film revolves around Gattu, a young man who falls in love with his neighbor Binny, but faces opposition from his community who considers all the girls of the neighborhood as their sisters. The film is a hilarious and heartwarming story of how Gattu tries to win Binny's heart and the approval of his community.</p>
|
5 |
-
<h2>Behen Hogi Teri full hd movie 1080p</h2><br /><p><b><b>DOWNLOAD</b> 🔗 <a href="https://imgfil.com/2uxX90">https://imgfil.com/2uxX90</a></b></p><br /><br />
|
6 |
-
|
7 |
-
<p>If you are looking for a fun and entertaining movie to watch, you should check out Behen Hogi Teri Full HD Movie 1080p. This is a high-quality version of the film that has a resolution of 1080p and a format of x264. This means that you will get to enjoy the film with clear and crisp images and sounds that will enhance your viewing experience.</p>
|
8 |
-
|
9 |
-
<p>But how can you watch or download Behen Hogi Teri Full HD Movie 1080p online? In this article, we will show you some of the best ways to get this movie online for free or for a low cost. We will also tell you some of the benefits and challenges of getting this movie, and some of the tips and precautions that you should follow.</p>
|
10 |
-
|
11 |
-
<h2>Where to Watch or Download Behen Hogi Teri Full HD Movie 1080p Online</h2>
|
12 |
-
|
13 |
-
<p>There are many websites and platforms that offer Behen Hogi Teri Full HD Movie 1080p online. However, not all of them are reliable or safe. Some of them may have low-quality or fake files, some of them may have ads or interruptions, and some of them may have legal or security risks.</p>
|
14 |
-
|
15 |
-
<p>To help you find the best option for you, we have selected some of the most popular and trusted websites and platforms that offer Behen Hogi Teri Full HD Movie 1080p online. Here are some of them:</p>
|
16 |
-
|
17 |
-
<h3>JustWatch.com</h3>
|
18 |
-
|
19 |
-
<p>JustWatch.com is a streaming guide that helps you find where to watch movies and TV shows online. You can watch Behen Hogi Teri Full HD Movie 1080p on JustWatch.com for free or for a low cost by choosing from different streaming services that offer this movie. You can also compare prices, quality, and availability of different services.</p>
|
21 |
-
|
22 |
-
<p>To watch Behen Hogi Teri Full HD Movie 1080p on JustWatch.com, you just need to follow these simple steps:</p>
|
23 |
-
|
24 |
-
<ol>
|
25 |
-
<li>Go to JustWatch.com and search for Behen Hogi Teri Full HD Movie 1080p in the search bar.</li>
|
26 |
-
<li>Select the result that matches the movie exactly and click on it.</li>
|
27 |
-
<li>Choose a streaming service that works best for you from the list of options.</li>
|
28 |
-
<li>Enjoy watching Behen Hogi Teri Full HD Movie 1080p on JustWatch.com.</li>
|
29 |
-
</ol>
|
30 |
-
|
31 |
-
<h3>Actvid.com</h3>
|
32 |
-
|
33 |
-
<p>Actvid.com is a free streaming website that offers a large collection of movies and TV shows in various genres and languages. You can watch Behen Hogi Teri Full HD Movie 1080p on Actvid.com for free without any registration or subscription. You can also choose from different servers and quality options to suit your preference and convenience.</p>
|
34 |
-
|
35 |
-
<p>To watch Behen Hogi Teri Full HD Movie 1080p on Actvid.com, you just need to follow these simple steps:</p>
|
36 |
-
|
37 |
-
<ol>
|
38 |
-
<li>Go to Actvid.com and search for Behen Hogi Teri Full HD Movie 1080p in the search bar.</li>
|
39 |
-
<li>Select the result that matches the movie exactly and click on it.</li>
|
40 |
-
<li>Choose a server and a quality option that works best for you.</li>
|
41 |
-
<li>Enjoy watching Behen Hogi Teri Full HD Movie 1080p on Actvid.com.</li>
|
42 |
-
</ol>
|
43 |
-
|
44 |
-
<h3>Dotmovies.tv</h3>
|
45 |
-
|
46 |
-
<p>Dotmovies.tv is a download website that offers a large collection of movies and TV shows in various genres and languages. You can download Behen Hogi Teri Full HD Movie 1080p on Dotmovies.tv for free with high-speed Google Drive links. You can also choose from different resolutions and formats to suit your preference and convenience.</p>
|
47 |
-
|
48 |
-
<p>To download Behen Hogi Teri Full HD Movie 1080p on Dotmovies.tv, you just need to follow these simple steps:</p>
|
49 |
-
|
50 |
-
<ol>
|
51 |
-
<li>Go to Dotmovies.tv and search for Behen Hogi Teri Full HD Movie 1080p in the search bar.</li>
|
52 |
-
<li>Select the result that matches the movie exactly and click on it.</li>
|
53 |
-
<li>Choose a resolution and a format that works best for you.</li>
|
54 |
-
<li>Click on the Google Drive link and wait for the download to start.</li>
|
55 |
-
<li>Enjoy watching Behen Hogi Teri Full HD Movie 1080p on your device.</li>
|
56 |
-
</ol>
|
57 |
-
|
58 |
-
<h2>What are the Benefits of Watching or Downloading Behen Hogi Teri Full HD Movie 1080p Online</h2>
|
59 |
-
|
60 |
-
<p>By watching or downloading Behen Hogi Teri Full HD Movie 1080p online, you will enjoy several benefits that will enhance your viewing experience. Here are some of them:</p>
|
61 |
-
|
62 |
-
<ul>
|
63 |
-
<li>You will get to watch one of the most entertaining and hilarious Bollywood movies of 2017, with a great story, cast, music, and comedy.</li>
|
64 |
-
<li>You will get to watch the movie in full high definition, with a resolution of 1080p and a format of x264, which means clear and crisp images and sounds that will enhance your viewing experience.</li>
</ul>
|
65 |
-
<h2>What are the Challenges of Watching or Downloading Behen Hogi Teri Full HD Movie 1080p Online</h2>
|
66 |
-
|
67 |
-
<p>While watching or downloading Behen Hogi Teri Full HD Movie 1080p online is a great option, it is not without its challenges. Here are some of the difficulties that you may encounter when trying to get this movie online:</p>
|
68 |
-
|
69 |
-
<ul>
|
70 |
-
<li>You may face some issues with availability, speed, or quality of the streaming or download links. Some links may be broken, slow, or low-quality due to high demand or competition from other users.</li>
|
71 |
-
<li>You may face some legal or security risks from authorities or hackers who may try to track or harm you for accessing pirated content. You may face some consequences such as fines, lawsuits, malware, or viruses if you are not careful or protected.</li>
|
72 |
-
<li>You may face some compatibility issues with your device or player if they do not support x264 format. You may need to convert or play the file using another software or device that supports x264 format.</li>
|
73 |
-
</ul>
|
74 |
-
|
75 |
-
<p>These are some of the challenges that you may face when trying to watch or download Behen Hogi Teri Full HD Movie 1080p online, but they are not insurmountable. You can overcome them by following some tips and precautions that we will share in the next section.</p>
|
76 |
-
|
77 |
-
<h2>What are the Tips and Precautions for Watching or Downloading Behen Hogi Teri Full HD Movie 1080p Online</h2>
|
78 |
-
|
79 |
-
<p>To watch or download Behen Hogi Teri Full HD Movie 1080p online safely and successfully, you should follow some tips and precautions that will help you avoid or minimize the challenges that we mentioned in the previous section. Here are some of them:</p>
|
80 |
-
|
81 |
-
<ul>
|
82 |
-
<li>Use a reliable and reputable website or platform that has good reviews and ratings from other users. Avoid websites or platforms that have low-quality or fake files, ads or interruptions, or legal or security risks.</li>
|
83 |
-
<li>Use a VPN (Virtual Private Network) service that will hide your IP address and encrypt your traffic from prying eyes. This will protect your privacy and security from authorities or hackers who may try to track or harm you.</li>
|
84 |
-
<li>Use an antivirus software that will scan and remove any malware or viruses that may infect your device or files. This will protect your device and data from damage or loss.</li>
|
85 |
-
<li>Use a media player that supports x264 format and has good features and performance. This will ensure that you can play Behen Hogi Teri Full HD Movie 1080p smoothly and enjoyably on your device.</li>
|
86 |
-
</ul>
|
87 |
-
|
88 |
-
<p>These are some of the tips and precautions that you should follow when watching or downloading Behen Hogi Teri Full HD Movie 1080p online. We hope this article has helped you understand how to get this movie online and why it is worth getting it. Now you can enjoy watching Behen Hogi Teri Full HD Movie 1080p and join the millions of fans who love this movie.</p>
|
89 |
-
<h2>What is the Plot of Behen Hogi Teri Full HD Movie 1080p</h2>
|
90 |
-
|
91 |
-
<p>Behen Hogi Teri Full HD Movie 1080p is a romantic comedy film that follows the love story of Gattu and Binny, who are neighbors in a small town in India. Gattu is a shy and timid guy who works as a sales executive for a company that sells CCTV cameras. Binny is a smart and confident girl who works as a teacher in a school. They have known each other since childhood and have a friendly relationship.</p>
|
92 |
-
|
93 |
-
<p>However, Gattu has a secret crush on Binny, but he is afraid to confess his feelings to her. He also faces a problem from his community, who believes in a tradition that all the girls of the neighborhood are like sisters to the boys. This means that Gattu cannot date or marry Binny without facing the wrath of his community.</p>
|
94 |
-
|
95 |
-
<p>Things get complicated when Binny's parents arrange her marriage with Rahul, a rich and handsome guy who lives in Delhi. Gattu is heartbroken and decides to stop Binny's marriage at any cost. He also gets help from his friends and family, who support his love for Binny. However, he has to face many obstacles and challenges from Rahul, Binny's parents, and his own community.</p>
|
96 |
-
|
97 |
-
<p>Will Gattu be able to win Binny's heart and the approval of his community? Will Binny realize her feelings for Gattu and choose him over Rahul? Will they be able to overcome the tradition of Behen Hogi Teri and live happily ever after? Watch Behen Hogi Teri Full HD Movie 1080p to find out.</p>
|
98 |
-
|
99 |
-
<h2>What are the Reviews of Behen Hogi Teri Full HD Movie 1080p</h2>
|
100 |
-
|
101 |
-
<p>Behen Hogi Teri Full HD Movie 1080p has received mixed reviews from critics and audiences. Some of them have praised the film for its humor, music, and performances, while others have criticized it for its cliched plot, weak direction, and lack of originality.</p>
|
102 |
-
|
103 |
-
<p>The film has a rating of 5.6 out of 10 on IMDb, based on 2,000 user ratings. The film has a rating of 64% on JustWatch, based on 4 user ratings. The film has a rating of 4.5 out of 10 on Desicinemas.tv, based on 1 user rating.</p>
|
104 |
-
|
105 |
-
<p>Some of the positive reviews of the film are:</p>
|
106 |
-
|
107 |
-
<ul>
|
108 |
-
<li>"Behen Hogi Teri is a refreshing take on the rom-com genre with some witty dialogues, catchy songs, and charming performances by Rajkummar Rao and Shruti Haasan." - Times of India</li>
|
109 |
-
<li>"Behen Hogi Teri is a light-hearted and enjoyable film that makes you laugh and smile with its quirky characters, funny situations, and sweet romance." - Bollywood Hungama</li>
|
110 |
-
<li>"Behen Hogi Teri is a fun and entertaining film that showcases the chemistry and talent of Rajkummar Rao and Shruti Haasan. The film also has some hilarious moments and a good message about breaking stereotypes." - Filmfare</li>
|
111 |
-
</ul>
|
112 |
-
|
113 |
-
<p>Some of the negative reviews of the film are:</p>
|
114 |
-
|
115 |
-
<ul>
|
116 |
-
<li>"Behen Hogi Teri is a dull and boring film that fails to impress with its predictable plot, poor direction, and lack of originality. The film wastes the potential of Rajkummar Rao and Shruti Haasan with their weak characters and chemistry." - Hindustan Times</li>
|
117 |
-
<li>"Behen Hogi Teri is a disappointing film that tries to be funny and romantic but ends up being neither. The film has a stale story, lame jokes, and mediocre songs that make you lose interest in the film." - NDTV</li>
|
118 |
-
<li>"Behen Hogi Teri is a forgettable film that does not offer anything new or exciting to the audience. The film has a cliched plot, bland direction, and average performances that make you wonder why you watched it in the first place." - Deccan Chronicle</li>
|
119 |
-
</ul>
|
120 |
-
|
121 |
-
<p>These are some of the reviews of Behen Hogi Teri Full HD Movie 1080p online.</p>
|
122 |
-
<h2>Conclusion</h2>
|
123 |
-
|
124 |
-
<p>Behen Hogi Teri Full HD Movie 1080p is a Bollywood romantic comedy film that tells the story of Gattu and Binny, who are neighbors and lovers, but face opposition from their community who considers all the girls of the neighborhood as their sisters. The film is a funny and heartwarming film that has a great cast, music, and comedy.</p>
|
125 |
-
|
126 |
-
<p>If you want to watch Behen Hogi Teri Full HD Movie 1080p online, you can choose from different websites and platforms that offer this movie for free or for a low cost. You can also enjoy the benefits of watching this movie in high definition, with a resolution of 1080p and a format of x264. However, you should also be aware of the challenges and risks of watching or downloading this movie online, and follow some tips and precautions to avoid or minimize them.</p>
|
127 |
-
|
128 |
-
<p>We hope this article has helped you understand how to watch or download Behen Hogi Teri Full HD Movie 1080p online and why it is worth watching. Now you can enjoy watching this movie and join the millions of fans who love it.</p>
|
129 |
-
<br />
|
130 |
-
<br />
|
spaces/1gistliPinn/ChatGPT4/Examples/Chota Bheem Jam Jam Jambura Full Movie In Hindi Free Download.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>Chhota Bheem and the Curse of Damyaan full movie in Hindi and Tamil. Download Chhota Bheem and the Curse of Damyaan full movie. Chota Bheem and the Curse of Damyaan full movie. Ram Charan and the Seventh Ray (2016). Watch Chhota Bheem and the Curse of Damyaan full movie online.</p>
|
3 |
-
<p>Watch Chhota Bheem and the Curse of Damyaan full movie in Hindi and Tamil. Chhota Bheem and the Curse of Damyaan full movie. Chota Bheem and the Curse of Damyaan full movie. Chhota Bheem and the Curse of Damyaan full movie, watch in full HD.</p>
|
4 |
-
<h2>chota bheem jam jam jambura full movie in hindi free download</h2><br /><p><b><b>Download</b> –––––>>> <a href="https://imgfil.com/2uy0FZ">https://imgfil.com/2uy0FZ</a></b></p><br /><br />
|
5 |
-
<p>Download MP4 480p (304.1 MB)<br /> Download from Openload<br /> Download from Mega<br /> Download from MediaFire<br /> Download from 2giga<br /> Download from 6 more servers</p>
|
6 |
-
<br />
|
7 |
-
<br />
|
spaces/1gistliPinn/ChatGPT4/Examples/DOWNLOAD XBLA Unlocker V1.6.rar.rar.md
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
<h2>DOWNLOAD XBLA Unlocker v1.6.rar.rar</h2><br /><p><b><b>Download</b> ✑ <a href="https://imgfil.com/2uxXMJ">https://imgfil.com/2uxXMJ</a></b></p><br /><br />
|
2 |
-
|
3 |
-
January 8, 2557 B.E. (2014) - Support for unpacking multi-volume archives: zip, rar, 7z. ... 06/18/2016 • Improved ability to select items in the list with pen and mouse.
|
4 |
-
Mar 10 2017 Download file...
|
5 |
-
February 6, 2019 at 3:20 pm ...
|
6 |
-
Download file ...
|
7 |
-
March 10, 2018, 3:10 p.m.
|
8 |
-
March 8, 2018 at 08:52 ...
|
9 |
-
June 28, 2017 at 07:48 ...
|
10 |
-
May 6, 2016 Download file...
|
11 |
-
June 24, 2015 at 6:11 pm ...
|
12 |
-
June 14, 2018 at 08:54 ...
|
13 |
-
June 15, 2016 at 6:03 pm ...
|
14 |
-
June 14, 2017 at 10:23 am ...
|
15 |
-
June 24, 2017 at 3:44 pm ...
|
16 |
-
June 20, 2018 at 08:50 ...
|
17 |
-
June 16, 2015 at 10:28 am ...
|
18 |
-
June 25, 2015 at 3:29 pm ...
|
19 |
-
March 12, 2016 7:13 PM ...
|
20 |
-
<br />
|
21 |
-
<br />
|
22 |
-
<p></p>
|
spaces/1gistliPinn/ChatGPT4/Examples/Danza Kuduro Video 1080p Download Enjoy the Best Quality of the Latin Dance Anthem.md
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>Download Uradur DJ Song Video unlimited movies and videos here. Uradur DJ Song Video in HD, 3gp, and MP4 320p, and more videos you can download easily. Tamilrockers and Movierulz, Tamilgun, Filmywap, and Pagalworld videos and movies download.</p>
|
3 |
-
<h2>Danza Kuduro Video 1080p Download</h2><br /><p><b><b>DOWNLOAD</b> ✏ ✏ ✏ <a href="https://imgfil.com/2uy1IX">https://imgfil.com/2uy1IX</a></b></p><br /><br />
|
4 |
-
<br />
|
5 |
-
<br />
|
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/lp_main.py
DELETED
@@ -1,670 +0,0 @@
|
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import random
|
6 |
-
from datetime import datetime
|
8 |
-
import copy
|
10 |
-
import numpy as np
|
11 |
-
import torch
|
12 |
-
import torch.backends.cudnn as cudnn
|
13 |
-
from torch import optim
|
14 |
-
from torch.cuda.amp import GradScaler
|
15 |
-
import faulthandler
|
16 |
-
import pathlib
|
17 |
-
import argparse
|
18 |
-
import time
|
19 |
-
|
20 |
-
try:
|
21 |
-
import wandb
|
22 |
-
except ImportError:
|
23 |
-
wandb = None
|
24 |
-
|
25 |
-
try:
|
26 |
-
import torch.utils.tensorboard as tensorboard
|
27 |
-
except ImportError:
|
28 |
-
tensorboard = None
|
29 |
-
|
30 |
-
try:
|
31 |
-
import horovod.torch as hvd
|
32 |
-
except ImportError:
|
33 |
-
hvd = None
|
34 |
-
|
35 |
-
from open_clip import create_model_and_transforms, trace_model, create_model
|
36 |
-
from training.data import get_data
|
37 |
-
from training.params import parse_args
|
38 |
-
from training.distributed import is_master, init_distributed_device, world_info_from_env
|
39 |
-
from training.logger import setup_logging
|
40 |
-
from training.scheduler import cosine_lr
|
41 |
-
from training.lp_train import train_one_epoch, evaluate
|
42 |
-
from open_clip.utils import get_tar_path_from_dataset_name, dataset_split, get_optimizer
|
43 |
-
from open_clip.utils import load_p, load_class_label
|
44 |
-
from open_clip.linear_probe import LinearProbe
|
45 |
-
|
46 |
-
|
47 |
-
def maintain_ckpts(args, startidx, all_idx_len):
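# Shift existing "epoch_top_{i}.pt" files one slot down (top_i -> top_{i+1})
# starting at `startidx`, then delete the overflow file at index `all_idx_len`,
# freeing slot `startidx` for a newly qualified top-k checkpoint.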
|
48 |
-
for i in reversed(range(startidx, all_idx_len)):
|
49 |
-
if os.path.exists(os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt")):
|
50 |
-
os.rename(
|
51 |
-
os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"),
|
52 |
-
os.path.join(args.checkpoint_path, f"epoch_top_{i+1}.pt"),
|
53 |
-
)
|
54 |
-
if os.path.exists(
|
55 |
-
os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt")
|
56 |
-
):
|
57 |
-
os.remove(os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt"))
|
58 |
-
return
|
59 |
-
|
60 |
-
|
61 |
-
def update_top_k_performance(
|
62 |
-
new_metrics_inputs, current_top_k_ckpt_metrics, args, ckpt, bignumbetter=True
|
63 |
-
):
|
64 |
-
"""
|
65 |
-
Record the top-k performance of the current epoch.
|
66 |
-
current_top_k_metrics is a dictionary of the form: {1: top_1_ckpt_measure, 2: top_2_ckpt_measure, ...}
|
67 |
-
"""
|
68 |
-
if isinstance(new_metrics_inputs, (list, tuple)):
|
69 |
-
new_metrics_inputs = np.mean(new_metrics_inputs)
|
70 |
-
return update_top_k_performance(
|
71 |
-
new_metrics_inputs,
|
72 |
-
current_top_k_ckpt_metrics,
|
73 |
-
args=args,
|
74 |
-
ckpt=ckpt,
|
75 |
-
bignumbetter=bignumbetter,
|
76 |
-
)
|
77 |
-
elif isinstance(new_metrics_inputs, dict):
|
78 |
-
new_metrics_inputs = np.mean(list(new_metrics_inputs.values()))
|
79 |
-
return update_top_k_performance(
|
80 |
-
new_metrics_inputs,
|
81 |
-
current_top_k_ckpt_metrics,
|
82 |
-
args=args,
|
83 |
-
ckpt=ckpt,
|
84 |
-
bignumbetter=bignumbetter,
|
85 |
-
)
|
86 |
-
elif isinstance(new_metrics_inputs, (float, int)):
|
87 |
-
update_flag = {k: False for k in current_top_k_ckpt_metrics.keys()}
|
88 |
-
sorted_keys = sorted(current_top_k_ckpt_metrics.keys())
|
89 |
-
sorted_values = sorted(
|
90 |
-
current_top_k_ckpt_metrics.values(), reverse=bignumbetter
|
91 |
-
)
|
92 |
-
sorted_values_ = copy.deepcopy(sorted_values)
|
93 |
-
sorted_values.append(new_metrics_inputs)
|
94 |
-
sorted_values = sorted(sorted_values, reverse=bignumbetter)
|
95 |
-
sorted_values = sorted_values[:-1]
|
96 |
-
|
97 |
-
if sorted_values == sorted_values_:
|
98 |
-
return current_top_k_ckpt_metrics, new_metrics_inputs
|
99 |
-
else:
|
100 |
-
for i in range(len(sorted_keys)):
|
101 |
-
if current_top_k_ckpt_metrics[sorted_keys[i]] != sorted_values[i]:
|
102 |
-
current_top_k_ckpt_metrics[sorted_keys[i]] = sorted_values[i]
|
103 |
-
update_flag[sorted_keys[i]] = True
|
104 |
-
for i in range(len(update_flag)):
|
105 |
-
if update_flag[i]:
|
106 |
-
maintain_ckpts(args, i, len(sorted_keys))
|
107 |
-
torch.save(
|
108 |
-
ckpt,
|
109 |
-
os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"),
|
110 |
-
)
|
111 |
-
break
|
112 |
-
return current_top_k_ckpt_metrics, new_metrics_inputs
|
113 |
-
|
114 |
-
|
115 |
-
# def updateifNone(a, b):
|
116 |
-
# a = b if None else a
|
117 |
-
# return a
|
118 |
-
|
119 |
-
|
120 |
-
def is_pretrained_params(n):
|
121 |
-
return (
|
122 |
-
n.startswith("clap_model.transformer")
|
123 |
-
or n in ["clap_model.positional_embedding", "clap_model.text_projection"]
|
124 |
-
or n.startswith("clap_model.token_embedding")
|
125 |
-
or n.startswith("clap_model.ln_final")
|
126 |
-
or n.startswith("clap_model.logit_scale_t")
|
127 |
-
)
|
128 |
-
|
129 |
-
|
130 |
-
def random_seed(seed=42, rank=0):
|
131 |
-
torch.manual_seed(seed + rank)
|
132 |
-
np.random.seed(seed + rank)
|
133 |
-
random.seed(seed + rank)
|
134 |
-
|
135 |
-
|
136 |
-
def config_lp_optimizer(model, data, args):
|
137 |
-
# set wd-related params to 0 if use adam optimizer
|
138 |
-
if args.optimizer == "adam":
|
139 |
-
args.wd = 0
|
140 |
-
args.wd_pretrained = 0
|
141 |
-
args.wd_new = 0
|
142 |
-
|
143 |
-
in_clap = lambda n, p: n.startswith("clap_model")
|
144 |
-
|
145 |
-
named_parameters = list(model.named_parameters())
|
146 |
-
|
147 |
-
optimizer = {}
|
148 |
-
scheduler = {}
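# Sketch of the control flow below: with args.split_opt, separate optimizers and
# cosine schedules are built for pretrained text params ("text") and new audio
# params ("audio"); otherwise a single "clap" optimizer is used; with
# args.lp_freeze, only the linear-probe head is optimized under key "lp".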
|
149 |
-
|
150 |
-
# freeze text encoder
|
151 |
-
text_freeze_parameters = [
|
152 |
-
p
|
153 |
-
for n, p in named_parameters
|
154 |
-
if n.startswith("clap_model.transformer")
|
155 |
-
or n in ["clap_model.positional_embedding", "clap_model.text_projection"]
|
156 |
-
or n.startswith("clap_model.token_embedding")
|
157 |
-
or n.startswith("clap_model.ln_final")
|
158 |
-
]
|
159 |
-
|
160 |
-
if args.freeze_text:
|
161 |
-
logging.info("Freeze Text!!!!")
|
162 |
-
for k in text_freeze_parameters:
|
163 |
-
k.requires_grad = False
|
164 |
-
|
165 |
-
if not args.lp_freeze:
|
166 |
-
exclude = (
|
167 |
-
lambda n, p: p.ndim < 2
|
168 |
-
or "bn" in n
|
169 |
-
or "ln" in n
|
170 |
-
or "bias" in n
|
171 |
-
or "logit_scale" in n
|
172 |
-
)
|
173 |
-
include = lambda n, p: not exclude(n, p)
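# `exclude` matches params that should receive no weight decay: 1-D tensors
# (biases, norm gains) and anything whose name contains bn/ln/bias/logit_scale;
# `include` is its complement and gets the configured weight decay below.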
|
174 |
-
|
175 |
-
# (yusong): we do not split the learning rate anymore
|
176 |
-
# p for n, p in named_parameters if in_clap(n,p) and exclude(n, p) and p.requires_grad
|
177 |
-
gain_or_bias_params = [
|
178 |
-
p for n, p in named_parameters if exclude(n, p) and p.requires_grad
|
179 |
-
]
|
180 |
-
# rest_params = [p for n, p in named_parameters if in_clap(n,p) and include(n, p) and p.requires_grad]
|
181 |
-
rest_params = [
|
182 |
-
p for n, p in named_parameters if include(n, p) and p.requires_grad
|
183 |
-
]
|
184 |
-
|
185 |
-
if args.train_data is None:
|
186 |
-
optimizer = None
|
187 |
-
scheduler = None
|
188 |
-
else:
|
189 |
-
total_steps = data["train"].dataloader.num_batches * args.epochs
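# Total optimizer steps for the run (batches per epoch x epochs); the cosine_lr
# schedules built below presumably warm up for args.warmup steps and then decay
# over this horizon.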
|
190 |
-
|
191 |
-
if args.split_opt:
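# Backfill per-group hyperparameters: any unset "<hp>_new" / "<hp>_pretrained"
# value (e.g. args.lr_new) defaults to the shared value (e.g. args.lr).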
|
192 |
-
for x in ["lr", "beta1", "beta2", "eps", "wd"]:
|
193 |
-
for y in ["_new", "_pretrained"]:
|
194 |
-
if getattr(args, x + y) is None:
|
195 |
-
setattr(args, x + y, getattr(args, x))
|
196 |
-
|
197 |
-
gain_or_bias_pretrained_params = [
|
198 |
-
p
|
199 |
-
for n, p in named_parameters
|
200 |
-
if (exclude(n, p) and p.requires_grad) and is_pretrained_params(n)
|
201 |
-
]
|
202 |
-
rest_pretrained_params = [
|
203 |
-
p
|
204 |
-
for n, p in named_parameters
|
205 |
-
if (include(n, p) and p.requires_grad) and is_pretrained_params(n)
|
206 |
-
]
|
207 |
-
gain_or_bias_new_params = [
|
208 |
-
p
|
209 |
-
for n, p in named_parameters
|
210 |
-
if (exclude(n, p) and p.requires_grad)
|
211 |
-
and (not is_pretrained_params(n))
|
212 |
-
]
|
213 |
-
rest_new_params = [
|
214 |
-
p
|
215 |
-
for n, p in named_parameters
|
216 |
-
if (include(n, p) and p.requires_grad)
|
217 |
-
and (not is_pretrained_params(n))
|
218 |
-
]
|
219 |
-
|
220 |
-
pretrained_params_optimizer = get_optimizer(
|
221 |
-
[
|
222 |
-
{"params": gain_or_bias_pretrained_params, "weight_decay": 0.0},
|
223 |
-
{
|
224 |
-
"params": rest_pretrained_params,
|
225 |
-
"weight_decay": args.wd_pretrained,
|
226 |
-
},
|
227 |
-
],
|
228 |
-
lr=args.lr_pretrained,
|
229 |
-
betas=(args.beta1_pretrained, args.beta2_pretrained),
|
230 |
-
eps=args.eps_pretrained,
|
231 |
-
momentum=args.momentum_pretrained,
|
232 |
-
optimizer_name=args.optimizer,
|
233 |
-
)
|
234 |
-
pretrained_params_scheduler = cosine_lr(
|
235 |
-
pretrained_params_optimizer,
|
236 |
-
args.lr_pretrained,
|
237 |
-
args.warmup,
|
238 |
-
total_steps,
|
239 |
-
)
|
240 |
-
|
241 |
-
new_params_optimizer = get_optimizer(
|
242 |
-
[
|
243 |
-
{"params": gain_or_bias_new_params, "weight_decay": 0.0},
|
244 |
-
{"params": rest_new_params, "weight_decay": args.wd_new},
|
245 |
-
],
|
246 |
-
lr=args.lr_new,
|
247 |
-
betas=(args.beta1_new, args.beta2_new),
|
248 |
-
eps=args.eps_new,
|
249 |
-
momentum=args.momentum_new,
|
250 |
-
optimizer_name=args.optimizer,
|
251 |
-
)
|
252 |
-
new_params_scheduler = cosine_lr(
|
253 |
-
new_params_optimizer, args.lr_new, args.warmup, total_steps
|
254 |
-
)
|
255 |
-
|
256 |
-
optimizer["text"] = pretrained_params_optimizer
|
257 |
-
optimizer["audio"] = new_params_optimizer
|
258 |
-
scheduler["text"] = pretrained_params_scheduler
|
259 |
-
scheduler["audio"] = new_params_scheduler
|
260 |
-
|
261 |
-
if args.horovod:
|
262 |
-
pretrained_params_optimizer = hvd.DistributedOptimizer(
|
263 |
-
pretrained_params_optimizer,
|
264 |
-
named_parameters=model.named_parameters(),
|
265 |
-
)
|
266 |
-
new_params_optimizer = hvd.DistributedOptimizer(
|
267 |
-
new_params_optimizer, named_parameters=model.named_parameters()
|
268 |
-
)
|
269 |
-
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
|
270 |
-
hvd.broadcast_optimizer_state(
|
271 |
-
pretrained_params_optimizer, root_rank=0
|
272 |
-
)
|
273 |
-
hvd.broadcast_optimizer_state(new_params_optimizer, root_rank=0)
|
274 |
-
else:
|
275 |
-
|
276 |
-
optimizer["clap"] = get_optimizer(
|
277 |
-
[
|
278 |
-
{"params": gain_or_bias_params, "weight_decay": 0.0},
|
279 |
-
{"params": rest_params, "weight_decay": args.wd},
|
280 |
-
],
|
281 |
-
lr=args.lr,
|
282 |
-
betas=(args.beta1, args.beta2),
|
283 |
-
eps=args.eps,
|
284 |
-
momentum=args.momentum,
|
285 |
-
optimizer_name=args.optimizer,
|
286 |
-
)
|
287 |
-
scheduler["clap"] = cosine_lr(
|
288 |
-
optimizer["clap"], args.lr, args.warmup, total_steps
|
289 |
-
)
|
290 |
-
|
291 |
-
if args.horovod:
|
292 |
-
optimizer["clap"] = hvd.DistributedOptimizer(
|
293 |
-
optimizer["clap"], named_parameters=model.named_parameters()
|
294 |
-
)
|
295 |
-
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
|
296 |
-
hvd.broadcast_optimizer_state(optimizer["clap"], root_rank=0)
|
297 |
-
|
298 |
-
# linear probe optimizer
|
299 |
-
else:
|
300 |
-
lp_params = [
|
301 |
-
p for n, p in named_parameters if (not in_clap(n, p)) and p.requires_grad
|
302 |
-
]
|
303 |
-
lp_optim = get_optimizer(
|
304 |
-
lp_params,
|
305 |
-
lr=args.lp_lr,
|
306 |
-
betas=(args.beta1, args.beta2),
|
307 |
-
eps=args.eps,
|
308 |
-
momentum=0.9,
|
309 |
-
optimizer_name=args.optimizer,
|
310 |
-
)
|
311 |
-
optimizer["lp"] = lp_optim
|
312 |
-
|
313 |
-
return optimizer, scheduler, text_freeze_parameters
|
314 |
-
|
315 |
-
|
316 |
-
def main():
|
317 |
-
args = parse_args()
|
318 |
-
|
319 |
-
time.sleep(args.sleep)
|
320 |
-
|
321 |
-
# sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
|
322 |
-
args.amodel = args.amodel.replace("/", "-")
|
323 |
-
# download sizes.json file
|
324 |
-
|
325 |
-
# (yusong): the below two lines are for debug
|
326 |
-
# print("setting up faulthandler")
|
327 |
-
# faulthandler.register(10)
|
328 |
-
|
329 |
-
random.seed(args.seed)
|
330 |
-
torch.manual_seed(args.seed)
|
331 |
-
torch.cuda.manual_seed(args.seed)
|
332 |
-
torch.cuda.manual_seed_all(args.seed)
|
333 |
-
np.random.seed(args.seed)
|
334 |
-
args.class_index_dict = load_class_label(args.class_label_path)
|
335 |
-
|
336 |
-
# get the name of the experiments
|
337 |
-
if args.name is None:
|
338 |
-
args.name = "-".join(
|
339 |
-
[
|
340 |
-
datetime.now().strftime("%Y_%m_%d-%H_%M_%S"),
|
341 |
-
f"linear_probe" f"model_{args.amodel}",
|
342 |
-
f"lr_{args.lr}",
|
343 |
-
f"b_{args.batch_size}",
|
344 |
-
f"j_{args.workers}",
|
345 |
-
f"p_{args.precision}",
|
346 |
-
]
|
347 |
-
)
|
348 |
-
|
349 |
-
# discover initial world args early so we can log properly
|
350 |
-
args.distributed = False
|
351 |
-
args.local_rank, args.rank, args.world_size = world_info_from_env()
|
352 |
-
|
353 |
-
if args.remotedata and is_master(args):
|
354 |
-
for dataset_name in args.datasetnames:
|
355 |
-
for split in dataset_split[dataset_name]:
|
356 |
-
if not os.path.exists(f"./json_files/{dataset_name}/{split}"):
|
357 |
-
os.makedirs(f"./json_files/{dataset_name}/{split}")
|
358 |
-
os.system(
|
359 |
-
f"aws s3 cp s3://s-laion-audio/webdataset_tar/{dataset_name}/{split}/sizes.json ./json_files/{dataset_name}/{split}/sizes.json"
|
360 |
-
)
|
361 |
-
|
362 |
-
args.log_path = None
|
363 |
-
if is_master(args, local=args.log_local):
|
364 |
-
log_base_path = os.path.join(args.logs, args.name)
|
365 |
-
os.makedirs(log_base_path, exist_ok=True)
|
366 |
-
log_filename = f"out-{args.rank}" if args.log_local else "out.log"
|
367 |
-
args.log_path = os.path.join(log_base_path, log_filename)
|
368 |
-
|
369 |
-
# avoid log dir in same name:
|
370 |
-
postfix = 0
|
371 |
-
while os.path.exists(args.log_path):
|
372 |
-
postfix += 1
|
373 |
-
log_base_path_new = log_base_path + "-" + str(postfix)
|
374 |
-
os.makedirs(log_base_path_new, exist_ok=True)
|
375 |
-
log_filename = f"out-{args.rank}" if args.log_local else "out.log"
|
376 |
-
args.log_path = os.path.join(log_base_path_new, log_filename)
|
377 |
-
# print(
|
378 |
-
# "Error. Experiment already exists. Use --name {} to specify a new experiment."
|
379 |
-
# )
|
380 |
-
# return -1
|
381 |
-
|
382 |
-
# Set logger
|
383 |
-
args.log_level = logging.DEBUG if args.debug else logging.INFO
|
384 |
-
setup_logging(args.log_path, args.log_level)
|
385 |
-
|
386 |
-
# fully initialize distributed device environment
|
387 |
-
device = init_distributed_device(args)
|
388 |
-
|
389 |
-
args.wandb = "wandb" in args.report_to or "all" in args.report_to
|
390 |
-
args.tensorboard = "tensorboard" in args.report_to or "all" in args.report_to
|
391 |
-
if is_master(args):
|
392 |
-
args.tensorboard_path = (
|
393 |
-
os.path.join(args.logs, args.name, "tensorboard")
|
394 |
-
if args.tensorboard
|
395 |
-
else ""
|
396 |
-
)
|
397 |
-
args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints")
|
398 |
-
for dirname in [args.tensorboard_path, args.checkpoint_path]:
|
399 |
-
if dirname:
|
400 |
-
os.makedirs(dirname, exist_ok=True)
|
401 |
-
else:
|
402 |
-
args.tensorboard_path = ""
|
403 |
-
args.checkpoint_path = ""
|
404 |
-
|
405 |
-
if args.copy_codebase:
|
406 |
-
copy_codebase(args)
|
407 |
-
|
408 |
-
assert args.precision in ["amp", "fp16", "fp32"]
|
409 |
-
if args.precision == "fp16":
|
410 |
-
logging.warning(
|
411 |
-
"It is recommended to use AMP mixed-precision instead of FP16. "
|
412 |
-
"FP16 support needs further verification and tuning, especially for train."
|
413 |
-
)
|
414 |
-
|
415 |
-
if args.horovod:
|
416 |
-
logging.info(
|
417 |
-
f"Running in horovod mode with multiple processes / nodes. Device: {args.device}."
|
418 |
-
f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
|
419 |
-
)
|
420 |
-
elif args.distributed:
|
421 |
-
logging.info(
|
422 |
-
f"Running in distributed mode with multiple processes. Device: {args.device}."
|
423 |
-
f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
|
424 |
-
)
|
425 |
-
else:
|
426 |
-
logging.info(f"Running with a single process. Device {args.device}.")
|
427 |
-
|
428 |
-
logging.info(f"openai cache dir: {os.path.expanduser(args.openai_model_cache_dir)}")
|
429 |
-
|
430 |
-
# Create CLAP model
|
431 |
-
clap_model, clap_model_cfg = create_model(
|
432 |
-
args.amodel,
|
433 |
-
args.tmodel,
|
434 |
-
args.pretrained,
|
435 |
-
precision=args.precision,
|
436 |
-
device=device,
|
437 |
-
jit=args.torchscript,
|
438 |
-
force_quick_gelu=args.force_quick_gelu,
|
439 |
-
openai_model_cache_dir=os.path.expanduser(args.openai_model_cache_dir),
|
440 |
-
skip_params=False,
|
441 |
-
pretrained_audio=args.pretrained_audio,
|
442 |
-
pretrained_text=args.pretrained_text,
|
443 |
-
enable_fusion=args.enable_fusion,
|
444 |
-
fusion_type=args.fusion_type,
|
445 |
-
)
|
446 |
-
|
447 |
-
args.lp_out_ch = len(list(args.class_index_dict.keys()))
|
448 |
-
# Linear Probe
|
449 |
-
logging.info(f"linear probe using mlp: {args.lp_mlp}")
|
450 |
-
logging.info(f"linear probe using freeze: {args.lp_freeze}")
|
451 |
-
logging.info(f"linear probe act layer: {args.lp_act}")
|
452 |
-
logging.info(f"linear probe out ch: {args.lp_out_ch}")
|
453 |
-
logging.info(f"linear probe learning rate (if applicable): {args.lp_lr}")
|
454 |
-
logging.info(f"linear probe loss func: {args.lp_loss}")
|
455 |
-
logging.info(f"linear probe lp_metrics: {args.lp_metrics}")
|
456 |
-
|
457 |
-
model = LinearProbe(
|
458 |
-
clap_model,
|
459 |
-
mlp=args.lp_mlp,
|
460 |
-
freeze=args.lp_freeze,
|
461 |
-
in_ch=512,
|
462 |
-
out_ch=args.lp_out_ch,
|
463 |
-
act=args.lp_act,
|
464 |
-
) # in_ch is fixed (i.e., 512)
|
465 |
-
model = model.to(device)
|
466 |
-
|
467 |
-
if args.horovod:
|
468 |
-
with torch.no_grad():
|
469 |
-
for param in model.parameters():
|
470 |
-
param.set_(param.contiguous())
|
471 |
-
|
472 |
-
if args.trace:
|
473 |
-
model = trace_model(model, batch_size=args.batch_size, device=device)
|
474 |
-
|
475 |
-
if is_master(args):
|
476 |
-
logging.info("Linear Probe CLAP Model:")
|
477 |
-
logging.info(f"{str(clap_model)}")
|
478 |
-
logging.info("Params:")
|
479 |
-
params_file = os.path.join(args.logs, args.name, "params.txt")
|
480 |
-
with open(params_file, "w") as f:
|
481 |
-
for name in sorted(vars(args)):
|
482 |
-
val = getattr(args, name)
|
483 |
-
logging.info(f" {name}: {val}")
|
484 |
-
f.write(f"{name}: {val}\n")
|
485 |
-
|
486 |
-
if args.distributed and not args.horovod:
|
487 |
-
if args.use_bn_sync:
|
488 |
-
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
|
489 |
-
ddp_args = {}
|
490 |
-
if args.ddp_static_graph:
|
491 |
-
# this doesn't exist in older PyTorch, arg only added if enabled
|
492 |
-
ddp_args["static_graph"] = True
|
493 |
-
model = torch.nn.parallel.DistributedDataParallel(
|
494 |
-
model, device_ids=[device], find_unused_parameters=True, **ddp_args
|
495 |
-
)
|
496 |
-
|
497 |
-
data = get_data(args, clap_model_cfg)
|
498 |
-
assert len(data), "At least one train or eval dataset must be specified."
|
499 |
-
if args.trace:
|
500 |
-
assert "train" not in data, "Cannot train with traced model"
|
501 |
-
|
502 |
-
optimizer, scheduler, text_freeze_parameters = config_lp_optimizer(
|
503 |
-
model, data, args
|
504 |
-
)
|
505 |
-
|
506 |
-
scaler = GradScaler() if args.precision == "amp" else None
|
507 |
-
|
508 |
-
# optionally resume from a checkpoint
|
509 |
-
start_epoch = 0
|
510 |
-
if args.resume is not None:
|
511 |
-
if os.path.isfile(args.resume):
|
512 |
-
checkpoint = torch.load(args.resume, map_location=device)
|
513 |
-
if "epoch" in checkpoint:
|
514 |
-
# resuming a train checkpoint w/ epoch and optimizer state
|
515 |
-
start_epoch = checkpoint["epoch"]
|
516 |
-
sd = checkpoint["state_dict"]
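# Checkpoints saved under DistributedDataParallel prefix every key with
# "module."; strip that prefix when resuming in a non-distributed run.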
|
517 |
-
if not args.distributed and next(iter(sd.items()))[0].startswith(
|
518 |
-
"module"
|
519 |
-
):
|
520 |
-
sd = {k[len("module.") :]: v for k, v in sd.items()}
|
521 |
-
model.load_state_dict(sd)
|
522 |
-
if optimizer is not None:
|
523 |
-
# The saved checkpoint stores one state dict per optimizer key (e.g.
|
524 |
-
# "clap_optimizer"); `optimizer` is always a dict here (see
|
525 |
-
# config_lp_optimizer), so restore each sub-optimizer by its saved key.
|
526 |
-
for k, o_ in optimizer.items():
|
527 |
-
o_.load_state_dict(checkpoint[k + "_" + "optimizer"])
|
528 |
-
if scaler is not None and "scaler" in checkpoint:
|
529 |
-
scaler.load_state_dict(checkpoint["scaler"])
|
530 |
-
-                logging.info(
-                    f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})"
-                )
-            else:
-                # loading a bare (model only) checkpoint for fine-tune or evaluation
-                model.load_state_dict(checkpoint)
-                logging.info(
-                    f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})"
-                )
-                if args.freeze_text:
-                    print("Freeze Text!!!!")
-                    for k in text_freeze_parameters:
-                        k.requires_grad = False
-        else:
-            logging.info("=> no checkpoint found at '{}'".format(args.resume))
-
-    cudnn.benchmark = True
-    cudnn.deterministic = False
-
-    # determine if this worker should save logs and checkpoints. only do so if it is rank == 0
-    args.save_logs = args.logs and args.logs.lower() != "none" and is_master(args)
-    writer = None
-    if args.save_logs and args.tensorboard:
-        assert tensorboard is not None, "Please install tensorboard."
-        writer = tensorboard.SummaryWriter(args.tensorboard_path)
-
-    if args.wandb and is_master(args):
-        assert wandb is not None, "Please install wandb."
-        logging.debug("Starting wandb.")
-        args.train_sz = data["train"].dataloader.num_samples
-        if args.val_data is not None:
-            args.val_sz = data["val"].dataloader.num_samples
-        # you will have to configure this for your project!
-        wandb.init(
-            project="clap",
-            notes=args.wandb_notes,
-            name=args.wandb_notes,
-            tags=[],
-            config=vars(args),
-        )
-        if args.debug:
-            wandb.watch(model, log="all")
-        wandb.save(params_file)
-        logging.debug("Finished loading wandb.")
-
-    if "train" not in data:
-        evaluate(model, data, start_epoch, args, writer)
-        return
-    elif start_epoch == 0 and "val" in data and not args.no_eval:
-        evaluate(model, data, 0, args, writer)
-    if args.save_top_performance:
-        current_top_k_ckpt_metrics = {
-            i: 0 for i in range(args.save_top_performance)
-        }  # initialize the top-k metric for ckpts to 0
-
-    for epoch in range(start_epoch, args.epochs):
-        # freeze the text param after (include) args.freeze_text_after, this is -1 by default
-        if epoch == args.freeze_text_after:
-            print("Text pretrained parameters are freezed since this epoch.")
-            for k in text_freeze_parameters:
-                k.requires_grad = False
-        if is_master(args):
-            logging.info(f"Start epoch {epoch}")
-
-        train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer)
-        completed_epoch = epoch + 1
-
-        if (
-            any(v in data for v in ("val", "imagenet-val", "imagenet-v2"))
-            and not args.no_eval
-        ):
-            metrics = evaluate(model, data, completed_epoch, args, writer)
-            if args.save_top_performance:
-                top_k_dataset = args.top_k_checkpoint_select_dataset
-                top_k_metric = args.top_k_checkpoint_select_metric
-                filtered_metrics = [
-                    v
-                    for k, v in metrics.items()
-                    if top_k_metric in k and top_k_dataset in k
-                ]  # check all R@10 metrics (all dataset) and use it to update the ckpt
-        # Saving checkpoints.
-        if args.save_logs:
-            opt_dict = {
-                k + "_" + "optimizer": v.state_dict() for k, v in optimizer.items()
-            }
-            checkpoint_dict = {
-                "epoch": completed_epoch,
-                "name": args.name,
-                "state_dict": model.state_dict(),
-            }
-            checkpoint_dict.update(opt_dict)
-            if scaler is not None:
-                checkpoint_dict["scaler"] = scaler.state_dict()
-
-            if completed_epoch == args.epochs or (
-                args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0
-            ):
-                torch.save(
-                    checkpoint_dict,
-                    os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"),
-                )
-            if args.save_most_recent:
-                torch.save(
-                    checkpoint_dict,
-                    os.path.join(args.checkpoint_path, f"epoch_latest.pt"),
-                )
-            if args.save_top_performance and not args.no_eval:
-                update_top_k_performance(
-                    filtered_metrics,
-                    current_top_k_ckpt_metrics,
-                    args,
-                    checkpoint_dict,
-                    bignumbetter=True,
-                )
-
-    if args.wandb and is_master(args):
-        wandb.finish()
-
-
-def copy_codebase(args):
-    from shutil import copytree, ignore_patterns
-
-    new_code_path = os.path.join(args.logs, args.name, "code")
-    if os.path.exists(new_code_path):
-        print(
-            f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
-        )
-        return -1
-    print(f"Copying codebase to {new_code_path}")
-    current_code_path = os.path.realpath(__file__)
-    for _ in range(3):
-        current_code_path = os.path.dirname(current_code_path)
-    copytree(
-        current_code_path, new_code_path, ignore=ignore_patterns("log", "logs", "wandb")
-    )
-    print("Done copying code.")
-    return 1
-
-
-if __name__ == "__main__":
-    main()
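Note on the resume logic above: a checkpoint containing an "epoch" key is treated as a full training checkpoint (weights plus per-optimizer state), while anything else is loaded as bare weights. A minimal sketch of the same save/resume round trip; the toy model, optimizer key, and file path are illustrative assumptions, not part of the deleted file:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# save a "full" checkpoint, mirroring checkpoint_dict above
torch.save({"epoch": 3, "state_dict": model.state_dict(),
            "text_optimizer": optimizer.state_dict()}, "epoch_3.pt")

# resume: a dict with "epoch" is a training checkpoint; anything else is bare weights
ckpt = torch.load("epoch_3.pt", map_location="cpu")
if "epoch" in ckpt:
    start_epoch = ckpt["epoch"]
    model.load_state_dict(ckpt["state_dict"])
    optimizer.load_state_dict(ckpt["text_optimizer"])
else:
    model.load_state_dict(ckpt)  # bare (model only) checkpoint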
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/indexed_datasets.py
DELETED
@@ -1,71 +0,0 @@
-import pickle
-from copy import deepcopy
-
-import numpy as np
-
-
-class IndexedDataset:
-    def __init__(self, path, num_cache=1):
-        super().__init__()
-        self.path = path
-        self.data_file = None
-        self.data_offsets = np.load(f"{path}.idx", allow_pickle=True).item()['offsets']
-        self.data_file = open(f"{path}.data", 'rb', buffering=-1)
-        self.cache = []
-        self.num_cache = num_cache
-
-    def check_index(self, i):
-        if i < 0 or i >= len(self.data_offsets) - 1:
-            raise IndexError('index out of range')
-
-    def __del__(self):
-        if self.data_file:
-            self.data_file.close()
-
-    def __getitem__(self, i):
-        self.check_index(i)
-        if self.num_cache > 0:
-            for c in self.cache:
-                if c[0] == i:
-                    return c[1]
-        self.data_file.seek(self.data_offsets[i])
-        b = self.data_file.read(self.data_offsets[i + 1] - self.data_offsets[i])
-        item = pickle.loads(b)
-        if self.num_cache > 0:
-            self.cache = [(i, deepcopy(item))] + self.cache[:-1]
-        return item
-
-    def __len__(self):
-        return len(self.data_offsets) - 1
-
-class IndexedDatasetBuilder:
-    def __init__(self, path):
-        self.path = path
-        self.out_file = open(f"{path}.data", 'wb')
-        self.byte_offsets = [0]
-
-    def add_item(self, item):
-        s = pickle.dumps(item)
-        bytes = self.out_file.write(s)
-        self.byte_offsets.append(self.byte_offsets[-1] + bytes)
-
-    def finalize(self):
-        self.out_file.close()
-        np.save(open(f"{self.path}.idx", 'wb'), {'offsets': self.byte_offsets})
-
-
-if __name__ == "__main__":
-    import random
-    from tqdm import tqdm
-    ds_path = '/tmp/indexed_ds_example'
-    size = 100
-    items = [{"a": np.random.normal(size=[10000, 10]),
-              "b": np.random.normal(size=[10000, 10])} for i in range(size)]
-    builder = IndexedDatasetBuilder(ds_path)
-    for i in tqdm(range(size)):
-        builder.add_item(items[i])
-    builder.finalize()
-    ds = IndexedDataset(ds_path)
-    for i in tqdm(range(10000)):
-        idx = random.randint(0, size - 1)
-        assert (ds[idx]['a'] == items[idx]['a']).all()
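For context, the builder appends pickled items back-to-back in `<path>.data` and stores the cumulative byte offsets in `<path>.idx`; `__getitem__` then seeks to `offsets[i]` and reads `offsets[i + 1] - offsets[i]` bytes. A minimal round trip, assuming the module above is importable as `indexed_datasets` and using an illustrative path:

import numpy as np
from indexed_datasets import IndexedDataset, IndexedDatasetBuilder  # the module above

builder = IndexedDatasetBuilder("/tmp/demo_ds")
builder.add_item({"text": "hello", "mel": np.zeros([80, 10])})
builder.add_item({"text": "world", "mel": np.ones([80, 12])})
builder.finalize()  # writes /tmp/demo_ds.data and /tmp/demo_ds.idx

ds = IndexedDataset("/tmp/demo_ds", num_cache=1)
print(len(ds), ds[1]["text"])  # -> 2 world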
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/griffin_lim.py
DELETED
@@ -1,85 +0,0 @@
-import librosa
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-
-def _stft(y, hop_size, win_size, fft_size):
-    return librosa.stft(y=y, n_fft=fft_size, hop_length=hop_size, win_length=win_size, pad_mode='constant')
-
-
-def _istft(y, hop_size, win_size):
-    return librosa.istft(y, hop_length=hop_size, win_length=win_size)
-
-
-def griffin_lim(S, hop_size, win_size, fft_size, angles=None, n_iters=30):
-    angles = np.exp(2j * np.pi * np.random.rand(*S.shape)) if angles is None else angles
-    S_complex = np.abs(S).astype(np.complex)
-    y = _istft(S_complex * angles, hop_size, win_size)
-    for i in range(n_iters):
-        angles = np.exp(1j * np.angle(_stft(y, hop_size, win_size, fft_size)))
-        y = _istft(S_complex * angles, hop_size, win_size)
-    return y
-
-
-def istft(amp, ang, hop_size, win_size, fft_size, pad=False, window=None):
-    spec = amp * torch.exp(1j * ang)
-    spec_r = spec.real
-    spec_i = spec.imag
-    spec = torch.stack([spec_r, spec_i], -1)
-    if window is None:
-        window = torch.hann_window(win_size).to(amp.device)
-    if pad:
-        spec = F.pad(spec, [0, 0, 0, 1], mode='reflect')
-    wav = torch.istft(spec, fft_size, hop_size, win_size)
-    return wav
-
-
-def griffin_lim_torch(S, hop_size, win_size, fft_size, angles=None, n_iters=30):
-    """
-
-    Examples:
-        >>> x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size, win_length=win_length, pad_mode="constant")
-        >>> x_stft = x_stft[None, ...]
-        >>> amp = np.abs(x_stft)
-        >>> angle_init = np.exp(2j * np.pi * np.random.rand(*x_stft.shape))
-        >>> amp = torch.FloatTensor(amp)
-        >>> wav = griffin_lim_torch(amp, angle_init, hparams)
-
-    :param amp: [B, n_fft, T]
-    :param ang: [B, n_fft, T]
-    :return: [B, T_wav]
-    """
-    angles = torch.exp(2j * np.pi * torch.rand(*S.shape)) if angles is None else angles
-    window = torch.hann_window(win_size).to(S.device)
-    y = istft(S, angles, hop_size, win_size, fft_size, window=window)
-    for i in range(n_iters):
-        x_stft = torch.stft(y, fft_size, hop_size, win_size, window)
-        x_stft = x_stft[..., 0] + 1j * x_stft[..., 1]
-        angles = torch.angle(x_stft)
-        y = istft(S, angles, hop_size, win_size, fft_size, window=window)
-    return y
-
-
-# Conversions
-_mel_basis = None
-_inv_mel_basis = None
-
-
-def _build_mel_basis(audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax):
-    assert fmax <= audio_sample_rate // 2
-    return librosa.filters.mel(audio_sample_rate, fft_size, n_mels=audio_num_mel_bins, fmin=fmin, fmax=fmax)
-
-
-def _linear_to_mel(spectogram, audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax):
-    global _mel_basis
-    if _mel_basis is None:
-        _mel_basis = _build_mel_basis(audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax)
-    return np.dot(_mel_basis, spectogram)
-
-
-def _mel_to_linear(mel_spectrogram, audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax):
-    global _inv_mel_basis
-    if _inv_mel_basis is None:
-        _inv_mel_basis = np.linalg.pinv(_build_mel_basis(audio_sample_rate, fft_size, audio_num_mel_bins, fmin, fmax))
-    return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
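Griffin-Lim alternates between re-estimating phase from the current waveform and resynthesizing against the fixed magnitude spectrogram. A usage sketch for the NumPy path above; the hop/window/FFT sizes are arbitrary choices, and note that `np.complex` in `griffin_lim` assumes NumPy < 1.24, where that alias still exists:

import librosa
import numpy as np
from griffin_lim import griffin_lim  # assumes the module above is importable

wav, sr = librosa.load(librosa.ex("trumpet"), sr=None)
S = np.abs(librosa.stft(wav, n_fft=1024, hop_length=256, win_length=1024))
recon = griffin_lim(S, hop_size=256, win_size=1024, fft_size=1024, n_iters=30)
print(recon.shape, wav.shape)  # roughly the same number of samples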
spaces/AP123/IllusionDiffusion/illusion_style.py
DELETED
@@ -1,10 +0,0 @@
-css='''
-#share-btn-container {padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; max-width: 13rem; margin-left: auto;}
-div#share-btn-container > div {flex-direction: row;background: black;align-items: center}
-#share-btn-container:hover {background-color: #060606}
-#share-btn {all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.5rem !important; padding-bottom: 0.5rem !important;right:0;}
-#share-btn * {all: unset}
-#share-btn-container div:nth-child(-n+2){width: auto !important;min-height: 0px !important;}
-#share-btn-container .wrap {display: none !important}
-#share-btn-container.hidden {display: none!important}
-'''
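The module only exports a CSS string; the intended use is to hand it to Gradio when the UI is built. A one-line sketch (the Blocks content is hypothetical):

import gradio as gr
from illusion_style import css  # the string defined above

with gr.Blocks(css=css) as demo:
    gr.Markdown("UI goes here")  # placeholder content
demo.launch()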
spaces/AP123/Upside-Down-Diffusion/app.py
DELETED
@@ -1,382 +0,0 @@
-from diffusers import StableDiffusionXLPipeline, AutoencoderKL
-from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-import torch
-from PIL import Image, ImageOps
-import gradio as gr
-import user_history
-
-vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-pipe = StableDiffusionXLPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-1.0",
-    vae=vae,
-    torch_dtype=torch.float16,
-    variant="fp16",
-    use_safetensors=True,
-)
-pipe.to("cuda")
-pipe.unet.to(memory_format=torch.channels_last)
-pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-
-@torch.no_grad()
-def call(
-    pipe,
-    prompt: Union[str, List[str]] = None,
-    prompt2: Union[str, List[str]] = None,
-    height: Optional[int] = None,
-    width: Optional[int] = None,
-    num_inference_steps: int = 50,
-    denoising_end: Optional[float] = None,
-    guidance_scale: float = 5.0,
-    guidance_scale2: float = 5.0,
-    negative_prompt: Optional[Union[str, List[str]]] = None,
-    negative_prompt2: Optional[Union[str, List[str]]] = None,
-    num_images_per_prompt: Optional[int] = 1,
-    eta: float = 0.0,
-    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-    latents: Optional[torch.FloatTensor] = None,
-    prompt_embeds: Optional[torch.FloatTensor] = None,
-    negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-    negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-    output_type: Optional[str] = "pil",
-    return_dict: bool = True,
-    callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-    callback_steps: int = 1,
-    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-    guidance_rescale: float = 0.0,
-    original_size: Optional[Tuple[int, int]] = None,
-    crops_coords_top_left: Tuple[int, int] = (0, 0),
-    target_size: Optional[Tuple[int, int]] = None,
-    negative_original_size: Optional[Tuple[int, int]] = None,
-    negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
-    negative_target_size: Optional[Tuple[int, int]] = None,
-):
-    # 0. Default height and width to unet
-    height = height or pipe.default_sample_size * pipe.vae_scale_factor
-    width = width or pipe.default_sample_size * pipe.vae_scale_factor
-
-    original_size = original_size or (height, width)
-    target_size = target_size or (height, width)
-
-    # 1. Check inputs. Raise error if not correct
-    pipe.check_inputs(
-        prompt,
-        None,
-        height,
-        width,
-        callback_steps,
-        negative_prompt,
-        None,
-        prompt_embeds,
-        negative_prompt_embeds,
-        pooled_prompt_embeds,
-        negative_pooled_prompt_embeds,
-    )
-
-    # 2. Define call parameters
-    if prompt is not None and isinstance(prompt, str):
-        batch_size = 1
-    elif prompt is not None and isinstance(prompt, list):
-        batch_size = len(prompt)
-    else:
-        batch_size = prompt_embeds.shape[0]
-
-    device = pipe._execution_device
-
-    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-    # corresponds to doing no classifier free guidance.
-    do_classifier_free_guidance = guidance_scale > 1.0
-
-    # 3. Encode input prompt
-    text_encoder_lora_scale = (
-        cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
-    )
-
-    (
-        prompt_embeds,
-        negative_prompt_embeds,
-        pooled_prompt_embeds,
-        negative_pooled_prompt_embeds,
-    ) = pipe.encode_prompt(
-        prompt=prompt,
-        device=device,
-        num_images_per_prompt=num_images_per_prompt,
-        do_classifier_free_guidance=do_classifier_free_guidance,
-        negative_prompt=negative_prompt,
-        prompt_embeds=None,
-        negative_prompt_embeds=None,
-        pooled_prompt_embeds=None,
-        negative_pooled_prompt_embeds=None,
-        lora_scale=text_encoder_lora_scale,
-    )
-
-    (
-        prompt2_embeds,
-        negative_prompt2_embeds,
-        pooled_prompt2_embeds,
-        negative_pooled_prompt2_embeds,
-    ) = pipe.encode_prompt(
-        prompt=prompt2,
-        device=device,
-        num_images_per_prompt=num_images_per_prompt,
-        do_classifier_free_guidance=do_classifier_free_guidance,
-        negative_prompt=negative_prompt2,
-        prompt_embeds=None,
-        negative_prompt_embeds=None,
-        pooled_prompt_embeds=None,
-        negative_pooled_prompt_embeds=None,
-        lora_scale=text_encoder_lora_scale,
-    )
-
-    # 4. Prepare timesteps
-    pipe.scheduler.set_timesteps(num_inference_steps, device=device)
-
-    timesteps = pipe.scheduler.timesteps
-
-    # 5. Prepare latent variables
-    num_channels_latents = pipe.unet.config.in_channels
-    latents = pipe.prepare_latents(
-        batch_size * num_images_per_prompt,
-        num_channels_latents,
-        height,
-        width,
-        prompt_embeds.dtype,
-        device,
-        generator,
-        latents,
-    )
-
-    # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-    extra_step_kwargs = pipe.prepare_extra_step_kwargs(generator, eta)
-
-    # 7. Prepare added time ids & embeddings
-    add_text_embeds = pooled_prompt_embeds
-    add_text2_embeds = pooled_prompt2_embeds
-
-    add_time_ids = pipe._get_add_time_ids(
-        original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
-    )
-    add_time2_ids = pipe._get_add_time_ids(
-        original_size, crops_coords_top_left, target_size, dtype=prompt2_embeds.dtype
-    )
-
-    if negative_original_size is not None and negative_target_size is not None:
-        negative_add_time_ids = pipe._get_add_time_ids(
-            negative_original_size,
-            negative_crops_coords_top_left,
-            negative_target_size,
-            dtype=prompt_embeds.dtype,
-        )
-    else:
-        negative_add_time_ids = add_time_ids
-        negative_add_time2_ids = add_time2_ids
-
-    if do_classifier_free_guidance:
-        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
-        add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
-        add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
-
-        prompt2_embeds = torch.cat([negative_prompt2_embeds, prompt2_embeds], dim=0)
-        add_text2_embeds = torch.cat([negative_pooled_prompt2_embeds, add_text2_embeds], dim=0)
-        add_time2_ids = torch.cat([negative_add_time2_ids, add_time2_ids], dim=0)
-
-    prompt_embeds = prompt_embeds.to(device)
-    add_text_embeds = add_text_embeds.to(device)
-    add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
-
-    prompt2_embeds = prompt2_embeds.to(device)
-    add_text2_embeds = add_text2_embeds.to(device)
-    add_time2_ids = add_time2_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
-
-    # 8. Denoising loop
-    num_warmup_steps = max(len(timesteps) - num_inference_steps * pipe.scheduler.order, 0)
-
-    # 7.1 Apply denoising_end
-    if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
-        discrete_timestep_cutoff = int(
-            round(
-                pipe.scheduler.config.num_train_timesteps
-                - (denoising_end * pipe.scheduler.config.num_train_timesteps)
-            )
-        )
-        num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
-        timesteps = timesteps[:num_inference_steps]
-
-    with pipe.progress_bar(total=num_inference_steps) as progress_bar:
-        for i, t in enumerate(timesteps):
-            if i % 2 == 0:
-                # expand the latents if we are doing classifier free guidance
-                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-
-                latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)
-
-                # predict the noise residual
-                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
-                noise_pred = pipe.unet(
-                    latent_model_input,
-                    t,
-                    encoder_hidden_states=prompt_embeds,
-                    cross_attention_kwargs=cross_attention_kwargs,
-                    added_cond_kwargs=added_cond_kwargs,
-                    return_dict=False,
-                )[0]
-
-                # perform guidance
-                if do_classifier_free_guidance:
-                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-            else:
-                # expand the latents if we are doing classifier free guidance
-                latent_model_input2 = torch.cat([latents.flip(2)] * 2) if do_classifier_free_guidance else latents
-                latent_model_input2 = pipe.scheduler.scale_model_input(latent_model_input2, t)
-
-                # predict the noise residual
-                added_cond2_kwargs = {"text_embeds": add_text2_embeds, "time_ids": add_time2_ids}
-                noise_pred2 = pipe.unet(
-                    latent_model_input2,
-                    t,
-                    encoder_hidden_states=prompt2_embeds,
-                    cross_attention_kwargs=cross_attention_kwargs,
-                    added_cond_kwargs=added_cond2_kwargs,
-                    return_dict=False,
-                )[0]
-
-                # perform guidance
-                if do_classifier_free_guidance:
-                    noise_pred2_uncond, noise_pred2_text = noise_pred2.chunk(2)
-                    noise_pred2 = noise_pred2_uncond + guidance_scale2 * (noise_pred2_text - noise_pred2_uncond)
-
-            noise_pred = noise_pred if i % 2 == 0 else noise_pred2.flip(2)
-
-            # compute the previous noisy sample x_t -> x_t-1
-            latents = pipe.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
-
-            # call the callback, if provided
-            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % pipe.scheduler.order == 0):
-                progress_bar.update()
-                if callback is not None and i % callback_steps == 0:
-                    callback(i, t, latents)
-
-    if not output_type == "latent":
-        # make sure the VAE is in float32 mode, as it overflows in float16
-        needs_upcasting = pipe.vae.dtype == torch.float16 and pipe.vae.config.force_upcast
-
-        if needs_upcasting:
-            pipe.upcast_vae()
-            latents = latents.to(next(iter(pipe.vae.post_quant_conv.parameters())).dtype)
-
-        image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0]
-
-        # cast back to fp16 if needed
-        if needs_upcasting:
-            pipe.vae.to(dtype=torch.float16)
-    else:
-        image = latents
-
-    if not output_type == "latent":
-        # apply watermark if available
-        if pipe.watermark is not None:
-            image = pipe.watermark.apply_watermark(image)
-
-        image = pipe.image_processor.postprocess(image, output_type=output_type)
-
-    # Offload all models
-    pipe.maybe_free_model_hooks()
-
-    if not return_dict:
-        return (image,)
-
-    return StableDiffusionXLPipelineOutput(images=image)
-
-NEGATIVE_PROMPTS = "text, watermark, low-quality, signature, moiré pattern, downsampling, aliasing, distorted, blurry, glossy, blur, jpeg artifacts, compression artifacts, poorly drawn, low-resolution, bad, distortion, twisted, excessive, exaggerated pose, exaggerated limbs, grainy, symmetrical, duplicate, error, pattern, beginner, pixelated, fake, hyper, glitch, overexposed, high-contrast, bad-contrast"
-
-def rotate_output(has_flipped):
-    if(has_flipped):
-        return gr.Image(elem_classes="not_rotated"), gr.Button("Rotate to see prompt 2!"), not has_flipped
-    else:
-        return gr.Image(elem_classes="rotated"), gr.Button("Rotate to see prompt 1!"), not has_flipped
-
-def simple_call(prompt1, prompt2, profile: gr.OAuthProfile | None=None):
-    generator = [torch.Generator(device="cuda").manual_seed(5)]
-    res = call(
-        pipe,
-        prompt1,
-        prompt2,
-        width=768,
-        height=768,
-        num_images_per_prompt=1,
-        num_inference_steps=50,
-        guidance_scale=5.0,
-        guidance_scale2=8.0,
-        negative_prompt=NEGATIVE_PROMPTS,
-        negative_prompt2=NEGATIVE_PROMPTS,
-        generator=generator
-    )
-    image1 = res.images[0]
-
-    # save generated images (if logged in)
-    user_history.save_image(label=f"{prompt1} / {prompt2}", image=image1, profile=profile, metadata={
-        "prompt2": prompt1,
-        "prompt1": prompt2,
-    })
-
-    return image1
-css = '''
-#result_image{ transition: transform 2s ease-in-out }
-#result_image.rotated{transform: rotate(180deg)}
-'''
-with gr.Blocks() as app:
-    gr.Markdown(
-        '''
-        <center>
-        <h1>Upside Down Diffusion</h1>
-        <p>Code by Alex Carlier, <a href="https://colab.research.google.com/drive/1rjDQOn11cTHAf3Oeq87Hfl_Vh41NbTl4?usp=sharing">Google Colab</a>, follow them on <a href="https://twitter.com/alexcarliera">Twitter</a></p>
-        <p>A space by <a href="https://twitter.com/angrypenguinPNG">AP</a> with contributions from <a href="https://twitter.com/multimodalart">MultimodalArt</a></p>
-        </center>
-        <hr>
-        <p>
-        Enter your first prompt to craft an image that will show when upright. Then, add a second prompt to reveal a mesmerizing surprise when you flip the image upside down! ✨
-        </p>
-        <p>
-        <em>For best results, please include the prompt in the following format: Art Style and Object. Here is an example: Prompt 1: A sketch of a turtle, Prompt 2: A sketch of a tree. Both prompts need to have the same style!</em>
-        </p>
-        '''
-    )
-
-    has_flipped = gr.State(value=False)
-    with gr.Row():
-        with gr.Column():
-            prompt1 = gr.Textbox(label="Prompt 1", info="Prompt for the side up", placeholder="A sketch of a...")
-            prompt2 = gr.Textbox(label="Prompt 2", info="Prompt for the side down", placeholder="A sketch of a...")
-            run_btn = gr.Button("Run")
-
-        with gr.Column():
-            result_image1 = gr.Image(label="Output", elem_id="result_image", elem_classes="not_rotated")
-            rotate_button = gr.Button("Rotate to see prompt 2!")
-
-
-    run_btn.click(
-        simple_call,
-        inputs=[prompt1, prompt2],
-        outputs=[result_image1]
-    )
-    rotate_button.click(
-        rotate_output,
-        inputs=[has_flipped],
-        outputs=[result_image1, rotate_button, has_flipped],
-        queue=False,
-        show_progress=False
-    )
-
-with gr.Blocks(css=css) as app_with_history:
-    with gr.Tab("Upside Down Diffusion"):
-        app.render()
-    with gr.Tab("Past generations"):
-        user_history.render()
-
-app_with_history.queue(max_size=20)
-
-if __name__ == "__main__":
-    app_with_history.launch(debug=True)
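The core of this file is the alternation inside the denoising loop: even steps condition on prompt 1 with the latents as-is, odd steps condition on prompt 2 with the latents flipped along the height axis (dim 2 of a [B, C, H, W] latent), and the resulting noise prediction is flipped back upright before the scheduler step. A minimal sketch of just that alternation, where `unet_eps` is a stand-in for the real UNet call, not the actual pipeline API:

import torch

def unet_eps(latents, prompt_id):
    # stand-in for pipe.unet(...); returns a dummy noise prediction
    return torch.randn_like(latents)

latents = torch.randn(1, 4, 96, 96)  # [B, C, H, W]
for i in range(50):
    if i % 2 == 0:
        noise_pred = unet_eps(latents, prompt_id=1)
    else:
        # denoise the upside-down view, then flip the prediction back upright
        noise_pred = unet_eps(latents.flip(2), prompt_id=2).flip(2)
    # pipe.scheduler.step(noise_pred, t, latents) would consume this prediction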
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov6_s_fast.py
DELETED
@@ -1,124 +0,0 @@
-_base_ = '../yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py'
-
-max_epochs = 100  # maximum number of training epochs
-data_root = './data-df2/'  # absolute path to the dataset directory
-
-# Path where results are saved; may be omitted, in which case outputs go to a
-# folder under work_dirs named after the config.
-# If a config only changes a few parameters, editing this variable lets you
-# save the new training files somewhere else.
-work_dir = './work_dirs/yolov6_s_df2'
-
-# Adjust the batch size to your GPUs; YOLOv5-s defaults to 8 GPUs x 16 bs.
-train_batch_size_per_gpu = 32
-train_num_workers = 4  # recommended: train_num_workers = nGPU x 4
-
-save_epoch_intervals = 2
-
-# Adjust base_lr to your GPUs; scale it as base_lr_default * (your_bs / default_bs).
-base_lr = _base_.base_lr / 4
-
-class_name = ('short_sleeved_shirt',
-              'long_sleeved_shirt',
-              'short_sleeved_outwear',
-              'long_sleeved_outwear',
-              'vest',
-              'sling',
-              'shorts',
-              'trousers',
-              'skirt',
-              'short_sleeved_dress',
-              'long_sleeved_dress',
-              'vest_dress',
-              'sling_dress')  # set class_name from the class info in class_with_id.txt
-
-num_classes = len(class_name)
-metainfo = dict(
-    classes=class_name,
-    palette=[(255, 0, 0),
-             (255, 128, 0),
-             (255, 255, 0),
-             (128, 255, 0),
-             (0, 255, 0),
-             (0, 255, 128),
-             (0, 255, 255),
-             (0, 128, 255),
-             (0, 0, 255),
-             (127, 0, 255),
-             (255, 0, 255),
-             (255, 0, 127),
-             (128, 128, 128)]  # colors used when plotting; arbitrary values are fine
-)
-
-train_cfg = dict(
-    max_epochs=max_epochs,
-    val_begin=20,  # epoch at which validation starts; accuracy is low in the first 20 epochs, so evaluating them is skipped
-    val_interval=save_epoch_intervals,  # run evaluation every val_interval epochs
-    dynamic_intervals=[(max_epochs - _base_.num_last_epochs, 1)]
-)
-
-model = dict(
-    bbox_head=dict(
-        head_module=dict(num_classes=num_classes)),
-    train_cfg=dict(
-        initial_assigner=dict(num_classes=num_classes),
-        assigner=dict(num_classes=num_classes)
-    )
-)
-
-train_dataloader = dict(
-    batch_size=train_batch_size_per_gpu,
-    num_workers=train_num_workers,
-    dataset=dict(
-        _delete_=True,
-        type='RepeatDataset',
-        # With little data, RepeatDataset can repeat the dataset n times per
-        # epoch (e.g. a value of 5 means 5 repeats).
-        times=2,
-        dataset=dict(
-            type=_base_.dataset_type,
-            data_root=data_root,
-            metainfo=metainfo,
-            ann_file='annotations/trainval.json',
-            data_prefix=dict(img='smaller-dataset/'),
-            filter_cfg=dict(filter_empty_gt=False, min_size=32),
-            pipeline=_base_.train_pipeline)))
-
-val_dataloader = dict(
-    dataset=dict(
-        metainfo=metainfo,
-        data_root=data_root,
-        ann_file='annotations/trainval.json',
-        data_prefix=dict(img='smaller-dataset/')))
-
-test_dataloader = val_dataloader
-
-val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json')
-test_evaluator = val_evaluator
-
-optim_wrapper = dict(optimizer=dict(lr=base_lr))
-
-default_hooks = dict(
-    # How often (in epochs) to save checkpoints and how many to keep at most;
-    # `save_best` additionally keeps the best model (recommended).
-    checkpoint=dict(
-        type='CheckpointHook',
-        interval=save_epoch_intervals,
-        max_keep_ckpts=5,
-        save_best='auto'),
-    param_scheduler=dict(max_epochs=max_epochs),
-    # logging interval
-    logger=dict(type='LoggerHook', interval=10))
-
-custom_hooks = [
-    dict(
-        type="EMAHook",
-        ema_type="ExpMomentumEMA",
-        momentum=0.0001,
-        update_buffers=True,
-        strict_load=False,
-        priority=49),
-    dict(
-        type="mmdet.PipelineSwitchHook",
-        switch_epoch=max_epochs - _base_.num_last_epochs,
-        switch_pipeline=_base_.train_pipeline_stage2
-    )
-]
-
-visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])
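MMEngine-style configs like this one are plain Python evaluated by the framework, so they can be loaded and inspected directly. A sketch using `mmengine.config.Config` (a real MMEngine API), run from a checkout where the `_base_` file resolves; the path is illustrative:

from mmengine.config import Config

cfg = Config.fromfile("configs/custom_dataset/yolov6_s_fast.py")
print(cfg.work_dir)                  # ./work_dirs/yolov6_s_df2
print(cfg.train_cfg.max_epochs)      # 100
print(len(cfg.metainfo["classes"]))  # 13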
spaces/AchyuthGamer/OpenGPT/client/css/settings.css
DELETED
@@ -1,44 +0,0 @@
-.settings-container {
-    color: var(--colour-2);
-    margin: 24px 0px 8px 0px;
-    justify-content: center;
-}
-
-.settings-container span {
-    font-size: 0.875rem;
-    margin: 0;
-}
-
-.settings-container label {
-    width: 24px;
-    height: 16px;
-}
-
-.settings-container .field {
-    justify-content: space-between;
-}
-
-.settings-container .checkbox input + label,
-.settings-container .checkbox input:checked + label:after {
-    background: var(--colour-1);
-}
-
-.settings-container .checkbox input + label:after,
-.settings-container .checkbox input:checked + label {
-    background: var(--colour-3);
-}
-
-.settings-container .checkbox label:after {
-    left: 2px;
-    width: 10px;
-    height: 10px;
-}
-
-.settings-container .checkbox input:checked + label:after {
-    left: calc(100% - 2px - 10px);
-}
-
-.settings-container .dropdown {
-    padding: 4px 8px;
-    font-size: 0.75rem;
-}
spaces/AiBototicus/BucksAI-4/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/AiBototicus/autotrain-colors-1-49130118878").launch()
spaces/Aki004/herta-so-vits/inference/slicer.py
DELETED
@@ -1,142 +0,0 @@
-import librosa
-import torch
-import torchaudio
-
-
-class Slicer:
-    def __init__(self,
-                 sr: int,
-                 threshold: float = -40.,
-                 min_length: int = 5000,
-                 min_interval: int = 300,
-                 hop_size: int = 20,
-                 max_sil_kept: int = 5000):
-        if not min_length >= min_interval >= hop_size:
-            raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
-        if not max_sil_kept >= hop_size:
-            raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
-        min_interval = sr * min_interval / 1000
-        self.threshold = 10 ** (threshold / 20.)
-        self.hop_size = round(sr * hop_size / 1000)
-        self.win_size = min(round(min_interval), 4 * self.hop_size)
-        self.min_length = round(sr * min_length / 1000 / self.hop_size)
-        self.min_interval = round(min_interval / self.hop_size)
-        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
-    def _apply_slice(self, waveform, begin, end):
-        if len(waveform.shape) > 1:
-            return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
-        else:
-            return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
-
-    # @timeit
-    def slice(self, waveform):
-        if len(waveform.shape) > 1:
-            samples = librosa.to_mono(waveform)
-        else:
-            samples = waveform
-        if samples.shape[0] <= self.min_length:
-            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
-        rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
-        sil_tags = []
-        silence_start = None
-        clip_start = 0
-        for i, rms in enumerate(rms_list):
-            # Keep looping while frame is silent.
-            if rms < self.threshold:
-                # Record start of silent frames.
-                if silence_start is None:
-                    silence_start = i
-                continue
-            # Keep looping while frame is not silent and silence start has not been recorded.
-            if silence_start is None:
-                continue
-            # Clear recorded silence start if interval is not enough or clip is too short
-            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
-            need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
-            if not is_leading_silence and not need_slice_middle:
-                silence_start = None
-                continue
-            # Need slicing. Record the range of silent frames to be removed.
-            if i - silence_start <= self.max_sil_kept:
-                pos = rms_list[silence_start: i + 1].argmin() + silence_start
-                if silence_start == 0:
-                    sil_tags.append((0, pos))
-                else:
-                    sil_tags.append((pos, pos))
-                clip_start = pos
-            elif i - silence_start <= self.max_sil_kept * 2:
-                pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
-                pos += i - self.max_sil_kept
-                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
-                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                    clip_start = pos_r
-                else:
-                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
-                    clip_start = max(pos_r, pos)
-            else:
-                pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
-                pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                else:
-                    sil_tags.append((pos_l, pos_r))
-                clip_start = pos_r
-            silence_start = None
-        # Deal with trailing silence.
-        total_frames = rms_list.shape[0]
-        if silence_start is not None and total_frames - silence_start >= self.min_interval:
-            silence_end = min(total_frames, silence_start + self.max_sil_kept)
-            pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
-            sil_tags.append((pos, total_frames + 1))
-        # Apply and return slices.
-        if len(sil_tags) == 0:
-            return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
-        else:
-            chunks = []
-            # The first segment is not the beginning of the audio.
-            if sil_tags[0][0]:
-                chunks.append(
-                    {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
-            for i in range(0, len(sil_tags)):
-                # Mark audio segment. Skip the first segment.
-                if i:
-                    chunks.append({"slice": False,
-                                   "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
-                # Mark all mute segments
-                chunks.append({"slice": True,
-                               "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
-            # The last segment is not the end.
-            if sil_tags[-1][1] * self.hop_size < len(waveform):
-                chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
-            chunk_dict = {}
-            for i in range(len(chunks)):
-                chunk_dict[str(i)] = chunks[i]
-            return chunk_dict
-
-
-def cut(audio_path, db_thresh=-30, min_len=5000):
-    audio, sr = librosa.load(audio_path, sr=None)
-    slicer = Slicer(
-        sr=sr,
-        threshold=db_thresh,
-        min_length=min_len
-    )
-    chunks = slicer.slice(audio)
-    return chunks
-
-
-def chunks2audio(audio_path, chunks):
-    chunks = dict(chunks)
-    audio, sr = torchaudio.load(audio_path)
-    if len(audio.shape) == 2 and audio.shape[1] >= 2:
-        audio = torch.mean(audio, dim=0).unsqueeze(0)
-    audio = audio.cpu().numpy()[0]
-    result = []
-    for k, v in chunks.items():
-        tag = v["split_time"].split(",")
-        if tag[0] != tag[1]:
-            result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
-    return result, sr
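The slicer returns a dict of chunk descriptors rather than audio: `cut()` builds them from RMS-based silence detection, and `chunks2audio()` materializes the segments, tagging each with its `slice` flag (True marks a silent chunk). A sketch writing each voiced segment to disk with soundfile, assuming the module above is importable and using illustrative file names:

import soundfile as sf
from slicer import cut, chunks2audio  # the module above

chunks = cut("input.wav", db_thresh=-30, min_len=5000)
segments, sr = chunks2audio("input.wav", chunks)
for idx, (is_silence, seg) in enumerate(segments):
    if not is_silence:  # keep only the voiced chunks
        sf.write(f"chunk_{idx}.wav", seg, sr)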
spaces/Alesteba/NeRF_ficus-pxl/transformations.py
DELETED
@@ -1,47 +0,0 @@
-import streamlit as st
-import tensorflow as tf
-import numpy as np
-
-def get_translation_t(t):
-    """Get the translation matrix for movement in t."""
-    matrix = [
-        [1, 0, 0, 0],
-        [0, 1, 0, 0],
-        [0, 0, 1, t],
-        [0, 0, 0, 1],
-    ]
-    return tf.convert_to_tensor(matrix, dtype=tf.float32)
-
-
-def get_rotation_phi(phi):
-    """Get the rotation matrix for movement in phi."""
-    matrix = [
-        [1, 0, 0, 0],
-        [0, tf.cos(phi), -tf.sin(phi), 0],
-        [0, tf.sin(phi), tf.cos(phi), 0],
-        [0, 0, 0, 1],
-    ]
-    return tf.convert_to_tensor(matrix, dtype=tf.float32)
-
-
-def get_rotation_theta(theta):
-    """Get the rotation matrix for movement in theta."""
-    matrix = [
-        [tf.cos(theta), 0, -tf.sin(theta), 0],
-        [0, 1, 0, 0],
-        [tf.sin(theta), 0, tf.cos(theta), 0],
-        [0, 0, 0, 1],
-    ]
-    return tf.convert_to_tensor(matrix, dtype=tf.float32)
-
-
-def pose_spherical(theta, phi, t):
-    """
-    Get the camera to world matrix for the corresponding theta, phi
-    and t.
-    """
-    c2w = get_translation_t(t)
-    c2w = get_rotation_phi(phi / 180.0 * np.pi) @ c2w
-    c2w = get_rotation_theta(theta / 180.0 * np.pi) @ c2w
-    c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w
-    return c2w
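`pose_spherical` composes a translation along z with two rotations and an axis-remapping matrix to produce a 4x4 camera-to-world matrix. A quick shape check, assuming TensorFlow is installed and the module above is importable:

import numpy as np
from transformations import pose_spherical  # the module above

c2w = pose_spherical(theta=30.0, phi=-30.0, t=4.0)
print(np.asarray(c2w).shape)  # (4, 4) camera-to-world matrix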
spaces/AlexWang/lama/models/ade20k/resnet.py
DELETED
@@ -1,181 +0,0 @@
-"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch"""
-
-import math
-
-import torch.nn as nn
-from torch.nn import BatchNorm2d
-
-from .utils import load_url
-
-__all__ = ['ResNet', 'resnet50']
-
-
-model_urls = {
-    'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',
-}
-
-
-def conv3x3(in_planes, out_planes, stride=1):
-    "3x3 convolution with padding"
-    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
-                     padding=1, bias=False)
-
-
-class BasicBlock(nn.Module):
-    expansion = 1
-
-    def __init__(self, inplanes, planes, stride=1, downsample=None):
-        super(BasicBlock, self).__init__()
-        self.conv1 = conv3x3(inplanes, planes, stride)
-        self.bn1 = BatchNorm2d(planes)
-        self.relu = nn.ReLU(inplace=True)
-        self.conv2 = conv3x3(planes, planes)
-        self.bn2 = BatchNorm2d(planes)
-        self.downsample = downsample
-        self.stride = stride
-
-    def forward(self, x):
-        residual = x
-
-        out = self.conv1(x)
-        out = self.bn1(out)
-        out = self.relu(out)
-
-        out = self.conv2(out)
-        out = self.bn2(out)
-
-        if self.downsample is not None:
-            residual = self.downsample(x)
-
-        out += residual
-        out = self.relu(out)
-
-        return out
-
-
-class Bottleneck(nn.Module):
-    expansion = 4
-
-    def __init__(self, inplanes, planes, stride=1, downsample=None):
-        super(Bottleneck, self).__init__()
-        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
-        self.bn1 = BatchNorm2d(planes)
-        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
-                               padding=1, bias=False)
-        self.bn2 = BatchNorm2d(planes)
-        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
-        self.bn3 = BatchNorm2d(planes * 4)
-        self.relu = nn.ReLU(inplace=True)
-        self.downsample = downsample
-        self.stride = stride
-
-    def forward(self, x):
-        residual = x
-
-        out = self.conv1(x)
-        out = self.bn1(out)
-        out = self.relu(out)
-
-        out = self.conv2(out)
-        out = self.bn2(out)
-        out = self.relu(out)
-
-        out = self.conv3(out)
-        out = self.bn3(out)
-
-        if self.downsample is not None:
-            residual = self.downsample(x)
-
-        out += residual
-        out = self.relu(out)
-
-        return out
-
-
-class ResNet(nn.Module):
-
-    def __init__(self, block, layers, num_classes=1000):
-        self.inplanes = 128
-        super(ResNet, self).__init__()
-        self.conv1 = conv3x3(3, 64, stride=2)
-        self.bn1 = BatchNorm2d(64)
-        self.relu1 = nn.ReLU(inplace=True)
-        self.conv2 = conv3x3(64, 64)
-        self.bn2 = BatchNorm2d(64)
-        self.relu2 = nn.ReLU(inplace=True)
-        self.conv3 = conv3x3(64, 128)
-        self.bn3 = BatchNorm2d(128)
-        self.relu3 = nn.ReLU(inplace=True)
-        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-
-        self.layer1 = self._make_layer(block, 64, layers[0])
-        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
-        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
-        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
-        self.avgpool = nn.AvgPool2d(7, stride=1)
-        self.fc = nn.Linear(512 * block.expansion, num_classes)
-
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
-                m.weight.data.normal_(0, math.sqrt(2. / n))
-            elif isinstance(m, BatchNorm2d):
-                m.weight.data.fill_(1)
-                m.bias.data.zero_()
-
-    def _make_layer(self, block, planes, blocks, stride=1):
-        downsample = None
-        if stride != 1 or self.inplanes != planes * block.expansion:
-            downsample = nn.Sequential(
-                nn.Conv2d(self.inplanes, planes * block.expansion,
-                          kernel_size=1, stride=stride, bias=False),
-                BatchNorm2d(planes * block.expansion),
-            )
-
-        layers = []
-        layers.append(block(self.inplanes, planes, stride, downsample))
-        self.inplanes = planes * block.expansion
-        for i in range(1, blocks):
-            layers.append(block(self.inplanes, planes))
-
-        return nn.Sequential(*layers)
-
-    def forward(self, x):
-        x = self.relu1(self.bn1(self.conv1(x)))
-        x = self.relu2(self.bn2(self.conv2(x)))
-        x = self.relu3(self.bn3(self.conv3(x)))
-        x = self.maxpool(x)
-
-        x = self.layer1(x)
-        x = self.layer2(x)
-        x = self.layer3(x)
-        x = self.layer4(x)
-
-        x = self.avgpool(x)
-        x = x.view(x.size(0), -1)
-        x = self.fc(x)
-
-        return x
-
-
-def resnet50(pretrained=False, **kwargs):
-    """Constructs a ResNet-50 model.
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
-    if pretrained:
-        model.load_state_dict(load_url(model_urls['resnet50']), strict=False)
-    return model
-
-
-def resnet18(pretrained=False, **kwargs):
-    """Constructs a ResNet-18 model.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
-    if pretrained:
-        model.load_state_dict(load_url(model_urls['resnet18']))
-    return model
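A quick smoke test for the classifier variant defined above (with `pretrained=False`, so nothing is downloaded); with this deep-base stem, a 224x224 input reaches the 7x7 average pool exactly:

import torch
from resnet import resnet50  # the module above

model = resnet50(pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 10])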
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/training_stats.py
DELETED
@@ -1,268 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Facilities for reporting and collecting training statistics across
-multiple processes and devices. The interface is designed to minimize
-synchronization overhead as well as the amount of boilerplate in user
-code."""
-
-import re
-import numpy as np
-import torch
-import dnnlib
-
-from . import misc
-
-#----------------------------------------------------------------------------
-
-_num_moments    = 3              # [num_scalars, sum_of_scalars, sum_of_squares]
-_reduce_dtype   = torch.float32  # Data type to use for initial per-tensor reduction.
-_counter_dtype  = torch.float64  # Data type to use for the internal counters.
-_rank           = 0              # Rank of the current process.
-_sync_device    = None           # Device to use for multiprocess communication. None = single-process.
-_sync_called    = False          # Has _sync() been called yet?
-_counters       = dict()         # Running counters on each device, updated by report(): name => device => torch.Tensor
-_cumulative     = dict()         # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
-
-#----------------------------------------------------------------------------
-
-def init_multiprocessing(rank, sync_device):
-    r"""Initializes `torch_utils.training_stats` for collecting statistics
-    across multiple processes.
-
-    This function must be called after
-    `torch.distributed.init_process_group()` and before `Collector.update()`.
-    The call is not necessary if multi-process collection is not needed.
-
-    Args:
-        rank:           Rank of the current process.
-        sync_device:    PyTorch device to use for inter-process
-                        communication, or None to disable multi-process
-                        collection. Typically `torch.device('cuda', rank)`.
-    """
-    global _rank, _sync_device
-    assert not _sync_called
-    _rank = rank
-    _sync_device = sync_device
-
-#----------------------------------------------------------------------------
-
-@misc.profiled_function
-def report(name, value):
-    r"""Broadcasts the given set of scalars to all interested instances of
-    `Collector`, across device and process boundaries.
-
-    This function is expected to be extremely cheap and can be safely
-    called from anywhere in the training loop, loss function, or inside a
-    `torch.nn.Module`.
-
-    Warning: The current implementation expects the set of unique names to
-    be consistent across processes. Please make sure that `report()` is
-    called at least once for each unique name by each process, and in the
-    same order. If a given process has no scalars to broadcast, it can do
-    `report(name, [])` (empty list).
-
-    Args:
-        name:   Arbitrary string specifying the name of the statistic.
-                Averages are accumulated separately for each unique name.
-        value:  Arbitrary set of scalars. Can be a list, tuple,
-                NumPy array, PyTorch tensor, or Python scalar.
-
-    Returns:
-        The same `value` that was passed in.
-    """
-    if name not in _counters:
-        _counters[name] = dict()
-
-    elems = torch.as_tensor(value)
-    if elems.numel() == 0:
-        return value
-
-    elems = elems.detach().flatten().to(_reduce_dtype)
-    moments = torch.stack([
-        torch.ones_like(elems).sum(),
-        elems.sum(),
-        elems.square().sum(),
-    ])
-    assert moments.ndim == 1 and moments.shape[0] == _num_moments
-    moments = moments.to(_counter_dtype)
-
-    device = moments.device
-    if device not in _counters[name]:
-        _counters[name][device] = torch.zeros_like(moments)
-    _counters[name][device].add_(moments)
-    return value
-
-#----------------------------------------------------------------------------
-
-def report0(name, value):
-    r"""Broadcasts the given set of scalars by the first process (`rank = 0`),
-    but ignores any scalars provided by the other processes.
-    See `report()` for further details.
-    """
-    report(name, value if _rank == 0 else [])
-    return value
-
-#----------------------------------------------------------------------------
-
-class Collector:
-    r"""Collects the scalars broadcasted by `report()` and `report0()` and
-    computes their long-term averages (mean and standard deviation) over
-    user-defined periods of time.
-
-    The averages are first collected into internal counters that are not
-    directly visible to the user. They are then copied to the user-visible
-    state as a result of calling `update()` and can then be queried using
-    `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
-    internal counters for the next round, so that the user-visible state
-    effectively reflects averages collected between the last two calls to
-    `update()`.
-
-    Args:
-        regex:          Regular expression defining which statistics to
-                        collect. The default is to collect everything.
-        keep_previous:  Whether to retain the previous averages if no
-                        scalars were collected on a given round
-                        (default: True).
-    """
-    def __init__(self, regex='.*', keep_previous=True):
-        self._regex = re.compile(regex)
-        self._keep_previous = keep_previous
-        self._cumulative = dict()
-        self._moments = dict()
-        self.update()
-        self._moments.clear()
-
-    def names(self):
-        r"""Returns the names of all statistics broadcasted so far that
-        match the regular expression specified at construction time.
-        """
-        return [name for name in _counters if self._regex.fullmatch(name)]
-
-    def update(self):
-        r"""Copies current values of the internal counters to the
-        user-visible state and resets them for the next round.
-
-        If `keep_previous=True` was specified at construction time, the
-        operation is skipped for statistics that have received no scalars
-        since the last update, retaining their previous averages.
-
-        This method performs a number of GPU-to-CPU transfers and one
-        `torch.distributed.all_reduce()`. It is intended to be called
-        periodically in the main training loop, typically once every
-        N training steps.
-        """
-        if not self._keep_previous:
-            self._moments.clear()
-        for name, cumulative in _sync(self.names()):
-            if name not in self._cumulative:
-                self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
|
165 |
-
delta = cumulative - self._cumulative[name]
|
166 |
-
self._cumulative[name].copy_(cumulative)
|
167 |
-
if float(delta[0]) != 0:
|
168 |
-
self._moments[name] = delta
|
169 |
-
|
170 |
-
def _get_delta(self, name):
|
171 |
-
r"""Returns the raw moments that were accumulated for the given
|
172 |
-
statistic between the last two calls to `update()`, or zero if
|
173 |
-
no scalars were collected.
|
174 |
-
"""
|
175 |
-
assert self._regex.fullmatch(name)
|
176 |
-
if name not in self._moments:
|
177 |
-
self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
|
178 |
-
return self._moments[name]
|
179 |
-
|
180 |
-
def num(self, name):
|
181 |
-
r"""Returns the number of scalars that were accumulated for the given
|
182 |
-
statistic between the last two calls to `update()`, or zero if
|
183 |
-
no scalars were collected.
|
184 |
-
"""
|
185 |
-
delta = self._get_delta(name)
|
186 |
-
return int(delta[0])
|
187 |
-
|
188 |
-
def mean(self, name):
|
189 |
-
r"""Returns the mean of the scalars that were accumulated for the
|
190 |
-
given statistic between the last two calls to `update()`, or NaN if
|
191 |
-
no scalars were collected.
|
192 |
-
"""
|
193 |
-
delta = self._get_delta(name)
|
194 |
-
if int(delta[0]) == 0:
|
195 |
-
return float('nan')
|
196 |
-
return float(delta[1] / delta[0])
|
197 |
-
|
198 |
-
def std(self, name):
|
199 |
-
r"""Returns the standard deviation of the scalars that were
|
200 |
-
accumulated for the given statistic between the last two calls to
|
201 |
-
`update()`, or NaN if no scalars were collected.
|
202 |
-
"""
|
203 |
-
delta = self._get_delta(name)
|
204 |
-
if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
|
205 |
-
return float('nan')
|
206 |
-
if int(delta[0]) == 1:
|
207 |
-
return float(0)
|
208 |
-
mean = float(delta[1] / delta[0])
|
209 |
-
raw_var = float(delta[2] / delta[0])
|
210 |
-
return np.sqrt(max(raw_var - np.square(mean), 0))
|
211 |
-
|
212 |
-
def as_dict(self):
|
213 |
-
r"""Returns the averages accumulated between the last two calls to
|
214 |
-
`update()` as an `dnnlib.EasyDict`. The contents are as follows:
|
215 |
-
|
216 |
-
dnnlib.EasyDict(
|
217 |
-
NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
|
218 |
-
...
|
219 |
-
)
|
220 |
-
"""
|
221 |
-
stats = dnnlib.EasyDict()
|
222 |
-
for name in self.names():
|
223 |
-
stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
|
224 |
-
return stats
|
225 |
-
|
226 |
-
def __getitem__(self, name):
|
227 |
-
r"""Convenience getter.
|
228 |
-
`collector[name]` is a synonym for `collector.mean(name)`.
|
229 |
-
"""
|
230 |
-
return self.mean(name)
|
231 |
-
|
232 |
-
#----------------------------------------------------------------------------
|
233 |
-
|
234 |
-
def _sync(names):
|
235 |
-
r"""Synchronize the global cumulative counters across devices and
|
236 |
-
processes. Called internally by `Collector.update()`.
|
237 |
-
"""
|
238 |
-
if len(names) == 0:
|
239 |
-
return []
|
240 |
-
global _sync_called
|
241 |
-
_sync_called = True
|
242 |
-
|
243 |
-
# Collect deltas within current rank.
|
244 |
-
deltas = []
|
245 |
-
device = _sync_device if _sync_device is not None else torch.device('cpu')
|
246 |
-
for name in names:
|
247 |
-
delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
|
248 |
-
for counter in _counters[name].values():
|
249 |
-
delta.add_(counter.to(device))
|
250 |
-
counter.copy_(torch.zeros_like(counter))
|
251 |
-
deltas.append(delta)
|
252 |
-
deltas = torch.stack(deltas)
|
253 |
-
|
254 |
-
# Sum deltas across ranks.
|
255 |
-
if _sync_device is not None:
|
256 |
-
torch.distributed.all_reduce(deltas)
|
257 |
-
|
258 |
-
# Update cumulative values.
|
259 |
-
deltas = deltas.cpu()
|
260 |
-
for idx, name in enumerate(names):
|
261 |
-
if name not in _cumulative:
|
262 |
-
_cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
|
263 |
-
_cumulative[name].add_(deltas[idx])
|
264 |
-
|
265 |
-
# Return name-value pairs.
|
266 |
-
return [(name, _cumulative[name]) for name in names]
|
267 |
-
|
268 |
-
#----------------------------------------------------------------------------
|
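For reference, a minimal single-process usage sketch of the module above (it assumes the surrounding StyleGAN-style repo so that `torch_utils.training_stats` and its `dnnlib` dependency are importable; the statistic name and training loop are hypothetical):

```python
import torch
from torch_utils import training_stats

# Collect only statistics whose names match the regex.
collector = training_stats.Collector(regex='Loss/.*')

for step in range(300):
    loss = torch.rand([])                      # hypothetical per-step scalar
    training_stats.report('Loss/total', loss)  # cheap; safe to call anywhere

    if step % 100 == 0:
        collector.update()  # fold per-device counters into the user-visible state
        print(step, collector.num('Loss/total'), collector.mean('Loss/total'))
```
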
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
DELETED
@@ -1,272 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import gc
-import random
-import unittest
-
-import numpy as np
-import torch
-
-from diffusers import (
-    DDIMScheduler,
-    KandinskyV22ControlnetPipeline,
-    KandinskyV22PriorPipeline,
-    UNet2DConditionModel,
-    VQModel,
-)
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
-
-from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
-
-
-enable_full_determinism()
-
-
-class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
-    pipeline_class = KandinskyV22ControlnetPipeline
-    params = ["image_embeds", "negative_image_embeds", "hint"]
-    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
-    required_optional_params = [
-        "generator",
-        "height",
-        "width",
-        "latents",
-        "guidance_scale",
-        "num_inference_steps",
-        "return_dict",
-        "guidance_scale",
-        "num_images_per_prompt",
-        "output_type",
-        "return_dict",
-    ]
-    test_xformers_attention = False
-
-    @property
-    def text_embedder_hidden_size(self):
-        return 32
-
-    @property
-    def time_input_dim(self):
-        return 32
-
-    @property
-    def block_out_channels_0(self):
-        return self.time_input_dim
-
-    @property
-    def time_embed_dim(self):
-        return self.time_input_dim * 4
-
-    @property
-    def cross_attention_dim(self):
-        return 100
-
-    @property
-    def dummy_unet(self):
-        torch.manual_seed(0)
-
-        model_kwargs = {
-            "in_channels": 8,
-            # Out channels is double in channels because predicts mean and variance
-            "out_channels": 8,
-            "addition_embed_type": "image_hint",
-            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
-            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
-            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
-            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
-            "layers_per_block": 1,
-            "encoder_hid_dim": self.text_embedder_hidden_size,
-            "encoder_hid_dim_type": "image_proj",
-            "cross_attention_dim": self.cross_attention_dim,
-            "attention_head_dim": 4,
-            "resnet_time_scale_shift": "scale_shift",
-            "class_embed_type": None,
-        }
-
-        model = UNet2DConditionModel(**model_kwargs)
-        return model
-
-    @property
-    def dummy_movq_kwargs(self):
-        return {
-            "block_out_channels": [32, 32, 64, 64],
-            "down_block_types": [
-                "DownEncoderBlock2D",
-                "DownEncoderBlock2D",
-                "DownEncoderBlock2D",
-                "AttnDownEncoderBlock2D",
-            ],
-            "in_channels": 3,
-            "latent_channels": 4,
-            "layers_per_block": 1,
-            "norm_num_groups": 8,
-            "norm_type": "spatial",
-            "num_vq_embeddings": 12,
-            "out_channels": 3,
-            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
-            "vq_embed_dim": 4,
-        }
-
-    @property
-    def dummy_movq(self):
-        torch.manual_seed(0)
-        model = VQModel(**self.dummy_movq_kwargs)
-        return model
-
-    def get_dummy_components(self):
-        unet = self.dummy_unet
-        movq = self.dummy_movq
-
-        scheduler = DDIMScheduler(
-            num_train_timesteps=1000,
-            beta_schedule="linear",
-            beta_start=0.00085,
-            beta_end=0.012,
-            clip_sample=False,
-            set_alpha_to_one=False,
-            steps_offset=1,
-            prediction_type="epsilon",
-            thresholding=False,
-        )
-
-        components = {
-            "unet": unet,
-            "scheduler": scheduler,
-            "movq": movq,
-        }
-        return components
-
-    def get_dummy_inputs(self, device, seed=0):
-        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
-        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
-            device
-        )
-
-        # create hint
-        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
-
-        if str(device).startswith("mps"):
-            generator = torch.manual_seed(seed)
-        else:
-            generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = {
-            "image_embeds": image_embeds,
-            "negative_image_embeds": negative_image_embeds,
-            "hint": hint,
-            "generator": generator,
-            "height": 64,
-            "width": 64,
-            "guidance_scale": 4.0,
-            "num_inference_steps": 2,
-            "output_type": "np",
-        }
-        return inputs
-
-    def test_kandinsky_controlnet(self):
-        device = "cpu"
-
-        components = self.get_dummy_components()
-
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(device)
-
-        pipe.set_progress_bar_config(disable=None)
-
-        output = pipe(**self.get_dummy_inputs(device))
-        image = output.images
-
-        image_from_tuple = pipe(
-            **self.get_dummy_inputs(device),
-            return_dict=False,
-        )[0]
-
-        image_slice = image[0, -3:, -3:, -1]
-        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
-
-        assert image.shape == (1, 64, 64, 3)
-
-        expected_slice = np.array(
-            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
-        )
-
-        assert (
-            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
-
-        assert (
-            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-
-
-@slow
-@require_torch_gpu
-class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
-    def tearDown(self):
-        # clean up the VRAM after each test
-        super().tearDown()
-        gc.collect()
-        torch.cuda.empty_cache()
-
-    def test_kandinsky_controlnet(self):
-        expected_image = load_numpy(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
-        )
-
-        hint = load_image(
-            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
-            "/kandinskyv22/hint_image_cat.png"
-        )
-        hint = torch.from_numpy(np.array(hint)).float() / 255.0
-        hint = hint.permute(2, 0, 1).unsqueeze(0)
-
-        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
-            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
-        )
-        pipe_prior.to(torch_device)
-
-        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
-            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
-        )
-        pipeline = pipeline.to(torch_device)
-        pipeline.set_progress_bar_config(disable=None)
-
-        prompt = "A robot, 4k photo"
-
-        generator = torch.Generator(device="cuda").manual_seed(0)
-        image_emb, zero_image_emb = pipe_prior(
-            prompt,
-            generator=generator,
-            num_inference_steps=5,
-            negative_prompt="",
-        ).to_tuple()
-
-        generator = torch.Generator(device="cuda").manual_seed(0)
-        output = pipeline(
-            image_embeds=image_emb,
-            negative_image_embeds=zero_image_emb,
-            hint=hint,
-            generator=generator,
-            num_inference_steps=100,
-            output_type="np",
-        )
-
-        image = output.images[0]
-
-        assert image.shape == (512, 512, 3)
-
-        assert_mean_pixel_difference(image, expected_image)
spaces/Andy1621/uniformer_image_detection/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py
DELETED
@@ -1,77 +0,0 @@
-_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
-model = dict(
-    rpn_head=dict(
-        _delete_=True,
-        type='CascadeRPNHead',
-        num_stages=2,
-        stages=[
-            dict(
-                type='StageCascadeRPNHead',
-                in_channels=256,
-                feat_channels=256,
-                anchor_generator=dict(
-                    type='AnchorGenerator',
-                    scales=[8],
-                    ratios=[1.0],
-                    strides=[4, 8, 16, 32, 64]),
-                adapt_cfg=dict(type='dilation', dilation=3),
-                bridged_feature=True,
-                sampling=False,
-                with_cls=False,
-                reg_decoded_bbox=True,
-                bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=(.0, .0, .0, .0),
-                    target_stds=(0.1, 0.1, 0.5, 0.5)),
-                loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)),
-            dict(
-                type='StageCascadeRPNHead',
-                in_channels=256,
-                feat_channels=256,
-                adapt_cfg=dict(type='offset'),
-                bridged_feature=False,
-                sampling=True,
-                with_cls=True,
-                reg_decoded_bbox=True,
-                bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=(.0, .0, .0, .0),
-                    target_stds=(0.05, 0.05, 0.1, 0.1)),
-                loss_cls=dict(
-                    type='CrossEntropyLoss', use_sigmoid=True,
-                    loss_weight=1.0),
-                loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0))
-        ]),
-    train_cfg=dict(rpn=[
-        dict(
-            assigner=dict(
-                type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
-            allowed_border=-1,
-            pos_weight=-1,
-            debug=False),
-        dict(
-            assigner=dict(
-                type='MaxIoUAssigner',
-                pos_iou_thr=0.7,
-                neg_iou_thr=0.7,
-                min_pos_iou=0.3,
-                ignore_iof_thr=-1,
-                iou_calculator=dict(type='BboxOverlaps2D')),
-            sampler=dict(
-                type='RandomSampler',
-                num=256,
-                pos_fraction=0.5,
-                neg_pos_ub=-1,
-                add_gt_as_proposals=False),
-            allowed_border=-1,
-            pos_weight=-1,
-            debug=False)
-    ]),
-    test_cfg=dict(
-        rpn=dict(
-            nms_pre=2000,
-            max_per_img=2000,
-            nms=dict(type='nms', iou_threshold=0.8),
-            min_bbox_size=0)))
-optimizer_config = dict(
-    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
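The `_delete_=True` markers in this config follow MMCV's config-inheritance semantics: the child dict replaces the corresponding dict from `_base_` outright instead of being merged into it. A quick way to inspect the resolved values is shown below (a sketch assuming `mmcv` is installed and the path is adjusted to your checkout):

```python
from mmcv import Config

# Load the config; _base_ files are resolved and _delete_ keys applied.
cfg = Config.fromfile('configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py')
print(cfg.model.rpn_head.type)        # 'CascadeRPNHead' -- the base RPN head was replaced, not merged
print(cfg.model.rpn_head.num_stages)  # 2
```
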
spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
DELETED
@@ -1,4 +0,0 @@
-_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
-model = dict(
-    backbone=dict(
-        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
spaces/Andy1621/uniformer_image_detection/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py
DELETED
@@ -1,45 +0,0 @@
-_base_ = '../grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py'
-# model settings
-model = dict(
-    roi_head=dict(
-        bbox_roi_extractor=dict(
-            type='GenericRoIExtractor',
-            aggregation='sum',
-            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
-            out_channels=256,
-            featmap_strides=[4, 8, 16, 32],
-            pre_cfg=dict(
-                type='ConvModule',
-                in_channels=256,
-                out_channels=256,
-                kernel_size=5,
-                padding=2,
-                inplace=False,
-            ),
-            post_cfg=dict(
-                type='GeneralizedAttention',
-                in_channels=256,
-                spatial_range=-1,
-                num_heads=6,
-                attention_type='0100',
-                kv_stride=2)),
-        grid_roi_extractor=dict(
-            type='GenericRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
-            out_channels=256,
-            featmap_strides=[4, 8, 16, 32],
-            pre_cfg=dict(
-                type='ConvModule',
-                in_channels=256,
-                out_channels=256,
-                kernel_size=5,
-                padding=2,
-                inplace=False,
-            ),
-            post_cfg=dict(
-                type='GeneralizedAttention',
-                in_channels=256,
-                spatial_range=-1,
-                num_heads=6,
-                attention_type='0100',
-                kv_stride=2))))
spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py
DELETED
@@ -1,13 +0,0 @@
-_base_ = './rpn_r50_fpn_2x_coco.py'
-model = dict(
-    pretrained='open-mmlab://resnext101_32x4d',
-    backbone=dict(
-        type='ResNeXt',
-        depth=101,
-        groups=32,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        style='pytorch'))
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/rpn_test_mixin.py
DELETED
@@ -1,59 +0,0 @@
-import sys
-
-from mmdet.core import merge_aug_proposals
-
-if sys.version_info >= (3, 7):
-    from mmdet.utils.contextmanagers import completed
-
-
-class RPNTestMixin(object):
-    """Test methods of RPN."""
-
-    if sys.version_info >= (3, 7):
-
-        async def async_simple_test_rpn(self, x, img_metas):
-            sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)
-            async with completed(
-                    __name__, 'rpn_head_forward',
-                    sleep_interval=sleep_interval):
-                rpn_outs = self(x)
-
-            proposal_list = self.get_bboxes(*rpn_outs, img_metas)
-            return proposal_list
-
-    def simple_test_rpn(self, x, img_metas):
-        """Test without augmentation.
-
-        Args:
-            x (tuple[Tensor]): Features from the upstream network, each is
-                a 4D-tensor.
-            img_metas (list[dict]): Meta info of each image.
-
-        Returns:
-            list[Tensor]: Proposals of each image.
-        """
-        rpn_outs = self(x)
-        proposal_list = self.get_bboxes(*rpn_outs, img_metas)
-        return proposal_list
-
-    def aug_test_rpn(self, feats, img_metas):
-        samples_per_gpu = len(img_metas[0])
-        aug_proposals = [[] for _ in range(samples_per_gpu)]
-        for x, img_meta in zip(feats, img_metas):
-            proposal_list = self.simple_test_rpn(x, img_meta)
-            for i, proposals in enumerate(proposal_list):
-                aug_proposals[i].append(proposals)
-        # reorganize the order of 'img_metas' to match the dimensions
-        # of 'aug_proposals'
-        aug_img_metas = []
-        for i in range(samples_per_gpu):
-            aug_img_meta = []
-            for j in range(len(img_metas)):
-                aug_img_meta.append(img_metas[j][i])
-            aug_img_metas.append(aug_img_meta)
-        # after merging, proposals will be rescaled to the original image size
-        merged_proposals = [
-            merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
-            for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
-        ]
-        return merged_proposals
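The `img_metas` reorganization in `aug_test_rpn` above is simply a nested-list transpose, from `[augmentation][sample]` order to `[sample][augmentation]` order. A standalone sketch with placeholder values:

```python
# img_metas is indexed [augmentation][sample]; aug_proposals is [sample][augmentation].
img_metas = [['aug0_img0', 'aug0_img1'],
             ['aug1_img0', 'aug1_img1']]
aug_img_metas = [list(per_sample) for per_sample in zip(*img_metas)]
print(aug_img_metas)  # [['aug0_img0', 'aug1_img0'], ['aug0_img1', 'aug1_img1']]
```
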
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/README.md
DELETED
@@ -1,66 +0,0 @@
-# Deep High-Resolution Representation Learning for Human Pose Estimation
-
-## Introduction
-
-<!-- [ALGORITHM] -->
-
-```latext
-@inproceedings{SunXLW19,
-  title={Deep High-Resolution Representation Learning for Human Pose Estimation},
-  author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang},
-  booktitle={CVPR},
-  year={2019}
-}
-```
-
-## Results and models
-
-### Cityscapes
-
-| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
-| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
-| FCN | HRNetV2p-W18-Small | 512x1024 | 40000 | 1.7 | 23.74 | 73.86 | 75.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216.log.json) |
-| FCN | HRNetV2p-W18 | 512x1024 | 40000 | 2.9 | 12.97 | 77.19 | 78.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216.log.json) |
-| FCN | HRNetV2p-W48 | 512x1024 | 40000 | 6.2 | 6.42 | 78.48 | 79.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240.log.json) |
-| FCN | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 75.31 | 77.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700.log.json) |
-| FCN | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.65 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255.log.json) |
-| FCN | HRNetV2p-W48 | 512x1024 | 80000 | - | - | 79.93 | 80.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606.log.json) |
-| FCN | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 76.31 | 78.31 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901.log.json) |
-| FCN | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 78.80 | 80.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822.log.json) |
-| FCN | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 80.65 | 81.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946.log.json) |
-
-### ADE20K
-
-| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
-| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
-| FCN | HRNetV2p-W18-Small | 512x512 | 80000 | 3.8 | 38.66 | 31.38 | 32.45 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345.log.json) |
-| FCN | HRNetV2p-W18 | 512x512 | 80000 | 4.9 | 22.57 | 35.51 | 36.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20200614_185145-66f20cb7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20200614_185145.log.json) |
-| FCN | HRNetV2p-W48 | 512x512 | 80000 | 8.2 | 21.23 | 41.90 | 43.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946.log.json) |
-| FCN | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 33.00 | 34.55 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20200614_214413.log.json) |
-| FCN | HRNetV2p-W18 | 512x512 | 160000 | - | - | 36.79 | 38.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426.log.json) |
-| FCN | HRNetV2p-W48 | 512x512 | 160000 | - | - | 42.02 | 43.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407.log.json) |
-
-### Pascal VOC 2012 + Aug
-
-| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
-| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
-| FCN | HRNetV2p-W18-Small | 512x512 | 20000 | 1.8 | 43.36 | 65.20 | 68.55 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20200617_224503-56e36088.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20200617_224503.log.json) |
-| FCN | HRNetV2p-W18 | 512x512 | 20000 | 2.9 | 23.48 | 72.30 | 74.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503.log.json) |
-| FCN | HRNetV2p-W48 | 512x512 | 20000 | 6.2 | 22.05 | 75.87 | 78.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419.log.json) |
-| FCN | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 66.61 | 70.00 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648.log.json) |
-| FCN | HRNetV2p-W18 | 512x512 | 40000 | - | - | 72.90 | 75.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401.log.json) |
-| FCN | HRNetV2p-W48 | 512x512 | 40000 | - | - | 76.24 | 78.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111.log.json) |
-
-### Pascal Context
-
-| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
-| ------ | ------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
-| FCN | HRNetV2p-W48 | 480x480 | 40000 | 6.1 | 8.86 | 45.14 | 47.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context_20200911_164852-667d00b0.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context-20200911_164852.log.json) |
-| FCN | HRNetV2p-W48 | 480x480 | 80000 | - | - | 45.84 | 47.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_80k_pascal_context.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context_20200911_155322-847a6711.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context-20200911_155322.log.json) |
-
-### Pascal Context 59
-
-| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
-| ------ | ------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
-| FCN | HRNetV2p-W48 | 480x480 | 40000 | - | - | 50.33 | 52.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_40k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59_20210410_122738-b808b8b2.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59-20210410_122738.log.json) |
-| FCN | HRNetV2p-W48 | 480x480 | 80000 | - | - | 51.12 | 53.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59_20210411_003240-3ae7081e.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59-20210411_003240.log.json) |
spaces/AnimeStudio/anime-models/app.py
DELETED
@@ -1,198 +0,0 @@
-import gradio as gr
-import os
-import sys
-from pathlib import Path
-
-models = [
-    {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"},
-    {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"},
-    {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"},
-    {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"},
-    {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"},
-    {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"},
-    {"name": "OpenNiji", "url": "Korakoe/OpenNiji"},
-    {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"},
-    {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"},
-    {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"},
-    {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"},
-    {"name": "-------- TOP MODELS -------", "url": "WarriorMama777/AbyssOrangeMix"},
-    {"name": "Abyss Orange Mix 2", "url": "WarriorMama777/AbyssOrangeMix2"},
-    {"name": "Anything 3.0", "url": "Linaqruf/anything-v3.0"},
-    {"name": "Anything 3.1", "url": "cag/anything-v3-1"},
-    {"name": "Anything 3X", "url": "iZELX1/Anything-V3-X"},
-    {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"},
-    {"name": "Chillout App Factory","url": "stablediffusionapi/chillout-app-factory"},
-    {"name": "Classic Anime", "url": "nitrosocke/classic-anim-diffusion"},
-    {"name": "Cool Japan Diffusion 2.1.2", "url": "aipicasso/cool-japan-diffusion-2-1-2"},
-    {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"},
-    {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"},
-    {"name": "CyberPunk Anime", "url": "DGSpitzer/Cyberpunk-Anime-Diffusion"},
-    {"name": "Dark Sushi Mix", "url": "stablediffusionapi/dark-sushi-mix"},
-    {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"},
-    {"name": "Eimis Anime Diffusion", "url": "eimiss/EimisAnimeDiffusion_1.0v"},
-    {"name": "Ghibli Diffusion", "url": "nitrosocke/Ghibli-Diffusion"},
-    {"name": "GrapeFruit", "url": "iZELX1/Grapefruit"},
-    {"name": "GuoFeng 3", "url": "xiaolxl/GuoFeng3"},
-    {"name": "Meina Pastel", "url": "stablediffusionapi/meinapastel"},
-    {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"},
-    {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"},
-    {"name": "OpenNiji", "url": "Korakoe/OpenNiji"},
-    {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"},
-    {"name": "Protogen 2.2", "url": "darkstorm2150/Protogen_v2.2_Official_Release"},
-    {"name": "Protogen Infinity", "url": "darkstorm2150/Protogen_Infinity_Official_Release"},
-    {"name": "Protogen X 3.4", "url": "darkstorm2150/Protogen_x3.4_Official_Release"},
-    {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"},
-    {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"},
-    {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"},
-    {"name": "-------- ALL ANIME MODELS -------", "url": "WarriorMama777/AbyssOrangeMix"},
-    {"name": "7 Pa", "url": "AIARTCHAN/7pa"},
-    {"name": "A Certain Model", "url": "JosephusCheung/ACertainModel"},
-    {"name": "A Certain Thing", "url": "JosephusCheung/ACertainThing"},
-    {"name": "A Certainity", "url": "JosephusCheung/ACertainty"},
-    {"name": "Abyss Hell Hero", "url": "AIARTCHAN/AbyssHellHero"},
-    {"name": "Abyss Maple 3", "url": "AIARTCHAN/AbyssMapleVer3"},
-    {"name": "Abyss Orange Mix 2", "url": "WarriorMama777/AbyssOrangeMix2"},
-    {"name": "Abyss Orange Mix", "url": "WarriorMama777/AbyssOrangeMix"},
-    {"name": "AbyssHell 3", "url": "AIARTCHAN/AbyssHellVer3"},
-    {"name": "All 526 Animated", "url": "stablediffusionapi/all-526-animated"},
-    {"name": "Anidosmix 3", "url": "AIARTCHAN/anidosmixV2"},
-    {"name": "Anime Kawai Diffusion", "url": "Ojimi/anime-kawai-diffusion"},
-    {"name": "Anireal 3D V2", "url": "circulus/sd-anireal-3d-v2"},
-    {"name": "AnyLORA", "url": "kubanemil/AnyLORA"},
-    {"name": "Anything 2.1", "url": "swl-models/anything-v2.1"},
-    {"name": "Anything 3.0 Light", "url": "mm00/anything-v3.0-light"},
-    {"name": "Anything 3.0", "url": "Linaqruf/anything-v3.0"},
-    {"name": "Anything 3.1", "url": "cag/anything-v3-1"},
-    {"name": "Anything 3X", "url": "iZELX1/Anything-V3-X"},
-    {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"},
-    {"name": "Anything Else 4", "url": "stablediffusionapi/anythingelse-v4"},
-    {"name": "Anything Else 5", "url": "stablediffusionapi/anything-v5"},
-    {"name": "Arcane Diffusion", "url": "nitrosocke/Arcane-Diffusion"},
-    {"name": "Archer Diffusion", "url": "nitrosocke/archer-diffusion"},
-    {"name": "Asian Mix", "url": "D1b4l4p/AsianMix"},
-    {"name": "Blood Orange Mix", "url": "WarriorMama777/BloodOrangeMix"},
-    {"name": "Cetusmix", "url": "stablediffusionapi/cetusmix"},
-    {"name": "Chillout App Factory","url": "stablediffusionapi/chillout-app-factory"},
-    {"name": "Classic Anime", "url": "nitrosocke/classic-anim-diffusion"},
-    {"name": "Cool Japan Diffusion 2.1.2", "url": "aipicasso/cool-japan-diffusion-2-1-2"},
-    {"name": "Cosmic Babes", "url": "stablediffusionapi/cosmic-babes"},
-    {"name": "Counterfeit 1.0", "url": "gsdf/counterfeit-v1.0"},
-    {"name": "Counterfeit 2", "url": "gsdf/Counterfeit-V2.0"},
-    {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"},
-    {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"},
-    {"name": "CyberPunk Anime", "url": "DGSpitzer/Cyberpunk-Anime-Diffusion"},
-    {"name": "Dark Sushi Mix", "url": "stablediffusionapi/dark-sushi-mix"},
-    {"name": "Dash Sushi 25d", "url": "stablediffusionapi/dark-sushi-25d"},
-    {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"},
-    {"name": "DucHaiten Anime", "url": "DucHaiten/DucHaitenAnime"},
-    {"name": "Eerie Orange Mix", "url": "WarriorMama777/EerieOrangeMix"},
-    {"name": "Eimis Anime Diffusion", "url": "eimiss/EimisAnimeDiffusion_1.0v"},
-    {"name": "Ghibli Diffusion", "url": "nitrosocke/Ghibli-Diffusion"},
-    {"name": "GrapeFruit", "url": "iZELX1/Grapefruit"},
-    {"name": "GuoFeng 3", "url": "xiaolxl/GuoFeng3"},
-    {"name": "Icomix 2", "url": "stablediffusionapi/icomix-2"},
-    {"name": "InkPunk Diffusion", "url": "Envvi/Inkpunk-Diffusion"},
-    {"name": "Mama Orange Mixs", "url": "WarriorMama777/OrangeMixs"},
-    {"name": "Meina Alter", "url": "stablediffusionapi/meinaalter"},
-    {"name": "Meina Pastel", "url": "stablediffusionapi/meinapastel"},
-    {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"},
-    {"name": "Mix Pro V4", "url": "AIARTCHAN/MIX-Pro-V4"},
-    {"name": "NeverEnding-Dream", "url": "Lykon/NeverEnding-Dream"},
-    {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"},
-    {"name": "OpenNiji", "url": "Korakoe/OpenNiji"},
-    {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"},
-    {"name": "Protogen 2.2", "url": "darkstorm2150/Protogen_v2.2_Official_Release"},
-    {"name": "Protogen Infinity", "url": "darkstorm2150/Protogen_Infinity_Official_Release"},
-    {"name": "Protogen X 3.4", "url": "darkstorm2150/Protogen_x3.4_Official_Release"},
-    {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"},
-    {"name": "Rev Animated", "url": "coreml/coreml-ReV-Animated"},
-    {"name": "Rev Animated", "url": "LottePeisch/RevAnimated-Diffusers"},
-    {"name": "Something V 2.2","url": "NoCrypt/SomethingV2_2"},
-    {"name": "Something V2","url": "NoCrypt/SomethingV2"},
-    {"name": "Three Delicacy", "url": "stablediffusionapi/three-delicacy"},
-    {"name": "Three Delicacy wonto", "url": "stablediffusionapi/three-delicacy-wonto"},
-    {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"},
-    {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"}
-]
-
-current_model = models[0]
-
-text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")
-
-models2 = []
-for model in models:
-    model_url = f"models/{model['url']}"
-    loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
-    models2.append(loaded_model)
-
-
-def text_it(inputs, text_gen=text_gen):
-    return text_gen(inputs)
-
-
-def set_model(current_model_index):
-    global current_model
-    current_model = models[current_model_index]
-    return gr.update(value=f"{current_model['name']}")
-
-
-def send_it(inputs, model_choice):
-    proc = models2[model_choice]
-    return proc(inputs)
-
-
-with gr.Blocks() as myface:
-    gr.HTML(
-
-    )
-
-    with gr.Row():
-        with gr.Row():
-            input_text = gr.Textbox(label="Prompt idea", placeholder="", lines=1)
-            # Model selection dropdown
-            model_name1 = gr.Dropdown(
-                label="Choose Model",
-                choices=[m["name"] for m in models],
-                type="index",
-                value=current_model["name"],
-                interactive=True,
-            )
-        with gr.Row():
-            see_prompts = gr.Button("Generate Prompts")
-            run = gr.Button("Generate Images", variant="primary")
-
-    with gr.Row():
-        output1 = gr.Image(label="")
-        output2 = gr.Image(label="")
-        output3 = gr.Image(label="")
-    with gr.Row():
-        magic1 = gr.Textbox(label="Generated Prompt", lines=2)
-        magic2 = gr.Textbox(label="Generated Prompt", lines=2)
-        magic3 = gr.Textbox(label="Generated Prompt", lines=2)
-    with gr.Row():
-        output4 = gr.Image(label="")
-        output5 = gr.Image(label="")
-        output6 = gr.Image(label="")
-    with gr.Row():
-        magic4 = gr.Textbox(label="Generated Prompt", lines=2)
-        magic5 = gr.Textbox(label="Generated Prompt", lines=2)
-        magic6 = gr.Textbox(label="Generated Prompt", lines=2)
-
-    model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6])
-
-    run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
-    run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
-    run.click(send_it, inputs=[magic3, model_name1], outputs=[output3])
-    run.click(send_it, inputs=[magic4, model_name1], outputs=[output4])
-    run.click(send_it, inputs=[magic5, model_name1], outputs=[output5])
-    run.click(send_it, inputs=[magic6, model_name1], outputs=[output6])
-
-    see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
-    see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])
-    see_prompts.click(text_it, inputs=[input_text], outputs=[magic3])
-    see_prompts.click(text_it, inputs=[input_text], outputs=[magic4])
-    see_prompts.click(text_it, inputs=[input_text], outputs=[magic5])
-    see_prompts.click(text_it, inputs=[input_text], outputs=[magic6])
-
-myface.queue(concurrency_count=200)
-myface.launch(inline=True, show_api=False, max_threads=400)
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Chat-mode.md
DELETED
@@ -1,39 +0,0 @@
-## Chat characters
-
-Custom chat mode characters are defined by `.yaml` files inside the `characters` folder. An example is included: [Example.yaml](https://github.com/oobabooga/text-generation-webui/blob/main/characters/Example.yaml).
-
-The following fields may be defined:
-
-| Field | Description |
-|-------|-------------|
-| `name` or `bot` | The character's name. |
-| `context` | A string that appears at the top of the prompt. It usually contains a description of the character's personality and a few example messages. |
-| `greeting` (optional) | The character's opening message. It appears when the character is first loaded or when the history is cleared. |
-| `your_name` or `user` (optional) | Your name. This overwrites what you had previously written in the `Your name` field in the interface. |
-
-#### Special tokens
-
-The following replacements happen when the prompt is generated, and they apply to the `context` and `greeting` fields:
-
-* `{{char}}` and `<BOT>` get replaced with the character's name.
-* `{{user}}` and `<USER>` get replaced with your name.
-
-#### How do I add a profile picture for my character?
-
-Put an image with the same name as your character's `.yaml` file into the `characters` folder. For example, if your bot is `Character.yaml`, add `Character.jpg` or `Character.png` to the folder.
-
-#### Is the chat history truncated in the prompt?
-
-Once your prompt reaches the `truncation_length` parameter (2048 by default), old messages will be removed one at a time. The context string will always stay at the top of the prompt and will never get truncated.
-
-## Chat styles
-
-Custom chat styles can be defined in the `text-generation-webui/css` folder. Simply create a new file with name starting in `chat_style-` and ending in `.css` and it will automatically appear in the "Chat style" dropdown menu in the interface. Examples:
-
-```
-chat_style-cai-chat.css
-chat_style-TheEncrypted777.css
-chat_style-wpp.css
-```
-
-You should use the same class names as in `chat_style-cai-chat.css` in your custom style.
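The special-token replacement this doc describes is plain string substitution; a minimal Python sketch (the helper name `render_character_text` and the sample strings are hypothetical, not the webui's own API):

```python
# Minimal sketch of the {{char}}/{{user}} substitution described above.
def render_character_text(template: str, char_name: str, user_name: str) -> str:
    for token in ("{{char}}", "<BOT>"):
        template = template.replace(token, char_name)
    for token in ("{{user}}", "<USER>"):
        template = template.replace(token, user_name)
    return template

print(render_character_text("{{char}}: Hello, {{user}}!", "Example", "You"))
# -> "Example: Hello, You!"
```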
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/__init__.py
DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/tags.py
DELETED
@@ -1,487 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import logging
-import platform
-import sys
-import sysconfig
-from importlib.machinery import EXTENSION_SUFFIXES
-from typing import (
-    Dict,
-    FrozenSet,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    Union,
-    cast,
-)
-
-from . import _manylinux, _musllinux
-
-logger = logging.getLogger(__name__)
-
-PythonVersion = Sequence[int]
-MacVersion = Tuple[int, int]
-
-INTERPRETER_SHORT_NAMES: Dict[str, str] = {
-    "python": "py",  # Generic.
-    "cpython": "cp",
-    "pypy": "pp",
-    "ironpython": "ip",
-    "jython": "jy",
-}
-
-
-_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
-
-
-class Tag:
-    """
-    A representation of the tag triple for a wheel.
-
-    Instances are considered immutable and thus are hashable. Equality checking
-    is also supported.
-    """
-
-    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
-
-    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
-        self._interpreter = interpreter.lower()
-        self._abi = abi.lower()
-        self._platform = platform.lower()
-        # The __hash__ of every single element in a Set[Tag] will be evaluated each time
-        # that a set calls its `.disjoint()` method, which may be called hundreds of
-        # times when scanning a page of links for packages with tags matching that
-        # Set[Tag]. Pre-computing the value here produces significant speedups for
-        # downstream consumers.
-        self._hash = hash((self._interpreter, self._abi, self._platform))
-
-    @property
-    def interpreter(self) -> str:
-        return self._interpreter
-
-    @property
-    def abi(self) -> str:
-        return self._abi
-
-    @property
-    def platform(self) -> str:
-        return self._platform
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, Tag):
-            return NotImplemented
-
-        return (
-            (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
-            and (self._platform == other._platform)
-            and (self._abi == other._abi)
-            and (self._interpreter == other._interpreter)
-        )
-
-    def __hash__(self) -> int:
-        return self._hash
-
-    def __str__(self) -> str:
-        return f"{self._interpreter}-{self._abi}-{self._platform}"
-
-    def __repr__(self) -> str:
-        return f"<{self} @ {id(self)}>"
-
-
-def parse_tag(tag: str) -> FrozenSet[Tag]:
-    """
-    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
-
-    Returning a set is required due to the possibility that the tag is a
-    compressed tag set.
-    """
-    tags = set()
-    interpreters, abis, platforms = tag.split("-")
-    for interpreter in interpreters.split("."):
-        for abi in abis.split("."):
-            for platform_ in platforms.split("."):
-                tags.add(Tag(interpreter, abi, platform_))
-    return frozenset(tags)
-
-
-def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
-    value = sysconfig.get_config_var(name)
-    if value is None and warn:
-        logger.debug(
-            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
-        )
-    return value
-
-
-def _normalize_string(string: str) -> str:
-    return string.replace(".", "_").replace("-", "_")
-
-
-def _abi3_applies(python_version: PythonVersion) -> bool:
-    """
-    Determine if the Python version supports abi3.
-
-    PEP 384 was first implemented in Python 3.2.
-    """
-    return len(python_version) > 1 and tuple(python_version) >= (3, 2)
-
-
-def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
-    py_version = tuple(py_version)  # To allow for version comparison.
-    abis = []
-    version = _version_nodot(py_version[:2])
-    debug = pymalloc = ucs4 = ""
-    with_debug = _get_config_var("Py_DEBUG", warn)
-    has_refcount = hasattr(sys, "gettotalrefcount")
-    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
-    # extension modules is the best option.
-    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
-    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
-    if with_debug or (with_debug is None and (has_refcount or has_ext)):
-        debug = "d"
-    if py_version < (3, 8):
-        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
-        if with_pymalloc or with_pymalloc is None:
-            pymalloc = "m"
-        if py_version < (3, 3):
-            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
-            if unicode_size == 4 or (
-                unicode_size is None and sys.maxunicode == 0x10FFFF
-            ):
-                ucs4 = "u"
-    elif debug:
-        # Debug builds can also load "normal" extension modules.
-        # We can also assume no UCS-4 or pymalloc requirement.
-        abis.append(f"cp{version}")
-    abis.insert(
-        0,
-        "cp{version}{debug}{pymalloc}{ucs4}".format(
-            version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
-        ),
-    )
-    return abis
-
-
-def cpython_tags(
-    python_version: Optional[PythonVersion] = None,
-    abis: Optional[Iterable[str]] = None,
-    platforms: Optional[Iterable[str]] = None,
-    *,
-    warn: bool = False,
-) -> Iterator[Tag]:
-    """
-    Yields the tags for a CPython interpreter.
-
-    The tags consist of:
-    - cp<python_version>-<abi>-<platform>
-    - cp<python_version>-abi3-<platform>
-    - cp<python_version>-none-<platform>
-    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
-
-    If python_version only specifies a major version then user-provided ABIs and
-    the 'none' ABI tag will be used.
-
-    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
-    their normal position and not at the beginning.
-    """
-    if not python_version:
-        python_version = sys.version_info[:2]
-
-    interpreter = f"cp{_version_nodot(python_version[:2])}"
-
-    if abis is None:
-        if len(python_version) > 1:
-            abis = _cpython_abis(python_version, warn)
-        else:
-            abis = []
-    abis = list(abis)
-    # 'abi3' and 'none' are explicitly handled later.
-    for explicit_abi in ("abi3", "none"):
-        try:
-            abis.remove(explicit_abi)
-        except ValueError:
-            pass
-
-    platforms = list(platforms or platform_tags())
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-    if _abi3_applies(python_version):
-        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
-    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
-
-    if _abi3_applies(python_version):
-        for minor_version in range(python_version[1] - 1, 1, -1):
-            for platform_ in platforms:
-                interpreter = "cp{version}".format(
-                    version=_version_nodot((python_version[0], minor_version))
-                )
-                yield Tag(interpreter, "abi3", platform_)
-
-
-def _generic_abi() -> Iterator[str]:
-    abi = sysconfig.get_config_var("SOABI")
-    if abi:
-        yield _normalize_string(abi)
-
-
-def generic_tags(
-    interpreter: Optional[str] = None,
-    abis: Optional[Iterable[str]] = None,
-    platforms: Optional[Iterable[str]] = None,
-    *,
-    warn: bool = False,
-) -> Iterator[Tag]:
-    """
-    Yields the tags for a generic interpreter.
-
-    The tags consist of:
-    - <interpreter>-<abi>-<platform>
-
-    The "none" ABI will be added if it was not explicitly provided.
-    """
-    if not interpreter:
-        interp_name = interpreter_name()
-        interp_version = interpreter_version(warn=warn)
-        interpreter = "".join([interp_name, interp_version])
-    if abis is None:
-        abis = _generic_abi()
-    platforms = list(platforms or platform_tags())
-    abis = list(abis)
-    if "none" not in abis:
-        abis.append("none")
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-
-
-def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
-    """
-    Yields Python versions in descending order.
-
-    After the latest version, the major-only version will be yielded, and then
-    all previous versions of that major version.
-    """
-    if len(py_version) > 1:
-        yield f"py{_version_nodot(py_version[:2])}"
-    yield f"py{py_version[0]}"
-    if len(py_version) > 1:
-        for minor in range(py_version[1] - 1, -1, -1):
-            yield f"py{_version_nodot((py_version[0], minor))}"
-
-
-def compatible_tags(
-    python_version: Optional[PythonVersion] = None,
-    interpreter: Optional[str] = None,
-    platforms: Optional[Iterable[str]] = None,
-) -> Iterator[Tag]:
-    """
-    Yields the sequence of tags that are compatible with a specific version of Python.
-
-    The tags consist of:
-    - py*-none-<platform>
-    - <interpreter>-none-any  # ... if `interpreter` is provided.
-    - py*-none-any
-    """
-    if not python_version:
-        python_version = sys.version_info[:2]
-    platforms = list(platforms or platform_tags())
-    for version in _py_interpreter_range(python_version):
-        for platform_ in platforms:
-            yield Tag(version, "none", platform_)
-    if interpreter:
-        yield Tag(interpreter, "none", "any")
-    for version in _py_interpreter_range(python_version):
-        yield Tag(version, "none", "any")
-
-
-def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
-    if not is_32bit:
-        return arch
-
-    if arch.startswith("ppc"):
-        return "ppc"
-
-    return "i386"
-
-
-def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
-    formats = [cpu_arch]
-    if cpu_arch == "x86_64":
-        if version < (10, 4):
-            return []
-        formats.extend(["intel", "fat64", "fat32"])
-
-    elif cpu_arch == "i386":
-        if version < (10, 4):
-            return []
-        formats.extend(["intel", "fat32", "fat"])
-
-    elif cpu_arch == "ppc64":
-        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
-        if version > (10, 5) or version < (10, 4):
-            return []
-        formats.append("fat64")
-
-    elif cpu_arch == "ppc":
-        if version > (10, 6):
-            return []
-        formats.extend(["fat32", "fat"])
-
-    if cpu_arch in {"arm64", "x86_64"}:
-        formats.append("universal2")
-
-    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
-        formats.append("universal")
-
-    return formats
-
-
-def mac_platforms(
-    version: Optional[MacVersion] = None, arch: Optional[str] = None
-) -> Iterator[str]:
-    """
-    Yields the platform tags for a macOS system.
-
-    The `version` parameter is a two-item tuple specifying the macOS version to
-    generate platform tags for. The `arch` parameter is the CPU architecture to
-    generate platform tags for. Both parameters default to the appropriate value
-    for the current system.
-    """
-    version_str, _, cpu_arch = platform.mac_ver()
-    if version is None:
-        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
-    else:
-        version = version
-    if arch is None:
-        arch = _mac_arch(cpu_arch)
-    else:
-        arch = arch
-
-    if (10, 0) <= version and version < (11, 0):
-        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
-        # "minor" version number. The major version was always 10.
-        for minor_version in range(version[1], -1, -1):
-            compat_version = 10, minor_version
-            binary_formats = _mac_binary_formats(compat_version, arch)
-            for binary_format in binary_formats:
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=10, minor=minor_version, binary_format=binary_format
-                )
-
-    if version >= (11, 0):
-        # Starting with Mac OS 11, each yearly release bumps the major version
-        # number. The minor versions are now the midyear updates.
-        for major_version in range(version[0], 10, -1):
-            compat_version = major_version, 0
-            binary_formats = _mac_binary_formats(compat_version, arch)
-            for binary_format in binary_formats:
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=major_version, minor=0, binary_format=binary_format
-                )
-
-    if version >= (11, 0):
-        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
-        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
-        # releases exist.
-        #
-        # However, the "universal2" binary format can have a
-        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
-        # that version of macOS.
-        if arch == "x86_64":
-            for minor_version in range(16, 3, -1):
-                compat_version = 10, minor_version
-                binary_formats = _mac_binary_formats(compat_version, arch)
-                for binary_format in binary_formats:
-                    yield "macosx_{major}_{minor}_{binary_format}".format(
-                        major=compat_version[0],
-                        minor=compat_version[1],
-                        binary_format=binary_format,
-                    )
-        else:
-            for minor_version in range(16, 3, -1):
-                compat_version = 10, minor_version
-                binary_format = "universal2"
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=compat_version[0],
-                    minor=compat_version[1],
-                    binary_format=binary_format,
-                )
-
-
-def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
-    linux = _normalize_string(sysconfig.get_platform())
-    if is_32bit:
-        if linux == "linux_x86_64":
-            linux = "linux_i686"
-        elif linux == "linux_aarch64":
-            linux = "linux_armv7l"
-    _, arch = linux.split("_", 1)
-    yield from _manylinux.platform_tags(linux, arch)
-    yield from _musllinux.platform_tags(arch)
-    yield linux
-
-
-def _generic_platforms() -> Iterator[str]:
-    yield _normalize_string(sysconfig.get_platform())
-
-
-def platform_tags() -> Iterator[str]:
-    """
-    Provides the platform tags for this installation.
-    """
-    if platform.system() == "Darwin":
-        return mac_platforms()
-    elif platform.system() == "Linux":
-        return _linux_platforms()
-    else:
-        return _generic_platforms()
-
-
-def interpreter_name() -> str:
-    """
-    Returns the name of the running interpreter.
-    """
-    name = sys.implementation.name
-    return INTERPRETER_SHORT_NAMES.get(name) or name
-
-
-def interpreter_version(*, warn: bool = False) -> str:
-    """
-    Returns the version of the running interpreter.
-    """
-    version = _get_config_var("py_version_nodot", warn=warn)
-    if version:
-        version = str(version)
-    else:
-        version = _version_nodot(sys.version_info[:2])
-    return version
-
-
-def _version_nodot(version: PythonVersion) -> str:
-    return "".join(map(str, version))
-
-
-def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
-    """
-    Returns the sequence of tag triples for the running interpreter.
-
-    The order of the sequence corresponds to priority order for the
-    interpreter, from most to least important.
-    """
-
-    interp_name = interpreter_name()
-    if interp_name == "cp":
-        yield from cpython_tags(warn=warn)
-    else:
-        yield from generic_tags()
-
-    if interp_name == "pp":
-        yield from compatible_tags(interpreter="pp3")
-    else:
-        yield from compatible_tags()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py
DELETED
@@ -1,68 +0,0 @@
-import re
-import textwrap
-import email.message
-
-from ._text import FoldedCase
-
-
-class Message(email.message.Message):
-    multiple_use_keys = set(
-        map(
-            FoldedCase,
-            [
-                'Classifier',
-                'Obsoletes-Dist',
-                'Platform',
-                'Project-URL',
-                'Provides-Dist',
-                'Provides-Extra',
-                'Requires-Dist',
-                'Requires-External',
-                'Supported-Platform',
-                'Dynamic',
-            ],
-        )
-    )
-    """
-    Keys that may be indicated multiple times per PEP 566.
-    """
-
-    def __new__(cls, orig: email.message.Message):
-        res = super().__new__(cls)
-        vars(res).update(vars(orig))
-        return res
-
-    def __init__(self, *args, **kwargs):
-        self._headers = self._repair_headers()
-
-    # suppress spurious error from mypy
-    def __iter__(self):
-        return super().__iter__()
-
-    def _repair_headers(self):
-        def redent(value):
-            "Correct for RFC822 indentation"
-            if not value or '\n' not in value:
-                return value
-            return textwrap.dedent(' ' * 8 + value)
-
-        headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
-        if self._payload:
-            headers.append(('Description', self.get_payload()))
-        return headers
-
-    @property
-    def json(self):
-        """
-        Convert PackageMetadata to a JSON-compatible format
-        per PEP 0566.
-        """
-
-        def transform(key):
-            value = self.get_all(key) if key in self.multiple_use_keys else self[key]
-            if key == 'Keywords':
-                value = re.split(r'\s+', value)
-            tk = key.lower().replace('-', '_')
-            return tk, value
-
-        return dict(map(transform, map(FoldedCase, self)))
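The `json` property above is the same PEP 566 view that the standard library exposes; a brief sketch via `importlib.metadata` (the `.json` accessor is assumed to be available, i.e. Python 3.10 or newer):

```python
# Sketch: the JSON-style view of package metadata that the adapter produces.
from importlib.metadata import metadata

md = metadata("pip")          # any installed distribution works here
print(md["Name"], md["Version"])
print(sorted(md.json)[:5])    # lowercased, underscore-separated keys
```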
spaces/AzumaSeren100/XuanShen-Bert-VITS2/preprocess_text.py
DELETED
@@ -1,65 +0,0 @@
-import json
-from random import shuffle
-
-import tqdm
-from text.cleaner import clean_text
-from collections import defaultdict
-stage = [1,2,3]
-
-transcription_path = 'filelists/xg.list'
-train_path = 'filelists/train.list'
-val_path = 'filelists/val.list'
-config_path = "configs/config.json"
-val_per_spk = 4
-max_val_total = 8
-
-if 1 in stage:
-    with open( transcription_path+'.cleaned', 'w', encoding='utf-8') as f:
-        for line in tqdm.tqdm(open(transcription_path, encoding='utf-8').readlines()):
-            try:
-                utt, spk, language, text = line.strip().split('|')
-                #language = "ZH"
-                norm_text, phones, tones, word2ph = clean_text(text, language)
-                f.write('{}|{}|{}|{}|{}|{}|{}\n'.format(utt, spk, language, norm_text, ' '.join(phones),
-                                                        " ".join([str(i) for i in tones]),
-                                                        " ".join([str(i) for i in word2ph])))
-            except Exception as error :
-                print("err!", utt, error)
-
-if 2 in stage:
-    spk_utt_map = defaultdict(list)
-    spk_id_map = {}
-    current_sid = 0
-
-    with open( transcription_path+'.cleaned', encoding='utf-8') as f:
-        for line in f.readlines():
-            utt, spk, language, text, phones, tones, word2ph = line.strip().split('|')
-            spk_utt_map[spk].append(line)
-            if spk not in spk_id_map.keys():
-                spk_id_map[spk] = current_sid
-                current_sid += 1
-    train_list = []
-    val_list = []
-
-    for spk, utts in spk_utt_map.items():
-        shuffle(utts)
-        val_list+=utts[:val_per_spk]
-        train_list+=utts[val_per_spk:]
-    if len(val_list) > max_val_total:
-        train_list+=val_list[max_val_total:]
-        val_list = val_list[:max_val_total]
-
-    with open( train_path,"w", encoding='utf-8') as f:
-        for line in train_list:
-            f.write(line)
-
-    with open(val_path, "w", encoding='utf-8') as f:
-        for line in val_list:
-            f.write(line)
-
-if 3 in stage:
-    assert 2 in stage
-    config = json.load(open(config_path, encoding='utf-8'))
-    config["data"]['spk2id'] = spk_id_map
-    with open(config_path, 'w', encoding='utf-8') as f:
-        json.dump(config, f, indent=2, ensure_ascii=False)
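Stage 2 above implements a per-speaker train/validation split with a global cap on the validation set; a self-contained sketch of that policy on dummy data (the speaker names and utterance counts are illustrative):

```python
# Per-speaker split: take val_per_spk utterances per speaker for validation,
# then cap the validation set at max_val_total and push overflow back to train.
from collections import defaultdict
from random import shuffle

val_per_spk, max_val_total = 4, 8
spk_utt_map = defaultdict(list)
for spk in ("alice", "bob", "carol"):                 # dummy speakers
    spk_utt_map[spk] = [f"{spk}_{i}" for i in range(10)]

train_list, val_list = [], []
for spk, utts in spk_utt_map.items():
    shuffle(utts)
    val_list += utts[:val_per_spk]
    train_list += utts[val_per_spk:]
if len(val_list) > max_val_total:
    train_list += val_list[max_val_total:]
    val_list = val_list[:max_val_total]

print(len(train_list), len(val_list))  # -> 22 8
```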
spaces/Benson/text-generation/Examples/Bubble Shooter Genies Apk.md
DELETED
@@ -1,72 +0,0 @@
-
-<h1>Burbuja Shooter Genios APK: Un divertido y colorido juego casual</h1>
-<p>Si usted está buscando un juego relajante y entretenido para jugar en su dispositivo Android, es posible que desee probar Bubble Shooter Genies APK. Este es un clásico juego de burbujas y rompecabezas de aventura que te mantendrá enganchado durante horas. En este artículo, le diremos qué es Bubble Shooter Genies APK, cómo descargarlo e instalarlo, y por qué debe jugar. </p>
-<h2>¿Qué es Bubble Shooter Genies APK? </h2>
-<p>Bubble Shooter Genies APK es un juego casual desarrollado por PUZZLEJOY. El APK ha estado disponible desde noviembre de 2016. Bubble Shooter Genies ha sido descargado más de 50 millones de veces. Es altamente clasificado. Está clasificado 4.68 de 5 estrellas, basado en 237,825 calificaciones. La última actualización de la aplicación fue el 7 de junio de 2023. </p>
-<h2>bubble shooter genies apk</h2><br /><p><b><b>Download</b> »»» <a href="https://bltlly.com/2v6MPi">https://bltlly.com/2v6MPi</a></b></p><br /><br />
-<h3>El juego de Bubble Shooter Genios APK</h3>
-<p>El juego de Bubble Shooter Genies APK es simple y divertido. Tienes que coincidir 3 o más burbujas iguales y pop ellos. Puedes hacer estallar burbujas para rescatar a los lindos dragones bebés. También puedes usar potentes boosters y combos para limpiar los niveles más rápido. Hay más de 1000 niveles para jugar, cada uno con diferentes retos y metas. También puedes competir con tus amigos y otros jugadores en la clasificación. </p>
-<h3>Las características de Bubble Shooter Genios APK</h3>
-<p>Bubble Shooter Genies APK tiene muchas características que lo hacen un gran juego para jugar. Algunos de ellos son:</p>
-<ul>
-<li>Tiene impresionantes gráficos y animaciones que crean un mundo colorido y mágico. </li>
-<li> Tiene música relajante y relajante y efectos de sonido que mejoran la experiencia de juego. </li>
-<li> Tiene controles fáciles y suaves que lo hacen adecuado para jugadores de todas las edades. </li>
-<li> No se requiere conexión wifi o internet, por lo que puede jugar en cualquier momento y en cualquier lugar. </li>
-<li> Tiene actualizaciones regulares que agregan nuevos niveles, características y mejoras. </li>
-</ul>
-<h2> ¿Cómo descargar e instalar Bubble Shooter Genies APK? </h2>
-
-<h3>Los requisitos para Bubble Shooter Genios APK</h3>
-<p>Antes de descargar e instalar Bubble Shooter Genies APK, es necesario asegurarse de que su dispositivo cumple con estos requisitos:</p>
-<ul>
-<li>Tu dispositivo debe tener la versión de Android 4.4 o superior. </li>
-<li>Su dispositivo debe tener al menos 90 MB de espacio de almacenamiento libre. </li>
-<li>Su dispositivo debe permitir la instalación desde fuentes desconocidas. Puede habilitar esta opción yendo a Configuración > Seguridad > Fuentes desconocidas.</li>
-</ul>
-<h3> Los pasos para descargar e instalar Bubble Shooter Genies APK</h3>
-<p>Después de haber comprobado los requisitos, puede seguir estos pasos para descargar e instalar Bubble Shooter Genies APK:</p>
-<ol>
-<li>Ir a [este enlace]( 1 ) para descargar la última versión de Bubble Shooter Genies APK.</li>
-<li>Una vez que la descarga se haya completado, busque el archivo en el administrador de archivos de su dispositivo y toque en él. </li>
-<li>Siga las instrucciones en la pantalla para instalar la aplicación. </li>
-<li>Iniciar la aplicación y disfrutar de jugar Bubble Shooter Genies.</li>
-</ol>
-<h2>¿Por qué debe jugar Bubble Shooter Genies APK? </h2>
-<p>Bubble Shooter Genies APK es un juego que le proporcionará horas de diversión y entretenimiento. Aquí hay algunas razones por las que debe jugar:</p>
-<h3>Los beneficios de jugar Bubble Shooter Genios APK</h3>
-<p>Jugar Bubble Shooter Genies APK puede tener muchos beneficios para usted, tales como:</p>
-<ul>
-<li> Puede mejorar su concentración y enfoque, ya que tiene que apuntar y disparar las burbujas con precisión. </li>
-<li> Puede mejorar sus habilidades de resolución de problemas y pensamiento lógico, ya que tiene que planificar y elaborar estrategias de la mejor manera de limpiar los niveles. </li>
-<li>Puede reducir el estrés y la ansiedad, ya que puede disfrutar del juego relajante y relajante. </li>
-<li> Puede aumentar su estado de ánimo y felicidad, ya que puede experimentar la alegría de hacer estallar burbujas y rescatar dragones bebé. </li>
-</ul>
-<h3> Los consejos y trucos para jugar Bubble Shooter Genies APK</h3>
-
-<ul>
-<li>Usa la línea de puntería para guiar tus disparos y apunta a los grupos de burbujas del mismo color. </li>
-<li>Trate de hacer estallar más burbujas con menos disparos para ganar más puntos y estrellas. </li>
-<li>Usa los boosters y combos sabiamente para eliminar los obstáculos y los niveles difíciles. </li>
-<li>Preste atención a los colores de burbuja en el lanzador y cámbielos si es necesario. </li>
-<li>Explora los diferentes modos de juego y desafíos para poner a prueba tus habilidades y divertirte más. </li>
-</ul>
-<h2>Conclusión</h2>
-<p>Bubble Shooter Genies APK es un divertido y colorido juego casual que se puede jugar en su dispositivo Android. Tiene un juego simple y adictivo, impresionantes gráficos y animaciones, música relajante y efectos de sonido, controles fáciles y suaves, no se requiere conexión wifi o internet, actualizaciones regulares y más de 1000 niveles para jugar. También puede disfrutar de los beneficios de jugar Bubble Shooter Genies APK, tales como mejorar su concentración, enfoque, resolución de problemas, pensamiento lógico, alivio del estrés, estado de ánimo, y la felicidad. También puede utilizar los consejos y trucos para jugar Bubble Shooter Genies APK, como el uso de la línea de puntería, hacer estallar más burbujas con menos disparos, utilizando los boosters y combos sabiamente, prestando atención a los colores de burbuja en el lanzador, intercambiándolos si es necesario, y explorar los diferentes modos de juego y desafíos. Si desea descargar e instalar Bubble Shooter Genies APK en su dispositivo Android, puede seguir los pasos mencionados anteriormente. Esperamos que haya encontrado este artículo útil e informativo. ¡Feliz disparo de burbuja! </p>
-<p></p>
-<h2>Preguntas frecuentes</h2>
-<p>Aquí hay algunas preguntas frecuentes sobre Bubble Shooter Genies APK:</p>
-<ol>
-<li> ¿Es Bubble Shooter Genies APK libre para jugar? </li>
-<p>Sí, Bubble Shooter Genies APK es gratis para jugar. Sin embargo, contiene anuncios y compras en la aplicación que puede desactivar o comprar si lo desea. </p>
-<li> ¿Cómo puedo actualizar Bubble Shooter Genies APK? </li>
-
-<li> ¿Cómo puedo contactar con el desarrollador de Bubble Shooter Genies APK? </li>
-<p>Puede ponerse en contacto con el desarrollador de Bubble Shooter Genies APK enviando un correo electrónico a [email protected] o visitando su página de Facebook en https://www.facebook.com/BubbleShooterGenies/.</p>
-<li> ¿Cuáles son algunos juegos similares a Bubble Shooter Genies APK? </li>
-<p>Algunos juegos similares a Bubble Shooter Genies APK son Bubble Witch Saga 3, Panda Pop, Angry Birds POP, y Bubble Island 2.</p>
-<li> ¿Puedo jugar Bubble Shooter Genies APK offline? </li>
-<p>Sí, puede jugar Bubble Shooter Genies APK sin conexión. Sin embargo, algunas características pueden no estar disponibles o actualizados cuando está fuera de línea. </p>
-</ol></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/BernardoOlisan/vqganclip/app.py
DELETED
@@ -1,392 +0,0 @@
-import os
-os.system('pip freeze')
-
-import torch
-torch.hub.download_url_to_file('https://heibox.uni-heidelberg.de/d/a7530b09fed84f80a887/files/?p=%2Fconfigs%2Fmodel.yaml&dl=1', 'vqgan_imagenet_f16_16384.yaml')
-torch.hub.download_url_to_file('https://heibox.uni-heidelberg.de/d/a7530b09fed84f80a887/files/?p=%2Fckpts%2Flast.ckpt&dl=1', 'vqgan_imagenet_f16_16384.ckpt')
-import argparse
-import math
-from pathlib import Path
-import sys
-sys.path.insert(1, './taming-transformers')
-from base64 import b64encode
-from omegaconf import OmegaConf
-from PIL import Image
-from taming.models import cond_transformer, vqgan
-import taming.modules
-from torch import nn, optim
-from torch.nn import functional as F
-from torchvision import transforms
-from torchvision.transforms import functional as TF
-from tqdm.notebook import tqdm
-from CLIP import clip
-import kornia.augmentation as K
-import numpy as np
-import imageio
-from PIL import ImageFile, Image
-ImageFile.LOAD_TRUNCATED_IMAGES = True
-import gradio as gr
-torch.hub.download_url_to_file('https://images.pexels.com/photos/158028/bellingrath-gardens-alabama-landscape-scenic-158028.jpeg', 'garden.jpeg')
-torch.hub.download_url_to_file('https://images.pexels.com/photos/68767/divers-underwater-ocean-swim-68767.jpeg', 'coralreef.jpeg')
-torch.hub.download_url_to_file('https://images.pexels.com/photos/803975/pexels-photo-803975.jpeg', 'cabin.jpeg')
-def sinc(x):
-    return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
-def lanczos(x, a):
-    cond = torch.logical_and(-a < x, x < a)
-    out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
-    return out / out.sum()
-def ramp(ratio, width):
-    n = math.ceil(width / ratio + 1)
-    out = torch.empty([n])
-    cur = 0
-    for i in range(out.shape[0]):
-        out[i] = cur
-        cur += ratio
-    return torch.cat([-out[1:].flip([0]), out])[1:-1]
-def resample(input, size, align_corners=True):
-    n, c, h, w = input.shape
-    dh, dw = size
-    input = input.view([n * c, 1, h, w])
-    if dh < h:
-        kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
-        pad_h = (kernel_h.shape[0] - 1) // 2
-        input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
-        input = F.conv2d(input, kernel_h[None, None, :, None])
-    if dw < w:
-        kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
-        pad_w = (kernel_w.shape[0] - 1) // 2
-        input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
-        input = F.conv2d(input, kernel_w[None, None, None, :])
-    input = input.view([n, c, h, w])
-    return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
-class ReplaceGrad(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, x_forward, x_backward):
-        ctx.shape = x_backward.shape
-        return x_forward
-    @staticmethod
-    def backward(ctx, grad_in):
-        return None, grad_in.sum_to_size(ctx.shape)
-replace_grad = ReplaceGrad.apply
-class ClampWithGrad(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, input, min, max):
-        ctx.min = min
-        ctx.max = max
-        ctx.save_for_backward(input)
-        return input.clamp(min, max)
-    @staticmethod
-    def backward(ctx, grad_in):
-        input, = ctx.saved_tensors
-        return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None
-clamp_with_grad = ClampWithGrad.apply
-def vector_quantize(x, codebook):
-    d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
-    indices = d.argmin(-1)
-    x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
-    return replace_grad(x_q, x)
-class Prompt(nn.Module):
-    def __init__(self, embed, weight=1., stop=float('-inf')):
-        super().__init__()
-        self.register_buffer('embed', embed)
-        self.register_buffer('weight', torch.as_tensor(weight))
-        self.register_buffer('stop', torch.as_tensor(stop))
-    def forward(self, input):
-        input_normed = F.normalize(input.unsqueeze(1), dim=2)
-        embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
-        dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
-        dists = dists * self.weight.sign()
-        return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()
-def parse_prompt(prompt):
-    vals = prompt.rsplit(':', 2)
-    vals = vals + ['', '1', '-inf'][len(vals):]
-    return vals[0], float(vals[1]), float(vals[2])
-class MakeCutouts(nn.Module):
-    def __init__(self, cut_size, cutn, cut_pow=1.):
-        super().__init__()
-        self.cut_size = cut_size
-        self.cutn = cutn
-        self.cut_pow = cut_pow
-        self.augs = nn.Sequential(
-            # K.RandomHorizontalFlip(p=0.5),
-            # K.RandomVerticalFlip(p=0.5),
-            # K.RandomSolarize(0.01, 0.01, p=0.7),
-            # K.RandomSharpness(0.3,p=0.4),
-            # K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,1), ratio=(0.75,1.333), cropping_mode='resample', p=0.5),
-            # K.RandomCrop(size=(self.cut_size,self.cut_size), p=0.5),
-            K.RandomAffine(degrees=15, translate=0.1, p=0.7, padding_mode='border'),
-            K.RandomPerspective(0.7,p=0.7),
-            K.ColorJitter(hue=0.1, saturation=0.1, p=0.7),
-            K.RandomErasing((.1, .4), (.3, 1/.3), same_on_batch=True, p=0.7),
-
-        )
-        self.noise_fac = 0.1
-        self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
-        self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
-    def forward(self, input):
-        sideY, sideX = input.shape[2:4]
-        max_size = min(sideX, sideY)
-        min_size = min(sideX, sideY, self.cut_size)
-        cutouts = []
-
-        for _ in range(self.cutn):
-            # size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
-            # offsetx = torch.randint(0, sideX - size + 1, ())
-            # offsety = torch.randint(0, sideY - size + 1, ())
-            # cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
-            # cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
-            # cutout = transforms.Resize(size=(self.cut_size, self.cut_size))(input)
-
-            cutout = (self.av_pool(input) + self.max_pool(input))/2
-            cutouts.append(cutout)
-        batch = self.augs(torch.cat(cutouts, dim=0))
-        if self.noise_fac:
-            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
-            batch = batch + facs * torch.randn_like(batch)
-        return batch
-def load_vqgan_model(config_path, checkpoint_path):
-    config = OmegaConf.load(config_path)
-    if config.model.target == 'taming.models.vqgan.VQModel':
-        model = vqgan.VQModel(**config.model.params)
-        model.eval().requires_grad_(False)
-        model.init_from_ckpt(checkpoint_path)
-    elif config.model.target == 'taming.models.vqgan.GumbelVQ':
-        model = vqgan.GumbelVQ(**config.model.params)
-        model.eval().requires_grad_(False)
-        model.init_from_ckpt(checkpoint_path)
-    elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
-        parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
-        parent_model.eval().requires_grad_(False)
-        parent_model.init_from_ckpt(checkpoint_path)
-        model = parent_model.first_stage_model
-    else:
-        raise ValueError(f'unknown model type: {config.model.target}')
-    del model.loss
-    return model
-def resize_image(image, out_size):
-    ratio = image.size[0] / image.size[1]
-    area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
-    size = round((area * ratio)**0.5), round((area / ratio)**0.5)
-    return image.resize(size, Image.LANCZOS)
-model_name = "vqgan_imagenet_f16_16384"
-images_interval = 50
-width = 280
-height = 280
-init_image = ""
-seed = 42
-args = argparse.Namespace(
-    noise_prompt_seeds=[],
-    noise_prompt_weights=[],
-    size=[width, height],
-    init_image=init_image,
-    init_weight=0.,
-    clip_model='ViT-B/32',
-    vqgan_config=f'{model_name}.yaml',
-    vqgan_checkpoint=f'{model_name}.ckpt',
-    step_size=0.15,
-    cutn=4,
-    cut_pow=1.,
-    display_freq=images_interval,
-    seed=seed,
-)
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-print('Using device:', device)
-model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
-perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
-def inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight, target_images, cutn, cut_pow):
-    torch.cuda.empty_cache()
-    #torch.cuda.memory_summary(device=None, abbreviated=False)
-    all_frames = []
-    size=[width, height]
-    texts = text
-    init_weight=init_weight
-    if init_image:
-        init_image = init_image.name
-    else:
-        init_image = ""
-    if target_images:
-        target_images = target_images.name
-    else:
-        target_images = ""
-    max_iterations = max_iterations
-    model_names={"vqgan_imagenet_f16_16384": 'ImageNet 16384',"vqgan_imagenet_f16_1024":"ImageNet 1024", 'vqgan_openimages_f16_8192':'OpenImages 8912',
-                 "wikiart_1024":"WikiArt 1024", "wikiart_16384":"WikiArt 16384", "coco":"COCO-Stuff", "faceshq":"FacesHQ", "sflckr":"S-FLCKR"}
-    name_model = model_names[model_name]
-    if target_images == "None" or not target_images:
-        target_images = []
-    else:
-        target_images = target_images.split("|")
-        target_images = [image.strip() for image in target_images]
-    texts = [phrase.strip() for phrase in texts.split("|")]
-    if texts == ['']:
-        texts = []
-    from urllib.request import urlopen
-    if texts:
-        print('Using texts:', texts)
-    if target_images:
-        print('Using image prompts:', target_images)
-    if seed is None or seed == -1:
-        seed = torch.seed()
-    else:
-        seed = seed
-    torch.manual_seed(seed)
-    print('Using seed:', seed)
-    # clock=deepcopy(perceptor.visual.positional_embedding.data)
-    # perceptor.visual.positional_embedding.data = clock/clock.max()
-    # perceptor.visual.positional_embedding.data=clamp_with_grad(clock,0,1)
-    cut_size = perceptor.visual.input_resolution
-    f = 2**(model.decoder.num_resolutions - 1)
-    make_cutouts = MakeCutouts(cut_size, cutn, cut_pow=cut_pow)
-    toksX, toksY = size[0] // f, size[1] // f
-    sideX, sideY = toksX * f, toksY * f
-    if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':
-        e_dim = 256
-        n_toks = model.quantize.n_embed
-        z_min = model.quantize.embed.weight.min(dim=0).values[None, :, None, None]
-        z_max = model.quantize.embed.weight.max(dim=0).values[None, :, None, None]
-    else:
-        e_dim = model.quantize.e_dim
-        n_toks = model.quantize.n_e
-        z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
-        z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
-    # z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
-    # z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
-    # normalize_imagenet = transforms.Normalize(mean=[0.485, 0.456, 0.406],
-    #                                           std=[0.229, 0.224, 0.225])
-    if init_image:
-        if 'http' in init_image:
-            img = Image.open(urlopen(init_image))
-        else:
-            img = Image.open(init_image)
-        pil_image = img.convert('RGB')
-        pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)
-        pil_tensor = TF.to_tensor(pil_image)
-        z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)
-    else:
-        one_hot = F.one_hot(torch.randint(n_toks, [toksY * toksX], device=device), n_toks).float()
-        # z = one_hot @ model.quantize.embedding.weight
-        if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':
-            z = one_hot @ model.quantize.embed.weight
-        else:
-            z = one_hot @ model.quantize.embedding.weight
-        z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2)
-        z = torch.rand_like(z)*2
-    z_orig = z.clone()
-    z.requires_grad_(True)
-    opt = optim.Adam([z], lr=step_size)
-    normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
-                                     std=[0.26862954, 0.26130258, 0.27577711])
-    pMs = []
-    for prompt in texts:
-        txt, weight, stop = parse_prompt(prompt)
-        embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
-        pMs.append(Prompt(embed, weight, stop).to(device))
-    for prompt in target_images:
-        path, weight, stop = parse_prompt(prompt)
-        img = Image.open(path)
-        pil_image = img.convert('RGB')
-        img = resize_image(pil_image, (sideX, sideY))
-        batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))
-        embed = perceptor.encode_image(normalize(batch)).float()
-        pMs.append(Prompt(embed, weight, stop).to(device))
-    for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):
-        gen = torch.Generator().manual_seed(seed)
-        embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)
-        pMs.append(Prompt(embed, weight).to(device))
-    def synth(z):
-        if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':
-            z_q = vector_quantize(z.movedim(1, 3), model.quantize.embed.weight).movedim(3, 1)
-        else:
-            z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1)
-        return clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1)
-    @torch.no_grad()
-    def checkin(i, losses):
-        losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
-        tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
-        out = synth(z)
-        # TF.to_pil_image(out[0].cpu()).save('progress.png')
-        # display.display(display.Image('progress.png'))
-        #res = nvidia_smi.nvmlDeviceGetUtilizationRates(handle)
-        #print(f'gpu: {res.gpu}%, gpu-mem: {res.memory}%')
-    def ascend_txt():
-        # global i
-        out = synth(z)
-        iii = perceptor.encode_image(normalize(make_cutouts(out))).float()
-
-        result = []
-        if init_weight:
-            result.append(F.mse_loss(z, z_orig) * init_weight / 2)
-            #result.append(F.mse_loss(z, torch.zeros_like(z_orig)) * ((1/torch.tensor(i*2 + 1))*init_weight) / 2)
-        for prompt in pMs:
-            result.append(prompt(iii))
-        img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
-        img = np.transpose(img, (1, 2, 0))
-        # imageio.imwrite('./steps/' + str(i) + '.png', np.array(img))
-        img = Image.fromarray(img).convert('RGB')
-        all_frames.append(img)
-        return result, np.array(img)
-    def train(i):
-        opt.zero_grad()
-        lossAll, image = ascend_txt()
-        if i % args.display_freq == 0:
-            checkin(i, lossAll)
-
-        loss = sum(lossAll)
-        loss.backward()
-        opt.step()
-        with torch.no_grad():
-            z.copy_(z.maximum(z_min).minimum(z_max))
-        return image
-    i = 0
-    try:
-        with tqdm() as pbar:
-            while True:
-                image = train(i)
-                if i == max_iterations:
-                    break
-                i += 1
-                pbar.update()
-    except KeyboardInterrupt:
-        pass
-    writer = imageio.get_writer('test.mp4', fps=20)
-
-    for im in all_frames:
-        writer.append_data(np.array(im))
-    writer.close()
-    # all_frames[0].save('out.gif',
-    #                    save_all=True, append_images=all_frames[1:], optimize=False, duration=80, loop=0)
-    return image, 'test.mp4'
-
-def load_image( infilename ) :
-    img = Image.open( infilename )
-    img.load()
-    data = np.asarray( img, dtype="int32" )
-    return data
-title = "VQGAN + CLIP"
-description = "Gradio demo for VQGAN + CLIP. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
-article = "<p style='text-align: center'>Originally made by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). The original BigGAN+CLIP method was by https://twitter.com/advadnoun. Added some explanations and modifications by Eleiber#8347, pooling trick by Crimeacs#8222 (https://twitter.com/EarthML1) and the GUI was made with the help of Abulafia#3734. | <a href='https://colab.research.google.com/drive/1ZAus_gn2RhTZWzOWUpPERNC0Q8OhZRTZ'>Colab</a> | <a href='https://github.com/CompVis/taming-transformers'>Taming Transformers Github Repo</a> | <a href='https://github.com/openai/CLIP'>CLIP Github Repo</a> | Special thanks to BoneAmputee (https://twitter.com/BoneAmputee) for suggestions and advice</p>"
-gr.Interface(
-    inference,
-    [gr.inputs.Textbox(label="Text Input"),
-     gr.inputs.Number(default=42, label="seed"),
-     gr.inputs.Slider(minimum=0.1, maximum=0.9, default=0.6, label='step size'),
-     gr.inputs.Slider(minimum=1, maximum=500, default=100, label='max iterations', step=1),
-     gr.inputs.Slider(minimum=200, maximum=600, default=256, label='width', step=1),
-     gr.inputs.Slider(minimum=200, maximum=600, default=256, label='height', step=1),
-     gr.inputs.Image(type="file", label="Initial Image (Optional)", optional=True),
-     gr.inputs.Slider(minimum=0.0, maximum=15.0, default=0.0, label='Initial Weight', step=1.0),
-     gr.inputs.Image(type="file", label="Target Image (Optional)", optional=True),
-     gr.inputs.Slider(minimum=1, maximum=40, default=1, label='cutn', step=1),
-     gr.inputs.Slider(minimum=1.0, maximum=40.0, default=1.0, label='cut_pow', step=1.0)
-    ],
-    [gr.outputs.Image(type="numpy", label="Output Image"),gr.outputs.Video(label="Output Video")],
-    title=title,
-    description=description,
-    article=article,
-    examples=[
-        ['a garden by james gurney',42,0.6, 100, 256, 256, 'garden.jpeg', 0.0, 'garden.jpeg',1,1.0],
-        ['coral reef city artstationHQ',1000,0.6, 110, 200, 200, 'coralreef.jpeg', 0.0, 'coralreef.jpeg',1,1.0],
-        ['a cabin in the mountains unreal engine',98,0.6, 120, 280, 280, 'cabin.jpeg', 0.0, 'cabin.jpeg',1,1.0]
-    ]
-).launch(enable_queue=True)
-
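One detail worth noting in the deleted app: prompts use a `text:weight:stop` syntax. A standalone demo of the parser (the function body is copied verbatim from the file above; the example prompts are illustrative):

```python
# Standalone demo of the "text:weight:stop" prompt syntax.
def parse_prompt(prompt):
    vals = prompt.rsplit(':', 2)
    vals = vals + ['', '1', '-inf'][len(vals):]
    return vals[0], float(vals[1]), float(vals[2])

print(parse_prompt("a garden by james gurney"))  # ('a garden by james gurney', 1.0, -inf)
print(parse_prompt("coral reef city:0.5"))       # ('coral reef city', 0.5, -inf)
print(parse_prompt("a cabin:2:0.9"))             # ('a cabin', 2.0, 0.9)
```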
spaces/CVMX-jaca-tonos/Spanish-Audio-Transcription-to-Quechua-Translation/app.py
DELETED
@@ -1,58 +0,0 @@
import gradio as gr
import librosa
from transformers import AutoFeatureExtractor, AutoModelForSeq2SeqLM, AutoTokenizer, pipeline


def load_and_fix_data(input_file, model_sampling_rate):
    speech, sample_rate = librosa.load(input_file)
    if len(speech.shape) > 1:
        # collapse stereo to mono by summing the two channels
        speech = speech[:, 0] + speech[:, 1]
    if sample_rate != model_sampling_rate:
        # keyword arguments: librosa >= 0.10 no longer accepts these positionally
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=model_sampling_rate)
    return speech


feature_extractor = AutoFeatureExtractor.from_pretrained("jonatasgrosman/wav2vec2-xls-r-1b-spanish")
sampling_rate = feature_extractor.sampling_rate

asr = pipeline("automatic-speech-recognition", model="jonatasgrosman/wav2vec2-xls-r-1b-spanish")

model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

new_line = '\n'


def predict_and_ctc_lm_decode(input_file):
    speech = load_and_fix_data(input_file, sampling_rate)
    transcribed_text = asr(speech, chunk_length_s=10, stride_length_s=1)["text"]
    # "inputs" rather than "input", to avoid shadowing the builtin
    inputs = tokenizer(transcribed_text, return_tensors="pt")
    output = model.generate(inputs["input_ids"], max_length=40, num_beams=4, early_stopping=True)
    output = tokenizer.decode(output[0], skip_special_tokens=True)
    return f"Spanish Audio Transcription: {transcribed_text} {new_line} Quechua Translation: {output}"


description = """ This is a Gradio demo of Spanish Audio Transcriptions to Quechua Translation. To use this, simply provide an audio input (audio recording or via microphone), which will subsequently be transcribed and translated to the Quechua language.

Pre-trained model used for Spanish ASR: [jonatasgrosman/wav2vec2-xls-r-1b-spanish](https://huggingface.co/jonatasgrosman/wav2vec2-xls-r-1b-spanish)

Pre-trained model used for translating Spanish audio transcription to the Quechua language: [t5-small-finetuned-spanish-to-quechua](https://huggingface.co/hackathon-pln-es/t5-small-finetuned-spanish-to-quechua)

"""

gr.Interface(
    predict_and_ctc_lm_decode,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")
    ],
    outputs=[gr.outputs.Textbox()],
    examples=[["sunny_day.wav"], ["travel.wav"], ["sample_audio.wav"]],
    title="Spanish-Audio-Transcriptions-to-Quechua-Translation",
    description=description,
    # article="<p><center><img src='........e'></center></p>",
    layout="horizontal",
    theme="huggingface",
).launch(enable_queue=True, cache_examples=True)
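The translation half of this pipeline can be exercised on its own. A minimal sketch assuming only the `hackathon-pln-es/t5-small-finetuned-spanish-to-quechua` checkpoint named above; the example sentence is illustrative:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Translate a Spanish sentence directly, skipping the ASR step.
inputs = tokenizer("El sol brilla hoy.", return_tensors="pt")
ids = model.generate(inputs["input_ids"], max_length=40, num_beams=4, early_stopping=True)
print(tokenizer.decode(ids[0], skip_special_tokens=True))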
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/replace.h
DELETED
@@ -1,22 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system has no special replace functions
spaces/CVPR/regionclip-demo/detectron2/engine/__init__.py
DELETED
@@ -1,12 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from .launch import *
from .train_loop import *

__all__ = [k for k in globals().keys() if not k.startswith("_")]


# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__)
# but still make them available here
from .hooks import *
from .defaults import *
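The `__all__` comprehension freezes the public names at the point where it runs, so the two star-imports placed after it remain reachable as attributes but are excluded from `from detectron2.engine import *`. A toy sketch of the same idiom, with illustrative module contents:

# mymodule.py (illustrative)
from math import pi, tau   # re-exported

__all__ = [k for k in globals().keys() if not k.startswith("_")]
# __all__ == ["pi", "tau"]: names imported below this line, such as the
# one that follows, stay out of star-imports but remain normal attributes.
from math import inf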
spaces/CVPR/regionclip-demo/detectron2/structures/keypoints.py
DELETED
@@ -1,230 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Any, List, Tuple, Union
import torch
from torch.nn import functional as F

from detectron2.utils.env import TORCH_VERSION

if TORCH_VERSION < (1, 8):

    def script_if_tracing(fn):
        return fn


else:
    script_if_tracing = torch.jit.script_if_tracing


class Keypoints:
    """
    Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property
    containing the x,y location and visibility flag of each keypoint. This tensor has shape
    (N, K, 3) where N is the number of instances and K is the number of keypoints per instance.

    The visibility flag follows the COCO format and must be one of three integers:

    * v=0: not labeled (in which case x=y=0)
    * v=1: labeled but not visible
    * v=2: labeled and visible
    """

    def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):
        """
        Arguments:
            keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.
                The shape should be (N, K, 3) where N is the number of
                instances, and K is the number of keypoints per instance.
        """
        device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu")
        keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
        assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape
        self.tensor = keypoints

    def __len__(self) -> int:
        return self.tensor.size(0)

    def to(self, *args: Any, **kwargs: Any) -> "Keypoints":
        return type(self)(self.tensor.to(*args, **kwargs))

    @property
    def device(self) -> torch.device:
        return self.tensor.device

    def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:
        """
        Convert keypoint annotations to a heatmap of one-hot labels for training,
        as described in :paper:`Mask R-CNN`.

        Arguments:
            boxes: Nx4 tensor, the boxes to draw the keypoints to

        Returns:
            heatmaps:
                A tensor of shape (N, K), each element is integer spatial label
                in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
            valid:
                A tensor of shape (N, K) containing whether each keypoint is in the roi or not.
        """
        return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)

    def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
        """
        Create a new `Keypoints` by indexing on this `Keypoints`.

        The following usage are allowed:

        1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
        2. `new_kpts = kpts[2:10]`: return a slice of key points.
        3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor
           with `length = len(kpts)`. Nonzero elements in the vector will be selected.

        Note that the returned Keypoints might share storage with this Keypoints,
        subject to Pytorch's indexing semantics.
        """
        if isinstance(item, int):
            return Keypoints([self.tensor[item]])
        return Keypoints(self.tensor[item])

    def __repr__(self) -> str:
        s = self.__class__.__name__ + "("
        s += "num_instances={})".format(len(self.tensor))
        return s


# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def _keypoints_to_heatmap(
    keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space.

    Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the
    closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the
    continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"):
    d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.

    Arguments:
        keypoints: tensor of keypoint locations in of shape (N, K, 3).
        rois: Nx4 tensor of rois in xyxy format
        heatmap_size: integer side length of square heatmap.

    Returns:
        heatmaps: A tensor of shape (N, K) containing an integer spatial label
            in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
        valid: A tensor of shape (N, K) containing whether each keypoint is in
            the roi or not.
    """

    if rois.numel() == 0:
        return rois.new().long(), rois.new().long()
    offset_x = rois[:, 0]
    offset_y = rois[:, 1]
    scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
    scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])

    offset_x = offset_x[:, None]
    offset_y = offset_y[:, None]
    scale_x = scale_x[:, None]
    scale_y = scale_y[:, None]

    x = keypoints[..., 0]
    y = keypoints[..., 1]

    x_boundary_inds = x == rois[:, 2][:, None]
    y_boundary_inds = y == rois[:, 3][:, None]

    x = (x - offset_x) * scale_x
    x = x.floor().long()
    y = (y - offset_y) * scale_y
    y = y.floor().long()

    x[x_boundary_inds] = heatmap_size - 1
    y[y_boundary_inds] = heatmap_size - 1

    valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
    vis = keypoints[..., 2] > 0
    valid = (valid_loc & vis).long()

    lin_ind = y * heatmap_size + x
    heatmaps = lin_ind * valid

    return heatmaps, valid


@script_if_tracing
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
    """
    Extract predicted keypoint locations from heatmaps.

    Args:
        maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
            each ROI and each keypoint.
        rois (Tensor): (#ROIs, 4). The box of each ROI.

    Returns:
        Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
        (x, y, logit, score) for each keypoint.

    When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
    we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
    Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
    """
    # The decorator use of torch.no_grad() was not supported by torchscript.
    # https://github.com/pytorch/pytorch/issues/44768
    maps = maps.detach()
    rois = rois.detach()

    offset_x = rois[:, 0]
    offset_y = rois[:, 1]

    widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
    heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
    widths_ceil = widths.ceil()
    heights_ceil = heights.ceil()

    num_rois, num_keypoints = maps.shape[:2]
    xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)

    width_corrections = widths / widths_ceil
    height_corrections = heights / heights_ceil

    keypoints_idx = torch.arange(num_keypoints, device=maps.device)

    for i in range(num_rois):
        outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
        roi_map = F.interpolate(
            maps[[i]], size=outsize, mode="bicubic", align_corners=False
        ).squeeze(0)  # #keypoints x H x W

        # softmax over the spatial region
        max_score, _ = roi_map.view(num_keypoints, -1).max(1)
        max_score = max_score.view(num_keypoints, 1, 1)
        tmp_full_resolution = (roi_map - max_score).exp_()
        tmp_pool_resolution = (maps[i] - max_score).exp_()
        # Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
        # so that the scores of objects of different absolute sizes will be more comparable
        roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)

        w = roi_map.shape[2]
        pos = roi_map.view(num_keypoints, -1).argmax(1)

        x_int = pos % w
        y_int = (pos - x_int) // w

        assert (
            roi_map_scores[keypoints_idx, y_int, x_int]
            == roi_map_scores.view(num_keypoints, -1).max(1)[0]
        ).all()

        x = (x_int.float() + 0.5) * width_corrections[i]
        y = (y_int.float() + 0.5) * height_corrections[i]

        xy_preds[i, :, 0] = x + offset_x[i]
        xy_preds[i, :, 1] = y + offset_y[i]
        xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
        xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]

    return xy_preds
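Both conversions lean on the Heckbert pixel-coordinate convention quoted in the docstrings: d = floor(c) on the way in, c = d + 0.5 on the way out. A small self-contained sketch of the round trip for one keypoint; the ROI, keypoint location and heatmap size are illustrative:

import math

heatmap_size = 56
x1, y1, x2, y2 = 10.0, 20.0, 110.0, 120.0   # illustrative ROI in xyxy format
kx, ky = 35.0, 80.0                          # illustrative keypoint location

# continuous image coords -> discrete heatmap bin (d = floor(c))
sx = heatmap_size / (x2 - x1)
sy = heatmap_size / (y2 - y1)
x_int = math.floor((kx - x1) * sx)
y_int = math.floor((ky - y1) * sy)
lin_ind = y_int * heatmap_size + x_int       # the spatial label used for SoftmaxWithLoss

# discrete bin -> continuous image coords (c = d + 0.5)
kx_back = (x_int + 0.5) / sx + x1
ky_back = (y_int + 0.5) / sy + y1
# kx_back/ky_back land at the center of the bin containing (kx, ky)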
spaces/Chaitanya01/InvestingPlatform/app.py
DELETED
@@ -1,1297 +0,0 @@
import requests
import json
from requests.api import delete, options
import streamlit as st
import time
import tweepy
from io import BytesIO
import base64
import matplotlib.pyplot as plt
import numpy as np
from plotly.subplots import make_subplots
from config import *
from dateutil.relativedelta import relativedelta
from patterns import patterns
import talib  # used by get_recommendation() below for talib.RSI
from datetime import datetime, timedelta, tzinfo
from alpaca_trade_api.rest import REST
from streamlit_tags import st_tags_sidebar
# from streamlit_autorefresh import st_autorefresh
# import plotly.express as px
from coinbaskets import *
import plotly.graph_objects as go
from mapping import *
import pandas as pd
import threading
from bs4 import BeautifulSoup
from ETFs import *
from dateutil import tz
import os
# On Streamlit < 0.65 this lived in streamlit.ReportThread / streamlit.report_thread
from streamlit.scriptrunner import add_script_run_ctx as add_report_ctx


def get_stocktwits_data(req, code, label):
    # Fetch a Stocktwits ranking endpoint and join the symbol table with its score table
    r = requests.get(req)
    trending_syms = pd.DataFrame(r.json()["stocks"]).T
    trending_syms.index.name = "stock_id"
    trending_syms.index = trending_syms.index.astype("int")
    trending_score = pd.DataFrame(r.json()["table"][code])
    trending_score.set_index("stock_id", inplace=True)
    most_trending_syms = pd.merge(trending_syms, trending_score, on="stock_id")
    most_trending_syms.sort_values("val", ascending=False, inplace=True)
    most_trending_syms.set_index("symbol", inplace=True)
    most_trending_syms.columns = ["Name", "Price", "%Change", label]
    return most_trending_syms


def get_cnbc_data(symbol):
    ticker = symbol.replace(" ", "")
    if ticker == "NASDAQ":
        ticker = "NDX"
    elif ticker == "NIFTY50":
        ticker = ".NSEI"
    # Get the latest close for the symbol from the CNBC charts API
    df_1D = pd.DataFrame(requests.get(f"https://ts-api.cnbc.com/harmony/app/charts/1D.json?symbol={ticker}").json()["barData"]["priceBars"])
    df_1D["close"] = df_1D["close"].astype(float)
    close = df_1D["close"].iloc[-1]
    return close


def vix_gradient(vix):
    """
    Maps VIX to a score: vix<=20 --> 100, vix>=30 --> -100, linear in between
    """
    if vix < 20:
        return 100
    elif vix < 30:
        return -20 * vix + 500
    else:
        return -100
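Spelled out, the middle branch `-20*vix + 500` is the line through (20, 100) and (30, -100), so the score is continuous at both ends of the 20 to 30 band:

def vix_gradient(vix):
    if vix < 20:
        return 100
    elif vix < 30:
        return -20 * vix + 500
    else:
        return -100

assert vix_gradient(19.9) == 100
assert vix_gradient(20) == 100     # -20*20 + 500
assert vix_gradient(25) == 0       # midpoint of the band
assert vix_gradient(30) == -100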
def roro_comp_get(series, i, state, inverse=False):
    # Score one series at position -i from the signs of its 1-week, 2-week and 1-month returns
    current = series.iloc[-i]
    current_idx = series.index[-i]

    w_1_ago = current_idx - relativedelta(days=7)
    w_2_ago = current_idx - relativedelta(days=14)
    m_1_ago = current_idx - relativedelta(months=1)

    if state == 0:
        # daily bars: look the reference prices up by date
        w_1_ret = (current - series.loc[w_1_ago:].iloc[0]) * 100 / series.loc[w_1_ago:].iloc[0]
        w_2_ret = (current - series.loc[w_2_ago:].iloc[0]) * 100 / series.loc[w_2_ago:].iloc[0]
        m_1_ret = (current - series.loc[m_1_ago:].iloc[0]) * 100 / series.loc[m_1_ago:].iloc[0]
    else:
        # weekly bars: step back 1, 2 and 4 rows instead
        w_1_ret = (current - series.iloc[-1 - i]) * 100 / series.iloc[-1 - i]
        w_2_ret = (current - series.iloc[-2 - i]) * 100 / series.iloc[-2 - i]
        m_1_ret = (current - series.iloc[-4 - i]) * 100 / series.iloc[-4 - i]
    sign_of = 1
    if inverse == True:
        # risk-off assets count with flipped sign
        sign_of = -1
    val = 100 * (3 * (2 * (sign_of * w_1_ret > 0) - 1) + 2 * (2 * (sign_of * w_2_ret > 0) - 1) + 2 * (sign_of * m_1_ret > 0) - 1) / 6

    return val
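The score line reduces to a weighted vote of return signs (each `2*(r>0) - 1` term maps a positive return to +1 and anything else to -1): weight 3 for the 1-week return, 2 for the 2-week return, 1 for the 1-month return, scaled into [-100, 100]. A short restatement with worked values:

def sign_score(w1, w2, m1):
    # 100 * (3*sign(w1) + 2*sign(w2) + 1*sign(m1)) / 6, with sign in {-1, +1}
    s = lambda r: 1 if r > 0 else -1
    return 100 * (3 * s(w1) + 2 * s(w2) + s(m1)) / 6

print(sign_score(0.5, 0.2, 1.0))    # all positive -> 100.0
print(sign_score(-0.5, 0.2, 1.0))   # 1-week negative -> 0.0
print(sign_score(-0.5, -0.2, 1.0))  # only 1-month positive -> about -66.7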
def get_roro(tf="1Y"):
    # Pull each component series from the CNBC charts API once, then score them bar by bar
    def fetch(symbol, set_date_index=True):
        df = pd.DataFrame(requests.get(f"https://ts-api.cnbc.com/harmony/app/charts/{tf}.json?symbol={symbol}").json()["barData"]["priceBars"])
        if set_date_index:
            df["datetime"] = pd.to_datetime(df['tradeTimeinMills'], unit='ms').dt.date
            df.set_index("datetime", inplace=True)
        df["close"] = df["close"].astype(float)
        return df

    df_spx = fetch(".SPX")
    df_vix = fetch(".VIX", set_date_index=False)
    df_AUDJPY = fetch("AUDJPY=")
    # @GC.1 / @SI.1 are CNBC's continuous gold and silver futures symbols; the
    # original symbols here were mangled by email obfuscation in the diff view
    df_gold = fetch("@GC.1")
    df_silver = fetch("@SI.1")
    gold_silver_ratio = df_gold["close"] / df_silver["close"]

    df_bnd = fetch("BND")
    df_sphb = fetch("SPHB")
    df_splv = fetch("SPLV")
    sphb_splv_ratio = df_sphb["close"] / df_splv["close"]

    df_HYG = fetch("HYG")
    df_fnda = fetch("FNDA")
    df_schx = fetch("SCHX")
    fnda_schx_ratio = df_fnda["close"] / df_schx["close"]

    df_btc_usd = fetch("BTC.CB=")

    periods = 300
    roro = []
    # 5Y requests return weekly bars, so roro_comp_get steps back by rows instead of dates
    if tf == "5Y":
        state = 1
    else:
        state = 0
    for i in range(periods, 0, -1):
        temp = dict(
            date=(pd.to_datetime(df_spx.index).date)[-i],
            spx=roro_comp_get(df_spx["close"], i, state),
            audjpy=roro_comp_get(df_AUDJPY["close"], i, state),
            gold_silver=roro_comp_get(gold_silver_ratio, i, state, inverse=True),
            bnd=roro_comp_get(df_bnd["close"], i, state, inverse=True),
            sphb_splv=roro_comp_get(sphb_splv_ratio, i, state),
            hyg=roro_comp_get(df_HYG["close"], i, state),
            fnda_schx=roro_comp_get(fnda_schx_ratio, i, state),
            vix=vix_gradient(df_vix["close"].iloc[-i]),
            btc_usd=roro_comp_get(df_btc_usd["close"], i, state)
        )
        roro.append(temp)

    return roro


# Setting the page layout as wide
st.set_page_config(layout="wide")
def get_data_yields(symbol, lookback_period):
    global response_yields
    df = pd.DataFrame(requests.get(f"https://ts-api.cnbc.com/harmony/app/charts/{lookback_period}.json?symbol={symbol}").json()["barData"]["priceBars"])
    df["datetime"] = pd.to_datetime(df['tradeTimeinMills'], unit='ms')
    df.set_index("datetime", inplace=True)
    response_yields[symbol] = df["close"].astype(float)


def get_recommendation(symbol, rsi_val, drop_frm_ath, dist_from_5_yr_low):
    global momentum_recommendations, cheap_recommendations
    df = pd.DataFrame(requests.get(f"https://ts-api.cnbc.com/harmony/app/charts/1Y.json?symbol={symbol}").json()["barData"]["priceBars"])
    df_all = pd.DataFrame(requests.get(f"https://ts-api.cnbc.com/harmony/app/charts/ALL.json?symbol={symbol}").json()["barData"]["priceBars"])
    df_all["high"] = df_all["high"].astype(float)
    df_all["low"] = df_all["low"].astype(float)
    df["tradeTimeinMills"] = pd.to_datetime(df['tradeTimeinMills'], unit='ms')
    df_all["tradeTimeinMills"] = pd.to_datetime(df_all['tradeTimeinMills'], unit='ms')
    df["close"] = df["close"].astype(float)
    df["open"] = df["open"].astype(float)
    df.set_index("tradeTimeinMills", inplace=True)
    df_all.set_index("tradeTimeinMills", inplace=True)

    current_close = df["close"].iloc[-1]

    # momentum screen: price above both moving averages, recent volume spike
    df["50DMA"] = df["close"].rolling(50).mean()
    df["100DMA"] = df["close"].rolling(100).mean()
    df["Vol_mon_avg"] = 5 * df["volume"].rolling(252).mean()
    df["RSI"] = talib.RSI(df["close"])
    cond1 = current_close > df["50DMA"].iloc[-1]
    cond2 = current_close > df["100DMA"].iloc[-1]
    cond3 = df["volume"].rolling(5).sum().iloc[-1] > 1.5 * df["Vol_mon_avg"].iloc[-1]
    mom_score = int(cond1) + int(cond2) + int(cond3)

    # cheapness screen: oversold RSI, deep drawdown from the all-time high, close to the 2-year low
    ath = df_all["high"].max()
    distance_from_ath = round((ath - current_close) * 100 / ath, 2)
    yr_2_ago_dt = datetime.now() - relativedelta(years=2)
    yr_2_low = df_all["low"].loc[yr_2_ago_dt:].min()
    distance_frm_2yr_low = round((current_close - yr_2_low) * 100 / current_close, 2)
    rsi = round(df["RSI"].iloc[-1], 2)
    cond_1 = rsi < rsi_val
    cond_2 = distance_from_ath >= drop_frm_ath
    cond_3 = distance_frm_2yr_low <= dist_from_5_yr_low
    cheap_score = int(cond_1) + int(cond_2) + int(cond_3)

    if (cond1 or cond2) or cond3:
        momentum_recommendations[symbol] = {"LTP": current_close, "50DMA": df["50DMA"].iloc[-1], "100DMA": df["100DMA"].iloc[-1], "Vol>1.5avg": cond3, "score": mom_score, "sparkline": sparkline(data=df["close"])}
    if cond_1 or cond_2 or cond_3:
        cheap_recommendations[symbol] = {"LTP": current_close, "RSI": rsi, 'drop frm ATH': distance_from_ath, "% away 2 yr low": distance_frm_2yr_low, "score": cheap_score, "sparkline": sparkline(data=df["close"])}


def sparkline(data, figsize=(4, 0.25), **kwags):
    # Render the series as a tiny inline PNG and return it as an <img> tag
    data = list(data)
    fig, ax = plt.subplots(1, 1, figsize=figsize, **kwags)
    ax.plot(data)

    for k, v in ax.spines.items():
        v.set_visible(False)

    ax.set_xticks([])
    ax.set_yticks([])

    plt.plot(len(data) - 1, data[len(data) - 1], 'r.')

    ax.fill_between(range(len(data)), data, len(data) * [min(data)], alpha=0.1)

    img = BytesIO()
    plt.savefig(img, transparent=True, bbox_inches='tight')
    img.seek(0)
    plt.close()

    # decode to str so the data URI does not contain a literal "b'...'"
    return '<img src="data:image/png;base64,{}"/>'.format(base64.b64encode(img.read()).decode("utf-8"))
def highlight_rec_momentum(s):
    # green/red cell backgrounds for each momentum condition
    arr = []
    arr.append('background-color: white')
    if s["50DMA"] < s["LTP"]:
        arr.append('background-color: #90EE90')
    else:
        arr.append('background-color: #FF7F7F')
    if s["100DMA"] < s["LTP"]:
        arr.append('background-color: #90EE90')
    else:
        arr.append('background-color: #FF7F7F')
    if s["Vol>1.5avg"]:
        arr.append('background-color: #90EE90')
    else:
        arr.append('background-color: #FF7F7F')
    arr.append('background-color: white')
    arr.append('background-color: white')
    return arr


def font_color(s):
    return ["color: black"] * len(s)


def highlight_rec_cheap(s, rsi_val, drop_frm_ath, dist_from_5_yr_low):
    arr = []
    arr.append('background-color: white')
    if s["RSI"] < rsi_val:
        arr.append('background-color: #90EE90')
    else:
        arr.append('background-color: #FF7F7F')
    if s["drop frm ATH"] > drop_frm_ath:
        arr.append('background-color: #90EE90')
    else:
        arr.append('background-color: #FF7F7F')
    if s["% away 2 yr low"] < dist_from_5_yr_low:
        arr.append('background-color: #90EE90')
    else:
        arr.append('background-color: #FF7F7F')
    arr.append('background-color: white')
    arr.append('background-color: white')

    return arr


def get_etf_rets(symbol):
    global res_etf_ret, vol_etf_info, expense_ratios
    df = pd.DataFrame(requests.get(f"https://ts-api.cnbc.com/harmony/app/charts/1Y.json?symbol={symbol}").json()["barData"]["priceBars"])
    df["tradeTimeinMills"] = pd.to_datetime(df['tradeTimeinMills'], unit='ms')
    df["close"] = df["close"].astype(float)
    df["open"] = df["open"].astype(float)
    df.set_index("tradeTimeinMills", inplace=True)
    df_new = df.resample("W-Sun").agg({"close": "last", "open": "first"})
    price_1_yr_ago = df.loc[df_new.index[-1] - relativedelta(years=1):].iloc[0]["open"]
    price_1_mon_ago = df.loc[df.index[-1] - relativedelta(months=1):].iloc[0]["open"]
    close = df.iloc[-1]["close"]
    daily_ret = (close - df.iloc[-1]["open"]) * 100 / df.iloc[-1]["open"]
    w_1_ret = round((close - df_new.iloc[-1]["open"]) * 100 / df_new.iloc[-1]["open"], 2)
    w_2_ret = round((close - df_new.iloc[-2]["open"]) * 100 / df_new.iloc[-2]["open"], 2)
    y_1_ret = round((close - price_1_yr_ago) * 100 / price_1_yr_ago, 2)
    m_1_ret = round((close - price_1_mon_ago) * 100 / price_1_mon_ago, 2)
    temp = dict(symbol=symbol, day_ret=daily_ret, w1_ret=w_1_ret, w2_ret=w_2_ret, year_ret=y_1_ret, m1_ret=m_1_ret)
    res_etf_ret.append(temp)
    # 30-day annualized volatility of daily returns
    iv = (df["close"].pct_change() * 100).iloc[-30:].std() * np.sqrt(252)
    vol_etf_info[symbol] = round(iv, 2)

    # scrape the expense ratio from etfdb.com
    r = requests.get(f"https://etfdb.com/etf/{symbol}/#etf-ticker-profile")
    soup = BeautifulSoup(r.content, 'html5lib')
    expense_ratio = soup.find("div", {"class": "ticker-assets"}).find_all("div")[3].text.split("\n")[-2]
    expense_ratios[symbol] = expense_ratio
    return


def get_data(ticker, timeframe=60 * 60 * 4):
    # Function to get OHLC data for a symbol from the FTX api
    data = pd.DataFrame(json.loads(requests.get(f"https://ftx.com/api/markets/{ticker}/candles?resolution={timeframe}").text)["result"])
    return data


def in_squeeze(symbol, bb_mul, kc_mul, num_days, plot=False, timeframe=60 * 60 * 4):
    # Function to check whether a Keltner Channel / Bollinger Bands squeeze is happening
    # Get data
    data = get_data(symbol, timeframe)
    # Calculate Bollinger Bands
    data["20sma"] = data["close"].rolling(window=20).mean()
    data["stddev"] = data["close"].rolling(window=20).std()
    data["lowerband"] = data["20sma"] - bb_mul * data["stddev"]
    data["upperband"] = data["20sma"] + bb_mul * data["stddev"]

    # Calculate Keltner Channel
    data["TR"] = data["high"] - data["low"]
    data["ATR"] = data["TR"].rolling(window=20).mean()
    data['upperKC'] = data["20sma"] + kc_mul * data["ATR"]
    data['lowerKC'] = data["20sma"] - kc_mul * data["ATR"]
    data["squeeze_on"] = np.where(
        np.logical_and(data["lowerband"] > data["lowerKC"], data["upperband"] < data["upperKC"]), 1, 0)
    # If the bands were inside the channel "num_days" bars ago but are not now, that's a breakout
    if data.iloc[-num_days]["squeeze_on"] and not data.iloc[-1]["squeeze_on"]:
        # If the user wants to plot the candlestick then pass plot = True
        if plot == True:
            # Template for plotting KC, BB and candlesticks
            candlestick = go.Candlestick(x=data["startTime"], open=data["open"], high=data["high"], low=data["low"], close=data["close"], name=symbol)
            upperband = go.Scatter(x=data["startTime"], y=data["upperband"], name="Upper BB", line=dict(color="blue"))
            lowerband = go.Scatter(x=data["startTime"], y=data["lowerband"], name="Lower BB", line=dict(color="blue"))
            upperKC = go.Scatter(x=data["startTime"], y=data["upperKC"], name="Upper KC", line=dict(color="green"))
            lowerKC = go.Scatter(x=data["startTime"], y=data["lowerKC"], name="Lower KC", line=dict(color="green"))

            fig = go.Figure(data=[candlestick, upperband, lowerband, upperKC, lowerKC])
            # Range selector in the x axis for 1 month, 6 months, YTD and 1Y
            fig.update_xaxes(
                rangeslider_visible=True,
                rangeselector=dict(
                    buttons=list([
                        dict(count=1, label="1mon", step="month", stepmode="backward"),
                        dict(count=6, label="6mon", step="month", stepmode="backward"),
                        dict(count=1, label="YTD", step="year", stepmode="todate"),
                        dict(count=1, label="1y", step="year", stepmode="backward"),
                        dict(step="all")
                    ])
                ))
            fig.update_layout(yaxis=dict(autorange=True, fixedrange=False))
            st.plotly_chart(fig, use_container_width=True)
        else:
            print(f"{symbol}")
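The squeeze test itself is just two band comparisons. A standalone sketch on synthetic data; the multipliers stand in for the `bb_mul` and `kc_mul` parameters above and the column names follow the function:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
close = pd.Series(100 + rng.standard_normal(120).cumsum())
high, low = close + 0.5, close - 0.5

sma20 = close.rolling(20).mean()
stddev = close.rolling(20).std()
atr = (high - low).rolling(20).mean()

lowerband, upperband = sma20 - 2.0 * stddev, sma20 + 2.0 * stddev   # Bollinger, bb_mul=2.0
lowerKC, upperKC = sma20 - 1.5 * atr, sma20 + 1.5 * atr             # Keltner, kc_mul=1.5

# squeeze is on when both Bollinger bands sit inside the Keltner channel
squeeze_on = (lowerband > lowerKC) & (upperband < upperKC)
print(squeeze_on.tail())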
def plot_candlestick(data):
    # Function to plot a candlestick chart from a given OHLC dataframe
    fig = go.Figure(data=[go.Candlestick(x=data.index,
                                         open=data['open'],
                                         high=data['high'],
                                         low=data['low'],
                                         close=data['close'])])
    fig.update_xaxes(
        rangeslider_visible=True,
        rangeselector=dict(
            buttons=list([
                dict(count=1, label="1mon", step="month", stepmode="backward"),
                dict(count=6, label="6mon", step="month", stepmode="backward"),
                dict(count=1, label="YTD", step="year", stepmode="todate"),
                dict(count=1, label="1y", step="year", stepmode="backward"),
                dict(step="all")
            ])
        ))
    return fig


def get_key(dict_given, val):
    # Function to get the key of the required value from a dictionary
    for key, value in dict_given.items():
        if val == value:
            return key


def get_crypto_data_daily(symbol):
    data = get_data(symbol, timeframe=60 * 60 * 24)
    data.to_csv(f"crypto_data/{symbol.replace('/', '_')}.csv")


def get_cnbc_yields(symbol):
    # Scrape the yield quote from cnbc.com with Beautiful Soup
    URL = f"https://www.cnbc.com/quotes/{symbol.replace(' ', '')}"  # e.g. "US 10Y" -> /quotes/US10Y
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
    page = requests.get(URL, headers=headers)
    soup = BeautifulSoup(page.text, "html.parser")
    yields = ["US 2Y", "US 5Y", "US 10Y", "US 30Y"]
    if symbol in yields:
        elements = soup.find('div', {'class': 'QuoteStrip-lastPriceStripContainer'})
        price = elements.find_all('span')[0].text.replace("%", "")
        return float(price.replace(",", ""))


def get_yahoo_finance_quote(symbol):
    # Get the symbol quote from yahoo finance, scraped with Beautiful Soup
    URL = f"https://finance.yahoo.com/quote/{symbol}"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
    page = requests.get(URL, headers=headers)
    soup = BeautifulSoup(page.text, "html.parser")
    price = soup.find('div', {'class': 'D(ib) Mend(20px)'}).find_all('fin-streamer')[0].text
    return price.replace(",", "")


def get_symbol_quote(ticker_tape, symbol, tck_tape, idx):
    # Get the symbol quote from yahoo finance and render it as a st.metric tile
    URL = f"https://finance.yahoo.com/quote/{symbol}"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
    page = requests.get(URL, headers=headers)
    soup = BeautifulSoup(page.text, "html.parser")
    price = soup.find('div', {'class': 'D(ib) Mend(20px)'}).find_all('span')[0].text
    change = soup.find('div', {'class': 'D(ib) Mend(20px)'}).find_all('span')[1].text
    change = change.split(" ")[1]
    change = change[1:-1]
    if ticker_tape == "US 10Y":
        # US 10Y is treated differently: convert the quoted percent change into an absolute move
        change = float(change.replace("(", "").replace(")", "").replace("%", ""))
        price_now = float(price)
        change = round(price_now - price_now / (1 + change / 100), 2)
        change = str(change) + "%"
    tck_tape[idx].metric(label=ticker_tape, value=price, delta=change)
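For the US 10Y row, the quoted change is a percentage, so the code backs out the absolute move: if the yield now is p and it rose by c percent, the previous value was p/(1 + c/100) and the move is p - p/(1 + c/100). Worked through with illustrative numbers:

price_now, pct_change = 4.00, 2.0        # illustrative quote: 4.00% yield, +2% on the day
abs_change = round(price_now - price_now / (1 + pct_change / 100), 2)
print(abs_change)                        # 0.08 percentage points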
# Set up the twitter client with the access tokens that come from config.py
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)

api = tweepy.API(auth)

if "trade_api" not in st.session_state:
    # Cache the trading client in session_state so it survives page reruns
    st.session_state.trade_api = REST(API_KEY, SECRET_KEY, API_URL)
if "stocks" not in st.session_state:
    # Load the stock universe from companies.csv
    with open('companies.csv') as f:
        companies = f.read().splitlines()
    symbols = []
    for company in companies:
        symbols.append(company.split(",")[0])
    st.session_state.stocks = symbols
if "login" not in st.session_state:
    # Remember the login state across reruns; start logged out
    st.session_state.login = False

if st.session_state.login == False:
    # Show a login form in the sidebar
    st.sidebar.write("Login")

    user = st.sidebar.text_input(label="Username")
    password = st.sidebar.text_input(label="Password", type="password")
    login_btn = st.sidebar.button("Login")
    if login_btn:
        # Hard-coded user list; both accounts share the same password
        if user in ["Chaitanya", "Sagar"] and password == "Wizards@123Trade#":
            st.sidebar.success(f"Logged In as {user}")
            st.session_state.login = True
            # After login, wait a second and reload the page
            time.sleep(1)
            st.experimental_rerun()
        else:
            st.sidebar.warning("Incorrect Username/Password")
else:
    # Get the crypto data from the FTX api
    df = pd.DataFrame(json.loads(requests.get("https://ftx.com/api/markets").text)["result"])
    # Get the LTP and 24h %change for BTC/USDT and ETH/USDT
    st.session_state.index_btc = df[df["name"] == "BTC/USDT"]["last"].values[0]
    st.session_state.index_btc_pct_change = round(df[df["name"] == "BTC/USDT"]["change24h"].values[0], 2)
    st.session_state.index_eth = df[df["name"] == "ETH/USDT"]["last"].values[0]
    st.session_state.index_eth_pct_change = round(df[df["name"] == "ETH/USDT"]["change24h"].values[0], 2)
    st.session_state.roro = get_roro()
    roro_df = pd.DataFrame(st.session_state.roro)
    roro_df.set_index("date", inplace=True)
    roro_df["sum"] = roro_df.sum(axis=1) / len(roro_df.columns)
    # The code below is for formatting
    cols_ticker_tape_cryp = st.columns([1, 1])
    cols_ticker_tape_cryp[0].metric(label="BTC", value=st.session_state.index_btc, delta=f"{st.session_state.index_btc_pct_change}%")
    cols_ticker_tape_cryp[1].metric(label="ETH", value=st.session_state.index_eth, delta=f"{st.session_state.index_eth_pct_change}%")
    fig = go.Figure(go.Indicator(
        mode="number+delta",
        value=round(roro_df["sum"].iloc[-1]),
        domain={'x': [0, 0.2], 'y': [0, 0.2]},
        delta={'reference': round(roro_df["sum"].iloc[-2])},
        gauge={'axis': {'range': [-100, 100]}},
        title={'text': "RORO Indicator"}))
    fig.add_trace(go.Scatter(y=roro_df["sum"].values, x=roro_df.index, mode="lines", fill='tozeroy'))
    st.plotly_chart(fig, use_container_width=True, use_container_height=True)
    cols_ticker_tape = st.columns(5)
    # Fetching each symbol sequentially would be slow, so the quotes are fetched in parallel threads
    thread_ticker = []
    for i in range(len(symbol_mapping.keys())):
        # Look up each ticker in symbol_mapping, fetch it in a thread and display it
        ticker_tape = list(symbol_mapping.keys())[i]
        t_ticker = threading.Thread(target=get_symbol_quote, args=(ticker_tape, symbol_mapping[ticker_tape], cols_ticker_tape, i,))
        add_report_ctx(t_ticker)
        t_ticker.start()
        thread_ticker.append(t_ticker)
    for x in thread_ticker:
        # Wait for the threads to finish
        x.join()
    # Menu for the various dashboard windows
    option = st.sidebar.selectbox("Which Dashboard?",
                                  ("Watchlist", "twitter", "wallstreetbets", "stocktwits", "CryptoIndex", "Chart", "pattern", "MACRO",
                                   "Technical Scanner", "coinBaskets", "Breakout", "ETFs", "Commodities", "Report", "Recommendations", "RORO Components"))
    # To momentarily hide the site, swap in a reduced menu instead (or set up a
    # pseudo id/password that only unlocks limited features):
    # option = st.sidebar.selectbox("Which Dashboard?",
    #                               ("twitter", "coinBaskets"))
    # Set the option value as the header
    st.header(option)
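The ticker-tape fetches run in plain `threading.Thread`s; `add_report_ctx` (imported above as an alias of `streamlit.scriptrunner.add_script_run_ctx`) attaches the Streamlit script-run context to each worker so it can call Streamlit APIs. The skeleton of the pattern, with an illustrative placeholder job and symbol list:

import threading
from streamlit.scriptrunner import add_script_run_ctx as add_report_ctx

def job(name):
    ...  # fetch a quote and render it, e.g. with st.metric

threads = []
for name in ["SPX", "VIX", "NDX"]:          # illustrative symbols
    t = threading.Thread(target=job, args=(name,))
    add_report_ctx(t)                        # hand the thread the Streamlit context
    t.start()
    threads.append(t)
for t in threads:
    t.join()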
    if option == "Watchlist":
        st.subheader("Watchlist")
        col1, col2 = st.columns([1, 6.5])
        df_watchlist = pd.read_csv('watchlist.csv')
        with col1:
            # Form for adding a symbol, trigger levels and comments
            with st.form(key="symbol_add"):
                # df = pd.DataFrame(json.loads(requests.get("https://ftx.com/api/markets").text)["result"])
                # df = df[df["quoteCurrency"].isin(["USD","USDT"])]
                # symbols = df.name.values
                # asset_class = st.sidebar.selectbox("Asset Class",["Crypto","Fixed Income","Stocks","Index","Commodity"])
                asset_class = st.sidebar.selectbox("Asset Class", ["Fixed Income", "Stocks", "Index", "Commodity"])

                if asset_class == "Crypto":
                    symbol = st.selectbox("Symbol", symbols)
                    st.write("Currently working for crypto symbols from FTX")
                elif asset_class == "Fixed Income":
                    symbol = st.selectbox("Symbol", ["HYG", "LQD", "US 2Y", "US 5Y", "US 10Y", "US 30Y"])
                elif asset_class == "Index":
                    symbol = st.selectbox("Symbol", ["SPX", "NASDAQ", "NIFTY50", "VIX"])
                st.write("If an alert got triggered, but you want to add another, first delete it, then add")
                trigger = st.text_input("Trigger Price")
                view_type = st.selectbox("View Type", ["Above", "Below"])
                dma200 = st.selectbox("DMA alert", ["Yes", "No"])
                dma200_view_type = st.selectbox("DMA200 View Type", ["Above", "Below"])
                alert_type = st.selectbox("Alert Type", ["Macro", "Individual"])
                comments = st.text_area("Comments")
                # Buttons to add and remove symbols
                add_symbol = st.form_submit_button("Add")
                delete_symbol = st.form_submit_button("Remove")
                # Button (plus confirmation checkbox) to clear the whole watchlist
                empty_checkbox = st.checkbox("Yes, I wish to empty the watchlist", value=False)
                empty_watchlist = st.form_submit_button("Empty Watchlist")
        with col2:
            # If the empty-watchlist button is clicked and confirmed, clear the watchlist
            if empty_watchlist and empty_checkbox:
                st.info("Watchlist cleared")
                df_watchlist = pd.DataFrame(columns=["Symbol", "Comments"])
                df_watchlist.to_csv('watchlist.csv', index=False)
            elif empty_watchlist:
                st.error("Could not clear the watchlist, please select the checkbox")
            elif add_symbol:
                if symbol not in df_watchlist["Symbol"].values:
                    # Add the symbol and update the csv file
                    st.info("Symbol added")
                    df_watchlist = df_watchlist.append(dict(Symbol=symbol, Trigger=float(trigger), alert_type=alert_type, view_type=view_type, status="Pending", dma200=dma200, dma200_view_type=dma200_view_type, dma_status="Pending", Comments=comments), ignore_index=True)
                    df_watchlist.to_csv('watchlist.csv', index=False)
                else:
                    st.warning("Symbol Already Present, Please check")
            elif delete_symbol:
                if symbol not in df_watchlist["Symbol"].values:
                    st.warning("Symbol not present, please check...")
                else:
                    # Delete the symbol and update the csv file
                    st.info("Symbol Deleted")
                    df_watchlist = df_watchlist[df_watchlist["Symbol"] != symbol]
                    df_watchlist.to_csv('watchlist.csv', index=False)
        if len(df_watchlist) > 0:
            # For anything in the watchlist, fetch the LTP (crypto comes from the FTX
            # api, indices and yields from CNBC; free stock/ETF data is otherwise unavailable)
            df_watchlist.set_index("Symbol", inplace=True)
            df_watchlist["LTP"] = 0
            def get_data_symbol(symbol_name_for_quote):
                global df_watchlist
                try:
                    if symbol_name_for_quote in ["HYG", "LQD", "SPX", "NASDAQ", "NIFTY50", "VIX", "US 2Y", "US 5Y", "US 10Y", "US 30Y"]:
                        data_symbol = get_cnbc_data(symbol_name_for_quote)
                    else:
                        data_symbol = json.loads(requests.get(f"https://ftx.com/api/markets/{symbol_name_for_quote}").text)["result"]["price"]
                    df_watchlist.loc[symbol_name_for_quote, "LTP"] = data_symbol
                except:
                    pass
            threads_list = []
            for i in range(len(df_watchlist)):
                symbol_name_for_quote = df_watchlist.index[i]
                x = threading.Thread(target=get_data_symbol, args=(symbol_name_for_quote,))
                x.start()
                add_report_ctx(x)
                threads_list.append(x)
            for thread in threads_list:
                thread.join()
            df_watchlist["pct_away"] = np.round(100 * np.abs(df_watchlist["Trigger"] - df_watchlist["LTP"]) / df_watchlist["LTP"])
            # Show the watchlist
            st.dataframe(df_watchlist)
619 |
-
elif option == "RORO Components":
|
620 |
-
lkbck_perd = st.sidebar.selectbox("Lookback Period",["1Y","5Y"])
|
621 |
-
st.sidebar.write("1Y will fetch daily data, 5Y --> weekly")
|
622 |
-
st.sidebar.write("Number of lookback periods is set as 300")
|
623 |
-
roro_run_btn = st.sidebar.button("Run")
|
624 |
-
if roro_run_btn:
|
625 |
-
roro_comp_df = pd.DataFrame(get_roro(lkbck_perd))
|
626 |
-
roro_comp_df.set_index("date",inplace = True)
|
627 |
-
roro_comp_df["roro"] = roro_comp_df.sum(axis = 1)/(len(roro_comp_df.columns))
|
628 |
-
fig = make_subplots(rows=len(roro_comp_df.columns), shared_xaxes=True,vertical_spacing=0.01,subplot_titles=(roro_comp_df.columns))
|
629 |
-
counter = 1
|
630 |
-
for key in roro_comp_df.columns:
|
631 |
-
if key!= "roro":
|
632 |
-
fig.add_scatter(x = roro_comp_df.index, y = roro_comp_df[key].values,mode = "lines",row = counter, col = 1, name = key)
|
633 |
-
else:
|
634 |
-
fig.add_scatter(y = roro_comp_df["roro"].values,x = roro_comp_df.index,mode = "lines",fill='tozeroy',row = counter,col =1,name = key)
|
635 |
-
counter+=1
|
636 |
-
fig['layout'].update(height=2500, width=600, title='Subplots of components')
|
637 |
-
st.plotly_chart(fig,use_container_width = True)
|
638 |
-
elif option == "Recommendations":
|
639 |
-
select_asset = st.sidebar.selectbox("Asset Class",["ETF"])
|
640 |
-
rsi_val = st.sidebar.number_input("RSI Thresh",value = 50)
|
641 |
-
drop_frm_ath = st.sidebar.number_input("Drop From ATH(%) Thresh",value = 50)
|
642 |
-
dist_from_5_yr_low = st.sidebar.number_input(f"%away from 2 yr Low Thresh",value = 20)
|
643 |
-
rec_run_btn = st.sidebar.button("Run")
|
644 |
-
st.sidebar.write("**Momentum Rising Screening Conditions**")
|
645 |
-
st.sidebar.write("LTP>50DMA")
|
646 |
-
st.sidebar.write("LTP>100DMA")
|
647 |
-
st.sidebar.write("5D Vol>1.5*(5D Vol. yearly_avg")
|
648 |
-
st.sidebar.write("**Cheap Stocks Screening Conditions**")
|
649 |
-
st.sidebar.write("RSI < RSI Thresh")
|
650 |
-
st.sidebar.write("Drop from ATH(%) > Drop from ATH(%) Thresh")
|
651 |
-
st.sidebar.write(f"% away from 2 yr Low < %away from 2 yr Low Thresh")
|
652 |
-
if rec_run_btn:
|
653 |
-
momentum_recommendations = {}
|
654 |
-
cheap_recommendations = {}
|
655 |
-
rec_threads = []
|
656 |
-
if select_asset == "ETF":
|
657 |
-
options_for_assets = all_etfs
|
658 |
-
for asset in options_for_assets:
|
659 |
-
x = threading.Thread(target=get_recommendation,args = (asset,rsi_val,drop_frm_ath,dist_from_5_yr_low,))
|
660 |
-
x.start()
|
661 |
-
rec_threads.append(x)
|
662 |
-
# get_recommendation(asset)
|
663 |
-
for rec in rec_threads:
|
664 |
-
rec.join()
|
665 |
-
# st.write(momentum_recommendations)
|
666 |
-
momentum_recommendations = pd.DataFrame(momentum_recommendations).T
|
667 |
-
|
668 |
-
momentum_recommendations.sort_values("score",ascending=False,inplace = True)
|
669 |
-
cheap_recommendations = pd.DataFrame(cheap_recommendations).T
|
670 |
-
cheap_recommendations.sort_values("score",ascending=False,inplace = True)
|
671 |
-
|
672 |
-
st.header("Momentum Rising")
|
673 |
-
st.dataframe(momentum_recommendations.style.apply(highlight_rec_momentum,axis = 1).apply(font_color))
|
674 |
-
st.header("Cheap Stocks")
|
675 |
-
st.dataframe(cheap_recommendations.style.apply(highlight_rec_cheap,args = (rsi_val,drop_frm_ath,dist_from_5_yr_low),axis = 1).apply(font_color))
|
676 |
-
elif option == "MACRO":
|
677 |
-
select_timeframe = st.sidebar.selectbox("Which Timeframe (finviz only)?",["Daily","Weekly","Monthly"])
|
678 |
-
# select_lookback_period = st.sidebar.selectbox("LookbackPeriod for yields(yrs)",[1,2,3,4,5])
|
679 |
-
select_lookback_period = st.sidebar.selectbox("LookbackPeriod for yields",["1D","1M","3M","6M","1Y","5Y"])
|
680 |
-
timeframe_map = {"Daily":"d1","Weekly":"w1","Monthly":"mo"}
|
681 |
-
tf = timeframe_map[select_timeframe]
|
682 |
-
run_btn = st.sidebar.button("Run")
|
683 |
-
if run_btn:
|
684 |
-
col1, col2, col3 = st.columns([1,1,1])
|
685 |
-
with col1:
|
686 |
-
st.image(f"https://finviz.com/fut_image.ashx?es_{tf}_s.png")
|
687 |
-
with col2:
|
688 |
-
st.image(f"https://finviz.com/fut_image.ashx?vx_{tf}_s.png")
|
689 |
-
with col3:
|
690 |
-
st.image(f"https://finviz.com/fut_image.ashx?nq_{tf}_s.png")
|
691 |
-
response_yields = {}
|
692 |
-
thread_yields = []
|
693 |
-
for symbol in ["US1Y","US2Y","US5Y","US10Y","US30Y"]:
|
694 |
-
thread = threading.Thread(target = get_data_yields, args = (symbol,select_lookback_period,))
|
695 |
-
thread.start()
|
696 |
-
thread_yields.append(thread)
|
697 |
-
for x in thread_yields:
|
698 |
-
x.join()
|
699 |
-
df_yields = pd.concat(response_yields,axis = 1)
|
700 |
-
df_yields["2y/10y"] = df_yields["US10Y"]-df_yields["US2Y"]
|
701 |
-
df_yields["10y/30y"] = df_yields["US30Y"]-df_yields["US10Y"]
|
702 |
-
|
703 |
-
df_yields["2y/5y"] = df_yields["US5Y"]-df_yields["US2Y"]
|
704 |
-
df_yields["1y/2y"] = df_yields["US2Y"]-df_yields["US1Y"]
|
705 |
-
|
706 |
-
|
707 |
-
st.line_chart(df_yields[["US1Y","US2Y","US5Y","US10Y","US30Y"]])
|
708 |
-
col1, col2 = st.columns([1,1])
|
709 |
-
with col1:
|
710 |
-
st.line_chart(df_yields["1y/2y"])
|
711 |
-
st.line_chart(df_yields["2y/5y"])
|
712 |
-
with col2:
|
713 |
-
st.line_chart(df_yields["2y/10y"])
|
714 |
-
st.line_chart(df_yields["10y/30y"])
|
715 |
-
# today = datetime.now().strftime("%Y-%m-%d")
|
716 |
-
# past = (datetime.now() - timedelta(days=365*select_lookback_period)).strftime("%Y-%m-%d")
|
717 |
-
# # https://fred.stlouisfed.org/graph/?id=DGS10,DGS5,DGS30,DGS3MO,DGS1,DGS2,
|
718 |
-
# url = f"""https://fred.stlouisfed.org/graph/fredgraph.png?dwnld=0&hires=1&type=image/png&
|
719 |
-
# bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&
|
720 |
-
# mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=1168&nt=0&thu=0&trc=0&
|
721 |
-
# show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=DGS10,DGS5,DGS30,DGS3MO,DGS1,DGS2&
|
722 |
-
# scale=left,left,left,left,left,left&cosd={past},{past},{past},{past},{past},{past}&
|
723 |
-
# coed={today},{today},{today},{today},{today},{today}&
|
724 |
-
# line_color=%234572a7,%23aa4643,%2389a54e,%2380699b,%233d96ae,%23db843d&
|
725 |
-
# link_values=false,false,false,false,false,false&line_style=solid,solid,solid,solid,solid,solid&
|
726 |
-
# mark_type=none,none,none,none,none,none&mw=3,3,3,3,3,3&lw=2,2,2,2,2,2&
|
727 |
-
# ost=-99999,-99999,-99999,-99999,-99999,-99999&oet=99999,99999,99999,99999,99999,99999&
|
728 |
-
# mma=0,0,0,0,0,0&fml=a,a,a,a,a,a&fq=Daily,Daily,Daily,Daily,Daily,Daily&
|
729 |
-
# fam=avg,avg,avg,avg,avg,avg&fgst=lin,lin,lin,lin,lin,lin&
|
730 |
-
# fgsnd=2020-02-01,2020-02-01,2020-02-01,2020-02-01,2020-02-01,2020-02-01&line_index=1,2,3,4,5,6&
|
731 |
-
# transformation=lin,lin,lin,lin,lin,lin&
|
732 |
-
# vintage_date={today},{today},{today},{today},{today},{today}&
|
733 |
-
# revision_date={today},{today},{today},{today},{today},{today}&
|
734 |
-
# nd=1962-01-02,1962-01-02,1977-02-15,1981-09-01,1962-01-02,1976-06-01
|
735 |
-
# """
|
736 |
-
# url = url.replace("\n","")
|
737 |
-
# st.image(url,width = 800)
|
738 |
-
|
739 |
-
|
740 |
-
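# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] The MACRO branch above
# concatenates the per-tenor series into one DataFrame and then derives curve
# spreads by plain column subtraction. The same step in isolation, on toy data:
import pandas as pd

response_yields = {
    "US2Y": pd.Series([4.8, 4.9, 5.0]),
    "US10Y": pd.Series([4.2, 4.3, 4.5]),
}
df_yields = pd.concat(response_yields, axis=1)
df_yields["2y/10y"] = df_yields["US10Y"] - df_yields["US2Y"]  # negative => inverted curve
print(df_yields)
# --- end of editor's sketch ---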
elif option == "twitter":
|
741 |
-
# If twitter is selected
|
742 |
-
today = datetime.today()
|
743 |
-
# Get the local timezone, this is important because then it works on a different timezone
|
744 |
-
to_zone = tz.tzlocal()
|
745 |
-
# Multibox for selecting multiple users
|
746 |
-
who = st.sidebar.multiselect("Choose person",tuple(TWITTER_USERNAMES))
|
747 |
-
twitter_run_btn = st.sidebar.button("Run")
|
748 |
-
if twitter_run_btn:
|
749 |
-
# if twitter run button is clicked then all those people selected are called
|
750 |
-
if "SELECT ALL" in who:
|
751 |
-
users_list = TWITTER_USERNAMES[1:]
|
752 |
-
else:
|
753 |
-
users_list = who
|
754 |
-
|
755 |
-
for username in users_list:
|
756 |
-
# For a given username fetch the tweets, its username, image
|
757 |
-
user = api.get_user(screen_name = username)
|
758 |
-
tweets = api.user_timeline(screen_name = username, count = 100, tweet_mode = "extended")
|
759 |
-
st.subheader(username)
|
760 |
-
st.image(user.profile_image_url)
|
761 |
-
for tweet in tweets:
|
762 |
-
# In all his tweets, bring those to local timezone
|
763 |
-
tweet_date = ((tweet.created_at).astimezone(to_zone)).replace(tzinfo = None)
|
764 |
-
#Now, we don't want tweets older than 3 days
|
765 |
-
delta = (today - tweet_date).days
|
766 |
-
if delta>3:
|
767 |
-
continue
|
768 |
-
# For the following user names certain modification is done to get the tweets
|
769 |
-
if username in ["@chartmojo","@MacroCharts"]:
|
770 |
-
if tweet.in_reply_to_screen_name== None:
|
771 |
-
st.subheader(tweet._json["created_at"])
|
772 |
-
st.write(tweet.full_text)
|
773 |
-
try:
|
774 |
-
for j in tweet.extended_entities["media"]:
|
775 |
-
st.image(j["media_url_https"], width=600)
|
776 |
-
except:
|
777 |
-
pass
|
778 |
-
else:
|
779 |
-
if tweet.in_reply_to_screen_name== None and len(tweet.entities["symbols"])>0:
|
780 |
-
symbols = []
|
781 |
-
for i in range(len(tweet.entities["symbols"])):
|
782 |
-
symbols.append(tweet.entities["symbols"][i]["text"])
|
783 |
-
st.subheader(" ".join(symbols))
|
784 |
-
st.subheader(tweet._json["created_at"])
|
785 |
-
st.subheader(tweet.full_text)
|
786 |
-
try:
|
787 |
-
for j in tweet.extended_entities["media"]:
|
788 |
-
st.image(j["media_url_https"], width = 600)
|
789 |
-
except:
|
790 |
-
pass
|
791 |
-
for symbol in symbols:
|
792 |
-
st.image(f"https://finviz.com/chart.ashx?t={symbol}&ta=1", width=600)
|
793 |
-
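# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] The twitter branch above
# converts each tweet's UTC timestamp to the local zone and skips anything
# older than 3 days. The same conversion in isolation, with a stand-in value
# instead of tweet.created_at:
from datetime import datetime, timezone
from dateutil import tz

to_zone = tz.tzlocal()
created_at = datetime(2023, 1, 1, 12, 0, tzinfo=timezone.utc)  # stand-in for tweet.created_at
local_naive = created_at.astimezone(to_zone).replace(tzinfo=None)
age_days = (datetime.today() - local_naive).days
if age_days > 3:
    print("skip: older than 3 days")
# --- end of editor's sketch ---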
# elif option == "US Sectors":
|
794 |
-
# st.sidebar.write("Source - TradingView")
|
795 |
-
# select_sector = st.sidebar.selectbox("Select Sector", options = us_sectors)
|
796 |
-
# select_btn = st.sidebar.button("Run")
|
797 |
-
# url = "https://in.tradingview.com/markets/stocks-usa/sectorandindustry-sector/"
|
798 |
-
# r = requests.get(url)
|
799 |
-
# soup = BeautifulSoup(r.content, 'html5lib') # If this line causes an error, run 'pip install html5lib' or install html5lib
|
800 |
-
# req = soup.find_all('tr',attrs = {'class':'tv-data-table__row tv-data-table__stroke tv-screener-table__result-row'})
|
801 |
-
# sectors = []
|
802 |
-
# pct_change = []
|
803 |
-
# for i in req:
|
804 |
-
# arr = []
|
805 |
-
# for sector in i.find_all("td"):
|
806 |
-
# arr.append(sector.text)
|
807 |
-
# arr[0] = arr[0].split("\n")[3].split("\t")[0]
|
808 |
-
# sectors.append(arr[0])
|
809 |
-
# pct_change.append(float(arr[3][:-1]))
|
810 |
-
# fig = go.Figure([go.Bar(x=sectors, y=pct_change,
|
811 |
-
# marker = dict(color = ['rgba(63, 195, 128, 1)' if value>0 else 'rgba(219, 10, 91, 1)' for value in pct_change],
|
812 |
-
# line = dict(color='rgb(0,0,0)',width=1.5)))])
|
813 |
-
# st.plotly_chart(fig,use_container_width=True)
|
814 |
-
# if select_btn:
|
815 |
-
# val = "-".join(select_sector.lower().split(" "))
|
816 |
-
# url = f"https://in.tradingview.com/markets/stocks-usa/sectorandindustry-sector/{val}/"
|
817 |
-
# r = requests.get(url)
|
818 |
-
# soup = BeautifulSoup(r.content, 'html5lib') # If this line causes an error, run 'pip install html5lib' or install html5lib
|
819 |
-
# req = soup.find_all('tr',attrs = {'class':'tv-data-table__row tv-data-table__stroke tv-screener-table__result-row'})
|
820 |
-
# ticker = []
|
821 |
-
# pct_change_ticker = []
|
822 |
-
# company = []
|
823 |
-
# for i in req:
|
824 |
-
# arr = []
|
825 |
-
# for sector in i.find_all("td"):
|
826 |
-
# arr.append(sector.text)
|
827 |
-
# arr[0] = arr[0].split("\n")[4]
|
828 |
-
# # result.append(dict(ticker = arr[0],pct_change = float(arr[2][:-1]),vol = arr[5],mkt_cap = arr[6]))
|
829 |
-
# # ticker.append(arr[0])
|
830 |
-
# company.append(us_stocks_mapping[arr[0]])
|
831 |
-
# pct_change_ticker.append(float(arr[2][:-1]))
|
832 |
-
# layout = go.Layout(
|
833 |
-
# xaxis = go.XAxis(
|
834 |
-
# title = "Stocks",
|
835 |
-
# showticklabels = False
|
836 |
-
# )
|
837 |
-
# )
|
838 |
-
# fig = go.Figure([go.Bar(x=company, y=pct_change_ticker,
|
839 |
-
# marker = dict(color = ['rgba(63, 195, 128, 1)' if value>0 else 'rgba(219, 10, 91, 1)' for value in pct_change_ticker],
|
840 |
-
# line = dict(color='rgb(0,0,0)',width=1.5)))],layout = layout)
|
841 |
-
# st.write(len(company))
|
842 |
-
# st.plotly_chart(fig,use_container_width=True)
|
843 |
-
|
844 |
-
# st_autorefresh(interval=120000, limit=10000, key="US sectors refresh")
|
845 |
-
elif option == "Commodities":
|
846 |
-
select_timeframe = st.sidebar.selectbox("Which Timeframe?",["Daily","Weekly","Monthly"])
|
847 |
-
timeframe_map = {"Daily":"d1","Weekly":"w1","Monthly":"mo"}
|
848 |
-
tf = timeframe_map[select_timeframe]
|
849 |
-
col0,col1,col2 = st.columns([1,1,1])
|
850 |
-
count = 0
|
851 |
-
for key in commodity_mapping:
|
852 |
-
# https://finviz.com/futures_charts.ashx?t=YM&p=d1
|
853 |
-
# Get the mapping for commodities, which is there in config.py
|
854 |
-
keyword_comm = commodity_mapping[key]
|
855 |
-
num = count%3
|
856 |
-
if num ==0:
|
857 |
-
with col0:
|
858 |
-
st.image(f"https://finviz.com/fut_image.ashx?{keyword_comm}_{tf}_s.png")
|
859 |
-
elif num == 1:
|
860 |
-
with col1:
|
861 |
-
st.image(f"https://finviz.com/fut_image.ashx?{keyword_comm}_{tf}_s.png")
|
862 |
-
else:
|
863 |
-
with col2:
|
864 |
-
st.image(f"https://finviz.com/fut_image.ashx?{keyword_comm}_{tf}_s.png")
|
865 |
-
count = count+1
|
866 |
-
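# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] The three-way if/elif
# above only routes each chart to a column by count % 3. Indexing the column
# list directly expresses the same round-robin in one line; the futures
# keywords below are hypothetical examples, not the real commodity_mapping:
import streamlit as st

cols = st.columns(3)
urls = [f"https://finviz.com/fut_image.ashx?{k}_d1_s.png" for k in ("zw", "zc", "zs")]
for count, url in enumerate(urls):
    with cols[count % 3]:  # round-robin placement instead of if/elif
        st.image(url)
# --- end of editor's sketch ---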
elif option == "CryptoIndex":
|
867 |
-
# If cryptoIndex tab is selected
|
868 |
-
# Select which crypto index, then timeframe and accordingly fetch the data
|
869 |
-
type_index = st.sidebar.selectbox("Which?",["Major","Minor","Shit"])
|
870 |
-
select_index_timeframe = st.sidebar.selectbox("Timeframe",["15s","1m","5m","15m","1h","4h","1d"])
|
871 |
-
if select_index_timeframe[-1] == "s":
|
872 |
-
timeframe = int(select_index_timeframe[:-1])
|
873 |
-
elif select_index_timeframe[-1] == "m":
|
874 |
-
timeframe = int(select_index_timeframe[:-1]) * 60
|
875 |
-
elif select_index_timeframe[-1] == "h":
|
876 |
-
timeframe = int(select_index_timeframe[:-1])*60*60
|
877 |
-
elif select_index_timeframe[-1] == "d":
|
878 |
-
timeframe = int(select_index_timeframe[:-1])* 60*60*24
|
879 |
-
if type_index == "Major":
|
880 |
-
# Our crypto index is of 0.5BTC + 0.5 ETH
|
881 |
-
st.write("0.5BTC + 0.5ETH")
|
882 |
-
# Fetch data and create crypto index in same proportion
|
883 |
-
data_btc = pd.DataFrame(json.loads(requests.get(f"https://ftx.com/api/markets/BTC/USDT/candles?resolution={timeframe}").text)["result"])
|
884 |
-
data_eth = pd.DataFrame(json.loads(requests.get(f"https://ftx.com/api/markets/ETH/USDT/candles?resolution={timeframe}").text)["result"])
|
885 |
-
data_btc.set_index("startTime",inplace = True)
|
886 |
-
data_eth.set_index("startTime",inplace = True)
|
887 |
-
# Note crypto index are normalized
|
888 |
-
data = (data_btc["close"]*0.5/data_btc["close"][0] + data_eth["close"]*0.5/data_eth["close"][0])*100
|
889 |
-
fig = go.Figure()
|
890 |
-
fig.add_trace(go.Scatter(x=data.index, y=data.values,
|
891 |
-
mode='lines',
|
892 |
-
name=type_index))
|
893 |
-
fig.update_xaxes(
|
894 |
-
rangeslider_visible=True,
|
895 |
-
nticks = 20,
|
896 |
-
spikemode = "toaxis",
|
897 |
-
rangeselector=dict(
|
898 |
-
buttons=list([
|
899 |
-
dict(count=1, label="1mon", step="month", stepmode="backward"),
|
900 |
-
dict(count=6, label="6mon", step="month", stepmode="backward"),
|
901 |
-
dict(count=1, label="YTD", step="year", stepmode="todate"),
|
902 |
-
dict(count=1, label="1y", step="year", stepmode="backward"),
|
903 |
-
dict(step="all")
|
904 |
-
])
|
905 |
-
)
|
906 |
-
)
|
907 |
-
fig.update_layout(
|
908 |
-
xaxis_tickformat = '%Y-%m-%d',
|
909 |
-
height = 600,
|
910 |
-
width = 900,
|
911 |
-
hovermode = "x"
|
912 |
-
)
|
913 |
-
st.plotly_chart(fig)
|
914 |
-
|
915 |
-
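# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] The suffix-based if/elif
# chain above turns "15m"/"4h"/"1d" style strings into seconds, and the same
# chain reappears verbatim in the Breakout branch below. A single lookup-table
# helper would avoid the duplication:
def timeframe_to_seconds(tf: str) -> int:
    unit_seconds = {"s": 1, "m": 60, "h": 3600, "d": 86400}
    return int(tf[:-1]) * unit_seconds[tf[-1]]

assert timeframe_to_seconds("15m") == 900
assert timeframe_to_seconds("4h") == 14400
# --- end of editor's sketch ---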
elif option == "Report":
|
916 |
-
|
917 |
-
df = pd.DataFrame(json.loads(requests.get("https://ftx.com/api/markets").text)["result"])
|
918 |
-
df = df[df["quoteCurrency"].isin(["USD","USDT"])]
|
919 |
-
symbols = df.name.values
|
920 |
-
|
921 |
-
to_analyze_symbol = st.sidebar.multiselect("Which symbol",symbols)
|
922 |
-
analysis_date = st.sidebar.selectbox("Which date for Analysis",["Current","Yesterday"])
|
923 |
-
if analysis_date == "Yesterday":
|
924 |
-
locn = -2
|
925 |
-
elif analysis_date == "Current":
|
926 |
-
locn = -1
|
927 |
-
fetch_data_btn = st.sidebar.button("Fetch Data")
|
928 |
-
|
929 |
-
if fetch_data_btn:
|
930 |
-
threads = []
|
931 |
-
for symbol in symbols:
|
932 |
-
t = threading.Thread(target = get_crypto_data_daily, args = (symbol,))
|
933 |
-
t.start()
|
934 |
-
add_report_ctx(t)
|
935 |
-
|
936 |
-
threads.append(t)
|
937 |
-
for x in threads:
|
938 |
-
x.join()
|
939 |
-
res = {}
|
940 |
-
for req_symbol in to_analyze_symbol:
|
941 |
-
req_symbol_file = req_symbol.replace("/","_")
|
942 |
-
df_req = pd.read_csv(f"crypto_data/{req_symbol_file}.csv",index_col = 0, parse_dates = True)
|
943 |
-
ans = {}
|
944 |
-
for period in [20,50,100,200]:
|
945 |
-
df_req[f"MA{period}"] = talib.SMA(df_req["close"],period)
|
946 |
-
ans[f"% from MA{period}"] = round((df_req["close"].iloc[locn] - df_req[f"MA{period}"].iloc[locn])*100/df_req[f"MA{period}"].iloc[locn],2)
|
947 |
-
df_req["RSI14"] = talib.RSI(df_req["close"])
|
948 |
-
ans["RSI"] = round(df_req["RSI14"].iloc[locn],2)
|
949 |
-
ans["LTP"] = df_req["close"].iloc[-1]
|
950 |
-
res[req_symbol] = ans
|
951 |
-
result = pd.DataFrame(res).T
|
952 |
-
st.dataframe(result)
|
953 |
-
|
954 |
-
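# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] The Report branch above
# measures percent distance from several SMAs via talib.SMA. The same figures
# with plain pandas (no talib dependency), on toy closes:
import pandas as pd

close = pd.Series(range(1, 251), dtype=float)  # stand-in for df_req["close"]
for period in (20, 50, 100, 200):
    sma = close.rolling(period).mean()
    pct_from_ma = round((close.iloc[-1] - sma.iloc[-1]) * 100 / sma.iloc[-1], 2)
    print(f"% from MA{period}: {pct_from_ma}")
# --- end of editor's sketch ---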
elif option == "Breakout":
|
955 |
-
# If breakout tab is selected
|
956 |
-
# Get the crypto symbols
|
957 |
-
#Create buttons and boxes for selecting timeframe, BB number, KC multiplier
|
958 |
-
df = pd.DataFrame(json.loads(requests.get("https://ftx.com/api/markets").text)["result"])
|
959 |
-
df = df[df["quoteCurrency"].isin(["USD","USDT"])]
|
960 |
-
symbols = df.name.values
|
961 |
-
select_consol_tf = st.sidebar.selectbox("Timeframe",["15s","1m","5m","15m","1h","4h","1d"])
|
962 |
-
if select_consol_tf[-1] == "s":
|
963 |
-
timeframe = int(select_consol_tf[:-1])
|
964 |
-
elif select_consol_tf[-1] == "m":
|
965 |
-
timeframe = int(select_consol_tf[:-1]) * 60
|
966 |
-
elif select_consol_tf[-1] == "h":
|
967 |
-
timeframe = int(select_consol_tf[:-1])*60*60
|
968 |
-
elif select_consol_tf[-1] == "d":
|
969 |
-
timeframe = int(select_consol_tf[:-1])* 60*60*24
|
970 |
-
select_BB_mul = st.sidebar.text_input("Bollinger Band multiplier",value = "2")
|
971 |
-
select_KC_mul = st.sidebar.text_input("KC multiplier", value = "1.5")
|
972 |
-
select_num_days = st.sidebar.text_input("Consolidating before how many periods?",3)
|
973 |
-
num_days = int(select_num_days)
|
974 |
-
bb_mul = float(select_BB_mul)
|
975 |
-
kc_mul = float(select_KC_mul)
|
976 |
-
consol_run_btn = st.sidebar.button("Run")
|
977 |
-
if consol_run_btn:
|
978 |
-
# If user clicks on run button
|
979 |
-
# Again running in threads.
|
980 |
-
threads = []
|
981 |
-
for symbol in symbols:
|
982 |
-
t = threading.Thread(target = in_squeeze, args = (symbol,bb_mul, kc_mul, num_days, True,60*60*24))
|
983 |
-
add_report_ctx(t)
|
984 |
-
t.start()
|
985 |
-
threads.append(t)
|
986 |
-
for x in threads:
|
987 |
-
x.join()
|
988 |
-
st.sidebar.write("Task Complete")
|
989 |
-
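# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] in_squeeze() itself is
# defined elsewhere in this repo and is not shown in this diff. The
# conventional TTM-style test it presumably performs is "Bollinger Bands
# inside Keltner Channels". A minimal pandas version of that test, under that
# assumption (the simplified range calculation is also an assumption):
import pandas as pd

def is_in_squeeze(df: pd.DataFrame, bb_mul=2.0, kc_mul=1.5, length=20) -> pd.Series:
    mid = df["close"].rolling(length).mean()
    std = df["close"].rolling(length).std()
    rng = (df["high"] - df["low"]).rolling(length).mean()  # simplified true range
    upper_bb, lower_bb = mid + bb_mul * std, mid - bb_mul * std
    upper_kc, lower_kc = mid + kc_mul * rng, mid - kc_mul * rng
    return (lower_bb > lower_kc) & (upper_bb < upper_kc)
# --- end of editor's sketch ---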
elif option == "ETFs":
|
990 |
-
# If ETFs are selected
|
991 |
-
# Button to select ETF
|
992 |
-
selectETF = st.sidebar.selectbox("Select ETF class",etf.keys())
|
993 |
-
today = datetime.now() - timedelta(days =80)
|
994 |
-
st.subheader(selectETF)
|
995 |
-
# Note Country ETFs are dealt differently as they have country names as well
|
996 |
-
if selectETF!= "Country":
|
997 |
-
# If ETF is not Country then its very easy just from ETF variable we can fetch it
|
998 |
-
etf_names = etf[selectETF].keys()
|
999 |
-
else:
|
1000 |
-
# Select Markets, Country
|
1001 |
-
selectMarket = st.sidebar.selectbox("Which Market?",etf[selectETF].keys())
|
1002 |
-
country_list = list(etf[selectETF][selectMarket].keys())
|
1003 |
-
etf_list = list(etf[selectETF][selectMarket].values())
|
1004 |
-
etf_names = []
|
1005 |
-
country_names = []
|
1006 |
-
# In a for loop fetch all the ETFs for selected entries
|
1007 |
-
for i in range(len(etf_list)):
|
1008 |
-
if type(etf_list[i])==str:
|
1009 |
-
etf_names.append(etf_list[i])
|
1010 |
-
country_names.append(country_list[i])
|
1011 |
-
else:
|
1012 |
-
for sub_etf_name in etf_list[i]:
|
1013 |
-
etf_names.append(sub_etf_name)
|
1014 |
-
country_names.append(country_list[i])
|
1015 |
-
select_timeframe = st.sidebar.selectbox("Which Timeframe?",["Daily","Weekly","Monthly"])
|
1016 |
-
timeframe_map = {"Daily":"d","Weekly":"w","Monthly":"m"}
|
1017 |
-
etf_run_btn = st.sidebar.button("Run")
|
1018 |
-
if etf_run_btn:
|
1019 |
-
tf = timeframe_map[select_timeframe]
|
1020 |
-
# If After giving all the entries, run button is clicked get the charts for all the ETFs
|
1021 |
-
count = 0
|
1022 |
-
res_etf_ret = []
|
1023 |
-
thread_det = []
|
1024 |
-
vol_etf_info = {}
|
1025 |
-
expense_ratios = {}
|
1026 |
-
st.info("Loading..... Please Have Patience")
|
1027 |
-
for n in etf_names:
|
1028 |
-
thread = threading.Thread(target = get_etf_rets, args = (n,))
|
1029 |
-
thread.start()
|
1030 |
-
thread_det.append(thread)
|
1031 |
-
for x in thread_det:
|
1032 |
-
x.join()
|
1033 |
-
st.success("Finished Loading")
|
1034 |
-
|
1035 |
-
etf_rets_df = pd.DataFrame(res_etf_ret)
|
1036 |
-
etf_rets_df.set_index("symbol",inplace=True)
|
1037 |
-
col1,col2,col3,col4,col5 = st.columns([1,1,1,1,1])
|
1038 |
-
with col1:
|
1039 |
-
# st.write("")
|
1040 |
-
st.markdown("<h5 style='text-align: center; color: red;'>Daily Returns</h5>", unsafe_allow_html=True)
|
1041 |
-
st.dataframe(etf_rets_df["day_ret"].sort_values(ascending=False))
|
1042 |
-
with col2:
|
1043 |
-
st.markdown("<h5 style='text-align: center; color: red;'>1 Week Returns</h5>", unsafe_allow_html=True)
|
1044 |
-
|
1045 |
-
st.dataframe(etf_rets_df["w1_ret"].sort_values(ascending=False))
|
1046 |
-
with col3:
|
1047 |
-
st.markdown("<h5 style='text-align: center; color: red;'>2 Week Returns</h5>", unsafe_allow_html=True)
|
1048 |
-
st.dataframe(etf_rets_df["w2_ret"].sort_values(ascending=False))
|
1049 |
-
with col4:
|
1050 |
-
st.markdown("<h5 style='text-align: center; color: red;'>1 Month Returns</h5>", unsafe_allow_html=True)
|
1051 |
-
st.dataframe(etf_rets_df["m1_ret"].sort_values(ascending=False))
|
1052 |
-
with col5:
|
1053 |
-
st.markdown("<h5 style='text-align: center; color: red;'>1 Year Returns</h5>", unsafe_allow_html=True)
|
1054 |
-
st.dataframe(etf_rets_df["year_ret"].sort_values(ascending=False))
|
1055 |
-
|
1056 |
-
st.dataframe(etf_rets_df)
|
1057 |
-
cols1,cols2 = st.columns([1,1])
|
1058 |
-
if selectETF!= "Country":
|
1059 |
-
for etf_name in etf_names:
|
1060 |
-
num = count%2
|
1061 |
-
if num == 0:
|
1062 |
-
with cols1:
|
1063 |
-
try:
|
1064 |
-
st.write(f"{etf[selectETF][etf_name]}, IV = {vol_etf_info[etf_name]}, ER = {expense_ratios[etf_name]}")
|
1065 |
-
except:
|
1066 |
-
pass
|
1067 |
-
if tf == "d":
|
1068 |
-
st.image(f"https://finviz.com/chart.ashx?t={etf_name}&ta=1&p={tf}")
|
1069 |
-
else:
|
1070 |
-
st.image(f"https://finviz.com/chart.ashx?t={etf_name}&p={tf}")
|
1071 |
-
else:
|
1072 |
-
with cols2:
|
1073 |
-
try:
|
1074 |
-
st.write(f"{etf[selectETF][etf_name]}, IV = {vol_etf_info[etf_name]}, ER = {expense_ratios[etf_name]}")
|
1075 |
-
except:
|
1076 |
-
pass
|
1077 |
-
if tf == "d":
|
1078 |
-
st.image(f"https://finviz.com/chart.ashx?t={etf_name}&ta=1&p={tf}")
|
1079 |
-
else:
|
1080 |
-
st.image(f"https://finviz.com/chart.ashx?t={etf_name}&p={tf}")
|
1081 |
-
count = count + 1
|
1082 |
-
else:
|
1083 |
-
for i in range(len(etf_names)):
|
1084 |
-
try:
|
1085 |
-
st.write(f"{country_names[i]}, IV = {vol_etf_info[etf_names[i]]}, ER = {expense_ratios[etf_names[i]]}")
|
1086 |
-
except:
|
1087 |
-
pass
|
1088 |
-
if tf == "d":
|
1089 |
-
st.image(f"https://finviz.com/chart.ashx?t={etf_names[i]}&ta=1&p={tf}")
|
1090 |
-
else:
|
1091 |
-
st.image(f"https://finviz.com/chart.ashx?t={etf_names[i]}&p={tf}")
|
1092 |
-
elif option == "coinBaskets":
|
1093 |
-
# If coinBaskets is selected
|
1094 |
-
# Note, Mudrex was our reference here
|
1095 |
-
baskets = st.sidebar.multiselect(label = "Baskets",options=names, default =names[0])
|
1096 |
-
run_basket = st.sidebar.button("Run")
|
1097 |
-
select_crypto_timeframe = st.sidebar.selectbox("Crypto Timeframe", options =
|
1098 |
-
["1d","1m","3m","5m","15m","30m","1h","2h","4h","6h","8h","12h","3d","1w","1M"])
|
1099 |
-
check_symbol = st.sidebar.text_input("Symbol check")
|
1100 |
-
interval = select_crypto_timeframe
|
1101 |
-
# Once all the inputs are given, and if any of the basket is chosen
|
1102 |
-
if check_symbol != "":
|
1103 |
-
for bkt in names:
|
1104 |
-
# For each basket selected, get its components, fetch its price from Binance api
|
1105 |
-
#Then plot all components in a single chart
|
1106 |
-
#Here, you are giving in the symbol name and program is finding whether that symbol is there
|
1107 |
-
# in any of the basket or not
|
1108 |
-
if check_symbol in eval(bkt)["components"]:
|
1109 |
-
fig_check = go.Figure()
|
1110 |
-
st.write(bkt.upper())
|
1111 |
-
cols = st.columns(len(eval(bkt)["components"]))
|
1112 |
-
for i in eval(bkt)["components"]:
|
1113 |
-
ticker = f'{i.upper()}USDT'
|
1114 |
-
req_params = dict(symbol = ticker, interval = interval)
|
1115 |
-
url = "https://api.binance.com/api/v3/klines"
|
1116 |
-
data = pd.DataFrame(json.loads(requests.get(url,params = req_params).text))
|
1117 |
-
data = data.iloc[:,0:5]
|
1118 |
-
data.columns = ['datetime', 'open','high','low', 'close']
|
1119 |
-
data.index = [datetime.fromtimestamp(x/1000) for x in data.datetime]
|
1120 |
-
data["close"] = data["close"].astype(float)
|
1121 |
-
df = (data["close"].pct_change() + 1).cumprod()
|
1122 |
-
fig_check.add_trace(go.Scatter(x=df.index, y=df.values,
|
1123 |
-
mode='lines',
|
1124 |
-
name=i))
|
1125 |
-
fig_check.update_xaxes(
|
1126 |
-
rangeslider_visible=True,
|
1127 |
-
nticks = 20,
|
1128 |
-
spikemode = "toaxis",
|
1129 |
-
rangeselector=dict(
|
1130 |
-
buttons=list([
|
1131 |
-
dict(count=1, label="1mon", step="month", stepmode="backward"),
|
1132 |
-
dict(count=6, label="6mon", step="month", stepmode="backward"),
|
1133 |
-
dict(count=1, label="YTD", step="year", stepmode="todate"),
|
1134 |
-
dict(count=1, label="1y", step="year", stepmode="backward"),
|
1135 |
-
dict(step="all")
|
1136 |
-
])
|
1137 |
-
)
|
1138 |
-
)
|
1139 |
-
fig_check.update_layout(
|
1140 |
-
xaxis_tickformat = '%Y-%m-%d',
|
1141 |
-
height = 600,
|
1142 |
-
width = 900,
|
1143 |
-
hovermode = "x"
|
1144 |
-
)
|
1145 |
-
fig_check.update_traces(
|
1146 |
-
hovertemplate="<br>".join([
|
1147 |
-
"Price: %{y}"
|
1148 |
-
]))
|
1149 |
-
st.plotly_chart(fig_check)
|
1150 |
-
if run_basket:
|
1151 |
-
for basket in baskets:
|
1152 |
-
# Here, you are choosing the baskets and program is plotting the baskets
|
1153 |
-
st.write(basket.upper())
|
1154 |
-
st.table(eval(basket))
|
1155 |
-
# Create traces
|
1156 |
-
fig = go.Figure()
|
1157 |
-
for i in eval(basket)["components"]:
|
1158 |
-
ticker = f'{i.upper()}USDT'
|
1159 |
-
interval = select_crypto_timeframe
|
1160 |
-
req_params = dict(symbol = ticker, interval = interval)
|
1161 |
-
url = "https://api.binance.com/api/v3/klines"
|
1162 |
-
data = pd.DataFrame(json.loads(requests.get(url,params = req_params).text))
|
1163 |
-
data = data.iloc[:,0:5]
|
1164 |
-
data.columns = ['datetime', 'open','high','low', 'close']
|
1165 |
-
data.index = [datetime.fromtimestamp(x/1000) for x in data.datetime]
|
1166 |
-
data["close"] = data["close"].astype(float)
|
1167 |
-
df = (data["close"].pct_change() + 1).cumprod()
|
1168 |
-
fig.add_trace(go.Scatter(x=df.index, y=df.values,
|
1169 |
-
mode='lines',
|
1170 |
-
name=i))
|
1171 |
-
fig.update_xaxes(
|
1172 |
-
rangeslider_visible=True,
|
1173 |
-
nticks = 20,
|
1174 |
-
spikemode = "toaxis",
|
1175 |
-
rangeselector=dict(
|
1176 |
-
buttons=list([
|
1177 |
-
dict(count=1, label="1mon", step="month", stepmode="backward"),
|
1178 |
-
dict(count=6, label="6mon", step="month", stepmode="backward"),
|
1179 |
-
dict(count=1, label="YTD", step="year", stepmode="todate"),
|
1180 |
-
dict(count=1, label="1y", step="year", stepmode="backward"),
|
1181 |
-
dict(step="all")
|
1182 |
-
])
|
1183 |
-
)
|
1184 |
-
)
|
1185 |
-
fig.update_layout(
|
1186 |
-
xaxis_tickformat = '%Y-%m-%d',
|
1187 |
-
height = 600,
|
1188 |
-
width = 900,
|
1189 |
-
hovermode = "x"
|
1190 |
-
)
|
1191 |
-
fig.update_traces(
|
1192 |
-
hovertemplate="<br>".join([
|
1193 |
-
"Price: %{y}"
|
1194 |
-
]))
|
1195 |
-
st.plotly_chart(fig)
|
1196 |
-
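# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] The two plotting paths in
# the coinBaskets branch above repeat the same Binance klines fetch and
# normalization. Factoring it into one helper keeps both call sites short:
import json
from datetime import datetime
import pandas as pd
import requests

def normalized_closes(symbol: str, interval: str) -> pd.Series:
    url = "https://api.binance.com/api/v3/klines"
    raw = json.loads(requests.get(url, params={"symbol": symbol, "interval": interval}).text)
    data = pd.DataFrame(raw).iloc[:, 0:5]
    data.columns = ["datetime", "open", "high", "low", "close"]
    data.index = [datetime.fromtimestamp(x / 1000) for x in data["datetime"]]
    data["close"] = data["close"].astype(float)
    return (data["close"].pct_change() + 1).cumprod()  # growth of $1

# usage: normalized_closes("BTCUSDT", "1d")
# --- end of editor's sketch ---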
elif option == "Chart":
|
1197 |
-
# Charting platform
|
1198 |
-
# Get user inputs
|
1199 |
-
symbols = st_tags_sidebar(label = "Choose the tickers",
|
1200 |
-
text = 'Press enter to add more', maxtags = 100)
|
1201 |
-
select_stock_timeframe = st.sidebar.selectbox("Stock Timeframe",options=["1Min", "5Min", "15Min", "day"])
|
1202 |
-
select_crypto_timeframe = st.sidebar.selectbox("Crypto Timeframe", options =
|
1203 |
-
["1d","1m","3m","5m","15m","30m","1h","2h","4h","6h","8h","12h","3d","1w","1M"])
|
1204 |
-
select_periods = st.sidebar.text_input("Number of Days(Stock)",value = "30")
|
1205 |
-
today = datetime.now() - timedelta(int(select_periods))
|
1206 |
-
if len(symbols)>0:
|
1207 |
-
# If symbols are selected
|
1208 |
-
for symbol in symbols:
|
1209 |
-
# Run over all symbol in for loop and if they are crypto, treat them differently
|
1210 |
-
#And if they are others, treat them differently
|
1211 |
-
if f'{symbol.upper()}' in crypto_symbols:
|
1212 |
-
# If crypto, get the data from Binance api
|
1213 |
-
ticker = f'{symbol.upper()}USDT'
|
1214 |
-
interval = select_crypto_timeframe
|
1215 |
-
req_params = dict(symbol = ticker, interval = interval)
|
1216 |
-
url = "https://api.binance.com/api/v3/klines"
|
1217 |
-
st.subheader(ticker)
|
1218 |
-
data = pd.DataFrame(json.loads(requests.get(url,params = req_params).text))
|
1219 |
-
data = data.iloc[:,0:5]
|
1220 |
-
data.columns = ['datetime', 'open','high','low', 'close']
|
1221 |
-
data.index = [datetime.fromtimestamp(x/1000) for x in data.datetime]
|
1222 |
-
data.drop("datetime",axis = 1, inplace = True)
|
1223 |
-
else:
|
1224 |
-
# If stocks then get it from trade_api
|
1225 |
-
# Remember!!!--> trade_api was initiated in the session_state in the starting of code
|
1226 |
-
data = st.session_state.trade_api.get_barset(symbol.upper(), select_stock_timeframe, start =today.strftime("%Y-%m-%d")).df
|
1227 |
-
data = data[symbol.upper()]
|
1228 |
-
st.subheader(symbol.upper())
|
1229 |
-
fig = plot_candlestick(data)
|
1230 |
-
st.plotly_chart(fig, use_container_width=False)
|
1231 |
-
|
1232 |
-
elif option == "stocktwits":
|
1233 |
-
# If stocktwits is selected
|
1234 |
-
# This is almost similar to Larry's video, so you can reference that as well
|
1235 |
-
|
1236 |
-
# Display Trending stocks based on watchlist count
|
1237 |
-
st.header("Most Trending Symbols")
|
1238 |
-
most_trending_syms = get_stocktwits_data(req = "https://api.stocktwits.com/api/2/charts/ts",
|
1239 |
-
code = "ts", label = "Trending Score")
|
1240 |
-
st.dataframe(most_trending_syms)
|
1241 |
-
|
1242 |
-
st.header("Most messages in last 24 hrs")
|
1243 |
-
most_active_syms = get_stocktwits_data(req = "https://api.stocktwits.com/api/2/charts/m_day",
|
1244 |
-
code = "m_day", label = "#messages")
|
1245 |
-
st.dataframe(most_active_syms)
|
1246 |
-
|
1247 |
-
st.header("Top New Watchers added in last 24 hrs")
|
1248 |
-
most_active_syms = get_stocktwits_data(req = "https://api.stocktwits.com/api/2/charts/wl_ct_day",
|
1249 |
-
code = "wl_ct_day", label = "Count")
|
1250 |
-
st.dataframe(most_active_syms)
|
1251 |
-
|
1252 |
-
# For a given symbol, use request module to hit the stocktwits api and get the required info
|
1253 |
-
|
1254 |
-
|
1255 |
-
|
1256 |
-
symbol = st.sidebar.text_input("Symbol", value = "AAPL", max_chars = 5)
|
1257 |
-
r = requests.get(f"https://api.stocktwits.com/api/2/streams/symbol/{symbol}.json")
|
1258 |
-
data = r.json()
|
1259 |
-
for message in data["messages"]:
|
1260 |
-
st.image(message["user"]["avatar_url"])
|
1261 |
-
st.write(message['user']["username"])
|
1262 |
-
st.write(message["created_at"])
|
1263 |
-
st.write(message["body"])
|
1264 |
-
|
1265 |
-
st.sidebar.write("Update time for -->")
|
1266 |
-
st.sidebar.write("Top Watchlist Counts : 5mins")
|
1267 |
-
st.sidebar.write("Most Messages : 1hr")
|
1268 |
-
|
1269 |
-
elif option == "Technical Scanner":
|
1270 |
-
# If technical scanner is selected
|
1271 |
-
# Then choose the pattern, timeframe, etc
|
1272 |
-
pattern = st.sidebar.selectbox("Which Pattern?", tuple(patterns.values()))
|
1273 |
-
keywords = st_tags_sidebar(label = "Choose the tickers",
|
1274 |
-
text = 'Press enter to add more', maxtags = 100)
|
1275 |
-
select_timeframe = st.sidebar.selectbox("Timeframe",options=["1Min", "5Min", "15Min", "day"])
|
1276 |
-
run_btn = st.sidebar.button("Run")
|
1277 |
-
# Get the mapping for patterns from pattern.py file
|
1278 |
-
pattern_code = get_key(patterns, pattern)
|
1279 |
-
pattern_function = getattr(talib,pattern_code)
|
1280 |
-
if run_btn:
|
1281 |
-
# If clicked on run button
|
1282 |
-
# Then for each symbol, check the output
|
1283 |
-
keyword = [x.upper() for x in keywords]
|
1284 |
-
data = st.session_state.trade_api.get_barset(keyword, select_timeframe, limit = 100).df
|
1285 |
-
for symbol in keyword:
|
1286 |
-
try:
|
1287 |
-
# st.write(data)
|
1288 |
-
result = pattern_function(data[symbol]["open"],data[symbol]["high"],data[symbol]["low"],data[symbol]["close"])
|
1289 |
-
last = result.tail(1).values[0]
|
1290 |
-
# For some of the indicators value <0 is Bearish and value>0 is Bullish
|
1291 |
-
# But for others there would be a different logic, you shall handle it differently
|
1292 |
-
if last>0:
|
1293 |
-
st.write(f"Bullish {symbol}")
|
1294 |
-
elif last<0:
|
1295 |
-
st.write(f"Bearish {symbol}")
|
1296 |
-
except:
|
1297 |
-
pass
|
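# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the original file] The scanner above
# resolves a TA-Lib pattern function by name with getattr and reads the last
# value: > 0 bullish, < 0 bearish, 0 no signal. The same lookup in isolation,
# on random synthetic bars:
import numpy as np
import talib

o = np.random.uniform(99, 101, 100)
h, l, c = o + 1, o - 1, o + np.random.uniform(-0.5, 0.5, 100)
pattern_function = getattr(talib, "CDLENGULFING")  # same getattr lookup as above
last = pattern_function(o, h, l, c)[-1]
print("Bullish" if last > 0 else "Bearish" if last < 0 else "No signal")
# --- end of editor's sketch ---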
spaces/ChihChiu29/mychatbot/main.py
DELETED
@@ -1,72 +0,0 @@
"""Model hosted on Hugging face.

Based on: https://huggingface.co/docs/hub/spaces-sdks-docker-first-demo
"""

from fastapi import FastAPI, Request

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import T5Tokenizer, T5ForConditionalGeneration

# import gpt4free
# from gpt4free import Provider, forefront


token_size_limit = None

# FROM: https://huggingface.co/facebook/blenderbot-400M-distill?text=Hey+my+name+is+Thomas%21+How+are+you%3F

# LAST USED
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")

# tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-1B-distill")
# model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-1B-distill")
# token_size_limit = 128

# The T5 models accept "any" sequence length, but memory usage is O(L^2).
# tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
# model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small")
# tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
# model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
# tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
# model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
token_size_limit = 512

# Too large for 16GB
# tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
# model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl")


app = FastAPI()


# Request body: { msg: string, temperature: float, max_length: number }
@app.post('/reply')
async def Reply(req: Request):
    request = await req.json()
    msg = request.get('msg')
    print(f'MSG: {msg}')

    # Hugging Face generation; keep only the last token_size_limit tokens of the prompt
    input_ids = tokenizer(msg, return_tensors='pt').input_ids  # .to('cuda')
    output = model.generate(
        input_ids[:, -token_size_limit:],
        do_sample=True,
        temperature=request.get('temperature', 0.9),
        max_length=request.get('max_length', 100),
    )
    reply = tokenizer.batch_decode(output)[0]

    # gpt4free alternative via Provider.Theb (it doesn't really work):
    # reply = gpt4free.Completion.create(Provider.Theb, prompt=msg)

    print(f'REPLY: {reply}')
    return {'reply': reply}


@app.get("/")
def read_root():
    return {"Hello": "World!"}
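# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the deleted file above] main.py exposes
# POST /reply taking {msg, temperature, max_length} and returning
# {"reply": ...}. A client call against a local run of the app (the host and
# port below are assumptions, e.g. for `uvicorn main:app`):
import requests

resp = requests.post(
    "http://localhost:8000/reply",  # assumed host/port
    json={"msg": "Hey! How are you?", "temperature": 0.9, "max_length": 100},
)
print(resp.json()["reply"])
# --- end of editor's sketch ---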
spaces/Cropinky/esrgan/realesrgan/models/realesrnet_model.py
DELETED
@@ -1,188 +0,0 @@
import numpy as np
import random
import torch
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.models.sr_model import SRModel
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from torch.nn import functional as F


@MODEL_REGISTRY.register()
class RealESRNetModel(SRModel):
    """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It is trained without GAN losses.
    It mainly performs:
    1. randomly synthesize LQ images in GPU tensors
    2. optimize the networks without GAN training.
    """

    def __init__(self, opt):
        super(RealESRNetModel, self).__init__(opt)
        self.jpeger = DiffJPEG(differentiable=False).cuda()  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().cuda()  # do usm sharpening
        self.queue_size = opt.get('queue_size', 180)

    @torch.no_grad()
    def _dequeue_and_enqueue(self):
        """It is the training pair pool for increasing the diversity in a batch.

        Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
        batch could not have different resize scaling factors. Therefore, we employ this training pair pool
        to increase the degradation diversity in a batch.
        """
        # initialize
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            self.queue_ptr = 0
        if self.queue_ptr == self.queue_size:  # the pool is full
            # do dequeue and enqueue
            # shuffle
            idx = torch.randperm(self.queue_size)
            self.queue_lr = self.queue_lr[idx]
            self.queue_gt = self.queue_gt[idx]
            # get first b samples
            lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
            gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
            # update the queue
            self.queue_lr[0:b, :, :, :] = self.lq.clone()
            self.queue_gt[0:b, :, :, :] = self.gt.clone()

            self.lq = lq_dequeue
            self.gt = gt_dequeue
        else:
            # only do enqueue
            self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
            self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
            self.queue_ptr = self.queue_ptr + b

    @torch.no_grad()
    def feed_data(self, data):
        """Accept data from dataloader, and then add two-order degradations to obtain LQ images."""
        if self.is_train and self.opt.get('high_order_degradation', True):
            # training data synthesis
            self.gt = data['gt'].to(self.device)
            # USM sharpen the GT images
            if self.opt['gt_usm'] is True:
                self.gt = self.usm_sharpener(self.gt)

            self.kernel1 = data['kernel1'].to(self.device)
            self.kernel2 = data['kernel2'].to(self.device)
            self.sinc_kernel = data['sinc_kernel'].to(self.device)

            ori_h, ori_w = self.gt.size()[2:4]

            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob']
            if np.random.uniform() < self.opt['gaussian_noise_prob']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
            out = torch.clamp(out, 0, 1)  # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
            out = self.jpeger(out, quality=jpeg_p)

            # ----------------------- The second degradation process ----------------------- #
            # blur
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob2']
            if np.random.uniform() < self.opt['gaussian_noise_prob2']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range2'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)

            # JPEG compression + the final sinc filter
            # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
            # as one operation.
            # We consider two orders:
            #   1. [resize back + sinc filter] + JPEG compression
            #   2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
            else:
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)

            # clamp and round
            self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.

            # random crop
            gt_size = self.opt['gt_size']
            self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale'])

            # training pair pool
            self._dequeue_and_enqueue()
            self.lq = self.lq.contiguous()  # for the warning: grad and param do not obey the gradient layout contract
        else:
            # for paired training or validation
            self.lq = data['lq'].to(self.device)
            if 'gt' in data:
                self.gt = data['gt'].to(self.device)
                self.gt_usm = self.usm_sharpener(self.gt)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        # do not use the synthetic process during validation
        self.is_train = False
        super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
        self.is_train = True
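# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the deleted file above] The
# _dequeue_and_enqueue pool above shuffles stored (lq, gt) pairs so one batch
# can mix degradations synthesized at different steps. The core mechanism,
# reduced to a single CPU tensor queue with made-up sizes:
import torch

queue = torch.zeros(8, 3, 4, 4)  # queue_size=8 pool
ptr = 0

def push_pull(batch):
    """Enqueue `batch`; once the pool is full, return a shuffled batch from it."""
    global ptr
    b = batch.size(0)
    if ptr == queue.size(0):                 # pool full: shuffle, then swap
        idx = torch.randperm(queue.size(0))
        queue.copy_(queue[idx])
        out = queue[0:b].clone()
        queue[0:b] = batch.clone()
        return out
    queue[ptr:ptr + b] = batch.clone()       # still filling: enqueue only
    ptr += b
    return batch

# usage: mixed = push_pull(torch.randn(4, 3, 4, 4))
# --- end of editor's sketch ---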
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py
DELETED
@@ -1,73 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""

import torch
from torch import nn

from maskrcnn_benchmark.structures.image_list import to_image_list

from ..backbone import build_backbone
from ..rpn.rpn import build_rpn
from ..roi_heads.roi_heads import build_roi_heads
import numpy as np
import cv2


class GeneralizedRCNN(nn.Module):
    """
    Main class for Generalized R-CNN. Currently supports boxes and masks.
    It consists of three main parts:
    - backbone
    - rpn
    - heads: takes the features + the proposals from the RPN and computes
      detections / masks from it.
    """

    def __init__(self, cfg):
        super(GeneralizedRCNN, self).__init__()

        self.cfg = cfg.clone()
        self.backbone = build_backbone(cfg)
        self.rpn = build_rpn(cfg, self.backbone.out_channels)
        self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)

    def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)

        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] with additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")

        images = to_image_list(images)

        features = self.backbone(images.tensors)
        proposals, proposal_losses = self.rpn(images, features, targets)
        if self.roi_heads:
            x, result, detector_losses = self.roi_heads(features, proposals, targets)
        else:
            # self.warm_start -= 1
            # RPN-only models don't have roi_heads
            x = features
            result = proposals
            detector_losses = {}

        if self.training:
            losses = {}
            losses.update(detector_losses)
            losses.update(proposal_losses)
            return losses
        else:
            return result
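# --------------------------------------------------------------------------
# [Editor's sketch -- not part of the deleted file above] GeneralizedRCNN
# returns a loss dict in train mode and detection results in eval mode, so
# callers branch on the mode and sum the dict for backprop. A tiny runnable
# module mimicking that same contract (everything below is illustrative, not
# part of maskrcnn_benchmark):
import torch
from torch import nn

class TinyRCNNLike(nn.Module):
    """Mimics the train/eval contract: loss dict when training, outputs otherwise."""
    def __init__(self):
        super().__init__()
        self.head = nn.Linear(4, 2)

    def forward(self, x, targets=None):
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        out = self.head(x)
        if self.training:
            return {"loss_cls": nn.functional.cross_entropy(out, targets)}
        return out

model = TinyRCNNLike()
model.train()
losses = model(torch.randn(3, 4), torch.tensor([0, 1, 0]))
sum(losses.values()).backward()            # same reduction callers apply to the loss dict
model.eval()
with torch.no_grad():
    detections = model(torch.randn(3, 4))  # no targets needed at inference
# --- end of editor's sketch ---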
spaces/DEEMOSTECH/ChatAvatar/static/css/main.629d4bc9.css
DELETED
@@ -1,2 +0,0 @@
html{overflow-x:hidden;overflow-y:overlay}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;box-sizing:border-box;color:#cfcfcf;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;margin:0}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}.root{display:flex;justify-content:center;width:100%}.container{height:100vh;width:100%}.\!container{width:100%!important}@media (min-width:640px){.container{max-width:640px}.\!container{max-width:640px!important}}@media (min-width:768px){.container{max-width:768px}.\!container{max-width:768px!important}}@media (min-width:1024px){.container{max-width:1024px}.\!container{max-width:1024px!important}}@media (min-width:1280px){.container{max-width:1280px}.\!container{max-width:1280px!important}}@media (min-width:1536px){.container{max-width:1536px}.\!container{max-width:1536px!important}}.App{--theme-color:#4a00e0;--font-dark-color:#434343;--font-gray-color:#aaa;--font-light-color:#cfcfcf;--bg-light-color:#fff;--bg-gray0-color:#f8f8f8;--bg-gray1-color:#ececec;--bg-gray2-color:#7c7c7c;--bg-gray3-color:#373737;--bg-theme-color:#e7e3f1;--bg-dark-color:#121317;--side-gap:5rem;--radius:0.5rem;--shadow:-10px 0px 12px 1px hsla(0,0%,53%,.16);text-align:center}.App *{box-sizing:border-box;transition:all .3s}.App ::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.2)}textarea{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;border:1px solid transparent;color:var(--font-dark-color);font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;font-size:1rem;line-height:1.5rem;outline:none;padding:0;resize:none}textarea:focus{border-color:var(--theme-color)}img{-webkit-user-drag:none;-webkit-user-select:none;user-select:none}.gallery_con__Y2mej{align-items:flex-start;display:flex;justify-content:center;margin-top:var(--side-gap);padding:0 var(--side-gap);width:100%}.gallery_menuCon__fVdFJ{margin-right:2rem;width:-webkit-max-content;width:max-content}.gallery_menu__U2btD{align-items:center;background-color:initial;border:2px solid transparent;border-radius:1.5rem;cursor:pointer;display:flex;height:3rem;justify-content:center;line-height:1rem;margin-bottom:1rem;text-align:center;width:6rem}.gallery_menu__U2btD.gallery_selected__T2qcs,.gallery_menu__U2btD:hover{background-color:var(--bg-gray3-color);color:#fff}.gallery_menu__U2btD.gallery_selected__T2qcs{border-color:#fff}.gallery_cardsCon__wAfcp{align-items:flex-start;display:flex;flex-grow:1;flex-shrink:1;flex-wrap:wrap;justify-content:flex-start;max-height:100vh;max-width:calc(1600px + 9rem);overflow-y:auto}.gallery_cardsCon__wAfcp::-webkit-scrollbar-thumb{background-color:hsla(0,0%,100%,.2);border:5px solid #121317;border-radius:8px}.gallery_card__noUoL{background-color:var(--bg-gray3-color);border-radius:var(--radius);cursor:pointer;font-size:.75rem;height:260px;margin-bottom:1rem;margin-right:1rem;overflow:hidden;position:relative;width:200px}.gallery_coverImg__BYj-o,.gallery_coverImg__BYj-o img{height:100%;width:100%}.gallery_prompt__9PEmb{background-color:#f8f8f880;border-radius:var(--radius);bottom:1rem;color:var(--font-dark-color);height:0;left:1rem;overflow:hidden;padding:0 .5rem;position:absolute;right:1rem;text-align:left;white-space:pre-wrap;word-break:break-all}.gallery_prompt__9PEmb.gallery_show__c2k50{height:-webkit-fit-content;height:-moz-fit-content;height:fit-content;padding:.5rem}.gallery_infoCon__E8oLy{align-items:center;bottom:1rem;color:var(--font-dark-color);display:flex;justify-content:flex-start;left:1rem;position:absolute;right:1rem}.gallery_avatar__KWBmI,.gallery_avatar__KWBmI img{border-radius:12px;height:24px;overflow:hidden;width:24px}.gallery_avatar__KWBmI{margin-right:1rem}.gallery_spaceholder__xJwYU{flex-grow:1;flex-shrink:1}.header_con__M\+u1W{align-items:center;display:flex;justify-content:center;padding:0 var(--side-gap);width:100vw}.header_header__Y7CqP{align-items:center;border-bottom:1px solid hsla(0,0%,100%,.1);display:flex;justify-content:space-between;padding:1rem 0;width:100%}.header_logoCon__MIdGL{align-items:flex-start;display:flex;height:3rem;justify-content:center}.header_logo__90zuC{height:3rem;margin-right:1rem}.header_logoCon__MIdGL>div{font-size:2rem;font-weight:700;line-height:2rem;margin-top:5px}.header_avatar__B3zXB{background:var(--bg-gray2-color);border-radius:50%;overflow:hidden}.header_avatar__B3zXB,.header_avatar__B3zXB img{height:3rem;width:3rem}.login_con__\+RJgQ{background:#000;box-shadow:-5px 0 20px 0 hsla(0,0%,100%,.2);height:100vh;padding:var(--side-gap);position:fixed;right:0;top:0;z-index:9}.login_close__JulM-{cursor:pointer;-webkit-user-select:none;user-select:none}.result_con__gHOU1{align-items:center;color:var(--font-dark-color);display:flex;justify-content:center;z-index:999}.result_con__gHOU1 *{flex-shrink:0}.result_board__PCvVJ{align-items:center;background-color:var(--bg-light-color);border-radius:var(--radius);display:flex;height:80vh;justify-content:center;min-height:36rem;min-width:64rem;padding:1.5rem;width:100vh}.result_col__S-fRD{align-items:center;display:flex;flex-direction:column;flex-shrink:0;height:100%;justify-content:flex-start;position:relative;width:calc(50% - .5rem)}.result_col__S-fRD:first-child{margin-right:1rem}.result_colTitle__R8k\+A{align-items:flex-end;color:var(--font-gray-color);display:flex;font-size:1.2rem;font-weight:700;justify-content:space-between;line-height:1.2rem;margin-bottom:1rem;width:100%}.result_colTitle__R8k\+A>div{margin-bottom:.5rem}.result_colTitle__R8k\+A>div.result_restart__fLq8E{border-radius:5px;cursor:pointer;font-size:1rem;font-weight:400;margin-bottom:0;margin-left:1rem;padding:.5rem;-webkit-user-select:none;user-select:none}.result_restart__fLq8E:hover{background-color:var(--bg-gray0-color);color:var(--font-dark-color)}.result_spaceholder__GAxGZ{flex-grow:1;flex-shrink:1}.result_lang__85-De{cursor:pointer;font-weight:400;margin-right:1rem;-webkit-user-select:none;user-select:none}.result_lang__85-De.result_en__n-Jo7{margin-left:1rem;margin-right:0;width:4rem}.result_lang__85-De:hover{font-weight:700}.result_lang__85-De.result_selected__kDzD1{color:var(--font-dark-color);font-weight:700}.result_regene__yKazF{color:var(--theme-color);cursor:pointer;font-weight:400;-webkit-user-select:none;user-select:none}.result_chatCon__Hm\+zJ{background-color:var(--bg-gray0-color);border-radius:var(--radius);height:calc(100% - 4rem);padding:1rem}.result_chatCon__Hm\+zJ,.result_chatMsgCon__x8UTP{align-items:center;display:flex;flex-direction:column;flex-grow:1;flex-shrink:1;justify-content:flex-start;width:100%}.result_chatMsgCon__x8UTP{overflow-y:overlay;text-align:left}.result_chatMsgCon__x8UTP::-webkit-scrollbar-thumb{border:none;border-radius:3px}.result_chatMsgCon__x8UTP::-webkit-scrollbar{width:6px}.result_chatMsgRow__dr9Qg{align-items:flex-start;display:flex;flex-direction:row;justify-content:flex-start;margin-bottom:1rem;width:100%}.result_chatMsgRow__dr9Qg.result_user__bUuRg{flex-direction:row-reverse}.result_avatar__B2zOp{background:var(--bg-gray2-color);border-radius:1.5rem;margin-left:0;margin-right:1rem;overflow:hidden}.result_avatar__B2zOp,.result_avatar__B2zOp img{height:3rem;width:3rem}.result_user__bUuRg .result_avatar__B2zOp{margin-left:1rem;margin-right:0}.result_bubble__GexXm{background:var(--bg-theme-color);border-radius:var(--radius);flex-shrink:1;line-height:1.5rem;padding:.75rem 1rem;white-space:pre-wrap;word-break:break-all}.result_bubble__GexXm.result_unactive__zyVF2{background:var(--bg-gray1-color)}.result_user__bUuRg .result_bubble__GexXm{background:var(--bg-light-color)}.result_chatIptCon__LXDF-{align-items:center;display:flex;flex-direction:column;justify-content:flex-start;width:100%}.result_chatTipsCon__w4uUf{align-items:flex-end;display:flex;flex-direction:row;justify-content:flex-start;margin-top:1rem;max-width:100%;overflow-x:auto;overflow-y:hidden;width:100%}.result_chatTipsCon__w4uUf::-webkit-scrollbar-thumb{border-color:var(--bg-gray0-color)}.result_chatTips__6b9zJ{background:var(--bg-light-color);border-radius:var(--radius);cursor:pointer;margin-right:1rem;padding:1rem;text-align:left;white-space:pre-wrap;width:15.5rem;word-break:break-all}.result_chatTips__6b9zJ:last-child{margin-right:0}.result_chatRowCon__jLGk3{align-items:flex-start;display:flex;flex-direction:row;justify-content:space-between;margin-top:1rem;width:100%}.result_iptLineCon__nLuWa{flex-grow:1;flex-shrink:1;line-height:1.5rem;margin-right:1rem;position:relative;text-align:left}.result_iptSpaceholder__hAkD5{border:1px solid transparent;max-height:calc(9rem + 2px);visibility:hidden}.result_iptSpaceholder__hAkD5,.result_ipt__tA\+g4{padding:.75rem 1rem;white-space:pre-wrap;word-break:break-all}.result_ipt__tA\+g4{background:var(--bg-light-color);border-radius:var(--radius);bottom:0;left:0;overflow-y:auto;position:absolute;right:0;top:0}.result_ipt__tA\+g4::-webkit-scrollbar-thumb{border-color:var(--bg-light-color)}.result_btn__h5tQr{align-items:center;background-color:var(--theme-color);border:1px solid var(--theme-color);border-radius:1.5rem;color:#fff;cursor:pointer;display:flex;font-weight:700;height:calc(3rem - 2px);justify-content:center;line-height:1rem;padding:0 1.5rem;-webkit-user-select:none;user-select:none}.result_btn__h5tQr:hover{background:transparent;color:var(--theme-color)}.result_con__gHOU1 .result_btn__h5tQr.result_disabled__lB61-{background:var(--bg-gray2-color);border-color:var(--bg-gray2-color);color:var(--font-light-color);cursor:not-allowed}.result_iptArea__23TZc{background:var(--bg-gray0-color);border-radius:var(--radius);height:12rem;margin-bottom:1rem;padding:1rem;text-align:left;width:100%}.result_iptArea__23TZc::-webkit-scrollbar-thumb{border-color:var(--bg-gray0-color)}.result_generateBtn__UGmBG{margin-bottom:1rem;width:100%}.result_candidateCon__x9kyB{align-items:flex-start;background-color:var(--bg-gray0-color);border-radius:var(--radius);display:flex;flex-direction:row;flex-grow:1;flex-shrink:1;justify-content:space-between;overflow-y:overlay;padding:1rem;position:relative;width:100%}.result_candidateCon__x9kyB::-webkit-scrollbar-thumb{border-color:var(--bg-gray0-color)}.result_candidateCol__eoHna{margin-right:1rem;position:relative;width:calc(33.33333% - .66667rem)}.result_candidateCol__eoHna:last-child{margin-right:0}.result_candidateCol__eoHna img{border-radius:var(--radius);cursor:pointer;margin-bottom:1rem;width:100%}.result_creatorCon__tIm3e{align-items:flex-end;color:var(--font-gray-color);display:flex;font-size:1.2rem;font-weight:700;justify-content:flex-start;line-height:1.2rem;margin-bottom:1rem;width:100%}.result_creatorInfoCon__pET8h{text-align:left}.result_creatorName__VLTXL{color:var(--font-dark-color);font-size:1.2rem;font-weight:700;line-height:1.8rem}.result_creatorInfo__CkbWU{color:var(--font-gray-color);font-size:1rem;line-height:1.2rem}.result_modelView__Y25w5{background:var(--bg-gray0-color);border-radius:var(--radius);flex-grow:1;flex-shrink:1;overflow:hidden;width:100%}.result_modelInfoCon__bXw5O{align-items:center;bottom:1rem;display:flex;flex-direction:column;justify-content:flex-end;left:1rem;position:absolute;right:1rem;text-align:left}.result_progressInfo__g9iwR{margin-bottom:.5rem;width:100%}.result_progressTrack__I6zDn{background:var(--bg-light-color);border-radius:2px;height:4px;position:relative;width:100%}.result_progressThumb__mbBQj{background-color:var(--theme-color);border-radius:2px;height:4px;left:0;position:absolute;top:0}.result_modelPrompt__DzUbD{background:var(--bg-light-color);border-radius:var(--radius);margin-top:1rem;min-height:3rem;padding:1rem;width:100%}.welcome_con__o1kmf{align-items:center;background:#121317;display:flex;flex-direction:column;justify-content:flex-start;padding-bottom:12rem;padding-top:4rem;position:relative;width:100%}.welcome_con__o1kmf>img{position:absolute;top:0;width:40vw}.welcome_mainCon__H1gv\+{z-index:999}.welcome_title__Gd8m4{color:#fff;font-family:Courier New;font-size:5rem;font-weight:700;line-height:5rem}.welcome_ioCon__PQZXU{background-color:#fff;border-radius:1rem;border-style:solid;margin-left:8rem;margin-right:8rem;margin-top:24rem;padding:2rem;width:calc(100% - 16rem)}.welcome_iptCon__KpWEL{align-items:center;background:#ededf2;border-radius:1rem;display:flex;height:4rem;justify-content:space-between;margin-bottom:2rem;width:100%}.welcome_iptCon__KpWEL>img{height:2rem;margin-right:1rem;position:static;width:2rem}.welcome_ipt__ayi9Z{background:#ededf2;border:none;border-radius:1rem;color:var(--font-dark-color);flex-grow:1;font-size:1rem;height:100%;outline:none;padding:0 2rem}.welcome_ipt__ayi9Z::-webkit-input-placeholder{font-size:1rem}.welcome_ipt__ayi9Z::placeholder{font-size:1rem}.welcome_btnCon__Mx-ta,.welcome_btn__jCuoG{align-items:center;display:flex;justify-content:center}.welcome_btn__jCuoG{border:1px solid #8f8f8f;border-radius:1rem;cursor:pointer;height:3rem;line-height:1rem;-webkit-user-select:none;user-select:none;width:100%}.welcome_btn__jCuoG:last-child{background:#4a00e0;border:none;font-weight:700}.welcome_btn__jCuoG.welcome_disabled__pcSzv{cursor:not-allowed}.welcome_btn__jCuoG:hover{color:#fff}
/*# sourceMappingURL=main.629d4bc9.css.map*/
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/FpxImagePlugin.py
DELETED
@@ -1,253 +0,0 @@
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id$
#
# FlashPix support for PIL
#
# History:
# 97-01-25 fl Created (reads uncompressed RGB images only)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
import olefile

from . import Image, ImageFile
from ._binary import i32le as i32

# we map from colour field tuples to (mode, rawmode) descriptors
MODES = {
    # opacity
    (0x00007FFE,): ("A", "L"),
    # monochrome
    (0x00010000,): ("L", "L"),
    (0x00018000, 0x00017FFE): ("RGBA", "LA"),
    # photo YCC
    (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
    (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"),
    # standard RGB (NIFRGB)
    (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
    (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"),
}


#
# --------------------------------------------------------------------


def _accept(prefix):
    return prefix[:8] == olefile.MAGIC


##
# Image plugin for the FlashPix images.


class FpxImageFile(ImageFile.ImageFile):
    format = "FPX"
    format_description = "FlashPix"

    def _open(self):
        #
        # read the OLE directory and see if this is a likely
        # to be a FlashPix file

        try:
            self.ole = olefile.OleFileIO(self.fp)
        except OSError as e:
            msg = "not an FPX file; invalid OLE file"
            raise SyntaxError(msg) from e

        if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
            msg = "not an FPX file; bad root CLSID"
            raise SyntaxError(msg)

        self._open_index(1)

    def _open_index(self, index=1):
        #
        # get the Image Contents Property Set

        prop = self.ole.getproperties(
            [f"Data Object Store {index:06d}", "\005Image Contents"]
        )

        # size (highest resolution)

        self._size = prop[0x1000002], prop[0x1000003]

        size = max(self.size)
        i = 1
        while size > 64:
            size = size / 2
            i += 1
        self.maxid = i - 1

        # mode. instead of using a single field for this, flashpix
        # requires you to specify the mode for each channel in each
        # resolution subimage, and leaves it to the decoder to make
        # sure that they all match. for now, we'll cheat and assume
        # that this is always the case.

        id = self.maxid << 16

        s = prop[0x2000002 | id]

        colors = []
        bands = i32(s, 4)
        if bands > 4:
            msg = "Invalid number of bands"
            raise OSError(msg)
        for i in range(bands):
            # note: for now, we ignore the "uncalibrated" flag
            colors.append(i32(s, 8 + i * 4) & 0x7FFFFFFF)

        self.mode, self.rawmode = MODES[tuple(colors)]

        # load JPEG tables, if any
        self.jpeg = {}
        for i in range(256):
            id = 0x3000001 | (i << 16)
            if id in prop:
                self.jpeg[i] = prop[id]

        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index=1, subimage=0):
        #
        # setup tile descriptors for a given subimage

        stream = [
            f"Data Object Store {index:06d}",
            f"Resolution {subimage:04d}",
            "Subimage 0000 Header",
        ]

        fp = self.ole.openstream(stream)

        # skip prefix
        fp.read(28)

        # header stream
        s = fp.read(36)

        size = i32(s, 4), i32(s, 8)
        # tilecount = i32(s, 12)
        tilesize = i32(s, 16), i32(s, 20)
        # channels = i32(s, 24)
        offset = i32(s, 28)
        length = i32(s, 32)

        if size != self.size:
            msg = "subimage mismatch"
            raise OSError(msg)

        # get tile descriptors
        fp.seek(28 + offset)
        s = fp.read(i32(s, 12) * length)

        x = y = 0
        xsize, ysize = size
        xtile, ytile = tilesize
        self.tile = []

        for i in range(0, len(s), length):
            x1 = min(xsize, x + xtile)
            y1 = min(ysize, y + ytile)

            compression = i32(s, i + 8)

            if compression == 0:
                self.tile.append(
                    (
                        "raw",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode,),
                    )
                )

            elif compression == 1:
                # FIXME: the fill decoder is not implemented
                self.tile.append(
                    (
                        "fill",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode, s[12:16]),
                    )
                )

            elif compression == 2:
                internal_color_conversion = s[14]
                jpeg_tables = s[15]
                rawmode = self.rawmode

                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if rawmode == "RGBA":
                        # For "RGBA", data is stored as YCbCrA based on
                        # negative RGB. The following trick works around
                        # this problem :
                        jpegmode, rawmode = "YCbCrK", "CMYK"
                    else:
                        jpegmode = None  # let the decoder decide

                else:
                    # The image is stored as defined by rawmode
                    jpegmode = rawmode

                self.tile.append(
                    (
                        "jpeg",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (rawmode, jpegmode),
                    )
                )

                # FIXME: jpeg tables are tile dependent; the prefix
                # data must be placed in the tile descriptor itself!

                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]

            else:
                msg = "unknown/invalid compression"
                raise OSError(msg)

            x = x + xtile
            if x >= xsize:
                x, y = 0, y + ytile
                if y >= ysize:
                    break  # isn't really required

        self.stream = stream
        self.fp = None

    def load(self):
        if not self.fp:
            self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])

        return ImageFile.ImageFile.load(self)

    def close(self):
        self.ole.close()
        super().close()

    def __exit__(self, *args):
        self.ole.close()
        super().__exit__()


#
# --------------------------------------------------------------------


Image.register_open(FpxImageFile.format, FpxImageFile, _accept)

Image.register_extension(FpxImageFile.format, ".fpx")
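
Note: the plugin above hooks into PIL's normal dispatch, so FlashPix files are reached through Image.open(), which routes to FpxImageFile via _accept(). A minimal usage sketch, assuming Pillow with the optional olefile dependency installed; "photo.fpx" is a placeholder path, not a file from this repository:

# Sketch only: exercises the plugin through PIL's ordinary open() dispatch.
from PIL import Image

with Image.open("photo.fpx") as im:      # _accept() matches the OLE magic bytes
    print(im.format, im.mode, im.size)   # "FPX", plus mode/size read in _open_index()
    im.load()                            # opens the "Subimage 0000 Data" stream and decodes tiles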
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/setters.py
DELETED
@@ -1,73 +0,0 @@
# SPDX-License-Identifier: MIT

"""
Commonly used hooks for on_setattr.
"""


from . import _config
from .exceptions import FrozenAttributeError


def pipe(*setters):
    """
    Run all *setters* and return the return value of the last one.

    .. versionadded:: 20.1.0
    """

    def wrapped_pipe(instance, attrib, new_value):
        rv = new_value

        for setter in setters:
            rv = setter(instance, attrib, rv)

        return rv

    return wrapped_pipe


def frozen(_, __, ___):
    """
    Prevent an attribute to be modified.

    .. versionadded:: 20.1.0
    """
    raise FrozenAttributeError()


def validate(instance, attrib, new_value):
    """
    Run *attrib*'s validator on *new_value* if it has one.

    .. versionadded:: 20.1.0
    """
    if _config._run_validators is False:
        return new_value

    v = attrib.validator
    if not v:
        return new_value

    v(instance, attrib, new_value)

    return new_value


def convert(instance, attrib, new_value):
    """
    Run *attrib*'s converter -- if it has one -- on *new_value* and return the
    result.

    .. versionadded:: 20.1.0
    """
    c = attrib.converter
    if c:
        return c(new_value)

    return new_value


# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
# autodata stopped working, so the docstring is inlined in the API docs.
NO_OP = object()
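
Note: these hooks are meant to be passed to attrs' on_setattr machinery. A minimal usage sketch, assuming the public attr API; the Point class and its fields are illustrative, not taken from this repository:

# Sketch only: wiring the hooks above into a class via on_setattr.
import attr

@attr.s(on_setattr=attr.setters.pipe(attr.setters.convert, attr.setters.validate))
class Point:
    x = attr.ib(converter=int, validator=attr.validators.instance_of(int))
    y = attr.ib(converter=int, on_setattr=attr.setters.NO_OP)  # opt out of the class-wide hooks

p = Point("1", 2)
p.x = "3"        # convert() runs on assignment, then validate()
assert p.x == 3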
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ce791c16.js
DELETED
@@ -1,2 +0,0 @@
import{S as m,e as u,s as r,k as d,o as b,z as c,v as f,x as g,a9 as v,ab as k,ac as B,ad as h}from"./index-3370be2a.js";import{B as p}from"./Button-89624748.js";function C(a){let t;const l=a[3].default,e=v(l,a,a[4],null);return{c(){e&&e.c()},m(s,n){e&&e.m(s,n),t=!0},p(s,n){e&&e.p&&(!t||n&16)&&k(e,l,s,s[4],t?h(l,s[4],n,null):B(s[4]),null)},i(s){t||(c(e,s),t=!0)},o(s){f(e,s),t=!1},d(s){e&&e.d(s)}}}function S(a){let t,l;return t=new p({props:{elem_id:a[0],elem_classes:a[1],visible:a[2],explicit_call:!0,$$slots:{default:[C]},$$scope:{ctx:a}}}),{c(){d(t.$$.fragment)},m(e,s){b(t,e,s),l=!0},p(e,[s]){const n={};s&1&&(n.elem_id=e[0]),s&2&&(n.elem_classes=e[1]),s&4&&(n.visible=e[2]),s&16&&(n.$$scope={dirty:s,ctx:e}),t.$set(n)},i(e){l||(c(t.$$.fragment,e),l=!0)},o(e){f(t.$$.fragment,e),l=!1},d(e){g(t,e)}}}function q(a,t,l){let{$$slots:e={},$$scope:s}=t,{elem_id:n}=t,{elem_classes:i}=t,{visible:_=!0}=t;return a.$$set=o=>{"elem_id"in o&&l(0,n=o.elem_id),"elem_classes"in o&&l(1,i=o.elem_classes),"visible"in o&&l(2,_=o.visible),"$$scope"in o&&l(4,s=o.$$scope)},[n,i,_,e,s]}class w extends m{constructor(t){super(),u(this,t,q,S,r,{elem_id:0,elem_classes:1,visible:2})}}const A=w,D=["static"];export{A as Component,D as modes};
//# sourceMappingURL=index-ce791c16.js.map
spaces/Dagfinn1962/stablediffusion-members/main.css
DELETED
@@ -1,61 +0,0 @@
body {
    background-color: #080a36;
    width: 100%;
    color: #FFFFFF;
}
gr.blocks {
    background-color: #080a36;
    width: 100%;
    color: #FFFFFF;
}
h3 {
    color: #FFFFF;
    text-align: center;
    font-family: verdana;
    font-size: 24px;
    border: 1px solid #FFFFFF;
    border-radius: 10px;
}

p {
    font-family: verdana;
    font-size: 14px;
}

label {
    font-family: verdana;
    color: #000000;
    font-weight: 700;
    font-size: 14px;
    border: 1px solid #000000;
}

gr.Textbox {
    font-family: verdana;
    background-color: #23298f;
    color: #000000;
    font-weight: 700;
    font-size: 14px;
    border: 1px solid #FFFFFF;
    border-radius: 6px;
}

gr.Botton {
    font-family: verdana;
    background-color: #23298f;
    color: #FFFFFF;
    font-weight: 700;
    font-size: 14px;
    border: 1px solid #000000;
    border-radius: 6px;
}

a a:active a.hover
{
    font-family: verdana;
    color: #572430;
    text-decoration: none;
    font-weight: 700;
    font-size: 14px;

}
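
Note: a stylesheet like this only takes effect if it is handed to Gradio when the app is built; also, gr.Textbox and gr.Botton (a typo for Button) are not selectors that match any element, since Gradio components are targeted through elem_id/elem_classes in the rendered DOM. A minimal sketch, assuming the gradio package and the main.css file above:

# Sketch only: attaching custom CSS to a Gradio Blocks app.
import gradio as gr

with open("main.css") as f:
    css = f.read()

with gr.Blocks(css=css) as demo:
    # Target these with "#prompt" / "#run" in the CSS instead of "gr.Textbox".
    prompt = gr.Textbox(label="Prompt", elem_id="prompt")
    run = gr.Button("Generate", elem_id="run")

demo.launch()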