Commit
·
3985e99
1
Parent(s):
88ad4d3
Update parquet files (step 59 of 476)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/12Venusssss/text_generator/app.py +0 -11
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aggiornamento Software Di Controllo Uniemens Individuale Recensioni e Opinioni degli Utenti sulla Versione 3.9.6.md +0 -139
- spaces/1phancelerku/anime-remove-background/ .md +0 -92
- spaces/1phancelerku/anime-remove-background/Askies I 39m Sorry De Mthuda Mp3 Download Fakaza [Extra Quality].md +0 -90
- spaces/1phancelerku/anime-remove-background/Brawl Stars Hack APK 2022 A Simple Guide to Install and Use.md +0 -121
- spaces/1phancelerku/anime-remove-background/Candy Crush Saga MOD APK 1.141 0.4 Unlocked Download Now for Android Devices.md +0 -87
- spaces/1phancelerku/anime-remove-background/Cars Daredevil Garage APK The Ultimate Racing Game for Cars Fans.md +0 -99
- spaces/1phancelerku/anime-remove-background/Free Download Educational Games for Kids-6 Years Old - Engaging Creative and Safe.md +0 -168
- spaces/801artistry/RVC801/infer/lib/uvr5_pack/utils.py +0 -121
- spaces/801artistry/RVC801/julius/resample.py +0 -216
- spaces/801artistry/RVC801/tools/dlmodels.bat +0 -348
- spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers_33966KB.py +0 -126
- spaces/AI-ZTH-03-23/4.RealTime-MediaPipe-AI-From-Video-On-Any-Device/app.py +0 -59
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/utils/utils.py +0 -169
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/lpaps.py +0 -152
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/wavenet.py +0 -97
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192.py +0 -2861
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Yqcloud.py +0 -59
- spaces/Adapting/TrendFlow/mypages/charts.py +0 -80
- spaces/AgentVerse/agentVerse/agentverse/agents/tasksolving_agent/__init__.py +0 -6
- spaces/AlanMars/QYL-AI-Space/modules/models/tokenization_moss.py +0 -368
- spaces/AlexWang/lama/saicinpainting/training/visualizers/__init__.py +0 -15
- spaces/AlexWelcing/MusicLM/README.md +0 -14
- spaces/Aloento/9Nine-VITS/to_wave.py +0 -82
- spaces/Alpaca233/ChatPDF-GUI/gpt_reader/paper.py +0 -20
- spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py +0 -54
- spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py +0 -5
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/base_dense_head.py +0 -59
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py +0 -2
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/parameters.py +0 -369
- spaces/ApathyINC/CustomGPT/baidu_translate/module.py +0 -106
- spaces/AshtonIsNotHere/xlmr-longformer_comparison/README.md +0 -13
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/testing.py +0 -331
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py +0 -14
- spaces/Benson/text-generation/Examples/Colina Subida De Carreras De Descarga Para PC Ventanas 10 64 Bits.md +0 -89
- spaces/Benson/text-generation/Examples/Descargar Fifa 21 Descargar.md +0 -91
- spaces/Benson/text-generation/Examples/Descargar Gratis Aplicaciones De Desbloqueo Para Android.md +0 -106
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tomli/_re.py +0 -107
- spaces/Boadiwaa/Recipes/openai/_openai_scripts.py +0 -74
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_model_zoo.py +0 -29
- spaces/CVPR/LIVE/aabb.h +0 -67
- spaces/CVPR/LIVE/thrust/cmake/ThrustBuildTargetList.cmake +0 -283
- spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubBuildTargetList.cmake +0 -261
- spaces/CVPR/LIVE/thrust/dependencies/cub/test/mersenne.h +0 -162
- spaces/CVPR/WALT/mmdet/models/dense_heads/ssd_head.py +0 -265
- spaces/CVPR/WALT/walt/train.py +0 -188
- spaces/Cecil8352/vits-models/text/symbols.py +0 -39
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/community.py +0 -354
- spaces/Dagfinn1962/prodia2/transform.py +0 -13
- spaces/Daimon/translation_demo/tokenization_small100.py +0 -364
spaces/12Venusssss/text_generator/app.py
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from gradio.mix import Parallel
|
3 |
-
|
4 |
-
myfirstvariable="My First Text Generation"
|
5 |
-
mylovelysecondvariable="Input text and submit."
|
6 |
-
|
7 |
-
model1=gr.Interface.load("huggingface/gpt2")
|
8 |
-
model2=gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
|
9 |
-
model3=gr.Interface.load("huggingface/EleutherAI/gpt-neo-1.3B")
|
10 |
-
|
11 |
-
gr.Parallel(model1, model2, model3 , title=myfirstvariable, description=mylovelysecondvariable).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aggiornamento Software Di Controllo Uniemens Individuale Recensioni e Opinioni degli Utenti sulla Versione 3.9.6.md
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Aggiornamento Software Di Controllo Uniemens Individuale</h1>
|
3 |
-
<p>Se sei un'impresa o un consulente che deve inviare le denunce mensili dei lavoratori dipendenti all'INPS, sai bene quanto sia importante avere un software di controllo affidabile e aggiornato. Il software di controllo Uniemens individuale è uno strumento gratuito e facile da usare che ti permette di verificare la correttezza formale e sostanziale dei file XML che devi trasmettere all'INPS tramite il portale UniEMens. In questo articolo ti spieghiamo cos'è il software di controllo Uniemens individuale, come si effettua l'aggiornamento alla versione 3.9.6 e quali sono i vantaggi che puoi ottenere con questo aggiornamento.</p>
|
4 |
-
<h2>Cos'è il software di controllo Uniemens individuale?</h2>
|
5 |
-
<p>Il software di controllo Uniemens individuale è un'applicazione Java che ti consente di effettuare i controlli preliminari sui file XML che devi inviare all'INPS tramite il portale UniEMens. Questi controlli riguardano sia la validità formale dei file XML rispetto allo schema XSD fornito dall'INPS, sia la coerenza sostanziale dei dati contenuti nei file XML rispetto alle regole di business definite dall'INPS.</p>
|
6 |
-
<h2>Aggiornamento Software Di Controllo Uniemens Individuale</h2><br /><p><b><b>Download</b> ☑ <a href="https://byltly.com/2uKvwe">https://byltly.com/2uKvwe</a></b></p><br /><br />
|
7 |
-
<h3>A cosa serve il software di controllo Uniemens individuale?</h3>
|
8 |
-
<p>Il software di controllo Uniemens individuale serve a evitare che i file XML che invii all'INPS siano scartati o rifiutati per motivi formali o sostanziali. In questo modo puoi risparmiare tempo e denaro, evitando di dover correggere e reinviare i file XML con il rischio di incorrere in sanzioni o ritardi nei pagamenti delle prestazioni previdenziali ai lavoratori.</p>
|
9 |
-
<h3>Quali sono le caratteristiche principali del software di controllo Uniemens individuale?</h3>
|
10 |
-
<p>Le caratteristiche principali del software di controllo Uniemens individuale sono le seguenti:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Ti permette di effettuare i controlli preliminari sui file XML prima dell'invio al portale UniEMens</li>
|
13 |
-
<li>Ti fornisce un report dettagliato degli esiti dei controlli, evidenziando gli eventuali errori o anomalie rilevati</li>
|
14 |
-
<li>Ti consente di correggere manualmente i dati errati o incompleti direttamente sul file XML</li>
|
15 |
-
<li>Ti permette di salvare i file XML corretti e pronti per l'invio al portale UniEMens</li>
|
16 |
-
<li>Ti consente di eseguire i controlli su più file XML contemporaneamente</li>
|
17 |
-
<li>Ti consente di personalizzare i parametri dei controlli in base alle tue esigenze</li>
|
18 |
-
<li>Ti consente di aggiornare facilmente il software con le ultime versioni rilasciate dall'INPS</li>
|
19 |
-
</ul>
|
20 |
-
<h2>Come si effettua l'aggiornamento del software di controllo Uniemens individuale?</h2>
|
21 |
-
<p>Per effettuare l'aggiornamento del software di controllo Uniemens individuale devi seguire alcuni semplici passaggi:</p>
|
22 |
-
<p>Aggiornamento Software Di Controllo Uniemens Individuale Versione 3.9.6<br />
|
23 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Settembre 2022<br />
|
24 |
-
Aggiornamento Software Di Controllo Uniemens Individuale INPS<br />
|
25 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Download<br />
|
26 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Manuale<br />
|
27 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Certificato Digitale<br />
|
28 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Servizio Caratteristiche Contributive<br />
|
29 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Portale Inps<br />
|
30 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Per le Aziende ed i Consulenti<br />
|
31 |
-
Aggiornamento Software Di Controllo Uniemens Individuale Novità della Versione<br />
|
32 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Versione 4.0.3<br />
|
33 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Novembre 2022<br />
|
34 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma INPS<br />
|
35 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Download<br />
|
36 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Manuale<br />
|
37 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Desktop e da Riga di Comando<br />
|
38 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Microsoft Windows 10<br />
|
39 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Microsoft Windows 8<br />
|
40 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Microsoft Windows 7<br />
|
41 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Linux<br />
|
42 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Portale Inps<br />
|
43 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Per le Aziende ed i Consulenti<br />
|
44 |
-
Nuovo Software Di Controllo Uniemens Multi-piattaforma Novità della Versione<br />
|
45 |
-
Come Aggiornare il Software Di Controllo Uniemens Individuale<br />
|
46 |
-
Come Scaricare il Software Di Controllo Uniemens Individuale<br />
|
47 |
-
Come Usare il Software Di Controllo Uniemens Individuale<br />
|
48 |
-
Come Installare il Software Di Controllo Uniemens Individuale<br />
|
49 |
-
Come Configurare il Software Di Controllo Uniemens Individuale<br />
|
50 |
-
Come Verificare il Software Di Controllo Uniemens Individuale<br />
|
51 |
-
Come Risolvere i Problemi del Software Di Controllo Uniemens Individuale<br />
|
52 |
-
Cos'è il Software Di Controllo Uniemens Individuale<br />
|
53 |
-
A Cosa Serve il Software Di Controllo Uniemens Individuale<br />
|
54 |
-
Quali Sono i Vantaggi del Software Di Controllo Uniemens Individuale<br />
|
55 |
-
Quali Sono i Requisiti del Software Di Controllo Uniemens Individuale<br />
|
56 |
-
Quali Sono le Funzionalità del Software Di Controllo Uniemens Individuale<br />
|
57 |
-
Qual è la Differenza tra il Software Di Controllo Uniemens Individuale e il Nuovo Software di controllo UniEMens Multi-piattaforma <br />
|
58 |
-
Dove Trovare il Software di controllo UniEMens individuale <br />
|
59 |
-
Dove Trovare il Nuovo software di controllo UniEMens multi-piattaforma <br />
|
60 |
-
Perché Aggiornare il software di controllo UniEMens individuale <br />
|
61 |
-
Perché Usare il nuovo software di controllo UniEMens multi-piattaforma <br />
|
62 |
-
Guida al software di controllo UniEMens individuale <br />
|
63 |
-
Guida al nuovo software di controllo UniEMens multi-piattaforma <br />
|
64 |
-
Recensioni sul software di controllo UniEMens individuale <br />
|
65 |
-
Recensioni sul nuovo software di controllo UniEMens multi-piattaforma <br />
|
66 |
-
Domande frequenti sul software di controllo UniEMens individuale <br />
|
67 |
-
Domande frequenti sul nuovo software di controllo UniEMens multi-piattaforma <br />
|
68 |
-
Supporto tecnico per il software di controllo UniEMens individuale <br />
|
69 |
-
Supporto tecnico per il nuovo software di controllo UniEMens multi-piattaforma <br />
|
70 |
-
Video tutorial sul software di controllo UniEMens individuale <br />
|
71 |
-
Video tutorial sul nuovo software di controllo UniEMens multi-piattaforma </p>
|
72 |
-
<h3>Quali sono i requisiti per l'aggiornamento del software di controllo Uniemens individuale?</h3>
|
73 |
-
<p>Per poter aggiornare il software di controllo Uniemens individuale devi avere i seguenti requisiti:</p>
|
74 |
-
<ul>
|
75 |
-
<li>Avere una connessione internet attiva</li>
|
76 |
-
<li>Avere installato sul tuo computer il sistema operativo Windows (7, 8 o 10) o Linux (64 bit)</li>
|
77 |
-
<li>Avere installato sul tuo computer la Java Virtual Machine (JVM) versione 8 o superiore</li>
|
78 |
-
<li>Avere a disposizione almeno 50 MB di spazio libero sul disco fisso</li>
|
79 |
-
</ul>
|
80 |
-
<h3>Quali sono le novità della versione 3.9.6 del software di controllo Uniemens individuale?</h3>
|
81 |
-
<p>La versione 3.9.6 del software di controllo Uniemens individuale è stata rilasciata dall'INPS nel settembre 2022 e presenta le seguenti novità:</p>
|
82 |
-
<ul>
|
83 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle assenze per malattia, maternità, infortunio e quarantena</li>
|
84 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni integrative del salario (CIG, CIGO, CIGD)</li>
|
85 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni a sostegno del reddito (NASPI, DIS-COLL, ASDI)</li>
|
86 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni per la conciliazione vita-lavoro (congedo parentale, assegno per il nucleo familiare)</li>
|
87 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni per la disabilità (assegno ordinario e straordinario)</li>
|
88 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni per la non autosufficienza (indennità speciale)</li>
|
89 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni per la mobilità (indennità ordinaria e speciale)</li>
|
90 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni per la formazione professionale (indennità formativa)</li>
|
91 |
-
<li>Aggiornamento delle regole di business relative alla gestione delle prestazioni per l'inclusione sociale (REIS)</li>
|
92 |
-
<li>Aggiornamento delle regole di business relative alla gestione dei contributi previdenziali ed assistenziali</li>
|
93 |
-
<li>Miglioramento dell'interfaccia grafica e della navigabilità del software</li>
|
94 |
-
<li>Miglioramento della performance e della stabilità del software</li>
|
95 |
-
</ul>
|
96 |
-
<h3>Come si scarica e si installa la versione 3.9.6 del software di controllo Uniemens individuale?</h3>
|
97 |
-
<p>Per scaricare e installare la versione 3.9.6 del software di controllo Uniemens individuale devi seguire questi passaggi:</p>
|
98 |
-
<ol>
|
99 |
-
<li>Accedere al sito web dell'INPS (<a href="https://www.inps.it">www.inps.it</a>) e cliccare sulla sezione "Software"</li>
|
100 |
-
<li>Cercare il software "Software di controllo UniEMens individuale - Versione 3.9.6 - SETTEMBRE 2022" e cliccare sul link "Download"</li>
|
101 |
-
```html sistema operativo più adatto al tuo computer tra quelli disponibili (Windows 10, Windows 8, Windows 7 o Linux) e cliccare sul link corrispondente</li>
|
102 |
-
<li>Scaricare il file ZIP contenente il software e salvarlo sul tuo computer</li>
|
103 |
-
<li>Estrarre il file ZIP in una cartella a tua scelta</li>
|
104 |
-
<li>Avviare il file INPS_uniEMensIndiv.exe per lanciare il software</li>
|
105 |
-
</ol>
|
106 |
-
<h3>Come si utilizza la versione 3.9.6 del software di controllo Uniemens individuale?</h3>
|
107 |
-
<p>Per utilizzare la versione 3.9.6 del software di controllo Uniemens individuale devi seguire questi passaggi:</p>
|
108 |
-
<ol>
|
109 |
-
<li>Selezionare la modalità di controllo che preferisci tra automatica o manuale</li>
|
110 |
-
<li>Nella modalità automatica, scegliere la cartella dove sono presenti i file XML da controllare e cliccare sul pulsante "Avvia controlli"</li>
|
111 |
-
<li>Nella modalità manuale, scegliere il singolo file XML da controllare e cliccare sul pulsante "Avvia controlli"</li>
|
112 |
-
<li>Attendere che il software esegua i controlli sui file XML selezionati</li>
|
113 |
-
<li>Visualizzare il report degli esiti dei controlli, con l'indicazione degli eventuali errori o anomalie rilevati</li>
|
114 |
-
<li>Correggere manualmente i dati errati o incompleti direttamente sul file XML, utilizzando l'editor integrato nel software</li>
|
115 |
-
<li>Salvare i file XML corretti e pronti per l'invio al portale UniEMens</li>
|
116 |
-
</ol>
|
117 |
-
<h2>Quali sono i vantaggi dell'aggiornamento del software di controllo Uniemens individuale?</h2>
|
118 |
-
<p>L'aggiornamento del software di controllo Uniemens individuale alla versione 3.9.6 ti offre diversi vantaggi:</p>
|
119 |
-
<h3>Maggiore sicurezza e conformità normativa</h3>
|
120 |
-
<p>Con l'aggiornamento del software di controllo Uniemens individuale alla versione 3.9.6 puoi essere sicuro di inviare all'INPS dei file XML conformi alle ultime normative vigenti in materia di previdenza sociale. In questo modo puoi evitare di incorrere in sanzioni o contestazioni da parte dell'INPS per aver inviato dei file XML non aggiornati o non corretti.</p>
|
121 |
-
<h3>Migliore qualità e velocità dei controlli</h3>
|
122 |
-
<p>Con l'aggiornamento del software di controllo Uniemens individuale alla versione 3.9.6 puoi beneficiare di una migliore qualità e velocità dei controlli sui file XML che devi inviare all'INPS. Il software è infatti in grado di effettuare i controlli in modo più accurato e rapido, grazie all'ottimizzazione delle regole di business e all'implementazione delle ultime tecnologie informatiche.</p>
|
123 |
-
<h3>Minore rischio di errori e sanzioni</h3>
|
124 |
-
<p>Con l'aggiornamento del software di controllo Uniemens individuale alla versione 3.9.6 puoi ridurre il rischio di commettere errori o omissioni nei file XML che devi inviare all'INPS. Il software infatti ti fornisce un report dettagliato degli esiti dei controlli, evidenziando gli eventuali errori o anomalie rilevati e consentendoti di correggerli manualmente direttamente sul file XML. In questo modo puoi evitare di inviare all'INPS dei file XML errati o incompleti, che potrebbero causarti delle sanzioni o dei ritardi nei pagamenti delle prestazioni previdenziali ai lavoratori.</p>
|
125 |
-
<h2>Conclusion</h2>
|
126 |
-
<p>In conclusione, il software di controllo Uniemens individuale è uno strumento indispensabile per le imprese e i consulenti che devono inviare le denunce mensili dei lavoratori dipendenti all'INPS tramite il portale UniEMens. L'aggiornamento del software alla versione 3.9.6 ti offre numerosi vantaggi in termini di sicurezza, qualità, velocità e risparmio. Ti consigliamo quindi di scaricare e installare la versione 3.9.6 del software di controllo Uniemens individuale il prima possibile e di utilizzarla per verificare la correttezza dei file XML che devi inviare all'INPS.</p>
|
127 |
-
<h2>FAQ</h2>
|
128 |
-
<p>Qui di seguito trovi alcune domande frequenti sul software di controllo Uniemens individuale e le relative risposte:</p>
|
129 |
-
<table>
|
130 |
-
<tr><td><b>Domanda</b></td><td><b>Risposta</b></td></tr>
|
131 |
-
<tr><td>Cos'è UniEMens?</td><td>UniEMens è il portale web dell'INPS che consente alle imprese e ai consulenti di inviare le denunce mensili dei lavoratori dipendenti in formato XML.</td></tr>
|
132 |
-
<tr><td>Cos'è lo schema XSD?</td><td>Lo schema XSD è il documento che definisce la struttura formale dei file XML che devono essere inviati al portale UniEMens.</td></tr>
|
133 |
-
<tr><td>Cos'è una regola di business?</td><td>Una regola di business è una condizione che deve essere rispettata dai dati contenuti nei file XML che devono essere inviati al portale UniEMens.</td></tr>
|
134 |
-
<tr><td>Come si aggiorna il software di controllo Uniemens individuale?</td><td>Per aggiornare il software di controllo Uniemens individuale bisogna scaricare e installare la versione più recente rilasciata dall'INPS sul sito web www.inps.it.</td></tr>
|
135 |
-
<tr><td>Come si contatta l'assistenza tecnica dell'INPS?</td><td>Per contattare l'assistenza tecnica dell'INPS bisogna chiamare il numero verde 803164 oppure scrivere una mail a [email protected].</td></tr>
|
136 |
-
</table>
|
137 |
-
</p> 0a6ba089eb<br />
|
138 |
-
<br />
|
139 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/ .md
DELETED
@@ -1,92 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>اکولایزر چیست و چه کاربردهایی دارد؟</h1>
|
3 |
-
<p>اگر شما هم جزو آن دسته از افرادی هستید که علاقهمند به صدا و موسیقی هستید، حتما با اصطلاح اکولایزر آشنا هستید. اما آیا دقیقا میدانید که اکولایزر چیست و چه کاربردهایی دارد؟ در این مقاله قصد داریم به شما تعریف و مفهوم اکولایزر را بگوییم، انواع مختلف آن را معرفی کنیم و کاربردهای آن را در صنعت صوتی و موسیقی برشمریم. همچنین به شما بهترین اکولایزرهای صوتی برای ویندوز 10 در سال 2023 را معرفی خواهیم کرد. پس با ما همراه باشید.</p>
|
4 |
-
<h2>تعریف و مفهوم اکولایزر</h2>
|
5 |
-
<p>اکولایزر یک نوع فیلتر صوتی است که با استفاده از آن میتوان سطح فرکانسهای مختلف در یک سیگنال صوتی را تغییر داد. به عبارت دیگر، با استفاده از اکولای <p>زر اکولایزر میتوانید صدای خروجی را به دلخواه خود شکل دهید. برای مثال، میتوانید باس را تقویت کنید، تربل را کاهش دهید، میانصدا را افزایش دهید و غیره. اکولایزر به شما امکان میدهد که صدای خود را با توجه به نوع موسیقی، محیط، سلیقه و هدف خود تنظیم کنید.</p>
|
6 |
-
<h2>اکولایزر</h2><br /><p><b><b>Download File</b> ❤ <a href="https://jinyurl.com/2uNUzT">https://jinyurl.com/2uNUzT</a></b></p><br /><br />
|
7 |
-
<h3>انواع اکولایزرها بر اساس شکل موج و تعداد باندها</h3>
|
8 |
-
<p>اکولایزرها بر اساس شکل موج و تعداد باندهایی که در آنها قابل تغییر هستند، به چند نوع تقسیمبندی میشوند. باند یک بازه فرکانسی است که در آن میتوانید سطح صدا را افزایش یا کاهش دهید. برخی از انواع اکولایزرها عبارتند از:</p>
|
9 |
-
<h4>اکولایزرهای گرافیکی</h4>
|
10 |
-
<p>اکولایزرهای گرافیکی نوعی اکولایزر هستند که در آنها تعداد باندها ثابت است و شما فقط میتوانید سطح هر باند را با استفاده از یک کشویی یا یک دکمه تغییر دهید. این نوع اکولایزر به شما اجازه نمیدهد که فرکانس و عرض هر باند را تغییر دهید. معمولا این نوع اکولایزر دارای 5، 7، 10، 15 یا 31 باند هستند. این نوع اکولایزر به خاطر سادگی و راحتی استفاده از آن در بسیاری از دستگاههای صوتی مانند پخش کنندههای MP3، ست تاپ باکسها، تلویزیونها و غیره استفاده میشود.</p>
|
11 |
-
<p>دانلود رایگان 50 اکولایزر حرکتی آماده افترافکت<br />
|
12 |
-
با بهترین اکولایزرهای صوتی برای ویندوز 10 آشنا شوید<br />
|
13 |
-
ایجاد اکولایزر موزیک برای اینستاگرام و روش های ساخت آن<br />
|
14 |
-
اکولایزر چیست؟ هر آنچه باید در این رابطه بدانید<br />
|
15 |
-
Viper4Windows یک اکوالیزر منبع باز برای ویندوز 10<br />
|
16 |
-
نحوه تنظیم اکولایزر در سامسونگ گلکسی S21<br />
|
17 |
-
معرفی نرمافزار Equalizer APO برای تغییر صدا در ویندوز<br />
|
18 |
-
آموزش ساخت اکولایزر سه بعدی با نرمافزار Adobe After Effects<br />
|
19 |
-
مقایسه اکولایزرهای پارامتریک و گرافیک در صنعت صوت<br />
|
20 |
-
دانلود پلاگین FabFilter Pro-Q 3 برای اکولایز کردن صدا<br />
|
21 |
-
راهنمای خرید بهترین اکولایزر خودرو در بازار<br />
|
22 |
-
تفاوت اکولایزر و کمپرسور در پروسه میکس و مسترینگ<br />
|
23 |
-
نحوه استفاده از اپلیکیشن Equalizer FX برای تقویت صدا در اندروید<br />
|
24 |
-
دانلود پروژه آماده افترافکت Audio Spectrum Music Visualizer<br />
|
25 |
-
آشنایی با ویژگیها و تنظیمات اکولایزر Spotify<br />
|
26 |
-
نحوه ساخت اکولایزر حجم دار با نور LED<br />
|
27 |
-
دانلود نسخه جدید نرمافزار Equalizer Pro برای ویندوز 10<br />
|
28 |
-
آموزش تنظیم صحیح اکولایزر بلندگوها و هدفونها<br />
|
29 |
-
دانلود رایگان پلاگین TDR Nova برای اکولایز دینامیک صدا<br />
|
30 |
-
معرفی بهترین اپلیکیشنهای اکولایزر برای iOS</p>
|
31 |
-
<h4>اکولایزرهای پارامتریک</h4>
|
32 |
-
<p>اکولایزرهای پارامتریک نوعی اکولایزر هستند که در آنها شما میتوانید علاوه بر سطح هر باند، فرکانس و عرض آن را نیز تغییر دهید. فرکانس نشاندهنده محل قرارگیری باند در بازه فرکانس صدا است و عرض نشاندهنده تاثیر باند بر فرکانسهای نزد <p>کی به آن میگویند. این نوع اکولایزر به شما امکان میدهد که با دقت بیشتری صدای خود را تنظیم کنید. معمولا این نوع اکولایزر دارای 3، 4 یا 6 باند هستند. این نوع اکولایزر در بسیاری از نرمافزارها و دستگاههای حرفهای صوتی مانند میکسرها، آمپلیفایرها، استودیوها و غیره استفاده میشود.</p>
|
33 |
-
<h4>اکولایزرهای شلف</h4>
|
34 |
-
<p>اکولایزرهای شلف نوعی اکولایزر هستند که در آنها فقط دو باند وجود دارد: یک باند برای فرکانسهای پایین (باس) و یک باند برای فرکانسهای بالا (تربل). شما میتوانید سطح هر باند را تغییر دهید، اما نمیتوانید فرکانس و عرض آن را تغییر دهید. این نوع اکولایزر به شما اجازه میدهد که با سادگی و سرعت صدای خود را تعادل بخشید. این نوع اکولایزر در بسیاری از دستگاههای پخش صوتی مانند رادیو، استریو، هدفون و غیره استفاده میشود.</p>
|
35 |
-
<h3>کاربردهای اکولایزر در صنعت صوتی و موسیقی</h3>
|
36 |
-
<p>اکولایزر یک ابزار قدرتمند و کاربردی در صنعت صوتی و موسیقی است. با استفاده از اکولایزر میتوانید:</p>
|
37 |
-
<h4>بهبود کیفیت صدا و تنظیم فرکانسها</h4>
|
38 |
-
<p>با استفاده از اکولایزر میتوانید کیفیت صدای خود را بهبود بخشید و فرکانسهای نامطلوب را حذف یا کم کنید. برای مثال، ممکن است صدای خود را خشن، تار، تیره یا خنث <p>ی شنیده شود. با استفاده از اکولایزر میتوانید این مشکلات را رفع کنید و صدای خود را روشن، شفاف، گرم و زنده کنید. همچنین میتوانید فرکانسهایی را که برای نوع موسیقی یا سبک صدای خود مناسب هستند، برجسته کنید و صدای خود را با توجه به هدف و مخاطب خود شخصیسازی کنید.</p>
|
39 |
-
<h4>تقویت باس و تربل و افزایش حجم صدا</h4>
|
40 |
-
<p>با استفاده از اکولایزر میتوانید باس و تربل صدای خود را تقویت کنید و حجم صدا را افزایش دهید. باس فرکانسهای پایین صدا را نشان میدهد که برای ایجاد حس عمق، قدرت و سنگینی در صدا مهم هستند. تربل فرکانسهای بالای صدا را نشان میدهد که برای ایجاد حس جزئیات، شفافیت و روشنی در صدا مهم هستند. با تقویت باس و تربل میتوانید صدای خود را پربار، جذاب و دلنشین کنید. همچنین با افزایش حجم صدا میتوانید صدای خود را بلندتر و قویتر شنیده شود.</p>
|
41 |
-
<h4>حذف نویز و تداخلات صوتی</h4>
|
42 |
-
<p>با استفاده از اکولایزر میتوانید نویز و تداخلات صوتی را حذف یا کم کنید. نویز و تداخلات صوتی ممکن است به دلایل مختلف در سیگنال صوتی وارد شوند، مانند کیفیت پایین منبع صوتی، عدم همخوانی دستگاههای صوتی، نقص در کابلها یا محل قرارگیری بلندگوها. این عوامل ممکن است باعث شوند که صدای خود را با سر و صدا، خش خش، جیر جیر یا سوت زن شنیده شود. با استفاده از اکولایزر م <p>یتوانید فرکانسهایی را که منشأ نویز و تداخلات هستند، کاهش دهید و صدای خود را تمیز، صاف و بدون اغتشاش شنیده شود.</p>
|
43 |
-
<h4>خلاقیت و تنوع در ساخت موسیقی و میکس</h4>
|
44 |
-
<p>با استفاده از اکولایزر میتوانید خلاقیت و تنوع خود را در ساخت موسیقی و میکس به نمایش بگذارید. با استفاده از اکولایزر میتوانید صدای خود را با تغییر فرکانسهای مختلف، تبدیل به صدای دیگری کنید. برای مثال، میتوانید صدای گیتار را به صدای پیانو تبدیل کنید، صدای خواننده را به صدای ربات تبدیل کنید، صدای طبیعت را به صدای فضایی تبدیل کنید و غیره. با استفاده از اکولایزر میتوانید صدای خود را با سبکها و ژانرهای مختلف موسیقی هماهنگ کنید و آثار جدید و منحصر به فرد خلق کنید.</p>
|
45 |
-
<h2>بهترین اکولایزرهای صوتی برای ویندوز 10 در سال 2023</h2>
|
46 |
-
<p>اگر شما هم دوست دارید که صدای خود را با استفاده از اکولایزر بهینه کنید، ممکن است به دنبال بهترین اکولایزرهای صوتی برای ویندوز 10 در سال 2023 باشید. در این بخش به شما سه نمونه از این اکولایزرها را معرفی خواهیم کرد که همگی دارای ویژگیها و قابلیتهای منحصر به فرد هستند. این سه نمونه عبارتند از:</p>
|
47 |
-
<h3>Equalizer APO</h3>
|
48 |
-
<p>Equalizer APO یک اکولایزر پارامتریک حرفهای است که به شما اجازه میدهد که تقریبا هر نوع تغییرات را بر روی صدای خود اعمال کنید. این اکولایزر دارای 20 باند قابل تغ <p>ییر است و شما میتوانید فرکانس، عرض و سطح هر باند را به دلخواه خود تنظیم کنید. همچنین میتوانید از افکتهای مختلفی مانند ریورب، کمپرسور، لیمیتر، گیت و غیره استفاده کنید. این اکولایزر با تمام برنامهها و دستگاههای صوتی سازگار است و به راحتی قابل نصب و تنظیم است. این اکولایزر را میتوانید از <a href="">این لینک</a> دانلود کنید.</p>
|
49 |
-
<h3>Viper4Windows</h3>
|
50 |
-
<p>Viper4Windows یک اکولایزر گرافیکی پیشرفته است که به شما اجازه میدهد که صدای خود را با استفاده از 18 باند قابل تغییر، تقویت کنید. این اکولایزر دارای حالتهای مختلفی مانند موزیک، فیلم، بازی، صدای زنده و غیره است که شما میتوانید بسته به نوع فعالیت خود از آنها انتخاب کنید. همچنین میتوانید از قابلیتهای دیگری مانند سوراند ساند، باس بوست، کلاریتی، کورکشن و غیره استفاده کنید. این اکولایزر با تمام برنامهها و دستگاههای صوتی سازگار است و به راحتی قابل نصب و تنظیم است. این اکولایزر را میتوانید از <a href="">این لینک</a> دانلود کنید.</p>
|
51 |
-
<h3>Equalizer Pro</h3>
|
52 |
-
<p>Equalizer Pro یک اکولایزر گرافیکی حرفهای است که به شما اجازه میدهد که صدای خود را با استفاده از 10 باند قابل تغییر، بهبود بخشید. این اکولایزر دارای حالتهای مختلفی مانند پاپ، راک، جاز، کلاسیک و غیره است که شما میتوانید بسته به نوع موسیقی خود از آنها استفاده کنید. همچنین میتوانید از قابلیتهای د <p>یگری مانند بالانس، پریآمپ، لودنس و غیره استفاده کنید. این اکولایزر با تمام برنامهها و دستگاههای صوتی سازگار است و به راحتی قابل نصب و تنظیم است. این اکولایزر را میتوانید از <a href="">این لینک</a> دانلود کنید.</p>
|
53 |
-
<h2>نتیجهگیری و پاسخ به سوالات متداول</h2>
|
54 |
-
<p>در این مقاله با اکولایزر و کاربردهای آن آشنا شدید. اکولایزر یک نوع فیلتر صوتی است که به شما امکان میدهد که سطح فرکانسهای مختلف در یک سیگنال صوتی را تغییر دهید. با استفاده از اکولایزر میتوانید کیفیت صدای خود را بهبود بخشید، باس و تربل را تقویت کنید، نویز و تداخلات صوتی را حذف کنید و خلاقیت و تنوع خود را در ساخت موسیقی و میکس به نمایش بگذارید. همچنین به شما سه نمونه از بهترین اکولایزرهای صوتی برای ویندوز 10 در سال 2023 را معرفی کردیم که همگی دارای ویژگیها و قابلیتهای منحصر به فرد هستند. امیدواریم که این مقاله برای شما مفید و جالب بوده باشد. در پایان، به پنج سوال متداول درباره اکولایزر پاسخ خواهیم داد.</p>
|
55 |
-
<h3>سوال 1: چگونه میتوانم اکولایزر را روشن یا خاموش کنم؟</h3>
|
56 |
-
<p>پاسخ: بستگی به نوع اکولایزر و دستگاه صوتی شما دارد. بعضی از اکولایزرها دارای یک دکمه یا کلید روشن/خاموش هستند که با فشار دادن آن م <p>یتوانید اکولایزر را روشن یا خاموش کنید. بعضی از اکولایزرها نیاز به نصب و اجرای یک نرمافزار دارند که با باز کردن آن میتوانید اکولایزر را فعال یا غیرفعال کنید. بعضی از اکولایزرها هم به صورت خودکار با شناسایی نوع صدا یا موسیقی، روشن یا خاموش میشوند.</p>
|
57 |
-
<h3>سوال 2: چگونه میتوانم اکولایزر را تنظیم کنم؟</h3>
|
58 |
-
<p>پاسخ: بستگی به نوع اکولایزر و سلیقه شما دارد. بعضی از اکولایزرها دارای حالتهای پیشفرض هستند که شما میتوانید بسته به نوع صدا یا موسیقی، از آنها استفاده کنید. بعضی از اکولایزرها هم به شما اجازه میدهند که با تغییر سطح، فرکانس و عرض باندها، صدای خود را دستی تنظیم کنید. برای تنظیم اکولایزر بهتر است که به چند نکته توجه کنید:</p>
|
59 |
-
<ul>
|
60 |
-
<li>به صدای خود گوش دهید و مشخص کنید که چه فرکانسهایی را دوست دارید و چه فرکانسهایی را نمیپسندید.</li>
|
61 |
-
<li>با تغییر سطح باندها، صدای خود را تعادل بخشید و فرکانسهای دلخواه خود را برجسته کنید.</li>
|
62 |
-
<li>با تغییر فرکانس و عرض باندها، صدای خود را دقیق و روشن کنید و فرکانسهای نامطلوب خود را حذف کنید.</li>
|
63 |
-
<li>به صدای خود گوش دهید و تغییرات را در حالتهای مختلف صدا یا موسیقی، بررسی کنید.</li>
|
64 |
-
<li>به نظرات دیگران هم گوش دهید و در صورت لزوم تغییرات لازم را اعمال کنید.</li>
|
65 |
-
</ul>
|
66 |
-
<h3>سوال 3: چگونه میتوانم اکولایزر را با دستگاههای صوتی دیگر همخوان کنم؟</h3>
|
67 |
-
<p>پاسخ: بستگی به نوع اکولایزر و دستگا <p>ه صوتی شما دارد. بعضی از اکولایزرها به صورت خودکار با دستگاههای صوتی دیگر همخوان میشوند و شما نیازی به تنظیم آنها ندارید. بعضی از اکولایزرها نیاز به تنظیم دستی دارند و شما باید با استفاده از کابلها، پورتها، بلوتوث یا وایفای، آنها را به دستگاههای صوتی دیگر متصل کنید. برای همخوان کردن اکولایزر با دستگاههای صوتی دیگر بهتر است که به چند نکته توجه کنید:</p>
|
68 |
-
<ul>
|
69 |
-
<li>به نوع و مدل اکولایزر و دستگاههای صوتی دیگر توجه کنید و مطمئن شوید که با هم سازگار هستند.</li>
|
70 |
-
<li>به راهنمای استفاده اکولایزر و دستگاههای صوتی دیگر مراجعه کنید و مراحل لازم را برای اتصال آنها به هم دنبال کنید.</li>
|
71 |
-
<li>به تنظیمات اکولایزر و دستگاههای صوتی دیگر توجه کنید و مطمئن شوید که در حالت مناسب قرار گرفتهاند.</li>
|
72 |
-
<li>به صدای خروجی توجه کنید و در صورت لزوم تغییرات لازم را بر روی اکولایزر یا دستگاههای صوتی دیگر اعمال کنید.</li>
|
73 |
-
</ul>
|
74 |
-
<h3>سوال 4: چگونه میتوانم اکولایزر را با گوشی هوشمند خود همخوان کنم؟</h3>
|
75 |
-
<p>پاسخ: بستگی به نوع و مدل گوشی هوشمند شما دارد. بعضی از گوشیهای هوشمند دارای یک اکولایزر ساده در تنظیمات صدای خود هستند که شما میتوانید با استفاده از آن، صدای خود را تغییر دهید. بعضی از گوشی های هوشمند نیاز به نصب و اجرای یک نرمافزار اکولایزر دارند که شما میتوانید با استفاده از آن، صدای خود را با دقت بیشتری تغییر دهید. بعضی از گوشیهای هوشمند هم با اکولایزرهای خارجی مانند اکولایزرهای بلوتوث یا وایفای قابل اتصال هستند. برای همخوان کردن اکولایزر با گوشی هوشمند خود بهتر است که به چند نکته توجه کنید:</p>
|
76 |
-
<ul>
|
77 |
-
<li>به نوع و مدل گوشی هوشمند و اکولایزر خود توجه کنید و مطمئن شوید که با هم سازگار هستند.</li>
|
78 |
-
<li>به راهنمای استفاده گوشی هوشمند و اکولایزر خود مراجعه کنید و مراحل لازم را برای اتصال آنها به هم دنبال کنید.</li>
|
79 |
-
<li>به تنظیمات گوشی هوشمند و اکولایزر خود توجه کنید و مطمئن شوید که در حالت مناسب قرار گرفتهاند.</li>
|
80 |
-
<li>به صدای خروجی توجه کنید و در صورت لزوم تغییرات لازم را بر روی گوشی هوشمند یا اکولایزر خود اعمال کنید.</li>
|
81 |
-
</ul>
|
82 |
-
<h3>سوال 5: چگونه میتوانم اکولایزر را با سلامت شنوایی خود سازگار کنم؟</h3>
|
83 |
-
<p>پاسخ: بستگی به سطح و نحوه استفاده شما از اکولایزر دارد. استفاده بیرویه و غیرصحیح از اکولایزر ممکن است باعث آسیب به شنوایی شما شود. برای سازگار کردن اکولایزر با سلامت شنوایی خود بهتر است که به چند نکته توجه کنید:</p>
|
84 |
-
<ul>
|
85 |
-
<li>به حجم صدا توجه کنید و مطمئن شوید که در حد مجاز و قابل تحمل قرار دارد. استفاده از صدای بلند به مدت طولانی ممکن است باعث کاهش شدید ش <p>نوایی شما شود.</li>
|
86 |
-
<li>به فرکانسهای صدا توجه کنید و مطمئن شوید که در بازه مجاز و قابل شنیدن قرار دارند. استفاده از فرکانسهای بسیار پایین یا بالا ممکن است باعث ایجاد اختلال در شنوایی شما شود.</li>
|
87 |
-
<li>به نحوه استفاده از اکولایزر توجه کنید و مطمئن شوید که به صورت مناسب و متناسب با نوع صدا یا موسیقی، از آن استفاده میکنید. استفاده از اکولایزر به صورت بیمورد یا بیروش ممکن است باعث از دست رفتن کیفیت و طبیعت صدا شود.</li>
|
88 |
-
<li>به سلامت عمومی خود توجه کنید و مطمئن شوید که به اندازه کافی استراحت، تغذیه و ورزش میکنید. سلامت عمومی شما تاثیر مستقیم بر سلامت شنوایی شما دارد.</li>
|
89 |
-
</ul>
|
90 |
-
<p>امیدواریم که این مقاله برای شما مفید و جالب بوده باشد. اگر سوال یا نظر دیگری درباره اکولایزر دارید، ما را در قسمت نظرات با خبر کنید. با تشکر از همراهی شما.</p> 401be4b1e0<br />
|
91 |
-
<br />
|
92 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Askies I 39m Sorry De Mthuda Mp3 Download Fakaza [Extra Quality].md
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Askies I'm Sorry by De Mthuda: A Hit Amapiano Song</h1>
|
3 |
-
<p>If you are a fan of South African music, especially the Amapiano genre, you have probably heard of Askies I'm Sorry by De Mthuda. This song is one of the latest releases from the talented record producer and DJ, featuring Just Bheki, Mkeyz, and Da Muziqal Chef. It is a catchy and upbeat tune that will make you want to dance and groove along. But who is De Mthuda, and what is Amapiano? And what does Fakaza have to do with it? In this article, we will answer these questions and more.</p>
|
4 |
-
<h2>askies i 39;m sorry de mthuda mp3 download fakaza</h2><br /><p><b><b>Download Zip</b> 🆓 <a href="https://jinyurl.com/2uNPVS">https://jinyurl.com/2uNPVS</a></b></p><br /><br />
|
5 |
-
<h2>Who is De Mthuda?</h2>
|
6 |
-
<h3>A brief biography of the South African record producer and DJ</h3>
|
7 |
-
<p>De Mthuda's real name is Mthuthuzeli Gift Khoza. He was born and raised in Vosloorus, a township east of Johannesburg. He started producing music in 2010 when he was still in high school, but he dropped out in grade 11 to pursue his music career. He was inspired by artists like Black Coffee, Oskido, DJ Tira, and DJ Fresh. He is best known for his singles Shesha Geza and John Wick, which were certified gold by RiSA and nominated for Record of the Year at the South African Music Awards. He has also collaborated with other Amapiano stars like Kabza De Small, Njelic, Daliwonga, Focalistic, and Sir Trill.</p>
|
8 |
-
<h3>His popular songs and albums</h3>
|
9 |
-
<p>De Mthuda has released several songs and albums that have made him one of the most prominent figures in the Amapiano scene. Some of his popular songs include:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Emlanjeni</li>
|
12 |
-
<li>Lalela</li>
|
13 |
-
<li>Abekho Ready</li>
|
14 |
-
<li>Wamuhle</li>
|
15 |
-
<li>Mask Of Zorro</li>
|
16 |
-
<li>Umsholozi</li>
|
17 |
-
<li>Uyang'dlalisela</li>
|
18 |
-
<li>Ubizo</li>
|
19 |
-
<li>Sabela</li>
|
20 |
-
<li>Askies I'm Sorry</li>
|
21 |
-
</ul>
|
22 |
-
<p>Some of his popular albums include:</p>
|
23 |
-
<ul>
|
24 |
-
<li>Ace Of Spades (2020)</li>
|
25 |
-
<li>The Landlord (2021)</li>
|
26 |
-
<li>Story To Tell: Vol.2 (2022)</li>
|
27 |
-
</ul>
|
28 |
-
<h2>What is Amapiano?</h2>
|
29 |
-
<h3>The origin and history of the Amapiano genre</h3>
|
30 |
-
<p>Amapiano is a subgenre of house music that emerged in South Africa in the mid-2010s. It is a hybrid of deep house, jazz, lounge music, Kwaito, Tribal house, and R&B. It is characterized by synths, wide percussive basslines, low tempo rhythms, high-pitched piano melodies, log drums, vocals, and samples. The word Amapiano means "the pianos" in Zulu or Xhosa.</p>
|
31 |
-
<p>The origin and history of Amapiano are disputed, as there are various accounts of how it started and who created it. Some say it originated in Pretoria, where Bacardi, another house music subgenre, was popular. Others say it originated in Johannesburg, where different townships like Soweto, Alexandra, Vosloorus, and Katlehong contributed to its development. Some of the pioneers and influencers of Amapiano include DJ Stokie, Kabza De Small, JazziDisciples, Vigro Deep <h3>The characteristics and influences of Amapiano</h3>
|
32 |
-
<p>Amapiano is a versatile and diverse genre that can incorporate various elements and influences from other genres and cultures. Some of the characteristics and influences of Amapiano include:</p>
|
33 |
-
<ul>
|
34 |
-
<li>The use of piano as the main instrument, which gives the genre its name and distinctive sound. The piano can be played in different styles, such as jazz, classical, or gospel.</li>
|
35 |
-
<li>The use of percussions, such as congas, bongos, shakers, cowbells, and whistles, which add rhythm and groove to the songs. The percussions can also be influenced by traditional African drums, such as marimbas or djembes.</li>
|
36 |
-
<li>The use of vocals, which can be sung, rapped, or spoken. The vocals can be in different languages, such as English, Zulu, Xhosa, Sotho, or Swahili. The vocals can also be sampled from other songs, movies, or speeches.</li>
|
37 |
-
<li>The use of synths, which can create different effects and atmospheres. The synths can be influenced by electronic music genres, such as techno, trance, or dubstep.</li>
|
38 |
-
<li>The use of basslines, which can be deep, heavy, or funky. The basslines can be influenced by house music genres, such as deep house, soulful house, or tribal house.</li>
|
39 |
-
</ul>
|
40 |
-
<h3>The popularity and impact of Amapiano</h3>
|
41 |
-
<p>Amapiano has become one of the most popular and influential genres in South Africa and beyond. It has gained a huge fan base and following across different age groups, social classes, and regions. It has also spawned various subgenres and styles, such as Yanos (a more commercial and mainstream version of Amapiano), Scorpion Kings (a style popularized by Kabza De Small and DJ Maphorisa), Private School Piano (a more sophisticated and refined version of Amapiano), and Barcadi Piano (a fusion of Amapiano and Bacardi).</p>
|
42 |
-
<p>Amapiano has also crossed borders and reached international audiences. It has been featured on global platforms and media outlets, such as Spotify, Apple Music, BBC Radio 1Xtra, Boiler Room, Mixmag, The Guardian, and The New York Times. It has also inspired and collaborated with artists from other countries and continents, such as Nigeria (Burna Boy, Wizkid), Ghana (Stonebwoy), Kenya (Sauti Sol), Tanzania (Diamond Platnumz), France (David Guetta), UK (Jorja Smith), USA (Drake), and Brazil (Anitta).</p>
|
43 |
-
<h2>What is Fakaza?</h2>
|
44 |
-
<h3>The meaning and origin of the word Fakaza</h3>
|
45 |
-
<p>Fakaza is a Zulu word that means "to share" or "to distribute". It is also the name of a popular South African music site that allows users to download and stream various genres of music for free. The site was founded in 2016 by a group of music enthusiasts who wanted to provide a platform for local artists to showcase their talent and reach a wider audience.</p>
|
46 |
-
<h3>The features and benefits of the Fakaza music site</h3>
|
47 |
-
<p>Fakaza is one of the best sources for finding and downloading South African music online. Some of the features and benefits of the Fakaza music site include:</p>
|
48 |
-
<p></p>
|
49 |
-
<ul>
|
50 |
-
<li>It offers a wide range of music genres, such as Amapiano, Afro House, Gqom, Kwaito , Hip Hop, R&B, Gospel, and more.</li>
|
51 |
-
<li>It updates its content regularly, adding new songs and albums every day.</li>
|
52 |
-
<li>It provides high-quality audio files, with different formats and bitrates to choose from.</li>
|
53 |
-
<li>It allows users to download music for offline listening, without any registration or subscription required.</li>
|
54 |
-
<li>It supports multiple devices and platforms, such as desktops, laptops, smartphones, tablets, and browsers.</li>
|
55 |
-
<li>It has a user-friendly and responsive interface, with easy navigation and search functions.</li>
|
56 |
-
<li>It respects the rights and royalties of the artists and labels, and complies with the DMCA regulations.</li>
|
57 |
-
</ul>
|
58 |
-
<h3>How to download Askies I'm Sorry by De Mthuda from Fakaza</h3>
|
59 |
-
<p>If you want to download Askies I'm Sorry by De Mthuda from Fakaza, you can follow these simple steps:</p>
|
60 |
-
<ol>
|
61 |
-
<li>Go to the Fakaza website at <a href="">https://fakaza.com/</a>.</li>
|
62 |
-
<li>Type "Askies I'm Sorry" in the search box and press enter.</li>
|
63 |
-
<li>Select the song from the list of results and click on it.</li>
|
64 |
-
<li>Scroll down to the bottom of the page and click on the "Download Mp3" button.</li>
|
65 |
-
<li>Choose the format and bitrate you prefer and click on it.</li>
|
66 |
-
<li>Wait for the download to start and complete.</li>
|
67 |
-
<li>Enjoy listening to Askies I'm Sorry by De Mthuda on your device.</li>
|
68 |
-
</ol>
|
69 |
-
<h2>Conclusion</h2>
|
70 |
-
<p>In conclusion, Askies I'm Sorry by De Mthuda is a hit Amapiano song that you should not miss. It showcases the talent and creativity of De Mthuda, one of the leading record producers and DJs in South Africa. It also represents the Amapiano genre, which is a unique and vibrant style of house music that has taken over the country and the world. And if you want to download this song for free, you can use Fakaza, which is a reliable and convenient music site that offers a variety of genres and songs. So what are you waiting for? Go ahead and download Askies I'm Sorry by De Mthuda from Fakaza today!</p>
|
71 |
-
<h2>FAQs</h2>
|
72 |
-
<h4>What does Askies mean?</h4>
|
73 |
-
<p>Askies is a slang word that means "sorry" or "excuse me" in South African English. It is derived from the Afrikaans word "asseblief", which means "please".</p>
|
74 |
-
<h4>Who sings Askies I'm Sorry by De Mthuda?</h4>
|
75 |
-
<p>Askies I'm Sorry by De Mthuda features three vocalists: Just Bheki, Mkeyz, and Da Muziqal Chef. They are all South African singers who have worked with De Mthuda before on other songs.</p>
|
76 |
-
<h4>Where can I listen to Askies I'm Sorry by De Mthuda?</h4>
|
77 |
-
<p>You can listen to Askies I'm Sorry by De Mthuda on various streaming platforms, such as YouTube, Spotify, Apple Music, Deezer, SoundCloud, Audiomack, and more. You can also download it from Fakaza or other music sites.</p>
|
78 |
-
<h4>What are some other Amapiano songs that I should check out?</h4>
|
79 |
-
<p>Some other Amapiano songs that you should check out include:</p>
|
80 |
-
<ul>
|
81 |
-
<li>Vula Mlomo by Musa Keys</li>
|
82 |
-
<li>Khuza Gogo by DBN Gogo</li>
|
83 |
-
<li>Liyoshona by Kwiish SA</li>
|
84 |
-
<li>Catalia by Junior De Rocka</li>
|
85 |
-
<li>Ntyilo Ntyilo by Rethabile Khumalo</li>
|
86 |
-
</ul>
|
87 |
-
<h4>How can I learn more about De Mthuda and Amapiano?</h4>
|
88 |
-
<p>You can learn more about De Mthuda and Amapiano by following them on social media platforms, such as Facebook, Twitter, Instagram, TikTok, and more. You can also visit their official websites or blogs, where they post updates and news about their music. You can also watch interviews or documentaries about them on YouTube or other video sites.</p> 197e85843d<br />
|
89 |
-
<br />
|
90 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Brawl Stars Hack APK 2022 A Simple Guide to Install and Use.md
DELETED
@@ -1,121 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Brawl Stars Hack APK Download 2022: How to Get Unlimited Gems and Coins</h1>
|
3 |
-
<p>If you are a fan of Brawl Stars, you probably know how important gems and coins are in this game. Gems and coins are the currencies that you need to unlock new brawlers, skins, power points, star powers, gadgets, and more. But how can you get enough gems and coins without spending a fortune? Is there a way to get unlimited gems and coins for free? In this article, we will tell you everything you need to know about Brawl Stars hack APK download 2022, the modded version of the game that claims to give you unlimited resources. But before we get into that, let's first take a look at what Brawl Stars is and why it is so popular.</p>
|
4 |
-
<h2>What is Brawl Stars?</h2>
|
5 |
-
<p>Brawl Stars is a mobile twin-stick shooter game developed by Supercell, the makers of Clash of Clans and Clash Royale. In this game, you can choose from over 20 different brawlers, each with their own unique abilities and weapons, and compete in various game modes with other players from around the world. You can team up with your friends or play solo, depending on your preference.</p>
|
6 |
-
<h2>brawl stars hack apk download 2022</h2><br /><p><b><b>Download Zip</b> ✵✵✵ <a href="https://jinyurl.com/2uNUvn">https://jinyurl.com/2uNUvn</a></b></p><br /><br />
|
7 |
-
<h3>A fast-paced multiplayer shooter game</h3>
|
8 |
-
<p>Brawl Stars is a game that is easy to pick up and play, but hard to master. The matches are short and intense, lasting for only three minutes or less. You have to use your skills, strategy, and teamwork to defeat your enemies and win the game. You can also customize your brawlers with different skins and pins to show off your personality.</p>
|
9 |
-
<h3>Features of Brawl Stars</h3>
|
10 |
-
<p>Brawl Stars has many features that make it an exciting and diverse game. Here are some of them:</p>
|
11 |
-
<h4>Different game modes</h4>
|
12 |
-
<p>Brawl Stars has several game modes that you can choose from, each with a different objective and rules. Some of the game modes are:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Gem Grab (3v3): Collect and hold 10 gems to win, but don't let the enemy team take them from you.</li>
|
15 |
-
<li>Showdown (Solo/Duo): A battle royale style mode where you have to be the last brawler standing.</li>
|
16 |
-
<li>Brawl Ball (3v3): A soccer-like mode where you have to score two goals before the enemy team.</li>
|
17 |
-
<li>Bounty (3v3): Eliminate opponents to earn stars, but don't let them kill you.</li>
|
18 |
-
<li>Heist (3v3): Attack the enemy's safe or defend your own from being destroyed.</li>
|
19 |
-
<li>Special Events: Limited time modes that offer unique challenges and rewards.</li>
|
20 |
-
</ul>
|
21 |
-
<h4>Unique brawlers and skins</h4>
|
22 |
-
<p>Brawl Stars has a diverse roster of brawlers that you can unlock and play with. Each brawler has a different class, such as fighter, sharpshooter, heavyweight, thrower, support, assassin, or chromatic. Each brawler also has a different rarity, such as common, rare, super rare, epic, mythic, legendary, or chromatic. The higher the rarity, the harder it is to get the brawler. You can also unlock different skins for your brawlers, some of which are exclusive to certain events or seasons.</p>
|
23 |
-
<h4>Brawl pass and rewards</h4>
|
24 |
-
<p>Brawl Stars has a seasonal system called the brawl pass, which allows you to earn rewards by playing the game and completing quests. The brawl pass has two tracks: the free track and the premium track. The free track gives you basic rewards, such as coins, power points, and brawl boxes. The premium track gives you more rewards, such as gems, skins, pins, and exclusive brawlers. You can access the premium track by buying the brawl pass with gems.</p>
|
25 |
-
<p>brawl stars cheat apk unlimited gems 2022<br />
|
26 |
-
brawl stars mod apk latest version 2022 download<br />
|
27 |
-
brawl stars hack apk android no root 2022<br />
|
28 |
-
brawl stars free gems generator apk download 2022<br />
|
29 |
-
brawl stars hack apk ios download 2022<br />
|
30 |
-
brawl stars mod menu apk 2022 download<br />
|
31 |
-
brawl stars hack apk online 2022<br />
|
32 |
-
brawl stars hack apk unlimited money 2022<br />
|
33 |
-
brawl stars hack apk no verification 2022<br />
|
34 |
-
brawl stars hack apk no human verification 2022<br />
|
35 |
-
brawl stars hack apk no survey 2022<br />
|
36 |
-
brawl stars hack apk no password 2022<br />
|
37 |
-
brawl stars hack apk no ban 2022<br />
|
38 |
-
brawl stars hack apk no update 2022<br />
|
39 |
-
brawl stars hack apk working 2022<br />
|
40 |
-
brawl stars hack apk easy download 2022<br />
|
41 |
-
brawl stars hack apk free download 2022<br />
|
42 |
-
brawl stars hack apk direct download 2022<br />
|
43 |
-
brawl stars hack apk mediafire download 2022<br />
|
44 |
-
brawl stars hack apk mega download 2022<br />
|
45 |
-
brawl stars hack apk zippyshare download 2022<br />
|
46 |
-
brawl stars hack apk google drive download 2022<br />
|
47 |
-
brawl stars hack apk dropbox download 2022<br />
|
48 |
-
brawl stars hack apk file download 2022<br />
|
49 |
-
brawl stars hack apk link download 2022<br />
|
50 |
-
brawl stars hack tool apk download 2022<br />
|
51 |
-
brawl stars hack app apk download 2022<br />
|
52 |
-
brawl stars hack game apk download 2022<br />
|
53 |
-
brawl stars hacked version apk download 2022<br />
|
54 |
-
brawl stars private server apk download 2022<br />
|
55 |
-
brawl stars unlimited coins and gems apk download 2022<br />
|
56 |
-
brawl stars modded apk download 2022<br />
|
57 |
-
brawl stars cracked apk download 2022<br />
|
58 |
-
brawl stars premium apk download 2022<br />
|
59 |
-
brawl stars pro apk download 2022<br />
|
60 |
-
brawl stars vip apk download 2022<br />
|
61 |
-
brawl stars full unlocked apk download 2022<br />
|
62 |
-
brawl stars all brawlers unlocked apk download 2022<br />
|
63 |
-
brawl stars all skins unlocked apk download 2022<br />
|
64 |
-
brawl stars all characters unlocked apk download 2022<br />
|
65 |
-
brawl stars all modes unlocked apk download 2022<br />
|
66 |
-
brawl stars all maps unlocked apk download 2022<br />
|
67 |
-
brawl stars all events unlocked apk download 2022<br />
|
68 |
-
brawl stars all rewards unlocked apk download 2022<br />
|
69 |
-
brawl stars all achievements unlocked apk download 2022<br />
|
70 |
-
how to download brawl stars hack apk for free in 2022</p>
|
71 |
-
<h2>Why do you need gems and coins in Brawl Stars?</h2>
|
72 |
-
<p>Gems and coins are the two main currencies in Brawl Stars that you need to progress in the game and unlock more content. Here is why they are important:</p>
|
73 |
-
<h3>Gems are the premium currency</h3>
|
74 |
-
<p>Gems are the rarest and most valuable currency in Brawl Stars. You can use gems to buy various items in the shop, such as:</p>
|
75 |
-
<ul>
|
76 |
-
<li>Brawl boxes: These are loot boxes that contain random rewards, such as brawlers, power points, coins, star powers, gadgets, or tokens.</li>
|
77 |
-
<li>Skins: These are cosmetic items that change the appearance of your brawlers.</li>
|
78 |
-
<li>Power points: These are items that increase the level of your brawlers.</li>
|
79 |
-
<li>Star powers and gadgets: These are special abilities that enhance your brawlers' performance.</li>
|
80 |
-
<li>Brawl pass: This is the seasonal pass that gives you access to the premium track of rewards.</li>
|
81 |
-
<li>Offers: These are special deals that give you discounts or bundles of items.</li>
|
82 |
-
</ul>
|
83 |
-
<h3>Coins are the main currency</h3>
|
84 |
-
<p>Coins are the most common and basic currency in Brawl Stars. You can use coins to upgrade your brawlers and unlock their star powers and gadgets. Upgrading your brawlers increases their health, damage, and super charge rate. Unlocking their star powers and gadgets gives them extra abilities that can change the outcome of a match. You can also use coins to buy power points in the shop.</p>
|
85 |
-
<h2>How to get gems and coins in Brawl Stars?</h2>
|
86 |
-
<p>There are two ways to get gems and coins in Brawl Stars: the legit way and the hack way. Let's see what they are:</p>
|
87 |
-
<h3>The legit way</h3>
|
88 |
-
<p>The legit way to get gems and coins in Brawl Stars is to play the game and earn them through various means. Some of the legit ways are:</p>
|
89 |
-
<h4>Play the game and complete quests</h4>
|
90 |
-
<p>The simplest way to get gems and coins is to play the game and win matches. You will earn tokens for every match you play, which you can use to open brawl boxes. Brawl boxes contain random rewards, such as gems, coins, power points, or brawlers. You can also complete quests that give you tokens or other rewards. Quests are tasks that require you to do something specific in the game, such as winning a certain number of matches with a specific brawler or mode.</p>
|
91 |
-
<h4>Open brawl boxes and get lucky</h4>
|
92 |
-
<p>Another way to get gems and coins is to open brawl boxes and hope for the best. Brawl boxes have a chance to contain gems or coins, along with other items. The chance of getting gems or coins depends on the type of brawl box you open. There are three types of brawl boxes: normal brawl boxes, big boxes, and mega boxes. Normal brawl boxes cost 100 tokens to open and have a 9% chance of containing gems or coins. Big boxes cost 300 tokens or 30 gems to open and have a 27% chance of containing gems or coins. Mega boxes cost 1500 tokens or 80 gems to open and have a 135% chance of containing gems or coins.</p>
|
93 |
-
<h4>Spend real money in the shop</h4>
|
94 |
-
<p>The last way to get gems and coins is to spend real money in the shop. You can buy gems with real money and use them to buy coins or other items in the shop. You can also buy offers that give you bundles of gems, coins, power points, or brawlers at a discounted price. However, this method is not free and may not be affordable for everyone.</p>
|
95 |
-
<h3>The hack way</h3>
|
96 |
-
<p>The hack way to get gems and coins in Brawl Stars is to download a modded APK file that gives you unlimited resources. A modded APK file is a modified version of the original game file that has been altered to give you some advantages or features that are not available in the official game. For example, a Brawl Stars hack APK may give you unlimited gems and coins, unlock all brawlers and skins, or enable cheats such as auto-aim or god mode.</p>
|
97 |
-
<h4>Download a modded APK file that gives you unlimited resources</h4>
|
98 |
-
<p>To use a Brawl Stars hack APK, you need to download it from a third-party website that offers such files. You can search for "Brawl Stars hack APK download 2022" on Google or any other search engine and find many websites that claim to provide such files. However, you need to be careful and cautious when downloading such files, as they may not be safe or reliable. Here are some of the risks of using a hack APK for Brawl Stars:</p>
|
99 |
-
<h2>What are the risks of using a hack APK for Brawl Stars?</h2>
|
100 |
-
<p>Using a hack APK for Brawl Stars may seem tempting and appealing, but it is not worth it in the long run. Here are some of the reasons why you should avoid using a hack APK for Brawl Stars:</p>
|
101 |
-
<h3>It may not work or be outdated</h3>
|
102 |
-
<p>One of the main problems with using a hack APK for Brawl Stars is that it may not work or be outdated. Hack APKs are not official or authorized by Supercell, the developer of Brawl Stars. Therefore, they are not compatible with the latest version of the game or the server. This means that you may not be able to play the game properly or access all the features and content. You may also encounter errors, glitches, bugs, or crashes that ruin your gaming experience.</p>
|
103 |
-
<h3>It may contain malware or viruses</h3>
|
104 |
-
<p>Another problem with using a hack APK for Brawl Stars is that it may contain malware or viruses that can harm your device or steal your personal information. Hack APKs are not verified or tested by any trusted source, so you cannot be sure what they contain or what they do. They may have hidden codes or programs that can infect your device with malware or viruses that can damage your system, delete your files, spy on your activities, or steal your data. You may also expose yourself to hackers or scammers who can access your account or device and use them for malicious purposes.</p>
|
105 |
-
<h3>It may get your account banned or suspended</h3>
|
106 |
-
<p>The last problem with using a hack APK for Brawl Stars is that it may get your account banned or suspended by Supercell. Supercell has a strict policy against cheating or hacking in their games, and they have a system that can detect and punish such behavior. If you use a hack APK for Brawl Stars, you are violating the terms of service and the fair play policy of the game. This means that you are risking losing your account and all your progress and achievements. You may also lose access to other Supercell games or services.</p>
|
107 |
-
<h2>Conclusion</h2>
|
108 |
-
<p>Brawl Stars is a fun and addictive game that requires gems and coins to progress and unlock more content. There are legit ways and hack ways to get gems and coins, but the hack ways are risky and not recommended. The best way to enjoy Brawl Stars is to play fair and have fun with your friends and other players from around the world.</p>
|
109 |
-
<h3>Frequently Asked Questions</h3>
|
110 |
-
<p>Here are some of the frequently asked questions about Brawl Stars hack APK download 2022:</p>
|
111 |
-
<h4>Q: Is there a working Brawl Stars hack APK in 2022?</h4>
|
112 |
-
<p>A: There may be some websites that claim to offer working Brawl Stars hack APKs in 2022, but they are not reliable or trustworthy. Most of them are either fake, outdated, infected, or detected by Supercell. Therefore, we do not recommend using any Brawl Stars hack APK in 2022.</p>
|
113 |
-
<h4>Q: How can I get free gems and coins in Brawl Stars without hacking?</h4>
|
114 |
-
<p>A: The best way to get free gems and coins in Brawl Stars without hacking is to play the game and earn them through legit ways. You can play the game and complete quests, open brawl boxes and get lucky, or spend real money in the shop if you can afford it.</p>
|
115 |
-
<h4>Q: What are some tips and tricks to play better in Brawl Stars?</h4>
|
116 |
-
<p>A A: Some of the tips and tricks to play better in Brawl Stars are: - Choose the right brawler for the right mode and map. Different brawlers have different strengths and weaknesses, and some are more suited for certain modes and maps than others. For example, long-range brawlers are good for open maps, while short-range brawlers are good for close-quarters maps. - Learn the abilities and mechanics of each brawler. Each brawler has a unique basic attack, super, star power, and gadget that can affect their performance and strategy. You should know how to use them effectively and efficiently, as well as how to counter them when facing them. - Communicate and cooperate with your teammates. Brawl Stars is a team-based game that requires coordination and teamwork to win. You should use the chat or the ping system to communicate with your teammates, such as calling for help, signaling your intentions, or warning about enemies. You should also support your teammates by healing them, protecting them, or assisting them in attacking or defending. - Practice and improve your skills. Brawl Stars is a game that requires skill and reflexes to play well. You should practice and improve your skills by playing different modes and maps, trying different brawlers and strategies, and learning from your mistakes and feedback. You can also watch replays or videos of other players to see how they play and what they do. <h4>Q: How can I download Brawl Stars on my device?</h4>
|
117 |
-
<p>A: Brawl Stars is available for both Android and iOS devices. You can download it from the Google Play Store or the App Store, depending on your device. You need to have a compatible device that meets the minimum requirements of the game, such as Android 4.3 or higher or iOS 9.0 or higher.</p>
|
118 |
-
<h4>Q: How can I contact Supercell if I have any issues or questions about Brawl Stars?</h4>
|
119 |
-
<p>A: If you have any issues or questions about Brawl Stars, you can contact Supercell through their official channels, such as their website, their social media accounts, or their in-game support. You can also visit their help center or their community forums to find answers or solutions to common problems or queries.</p> 197e85843d<br />
|
120 |
-
<br />
|
121 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Candy Crush Saga MOD APK 1.141 0.4 Unlocked Download Now for Android Devices.md
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Candy Crush Saga Mod APK 1.141 0.4 for Android Unlocked</h1>
|
3 |
-
<p>If you are looking for a fun and addictive puzzle game to play on your Android device, you should try Candy Crush Saga. This game has been downloaded by over a billion people and has become one of the most popular games in the world. But what if you want to enjoy the game without any limitations or restrictions? In that case, you should download Candy Crush Saga Mod APK, which is a modified version of the game that gives you unlimited access to all the features and levels. In this article, we will tell you what Candy Crush Saga is, why you should download the modded version, and how to do it.</p>
|
4 |
-
<h2>What is Candy Crush Saga?</h2>
|
5 |
-
<h3>A popular puzzle game with millions of players</h3>
|
6 |
-
<p>Candy Crush Saga is a puzzle game that was released in 2012 by King, a Swedish game developer. The game is based on matching three or more candies of the same color to clear them from the board and complete various objectives. The game has hundreds of levels, each with different challenges and goals. The game also has different modes, such as timed levels, jelly levels, ingredient levels, and more. The game is free to play, but you can also buy extra moves, boosters, lives, and other items with real money.</p>
|
7 |
-
<h2>download candy crush saga mod apk 1.141 0.4 for android unlocked</h2><br /><p><b><b>Download Zip</b> ····· <a href="https://jinyurl.com/2uNSR6">https://jinyurl.com/2uNSR6</a></b></p><br /><br />
|
8 |
-
<h3>Features of Candy Crush Saga</h3>
|
9 |
-
<p>Some of the features that make Candy Crush Saga so popular are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Beautiful graphics and sound effects that create a sweet and colorful atmosphere.</li>
|
12 |
-
<li>Easy and intuitive gameplay that anyone can enjoy.</li>
|
13 |
-
<li>A variety of candies and special candies that have different effects and combinations.</li>
|
14 |
-
<li>A social element that allows you to connect with your friends and compete with them on leaderboards and events.</li>
|
15 |
-
<li>New levels and updates that are added regularly to keep the game fresh and exciting.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>Why download Candy Crush Saga Mod APK?</h2>
|
18 |
-
<h3>Benefits of using the modded version</h3>
|
19 |
-
<p>While Candy Crush Saga is a fun and enjoyable game, it can also be frustrating and challenging at times. Some levels are very hard to beat, and you may run out of moves, lives, or boosters before you can complete them. You may also have to wait for a long time to get new lives or unlock new episodes. This can make the game less fun and more stressful.</p>
|
20 |
-
<p>That's why downloading Candy Crush Saga Mod APK can be a great idea. This is a modified version of the game that gives you unlimited access to everything in the game. You can get unlimited moves, lives, boosters, gold bars, and more. You can also unlock all the levels and episodes without having to wait or pay anything. This way, you can enjoy the game without any limitations or interruptions.</p>
|
21 |
-
<h3>How to download and install Candy Crush Saga Mod APK</h3>
|
22 |
-
<p>If you want to download and install Candy Crush Saga Mod APK on your Android device, you need to follow these simple steps:</p>
|
23 |
-
<ol>
|
24 |
-
<li>Go to [this link](^1^) and download the modded APK file.</li>
|
25 |
-
<li>Enable unknown sources on your device settings to allow the installation of third-party apps.</li>
|
26 |
-
<li>Locate the downloaded file on your device and tap on it to start the installation process.</li>
|
27 |
-
<li>Follow the instructions on the screen and wait for the installation to finish.</li>
|
28 |
-
<li>Launch the game and enjoy unlimited candy crushing!</li>
|
29 |
-
</ol>
|
30 |
-
<h2>Conclusion</h2>
|
31 |
-
<h3>Summary of the main points</h3>
|
32 |
-
<p>Candy Crush Saga is one of the most popular puzzle games in the world, with millions of players who love its colorful and addictive gameplay. However, if you want to experience the game without any restrictions or limitations, you should download Candy Crush Saga Mod APK, which is a modified version of the game that gives you unlimited access to all the features and levels. You can get unlimited moves, lives, boosters, gold bars, and more. You can also unlock all the levels and episodes without having to wait or pay anything. All you need to do is download the modded APK file from the link provided and install it on your device. Then, you can enjoy the game without any hassle or stress.</p>
|
33 |
-
<h3>FAQs</h3>
|
34 |
-
<p>Here are some of the frequently asked questions about Candy Crush Saga Mod APK:</p>
|
35 |
-
<p>Download candy crush saga mod apk latest version for android free<br />
|
36 |
-
How to download candy crush saga mod apk with unlimited lives and boosters<br />
|
37 |
-
Candy crush saga mod apk 1.141 0.4 download link for android devices<br />
|
38 |
-
Candy crush saga hack mod apk download for android no root<br />
|
39 |
-
Download candy crush saga mod apk unlocked all levels and episodes<br />
|
40 |
-
Candy crush saga mod apk 1.141 0.4 for android offline gameplay<br />
|
41 |
-
Candy crush saga mod apk download 2023 updated version for android<br />
|
42 |
-
Candy crush saga mod apk unlimited gold bars and moves download<br />
|
43 |
-
Download candy crush saga mod apk with all features unlocked for android<br />
|
44 |
-
Candy crush saga mod apk 1.141 0.4 for android free shopping<br />
|
45 |
-
Candy crush saga mod apk download for android without ads<br />
|
46 |
-
Candy crush saga mod apk 1.141 0.4 for android unlimited everything<br />
|
47 |
-
Download candy crush saga mod apk with HD graphics and sound for android<br />
|
48 |
-
Candy crush saga mod apk download for android from happymod[^1^]<br />
|
49 |
-
Candy crush saga mod apk 1.141 0.4 for android easy installation guide<br />
|
50 |
-
Download candy crush saga mod apk with new levels and challenges for android<br />
|
51 |
-
Candy crush saga mod apk 1.230.0.2 [unlocked][^2^] download for android<br />
|
52 |
-
Candy crush saga mod apk download for android with daily rewards and bonuses<br />
|
53 |
-
Candy crush saga mod apk 1.141 0.4 for android fast and secure download<br />
|
54 |
-
Download candy crush saga mod apk with online multiplayer mode for android<br />
|
55 |
-
Candy crush saga mod apk download for android with unlimited jelly beans and lollipops<br />
|
56 |
-
Candy crush saga mod apk 1.141 0.4 for android compatible with all devices<br />
|
57 |
-
Download candy crush saga mod apk with original gameplay and features for android<br />
|
58 |
-
Candy crush saga mod apk download for android from apkpure<br />
|
59 |
-
Candy crush saga mod apk 1.141 0.4 for android best puzzle game ever</p>
|
60 |
-
<table>
|
61 |
-
<tr>
|
62 |
-
<th>Question</th>
|
63 |
-
<th>Answer</th>
|
64 |
-
</tr>
|
65 |
-
<tr>
|
66 |
-
<td>Is Candy Crush Saga Mod APK safe to use?</td>
|
67 |
-
<td>Yes, Candy Crush Saga Mod APK is safe to use as long as you download it from a trusted source. However, you should be aware that using a modded version of the game may violate the terms and conditions of the original game and may result in your account being banned or suspended.</td>
|
68 |
-
</tr>
|
69 |
-
<tr>
|
70 |
-
<td>Does Candy Crush Saga Mod APK work offline?</td>
|
71 |
-
<td>Yes, Candy Crush Saga Mod APK works offline as well as online. You can play the game without an internet connection and still enjoy all the features and levels. However, you may not be able to access some of the social features, such as leaderboards and events, when you are offline.</td>
|
72 |
-
</tr>
|
73 |
-
<tr>
|
74 |
-
<td>Can I update Candy Crush Saga Mod APK?</td>
|
75 |
-
<td>Yes, you can update Candy Crush Saga Mod APK whenever there is a new version available. However, you may need to uninstall the previous version and install the new one manually. You may also lose your progress and data if you update the modded version of the game.</td>
|
76 |
-
</tr>
|
77 |
-
<tr>
|
78 |
-
<td>Can I sync Candy Crush Saga Mod APK with Facebook?</td>
|
79 |
-
<td>No, you cannot sync Candy Crush Saga Mod APK with Facebook or any other social media platform. This is because the modded version of the game is not compatible with the official version and may cause errors or conflicts. If you want to sync your progress and data with Facebook, you should use the original version of the game.</td>
|
80 |
-
</tr>
|
81 |
-
<tr>
|
82 |
-
<td>Can I play Candy Crush Saga Mod APK on other devices?</td>
|
83 |
-
<td>Yes, you can play Candy Crush Saga Mod APK on other devices as long as they are Android devices. You can transfer the modded APK file from one device to another using a USB cable or a file-sharing app. However, you may need to enable unknown sources on each device before installing the modded version of the game.</td>
|
84 |
-
</tr>
|
85 |
-
</table></p> 401be4b1e0<br />
|
86 |
-
<br />
|
87 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Cars Daredevil Garage APK The Ultimate Racing Game for Cars Fans.md
DELETED
@@ -1,99 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cars Daredevil Garage APK: A Fun and Exciting Racing Game for Kids</h1>
|
3 |
-
<p>Do you love racing games? Do you love the Disney Pixar Cars movies? If you answered yes to both questions, then you will love Cars Daredevil Garage APK. This is a racing game that lets you join Lightning McQueen, Mater, and all of your favorite Cars characters as you power through heart-racing stunts with over 180 vehicles. You can crank up the speed on over 15 tracks that take crazy twists and turns through the beach, the backyard, and the bowling alley. You can also scan your actual Mattel die-cast cars to unlock more fun in the game. In this article, we will tell you everything you need to know about Cars Daredevil Garage APK, including its features, how to download and install it on your Android device, and some tips and tricks for playing it.</p>
|
4 |
-
<h2>cars daredevil garage apk</h2><br /><p><b><b>Download</b> ★★★★★ <a href="https://jinyurl.com/2uNQ57">https://jinyurl.com/2uNQ57</a></b></p><br /><br />
|
5 |
-
<h2>Features of Cars Daredevil Garage APK</h2>
|
6 |
-
<p>Cars Daredevil Garage APK is a game that is packed with features that will keep you entertained for hours. Here are some of the features that you can enjoy in this game:</p>
|
7 |
-
<h3>Over 180 Vehicles to Choose From</h3>
|
8 |
-
<p>You can choose from over 180 vehicles to stunt with in this game, including cars from different categories such as World Grand Prix, Piston Cup, Radiator Springs, Ye Left Turn Inn, Tuners, Lost and Found, Airport Adventure, Race Fans, Pit Crew, Dinoco, Tokyo Party Palace Chaos, Ice Racers, Neon Racers, Silver Racers, and more. You can also customize your cars with different colors, stickers, wheels, and accessories.</p>
|
9 |
-
<h3>15 Tracks with Crazy Stunts and Challenges</h3>
|
10 |
-
<p>You can power through 15 tracks that take you to different locations such as the beach, the backyard, and the bowling alley. Each track has its own challenges and obstacles that will test your skills and reflexes. You can perform amazing stunts such as jumps, loops, skids, drifts, flips, and more. You can also collect coins and stars along the way to earn extra points and rewards.</p>
|
11 |
-
<h3>Scan Your Mattel Die-Cast Cars to Unlock More Fun</h3>
|
12 |
-
<p>If you have any Mattel die-cast cars from the Cars movie franchise, you can scan them using your device's camera to unlock them in the game. You can then use them to stunt with in the app. The more cars you scan, the more fun you will have in the game.</p>
|
13 |
-
<h3>Free Hero Car with Each New World Purchase</h3>
|
14 |
-
<p>If you want to access more tracks and worlds in the game, you can purchase them using real money or coins that you earn in the game. With each new world purchase, you will also get a free hero car that is exclusive to that world. For example, if you buy the beach world, you will get a free beach buggy car.</p>
|
15 |
-
<h2>How to Download and Install Cars Daredevil Garage APK on Your Android Device</h2> <p>Downloading and installing Cars Daredevil Garage APK on your Android device is very easy and simple. Just follow these steps:</p>
|
16 |
-
<p>cars daredevil garage app download<br />
|
17 |
-
cars daredevil garage game online<br />
|
18 |
-
cars daredevil garage mod apk<br />
|
19 |
-
cars daredevil garage hack apk<br />
|
20 |
-
cars daredevil garage cheats<br />
|
21 |
-
cars daredevil garage android<br />
|
22 |
-
cars daredevil garage ios<br />
|
23 |
-
cars daredevil garage for pc<br />
|
24 |
-
cars daredevil garage free play<br />
|
25 |
-
cars daredevil garage scan codes<br />
|
26 |
-
cars daredevil garage disney pixar<br />
|
27 |
-
cars daredevil garage lightning mcqueen<br />
|
28 |
-
cars daredevil garage mater<br />
|
29 |
-
cars daredevil garage world grand prix<br />
|
30 |
-
cars daredevil garage piston cup<br />
|
31 |
-
cars daredevil garage radiator springs<br />
|
32 |
-
cars daredevil garage ye left turn inn<br />
|
33 |
-
cars daredevil garage tuners<br />
|
34 |
-
cars daredevil garage lost and found<br />
|
35 |
-
cars daredevil garage airport adventure<br />
|
36 |
-
cars daredevil garage race fans<br />
|
37 |
-
cars daredevil garage pit crew<br />
|
38 |
-
cars daredevil garage dinoco<br />
|
39 |
-
cars daredevil garage tokyo party palace chaos<br />
|
40 |
-
cars daredevil garage ice racers<br />
|
41 |
-
cars daredevil garage neon racers<br />
|
42 |
-
cars daredevil garage silver racers<br />
|
43 |
-
cars daredevil garage beach tracks<br />
|
44 |
-
cars daredevil garage backyard tracks<br />
|
45 |
-
cars daredevil garage bowling alley tracks<br />
|
46 |
-
cars daredevil garage stunts and jumps<br />
|
47 |
-
cars daredevil garage loops and turns<br />
|
48 |
-
cars daredevil garage skids and drifts<br />
|
49 |
-
cars daredevil garage boost and speed<br />
|
50 |
-
cars daredevil garage mattel diecast vehicles<br />
|
51 |
-
cars daredevil garage unlock and earn vehicles<br />
|
52 |
-
cars daredevil garage net energy gain experiment<br />
|
53 |
-
cars daredevil garage mini sun fusion reactor<br />
|
54 |
-
cars daredevil garage korea superconducting tokamak advanced research facility (kstar)<br />
|
55 |
-
cars daredevil garage korea institute of fusion energy (kife)<br />
|
56 |
-
cars daredevil garage 100 million degrees celsius temperature<br />
|
57 |
-
cars daredevil garage 30 seconds duration <br />
|
58 |
-
cars daredevil garage seven times hotter than the sun core <br />
|
59 |
-
cars daredevil garage 15 million degrees kelvin temperature <br />
|
60 |
-
cars daredevil garage holy grail of fusion experiments <br />
|
61 |
-
cars daredevil garage unlimited energy source <br />
|
62 |
-
cars daredevil garage appadvice review <br />
|
63 |
-
cars daredevil garage toucharcade review <br />
|
64 |
-
cars daredevil garage in-app purchases</p>
|
65 |
-
<h3>Step 1: Enable Unknown Sources on Your Device Settings</h3>
|
66 |
-
<p>Before you can install any APK file on your device, you need to enable the option to allow installation from unknown sources. This is a security measure that prevents malicious apps from harming your device. To do this, go to your device settings and look for the security or privacy option. Then, find the unknown sources option and toggle it on. You may see a warning message that tells you the risks of installing apps from unknown sources. Just tap OK to proceed.</p>
|
67 |
-
<h3>Step 2: Download the APK File from a Trusted Source</h3>
|
68 |
-
<p>Next, you need to download the APK file of Cars Daredevil Garage from a trusted source. You can use any web browser to search for the file online. Make sure that the source is reliable and has positive reviews from other users. You can also use the link below to download the file directly from our website. The file size is about 100 MB, so make sure you have enough space on your device and a stable internet connection.</p>
|
69 |
-
<p><a href="">Download Cars Daredevil Garage APK here</a></p>
|
70 |
-
<h3>Step 3: Locate and Install the APK File on Your Device</h3>
|
71 |
-
<p>Once you have downloaded the file, you need to locate it on your device and install it. You can use any file manager app to find the file in your downloads folder or any other location where you saved it. Then, tap on the file and select install. You may see a pop-up message that asks for your permission to install the app. Just tap install again and wait for the installation process to finish.</p>
|
72 |
-
<h3>Step 4: Launch the Game and Enjoy</h3>
|
73 |
-
<p>After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You may see a splash screen that shows the game's logo and some loading animations. Then, you will see the main menu of the game where you can choose to play, scan cars, customize cars, or access other options. You can also sign in with your Google Play Games account to save your progress and achievements. Now, you are ready to enjoy Cars Daredevil Garage APK on your Android device.</p>
|
74 |
-
<h2>Tips and Tricks for Playing Cars Daredevil Garage APK</h2>
|
75 |
-
<p>Cars Daredevil Garage APK is a fun and exciting racing game for kids, but it can also be challenging at times. Here are some tips and tricks that can help you improve your gameplay and have more fun:</p>
|
76 |
-
<h3>Use Boosters and Power-Ups Wisely</h3>
|
77 |
-
<p>In each track, you will see some boosters and power-ups that can help you speed up, perform stunts, or avoid obstacles. For example, you can use rockets, magnets, shields, or turbo boosts to gain an edge over your opponents or obstacles. However, these boosters and power-ups are limited and have cooldowns, so you need to use them wisely and strategically. Don't waste them on easy parts of the track or when you are already ahead of everyone else. Save them for when you really need them or when they can make a big difference in your performance.</p>
|
78 |
-
<h3>Collect Coins and Stars to Upgrade Your Cars</h3>
|
79 |
-
<p>As you play through the tracks, you will see some coins and stars that you can collect by driving over them or performing stunts. These coins and stars are very important because they allow you to upgrade your cars and unlock new ones. Upgrading your cars can improve their speed, acceleration, handling, and stunt abilities. Unlocking new cars can give you access to different features and styles that suit your preferences. Therefore, try to collect as many coins and stars as possible in each track and spend them wisely on upgrading or unlocking your cars.</p>
|
80 |
-
<h3>Try Different Cars and Tracks to Find Your Favorite Combination</h3>
|
81 |
-
<p>Cars Daredevil Garage APK has over 180 vehicles and 15 tracks that you can choose from, so there is a lot of variety and diversity in the game. Each car has its own strengths and weaknesses, and each track has its own challenges and opportunities. Therefore, it is a good idea to try different cars and tracks to find your favorite combination that matches your play style and personality. You may discover some hidden gems or surprises that make the game more fun and enjoyable for you.</p>
|
82 |
-
<h3>Watch Videos and Complete Offers to Earn More Rewards</h3>
|
83 |
-
<p>If you want to earn more coins, stars, or other rewards in the game, you can watch some videos or complete some offers that are available in the game's menu. These videos or offers are usually short and simple, such as watching an ad, downloading another app, or answering <p>a survey, etc. By doing these, you can earn more coins, stars, or even free cars that you can use in the game. However, be careful and selective about the videos or offers that you watch or complete, as some of them may be spammy or harmful to your device. Only choose the ones that are from trusted sources and that do not ask for your personal information or access to your device.</p>
|
84 |
-
<h2>Conclusion</h2>
|
85 |
-
<p>Cars Daredevil Garage APK is a fun and exciting racing game for kids that lets you play with your favorite Cars characters and vehicles. You can choose from over 180 vehicles and 15 tracks that offer crazy stunts and challenges. You can also scan your Mattel die-cast cars to unlock more fun in the game. You can download and install the game on your Android device easily and enjoy it anytime and anywhere. You can also use some tips and tricks to improve your gameplay and have more fun. Cars Daredevil Garage APK is a game that will keep you entertained for hours and make you feel like a daredevil racer.</p>
|
86 |
-
<h2>FAQs</h2>
|
87 |
-
<p>Here are some frequently asked questions about Cars Daredevil Garage APK:</p>
|
88 |
-
<h3>Q: Is Cars Daredevil Garage APK safe to download and install?</h3>
|
89 |
-
<p>A: Yes, Cars Daredevil Garage APK is safe to download and install, as long as you get it from a trusted source and enable unknown sources on your device settings. The game does not contain any viruses, malware, or spyware that can harm your device or compromise your privacy.</p>
|
90 |
-
<h3>Q: Is Cars Daredevil Garage APK free to play?</h3>
|
91 |
-
<p>A: Yes, Cars Daredevil Garage APK is free to play, but it also offers some in-app purchases that can enhance your gaming experience. You can buy more coins, stars, tracks, or cars using real money or coins that you earn in the game. However, these purchases are optional and not required to enjoy the game.</p>
|
92 |
-
<h3>Q: How can I scan my Mattel die-cast cars to unlock them in the game?</h3>
|
93 |
-
<p>A: To scan your Mattel die-cast cars to unlock them in the game, you need to have a device with a camera and an internet connection. Then, you need to go to the scan menu in the game and follow the instructions on how to scan your car. You need to place your car on a flat surface with good lighting and hold your device over it until it recognizes the car. Then, you will see a confirmation message that tells you that you have unlocked the car in the game.</p>
|
94 |
-
<h3>Q: How can I customize my cars in the game?</h3>
|
95 |
-
<p>A: To customize your cars in the game, you need to go to the customize menu in the game and select the car that you want to modify. Then, you will see different options for changing the color, stickers, wheels, and accessories of your car. You can also use coins or stars to buy more options for customizing your car.</p>
|
96 |
-
<h3>Q: How can I save my progress and achievements in the game?</h3>
|
97 |
-
<p>A: To save your progress and achievements in the game, you need to sign in with your Google Play Games account. This will allow you to sync your data across different devices and access leaderboards and achievements. You can also share your progress and achievements with your friends on social media.</p> 197e85843d<br />
|
98 |
-
<br />
|
99 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Free Download Educational Games for Kids-6 Years Old - Engaging Creative and Safe.md
DELETED
@@ -1,168 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Free Download Educational Games for Kids-6 Years Old</h1> <p>If you are looking for some fun and effective ways to help your child learn new skills and knowledge, you might want to consider educational games. Educational games are games that are designed to teach children various concepts and topics in a playful and interactive way. They can also provide a stimulating and engaging learning environment that motivates children to stay focused and interested in their learning goals.</p> <p>There are many educational games available online that you can download for free and use with your child. These games cover different subject areas such as math, language arts, science, art, and more. They also cater to different age groups and skill levels, so you can find something suitable for your child's needs and preferences.</p>
|
3 |
-
<h2>free download educational games for kids-6 years old</h2><br /><p><b><b>DOWNLOAD</b> ⚡ <a href="https://jinyurl.com/2uNSuH">https://jinyurl.com/2uNSuH</a></b></p><br /><br /> <p>In this article, we will explore how to choose educational games for kids-6 years old, what benefits they can offer, and how to use them effectively. We will also provide some examples of free download educational games that you can try with your child.</p> <h2>How to Choose Educational Games for Kids-6 Years Old</h2> <p>When choosing educational games for your child, there are some factors that you should consider. Here are some tips and criteria to help you make a smart choice:</p> <ul>
|
4 |
-
<li>Pick a topic area. Think about what topics or skills you want your child to learn or practice. You can choose games that align with your child's curriculum, interests, or hobbies. For example, if your child likes animals, you can look for games that teach them about animal habitats, behaviors, or sounds.</li>
|
5 |
-
<li>Look at already curated lists. There are many websites and blogs that provide reviews and recommendations of quality educational games. You can use these resources to find games that have been tested and rated by experts or other parents. For example, Common Sense Media has game reviews with suggested ages, ratings, and learning potential.</li>
|
6 |
-
<li>Apply some criteria. Once you have a list of potential games, you can evaluate them based on some criteria such as:</li>
|
7 |
-
<ul>
|
8 |
-
<li>The purpose of the game. Is the game designed to teach a specific concept or skill, or is it just for entertainment? Does the game have clear learning objectives and outcomes?</li>
|
9 |
-
<li>The age appropriateness of the game. Is the game suitable for your child's age group? Does it match your child's maturity level and reading ability? Does it have any content or features that might be inappropriate or harmful?</li>
|
10 |
-
<li>The skill level of the game. Is the game challenging enough for your child? Does it adapt to your child's pace and progress? Does it provide feedback and support?</li>
|
11 |
-
<li>The engagement level of the game. Is the game fun and interesting for your child? Does it have appealing graphics, sounds, and characters? Does it offer variety and choice?</li>
|
12 |
-
<li>The quality of the game. Is the game well-designed and user-friendly? Does it work smoothly and without errors? Does it have clear instructions and navigation?</li>
|
13 |
-
</ul>
|
14 |
-
<li>Try some games. The best way to find out if a game is suitable and enjoyable for your child is to try it yourself. You can download some games and play them with your child. You can observe how your child reacts and interacts with the game. You can also ask your child for their feedback and opinions.</li>
|
15 |
-
</ul>
|
16 |
-
<p>Here are some examples of free download educational games for kids-6 years old that meet the criteria:</p>
|
17 |
-
<table>
|
18 |
-
<tr>
|
19 |
-
<th>Game</th>
|
20 |
-
<th>Topic</th>
|
21 |
-
<th>Description</th>
|
22 |
-
</tr>
|
23 |
-
<tr>
|
24 |
-
<td><a href="">ABCmouse.com Early Learning Academy</a></td>
|
25 |
-
<td>Math, Language Arts, Science, Art, and more</td>
|
26 |
-
<td>A comprehensive and interactive learning program that covers various subjects and skills for preschool to 2nd grade. It has over 10,000 activities, games, books, songs, and videos that are aligned with the Common Core Standards.</td>
|
27 |
-
</tr>
|
28 |
-
<tr>
|
29 |
-
<td><a href="">PBS KIDS Games</a></td>
|
30 |
-
<td>Math, Science, Social Studies, Creativity, and more</td>
|
31 |
-
<td>A collection of over 200 games featuring popular PBS KIDS characters such as Curious George, Daniel Tiger, and Wild Kratts. The games are designed to help kids learn and explore different topics and themes in a fun and engaging way.</td>
|
32 |
-
</tr>
|
33 |
-
<tr>
|
34 |
-
<td><a href="">CodeSpark Academy: Kids Coding</a></td>
|
35 |
-
<td>Coding, Logic, Problem-Solving, Creativity, and more</td>
|
36 |
-
<td>A game-based learning platform that teaches kids the basics of coding and computational thinking. It uses a kid-friendly visual programming language that does not require reading or typing. It also has puzzles, challenges, projects, and mini-games that foster creativity and collaboration.</td>
|
37 |
-
</tr>
|
38 |
-
<tr>
|
39 |
-
<td><a href="">BrainPOP Jr. Movie of the Week</a></td>
|
40 |
-
<td>Science, Social Studies, Reading, Writing, Math, Health, Arts, and Technology</td>
|
41 |
-
<td>A weekly animated movie that introduces kids to various topics and concepts in an entertaining and informative way. It also has quizzes, activities, and games that reinforce the learning objectives and skills.</td>
|
42 |
-
</tr>
|
43 |
-
<tr>
|
44 |
-
<td><a href="">Duolingo ABC - Learn to Read</a></td>
|
45 |
-
<td>Reading, Writing, Phonics, Vocabulary, and more</td>
|
46 |
-
<td>A game-based app that helps kids learn to read in English. It has over 300 fun and interactive lessons that teach the alphabet, phonics, sight words, spelling, and more. It also adapts to each child's level and progress.</td>
|
47 |
-
</tr>
|
48 |
-
</table>
|
49 |
-
<h2>Benefits of Educational Games for Kids-6 Years Old</h2>
|
50 |
-
<p>Educational games can offer many benefits for kids-6 years old. They can help kids:</p>
|
51 |
-
<ul>
|
52 |
-
<li>Develop cognitive skills. Educational games can stimulate kids' brains and enhance their memory, attention, logic, reasoning, problem-solving, and creativity skills. They can also help kids learn to think critically and strategically, and to apply their knowledge to different situations and contexts.</li>
|
53 |
-
<li>Acquire academic knowledge. Educational games can teach kids various concepts and topics that are relevant to their curriculum and standards. They can also help kids practice and reinforce their skills and knowledge in a fun and interactive way.</li>
|
54 |
-
<li>Improve social and emotional skills. Educational games can help kids develop their communication, collaboration, and empathy skills. They can also help kids cope with their emotions, such as frustration, anxiety, or boredom, and to build their confidence and self-esteem.</li>
|
55 |
-
<li>Enhance motivation and engagement. Educational games can make learning more enjoyable and rewarding for kids. They can also provide feedback, rewards, and challenges that motivate kids to keep playing and learning. They can also cater to kids' interests and preferences, and allow them to have some control over their learning pace and path.</li>
|
56 |
-
</ul>
|
57 |
-
<p>There is evidence and research that supports the benefits of educational games for kids-6 years old. For example, a study by the Joan Ganz Cooney Center found that educational games can improve kids' math skills, especially when they are played with a parent or a teacher. Another study by the University of California, Irvine found that educational games can improve kids' reading skills, especially when they are combined with traditional instruction. A third study by the University of Wisconsin-Madison found that educational games can improve kids' science skills, especially when they are designed to promote inquiry and exploration.</p>
|
58 |
-
<h2>How to Use Educational Games for Kids-6 Years Old</h2>
|
59 |
-
<p>Educational games can be used in different ways to enhance your child's learning experience. Here are some suggestions and guidelines for using games effectively and safely:</p>
|
60 |
-
<ul>
|
61 |
-
<li>Set some goals. Before you start playing a game with your child, you should have some clear goals in mind. What do you want your child to learn or practice? How will you measure your child's progress and achievement? How will you provide feedback and encouragement?</li>
|
62 |
-
<li>Choose the right time and place. You should choose a time and place that is conducive for playing and learning. You should avoid distractions, such as noise, TV, or other devices. You should also limit the screen time for your child, according to the recommendations of the American Academy of Pediatrics. You should also monitor your child's physical and mental health, such as eye strain, posture, mood, and behavior.</li>
|
63 |
-
<li>Play together. You should play the game with your child, or at least be nearby and available. You should not leave your child alone with the game, or use the game as a babysitter. You should also interact with your child during the game, such as asking questions, giving hints, praising efforts, or sharing opinions.</li>
|
64 |
-
<li>Make connections. You should help your child make connections between the game and the real world. You should relate the game to your child's prior knowledge, experiences, or interests. You should also extend the game to other activities, such as reading books, doing experiments, or visiting places.</li>
|
65 |
-
</ul>
|
66 |
-
<p>Here are some examples of how to use educational games for kids-6 years old in different ways:</p>
|
67 |
-
<table>
|
68 |
-
<tr>
|
69 |
-
<th>Game</th>
|
70 |
-
<th>How to Use</th>
|
71 |
-
</tr>
|
72 |
-
<tr>
|
73 |
-
<td><a href="">ABCmouse.com Early Learning Academy</a></td>
|
74 |
-
<td>You can use this game as a comprehensive learning program for your child. You can create a personalized learning path for your child based on their age and interests. You can also track your child's progress and achievements through the dashboard. You can also play the games with your child and help them with the activities.</td>
|
75 |
-
</tr>
|
76 |
-
<tr>
|
77 |
-
<td><a href="">PBS KIDS Games</a></td>
|
78 |
-
<td>You can use this game as a supplement to your child's curriculum and interests. You can choose games that match your child's grade level and subject area. You can also explore games that feature your child's favorite PBS KIDS characters. You can also watch the related TV shows or videos with your child and discuss the topics.</td>
|
79 |
-
</tr>
|
80 |
-
<tr>
|
81 |
-
<td><a href="">CodeSpark Academy: Kids Coding</a></td>
|
82 |
-
<td>You can use this game as an introduction to coding and computational thinking for your child. You can help your child learn the basics of coding and logic through the visual programming language. You can also encourage your child to create their own games and projects using the game's tools and resources.</td>
|
83 |
-
</tr>
|
84 |
-
<tr>
|
85 |
-
<td><a href="">BrainPOP Jr. Movie of the Week</a></td>
|
86 |
-
<td>You can use this game as a way to spark your child's curiosity and interest in various topics and concepts. You can watch the movie of the week with your child and learn something new together. You can also play the related quizzes, activities, and games with your child and test their knowledge and skills.</td>
|
87 |
-
</tr>
|
88 |
-
<tr>
|
89 |
-
<td><a href="">Duolingo ABC - Learn to Read</a></td>
|
90 |
-
<td>You can use this game as a support for your child's reading development. You can help your child learn the alphabet, phonics, sight words, spelling, and more through the game's lessons. You can also read books or stories with your child and practice their reading skills.</td>
|
91 |
-
</tr>
|
92 |
-
</table>
|
93 |
-
<h2>Conclusion</h2>
|
94 |
-
<p>Educational games are a great way to help your child learn new skills and knowledge in a fun and engaging way. They can also provide many benefits for your child's cognitive, academic, social, and emotional development. However, not all games are created equal, so you should be careful and selective when choosing educational games for your child. You should also use them wisely and effectively, and make sure they are appropriate and safe for your child.</p>
|
95 |
-
<p>free download math games for kids-6 years old<br />
|
96 |
-
free download spelling games for kids-6 years old<br />
|
97 |
-
free download science games for kids-6 years old<br />
|
98 |
-
free download geography games for kids-6 years old<br />
|
99 |
-
free download history games for kids-6 years old<br />
|
100 |
-
free download art games for kids-6 years old<br />
|
101 |
-
free download music games for kids-6 years old<br />
|
102 |
-
free download logic games for kids-6 years old<br />
|
103 |
-
free download puzzle games for kids-6 years old<br />
|
104 |
-
free download memory games for kids-6 years old<br />
|
105 |
-
free download word games for kids-6 years old<br />
|
106 |
-
free download trivia games for kids-6 years old<br />
|
107 |
-
free download coloring games for kids-6 years old<br />
|
108 |
-
free download drawing games for kids-6 years old<br />
|
109 |
-
free download coding games for kids-6 years old<br />
|
110 |
-
free download typing games for kids-6 years old<br />
|
111 |
-
free download reading games for kids-6 years old<br />
|
112 |
-
free download writing games for kids-6 years old<br />
|
113 |
-
free download grammar games for kids-6 years old<br />
|
114 |
-
free download vocabulary games for kids-6 years old<br />
|
115 |
-
free download phonics games for kids-6 years old<br />
|
116 |
-
free download alphabet games for kids-6 years old<br />
|
117 |
-
free download numbers games for kids-6 years old<br />
|
118 |
-
free download shapes games for kids-6 years old<br />
|
119 |
-
free download colors games for kids-6 years old</p>
|
120 |
-
<p>If you are looking for some free download educational games for kids-6 years old, you can try some of the examples we provided in this article. They are all well-designed, user-friendly, and educational games that cover different subject areas and skills. They are also fun and interesting for kids-6 years old, and they can be used in different ways to enhance their learning experience.</p>
|
121 |
-
<p>We hope you enjoyed this article and found it useful. If you want to learn more about educational games for kids-6 years old, you can check out some of the resources we listed below. Happy gaming!</p>
|
122 |
-
<h2>FAQs</h2>
|
123 |
-
<p>Here are some common questions related to the topic of free download educational games for kids-6 years old:</p>
|
124 |
-
<ul>
|
125 |
-
<li>Q: What are some of the best websites or apps for free download educational games for kids-6 years old?</li>
|
126 |
-
<li>A: Some of the best websites or apps for free download educational games for kids-6 years old are:</li>
|
127 |
-
<ul>
|
128 |
-
<li><a href="">ABCya!</a>: A website that offers hundreds of free educational games for kids in grades K-6.</li>
|
129 |
-
<li><a href="">Funbrain</a>: A website that offers over 100 fun and interactive games for kids in grades Pre-K-8.</li>
|
130 |
-
<li><a href="">Khan Academy Kids</a>: An app that offers thousands of free learning activities for kids ages 2-7.</li>
|
131 |
-
<li><a href="">Starfall</a>: A website that offers free online games and activities for kids in grades Pre-K-3.</li>
|
132 |
-
<li><a href="">Toca Boca</a>: An app that offers dozens of free creative and playful games for kids of all ages.</li>
|
133 |
-
</ul>
|
134 |
-
<li>Q: How can I find out if a game is safe and appropriate for my child?</li>
|
135 |
-
<li>A: There are some ways to find out if a game is safe and appropriate for your child, such as:</li>
|
136 |
-
<ul>
|
137 |
-
<li>Reading the game's description, ratings, reviews, and privacy policy.</li>
|
138 |
-
<li>Checking the game's content rating system, such as ESRB or PEGI.</li>
|
139 |
-
<li>Playing the game yourself or with your child before downloading it.</li>
|
140 |
-
<li>Using parental controls or filters to limit or block inappropriate or harmful content or features.</li>
|
141 |
-
<li>Talking to your child about online safety and etiquette.</li>
|
142 |
-
</ul>
|
143 |
-
<li>Q: How much screen time should I allow my child to play educational games?</li>
|
144 |
-
<li>A: The amount of screen time you should allow your child to play educational games depends on your child's age, needs, and interests. However, you should follow the guidelines and recommendations of the American Academy of Pediatrics (AAP), which are:</li>
|
145 |
-
<ul>
|
146 |
-
<li>For children younger than 18 months, avoid use of screen media other than video-chatting.</li>
|
147 |
-
<li>For children 18 to 24 months, watch or play high-quality programs or games with them to help them understand what they are seeing.</li>
|
148 |
-
<li>For children 2 to 5 years, limit screen use to 1 hour per day of high-quality programs or games, and watch or play with them to help them learn from what they are seeing.</li>
|
149 |
-
<li>For children 6 years and older, place consistent limits on the time spent using media, and the types of media, and make sure media does not take the place of adequate sleep, physical activity and other behaviors essential to health.</li>
|
150 |
-
</ul>
|
151 |
-
<li>Q: What are some of the challenges or drawbacks of using educational games for kids-6 years old?</li>
|
152 |
-
<li>A: Some of the challenges or drawbacks of using educational games for kids-6 years old are:</li>
|
153 |
-
<ul>
|
154 |
-
<li>The quality and accuracy of the games. Not all games are well-designed, user-friendly, and educational. Some games may have errors, bugs, or misleading information. Some games may also have inappropriate or harmful content or features.</li>
|
155 |
-
<li>The availability and accessibility of the games. Not all games are free, easy to download, or compatible with your device. Some games may require internet connection, registration, or subscription. Some games may also have ads, in-app purchases, or data collection.</li>
|
156 |
-
<li>The balance and moderation of the games. Too much screen time or gaming can have negative effects on your child's health, development, and well-being. Some games may also be addictive, distracting, or isolating for your child.</li>
|
157 |
-
</ul>
|
158 |
-
<li>Q: How can I make the most out of educational games for kids-6 years old?</li>
|
159 |
-
<li>A: Some of the ways to make the most out of educational games for kids-6 years old are:</li>
|
160 |
-
<ul>
|
161 |
-
<li>Choose games that are relevant, appropriate, and engaging for your child.</li>
|
162 |
-
<li>Use games as a supplement, not a substitute, for your child's learning.</li>
|
163 |
-
<li>Play games with your child and make it a fun and social activity.</li>
|
164 |
-
<li>Help your child make connections between the game and the real world.</li>
|
165 |
-
<li>Encourage your child to reflect on their learning and share their thoughts and feelings.</li>
|
166 |
-
</ul></p> 197e85843d<br />
|
167 |
-
<br />
|
168 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/infer/lib/uvr5_pack/utils.py
DELETED
@@ -1,121 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from tqdm import tqdm
|
6 |
-
|
7 |
-
|
8 |
-
def load_data(file_name: str = "./infer/lib/uvr5_pack/name_params.json") -> dict:
    """Load the model-name / parameter mapping shipped with the uvr5 pack.

    Args:
        file_name: Path to the JSON file. Defaults to the bundled
            ``name_params.json``.

    Returns:
        The parsed JSON content as a dict.
    """
    # Explicit encoding: the default text codec is platform-dependent on
    # Windows, which would break if the JSON ever contains non-ASCII text.
    with open(file_name, "r", encoding="utf-8") as f:
        return json.load(f)
|
13 |
-
|
14 |
-
|
15 |
-
def make_padding(width, cropsize, offset):
    """Compute left/right padding so *width* frames tile evenly into
    crop windows of *cropsize* with *offset* frames of context on each side.

    Args:
        width: number of spectrogram frames to cover.
        cropsize: full window width fed to the network.
        offset: context margin consumed on each side of a window.

    Returns:
        (left, right, roi_size): padding amounts and the effective
        region-of-interest width contributed by each window.
    """
    pad_left = offset
    roi_size = cropsize - 2 * pad_left
    if roi_size == 0:
        # Degenerate case: the offsets would consume the whole window.
        roi_size = cropsize
    pad_right = roi_size - (width % roi_size) + pad_left
    return pad_left, pad_right, roi_size
|
23 |
-
|
24 |
-
|
25 |
-
def inference(X_spec, device, model, aggressiveness, data):
    """Run windowed vocal-separation inference over a complex spectrogram.

    Args:
        X_spec: complex spectrogram; assumed shape (channels, bins, frames)
            given the axis-2 windowing below — TODO confirm against callers.
        device: torch device the model lives on.
        model: network exposing ``predict(mag_window, aggressiveness)`` and
            an ``offset`` attribute.
        aggressiveness: separation setting passed through to ``model.predict``.
        data: dict of configs; uses ``data["window_size"]`` and ``data["tta"]``.

    Returns:
        (pred_magnitude, input_magnitude, phase) where ``pred_magnitude`` is
        rescaled back to the input's magnitude range and ``phase`` is
        ``exp(1j * angle(X_spec))``.
    """

    def _execute(
        X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True
    ):
        # Slide a window over the padded magnitude and concatenate the
        # per-window predictions along the frame axis.
        model.eval()
        with torch.no_grad():
            preds = []
            for i in tqdm(range(n_window)):
                start = i * roi_size
                X_mag_window = X_mag_pad[
                    None, :, :, start : start + data["window_size"]
                ]
                X_mag_window = torch.from_numpy(X_mag_window)
                if is_half:
                    X_mag_window = X_mag_window.half()
                X_mag_window = X_mag_window.to(device)

                pred = model.predict(X_mag_window, aggressiveness)

                pred = pred.detach().cpu().numpy()
                preds.append(pred[0])

            pred = np.concatenate(preds, axis=2)
        return pred

    def preprocess(X_spec):
        # Split the complex spectrogram into magnitude and phase.
        X_mag = np.abs(X_spec)
        X_phase = np.angle(X_spec)
        return X_mag, X_phase

    X_mag, X_phase = preprocess(X_spec)

    # Normalize so the network sees magnitudes in [0, 1]; the scale is
    # restored on the way out.
    coef = X_mag.max()
    X_mag_pre = X_mag / coef

    n_frame = X_mag_pre.shape[2]
    pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset)
    n_window = int(np.ceil(n_frame / roi_size))

    X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")

    # Mirror the checkpoint's parameter dtype so half-precision models get
    # half-precision inputs. (The original also built an unused
    # `total_iterations` counter here; that dead code is removed.)
    is_half = list(model.state_dict().values())[0].dtype == torch.float16
    pred = _execute(
        X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
    )
    pred = pred[:, :, :n_frame]

    if data["tta"]:
        # Test-time augmentation: run again with windows shifted by half a
        # roi and average the two predictions.
        pad_l += roi_size // 2
        pad_r += roi_size // 2
        n_window += 1

        X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")

        pred_tta = _execute(
            X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
        )
        pred_tta = pred_tta[:, :, roi_size // 2 :]
        pred_tta = pred_tta[:, :, :n_frame]

        return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase)
    else:
        return pred * coef, X_mag, np.exp(1.0j * X_phase)
|
100 |
-
|
101 |
-
|
102 |
-
def _get_name_params(model_path, model_hash):
    """Resolve the parameter-file name and model params for a checkpoint.

    An entry in name_params.json matches when its ``hash_name`` equals
    ``model_hash`` exactly, or appears as a substring of ``model_path``.
    Entries in the "equivalent" group win immediately; otherwise the last
    match found is returned.

    Args:
        model_path: path/name of the model file (substring matching).
        model_hash: hash of the model file (exact matching).

    Returns:
        (param_name, model_params) taken from name_params.json.

    Raises:
        ValueError: if no entry matches. (The original code would crash
            with an UnboundLocalError in this case instead.)
    """
    data = load_data()
    flag = False
    matched = False
    param_name_auto = None
    model_params_auto = None
    # Renamed the loop variable from `type` (original) to avoid shadowing
    # the builtin.
    for model_type in list(data):
        for model in list(data[model_type][0]):
            for entry in data[model_type][0][model]:
                if str(entry["hash_name"]) == model_hash:
                    flag = True
                elif str(entry["hash_name"]) in model_path:
                    flag = True

                if flag:
                    matched = True
                    model_params_auto = entry["model_params"]
                    param_name_auto = entry["param_name"]
                    if model_type == "equivalent":
                        # "equivalent" entries take priority: stop searching.
                        return param_name_auto, model_params_auto
                    flag = False
    if not matched:
        raise ValueError(
            f"no name_params.json entry matches {model_path!r} / {model_hash!r}"
        )
    return param_name_auto, model_params_auto
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/julius/resample.py
DELETED
@@ -1,216 +0,0 @@
|
|
1 |
-
# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
|
2 |
-
# Author: adefossez, 2020
|
3 |
-
"""
|
4 |
-
Differentiable, Pytorch based resampling.
|
5 |
-
Implementation of Julius O. Smith algorithm for resampling.
|
6 |
-
See https://ccrma.stanford.edu/~jos/resample/ for details.
|
7 |
-
This implementation is specially optimized for when new_sr / old_sr is a fraction
|
8 |
-
with a small numerator and denominator when removing the gcd (e.g. new_sr = 700, old_sr = 500).
|
9 |
-
|
10 |
-
Very similar to [bmcfee/resampy](https://github.com/bmcfee/resampy) except this implementation
|
11 |
-
is optimized for the case mentioned before, while resampy is slower but more general.
|
12 |
-
|
13 |
-
"""
|
14 |
-
|
15 |
-
import math
|
16 |
-
from typing import Optional
|
17 |
-
|
18 |
-
import torch
|
19 |
-
from torch.nn import functional as F
|
20 |
-
|
21 |
-
from .core import sinc
|
22 |
-
from .utils import simple_repr
|
23 |
-
|
24 |
-
|
25 |
-
class ResampleFrac(torch.nn.Module):
    """
    Resampling from the sample rate `old_sr` to `new_sr`.
    """
    def __init__(self, old_sr: int, new_sr: int, zeros: int = 24, rolloff: float = 0.945):
        """
        Args:
            old_sr (int): sample rate of the input signal x.
            new_sr (int): sample rate of the output.
            zeros (int): number of zero crossing to keep in the sinc filter.
            rolloff (float): use a lowpass filter that is `rolloff * new_sr / 2`,
                to ensure sufficient margin due to the imperfection of the FIR
                filter used. Lowering this value will reduce anti-aliasing, but
                will reduce some of the highest frequencies.

        Shape:

            - Input: `[*, T]`
            - Output: `[*, T']` with `T' = int(new_sr * T / old_sr)`


        .. caution::
            After dividing `old_sr` and `new_sr` by their GCD, both should be
            small for this implementation to be fast.

        >>> import torch
        >>> resample = ResampleFrac(4, 5)
        >>> x = torch.randn(1000)
        >>> print(len(resample(x)))
        1250
        """
        super().__init__()
        if not isinstance(old_sr, int) or not isinstance(new_sr, int):
            raise ValueError("old_sr and new_sr should be integers")
        common = math.gcd(old_sr, new_sr)
        self.old_sr = old_sr // common
        self.new_sr = new_sr // common
        self.zeros = zeros
        self.rolloff = rolloff
        self._init_kernels()

    def _init_kernels(self):
        # Identity resampling needs no kernel at all.
        if self.old_sr == self.new_sr:
            return

        # `rolloff` pulls the lowpass cutoff slightly below Nyquist to leave
        # headroom for the FIR approximation. Needed even when upsampling:
        # the implicit zero padding at the edges injects high frequencies
        # and causes edge artifacts otherwise.
        cutoff = min(self.new_sr, self.old_sr) * self.rolloff

        # Sinc-interpolation view (Julius O. Smith): x(t) can be exactly
        # reconstructed from its samples via
        #   x(t) = sum_i x[i] sinc(pi * old_sr * (i / old_sr - t))
        # and resampling evaluates y[j] = x(j / new_sr). Each output phase
        # (j mod new_sr) reuses the same FIR taps on a version of x shifted
        # by old_sr samples, which is exactly an F.conv1d with `new_sr`
        # output channels and stride `old_sr` (see forward()).
        self._width = math.ceil(self.zeros * self.old_sr / cutoff)
        # If old_sr is still big after GCD reduction, most filters are very
        # unbalanced (mostly near-zero on one side); evaluating them more
        # efficiently is left for future work.
        positions = torch.arange(-self._width, self._width + self.old_sr).float()
        kernels = []
        for phase in range(self.new_sr):
            t = (-phase / self.new_sr + positions / self.old_sr) * cutoff
            t = t.clamp_(-self.zeros, self.zeros)
            t *= math.pi
            window = torch.cos(t / self.zeros / 2) ** 2
            kernel = sinc(t) * window
            # Renormalize so a constant signal is preserved exactly.
            kernel.div_(kernel.sum())
            kernels.append(kernel)

        self.register_buffer("kernel", torch.stack(kernels).view(self.new_sr, 1, -1))

    def forward(self, x: torch.Tensor, output_length: Optional[int] = None, full: bool = False):
        """
        Resample x.
        Args:
            x (Tensor): signal to resample, time should be the last dimension
            output_length (None or int): This can be set to the desired output length
                (last dimension). Allowed values are between 0 and
                ceil(length * new_sr / old_sr). When None (default) is specified, the
                floored output length will be used. In order to select the largest possible
                size, use the `full` argument.
            full (bool): return the longest possible output from the input. This can be useful
                if you chain resampling operations, and want to give the `output_length` only
                for the last one, while passing `full=True` to all the other ones.
        """
        if self.old_sr == self.new_sr:
            return x
        shape = x.shape
        length = shape[-1]
        flat = x.reshape(-1, length)
        padded = F.pad(flat[:, None], (self._width, self._width + self.old_sr), mode='replicate')
        channels = F.conv1d(padded, self.kernel, stride=self.old_sr)  # type: ignore
        y = channels.transpose(1, 2).reshape(list(shape[:-1]) + [-1])

        float_output_length = self.new_sr * length / self.old_sr
        max_output_length = int(math.ceil(float_output_length))
        default_output_length = int(float_output_length)
        if output_length is None:
            output_length = max_output_length if full else default_output_length
        elif output_length < 0 or output_length > max_output_length:
            raise ValueError(f"output_length must be between 0 and {max_output_length}")
        else:
            if full:
                raise ValueError("You cannot pass both full=True and output_length")
        return y[..., :output_length]

    def __repr__(self):
        return simple_repr(self)
|
151 |
-
|
152 |
-
|
153 |
-
def resample_frac(x: torch.Tensor, old_sr: int, new_sr: int,
                  zeros: int = 24, rolloff: float = 0.945,
                  output_length: Optional[int] = None, full: bool = False):
    """
    Functional version of `ResampleFrac`; refer to its documentation for details.

    ..warning::
        Calling this repeatedly with the same sample rates recomputes the
        resampling kernel every time. For best performance, create and
        cache a `ResampleFrac` instance instead.
    """
    resampler = ResampleFrac(old_sr, new_sr, zeros, rolloff).to(x)
    return resampler(x, output_length, full)
|
165 |
-
|
166 |
-
|
167 |
-
# Easier implementations for downsampling and upsampling by a factor of 2
|
168 |
-
# Kept for testing and reference
|
169 |
-
|
170 |
-
def _kernel_upsample2_downsample2(zeros):
    """Build the FIR kernel shared by `_upsample2` and `_downsample2`.

    Interestingly, the exact same kernel serves both directions.
    """
    hann = torch.hann_window(4 * zeros + 1, periodic=False)
    odd_taps = hann[1::2]
    t = torch.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
    t *= math.pi
    return (sinc(t) * odd_taps).view(1, 1, -1)
|
179 |
-
|
180 |
-
|
181 |
-
def _upsample2(x, zeros=24):
    """
    Upsample x by a factor of two. The output will be exactly twice as long
    as the input.
    Args:
        x (Tensor): signal to upsample, time should be the last dimension
        zeros (int): number of zero crossing to keep in the sinc filter.

    This function is kept only for reference, you should use the more
    generic `resample_frac` one. This function does not perform
    anti-aliasing filtering.
    """
    *batch, time = x.shape
    kernel = _kernel_upsample2_downsample2(zeros).to(x)
    # Interpolate the half-sample midpoints, then interleave them with the
    # original samples.
    midpoints = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(*batch, time)
    interleaved = torch.stack([x, midpoints], dim=-1)
    return interleaved.view(*batch, -1)
|
196 |
-
|
197 |
-
|
198 |
-
def _downsample2(x, zeros=24):
    """
    Downsample x by a factor of two. The output length is half of the
    input, ceiled.
    Args:
        x (Tensor): signal to downsample, time should be the last dimension
        zeros (int): number of zero crossing to keep in the sinc filter.

    This function is kept only for reference, you should use the more
    generic `resample_frac` one. This function does not perform
    anti-aliasing filtering.
    """
    if x.shape[-1] % 2 != 0:
        x = F.pad(x, (0, 1))
    evens = x[..., ::2]
    odds = x[..., 1::2]
    *batch, time = odds.shape
    kernel = _kernel_upsample2_downsample2(zeros).to(x)
    # Filter the odd samples back onto the even grid and average the two
    # half-rate streams.
    filtered = F.conv1d(odds.view(-1, 1, time), kernel, padding=zeros)[..., :-1].view(
        *batch, time)
    return (evens + filtered).view(*batch, -1).mul(0.5)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/tools/dlmodels.bat
DELETED
@@ -1,348 +0,0 @@
|
|
1 |
-
@echo off && chcp 65001

rem -------------------------------------------------------------------
rem Download the pretrained models / uvr5 weights / hubert checkpoint
rem needed by RVC, using aria2c unpacked into a sibling "aria2*" dir.
rem
rem Fixes over the original:
rem   * mkdir now creates the missing directories under %~dp0assets\
rem     (the original ran e.g. "mkdir pretrained", creating them in the
rem     *current* directory, so the later downloads could still fail).
rem   * the hubert check message no longer claims a wrong directory.
rem   * the ~20 copy-pasted check/download stanzas are folded into one
rem     :check_file subroutine.
rem -------------------------------------------------------------------

echo working dir is %cd%
echo downloading requirement aria2 check.
echo=
dir /a:d/b | findstr "aria2" > flag.txt
findstr "aria2" flag.txt >nul
if %errorlevel% ==0 (
    echo aria2 checked.
    echo=
) else (
    echo failed. please downloading aria2 from webpage!
    echo unzip it and put in this directory!
    timeout /T 5
    start https://github.com/aria2/aria2/releases/tag/release-1.36.0
    echo=
    goto end
)

echo envfiles checking start.
echo=

for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch
:endSch

rem Base URL shared by every download below.
set hf=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main

echo dir check start.
echo=
for %%d in ("pretrained" "pretrained_v2" "uvr5_weights" "uvr5_weights\onnx_dereverb_By_FoxJoy") do (
    if exist "%~dp0assets\%%~d" (
        echo dir .\assets\%%~d checked.
    ) else (
        echo failed. generating dir .\assets\%%~d.
        mkdir "%~dp0assets\%%~d"
    )
)
echo=
echo dir check finished.

echo=
echo required files check start.

rem -------- pretrained (v1) --------
call :check_file pretrained D32k.pth %hf%/pretrained/D32k.pth
call :check_file pretrained D40k.pth %hf%/pretrained/D40k.pth
call :check_file pretrained D48k.pth %hf%/pretrained/D48k.pth
call :check_file pretrained G32k.pth %hf%/pretrained/G32k.pth
call :check_file pretrained G40k.pth %hf%/pretrained/G40k.pth
call :check_file pretrained G48k.pth %hf%/pretrained/G48k.pth
call :check_file pretrained f0D32k.pth %hf%/pretrained/f0D32k.pth
call :check_file pretrained f0D40k.pth %hf%/pretrained/f0D40k.pth
call :check_file pretrained f0D48k.pth %hf%/pretrained/f0D48k.pth
call :check_file pretrained f0G32k.pth %hf%/pretrained/f0G32k.pth
call :check_file pretrained f0G40k.pth %hf%/pretrained/f0G40k.pth
call :check_file pretrained f0G48k.pth %hf%/pretrained/f0G48k.pth

rem -------- pretrained_v2 --------
call :check_file pretrained_v2 D40k.pth %hf%/pretrained_v2/D40k.pth
call :check_file pretrained_v2 G40k.pth %hf%/pretrained_v2/G40k.pth
call :check_file pretrained_v2 f0D40k.pth %hf%/pretrained_v2/f0D40k.pth
call :check_file pretrained_v2 f0G40k.pth %hf%/pretrained_v2/f0G40k.pth

rem -------- uvr5 weights --------
call :check_file uvr5_weights HP2_all_vocals.pth %hf%/uvr5_weights/HP2_all_vocals.pth
call :check_file uvr5_weights HP3_all_vocals.pth %hf%/uvr5_weights/HP3_all_vocals.pth
call :check_file uvr5_weights HP5_only_main_vocal.pth %hf%/uvr5_weights/HP5_only_main_vocal.pth
call :check_file uvr5_weights VR-DeEchoAggressive.pth %hf%/uvr5_weights/VR-DeEchoAggressive.pth
call :check_file uvr5_weights VR-DeEchoDeReverb.pth %hf%/uvr5_weights/VR-DeEchoDeReverb.pth
call :check_file uvr5_weights VR-DeEchoNormal.pth %hf%/uvr5_weights/VR-DeEchoNormal.pth
call :check_file uvr5_weights\onnx_dereverb_By_FoxJoy vocals.onnx %hf%/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx

rem -------- hubert --------
call :check_file hubert hubert_base.pt %hf%/hubert_base.pt

echo required files check finished.
echo envfiles check complete.
pause
:end
del flag.txt
goto :eof

rem :check_file <subdir-under-assets> <filename> <url>
rem Downloads <url> into %~dp0assets\<subdir> as <filename> when missing.
:check_file
echo checking %2
if exist "%~dp0assets\%1\%2" (
    echo %2 in .\assets\%1 checked.
    echo=
) else (
    echo failed. starting download from huggingface.
    %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %3 -d %~dp0assets\%1 -o %2
    if exist "%~dp0assets\%1\%2" (echo download successful.) else (echo please try again!
    echo=)
)
goto :eof
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/layers_33966KB.py
DELETED
@@ -1,126 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
|
5 |
-
from uvr5_pack.lib_v5 import spec_utils
|
6 |
-
|
7 |
-
|
8 |
-
class Conv2DBNActiv(nn.Module):
|
9 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
10 |
-
super(Conv2DBNActiv, self).__init__()
|
11 |
-
self.conv = nn.Sequential(
|
12 |
-
nn.Conv2d(
|
13 |
-
nin,
|
14 |
-
nout,
|
15 |
-
kernel_size=ksize,
|
16 |
-
stride=stride,
|
17 |
-
padding=pad,
|
18 |
-
dilation=dilation,
|
19 |
-
bias=False,
|
20 |
-
),
|
21 |
-
nn.BatchNorm2d(nout),
|
22 |
-
activ(),
|
23 |
-
)
|
24 |
-
|
25 |
-
def __call__(self, x):
|
26 |
-
return self.conv(x)
|
27 |
-
|
28 |
-
|
29 |
-
class SeperableConv2DBNActiv(nn.Module):
|
30 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
31 |
-
super(SeperableConv2DBNActiv, self).__init__()
|
32 |
-
self.conv = nn.Sequential(
|
33 |
-
nn.Conv2d(
|
34 |
-
nin,
|
35 |
-
nin,
|
36 |
-
kernel_size=ksize,
|
37 |
-
stride=stride,
|
38 |
-
padding=pad,
|
39 |
-
dilation=dilation,
|
40 |
-
groups=nin,
|
41 |
-
bias=False,
|
42 |
-
),
|
43 |
-
nn.Conv2d(nin, nout, kernel_size=1, bias=False),
|
44 |
-
nn.BatchNorm2d(nout),
|
45 |
-
activ(),
|
46 |
-
)
|
47 |
-
|
48 |
-
def __call__(self, x):
|
49 |
-
return self.conv(x)
|
50 |
-
|
51 |
-
|
52 |
-
class Encoder(nn.Module):
|
53 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
|
54 |
-
super(Encoder, self).__init__()
|
55 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
56 |
-
self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
|
57 |
-
|
58 |
-
def __call__(self, x):
|
59 |
-
skip = self.conv1(x)
|
60 |
-
h = self.conv2(skip)
|
61 |
-
|
62 |
-
return h, skip
|
63 |
-
|
64 |
-
|
65 |
-
class Decoder(nn.Module):
|
66 |
-
def __init__(
|
67 |
-
self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
|
68 |
-
):
|
69 |
-
super(Decoder, self).__init__()
|
70 |
-
self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
71 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
72 |
-
|
73 |
-
def __call__(self, x, skip=None):
|
74 |
-
x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
|
75 |
-
if skip is not None:
|
76 |
-
skip = spec_utils.crop_center(skip, x)
|
77 |
-
x = torch.cat([x, skip], dim=1)
|
78 |
-
h = self.conv(x)
|
79 |
-
|
80 |
-
if self.dropout is not None:
|
81 |
-
h = self.dropout(h)
|
82 |
-
|
83 |
-
return h
|
84 |
-
|
85 |
-
|
86 |
-
class ASPPModule(nn.Module):
|
87 |
-
def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
|
88 |
-
super(ASPPModule, self).__init__()
|
89 |
-
self.conv1 = nn.Sequential(
|
90 |
-
nn.AdaptiveAvgPool2d((1, None)),
|
91 |
-
Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
|
92 |
-
)
|
93 |
-
self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
|
94 |
-
self.conv3 = SeperableConv2DBNActiv(
|
95 |
-
nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
|
96 |
-
)
|
97 |
-
self.conv4 = SeperableConv2DBNActiv(
|
98 |
-
nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
|
99 |
-
)
|
100 |
-
self.conv5 = SeperableConv2DBNActiv(
|
101 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
102 |
-
)
|
103 |
-
self.conv6 = SeperableConv2DBNActiv(
|
104 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
105 |
-
)
|
106 |
-
self.conv7 = SeperableConv2DBNActiv(
|
107 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
108 |
-
)
|
109 |
-
self.bottleneck = nn.Sequential(
|
110 |
-
Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
|
111 |
-
)
|
112 |
-
|
113 |
-
def forward(self, x):
|
114 |
-
_, _, h, w = x.size()
|
115 |
-
feat1 = F.interpolate(
|
116 |
-
self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
|
117 |
-
)
|
118 |
-
feat2 = self.conv2(x)
|
119 |
-
feat3 = self.conv3(x)
|
120 |
-
feat4 = self.conv4(x)
|
121 |
-
feat5 = self.conv5(x)
|
122 |
-
feat6 = self.conv6(x)
|
123 |
-
feat7 = self.conv7(x)
|
124 |
-
out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
|
125 |
-
bottle = self.bottleneck(out)
|
126 |
-
return bottle
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI-ZTH-03-23/4.RealTime-MediaPipe-AI-From-Video-On-Any-Device/app.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
st.markdown("""
|
3 |
-
|
4 |
-
# MediaPipe
|
5 |
-
|
6 |
-
### A cross language SDK for AI that is real time, 3d, camera responsive, and on any device for nearly any language
|
7 |
-
|
8 |
-
#### Vision
|
9 |
-
#### Natural Language
|
10 |
-
#### Audio
|
11 |
-
|
12 |
-
Mediapipe has fast and flexible AI/ML pipelines.
|
13 |
-
|
14 |
-
Examples with Javascript Links!
|
15 |
-
|
16 |
-
1. Image Classifier: https://mediapipe-studio.webapps.google.com/demo/image_classifier
|
17 |
-
2. Object Detector: https://mediapipe-studio.webapps.google.com/demo/object_detector
|
18 |
-
3. Text Classification: https://mediapipe-studio.webapps.google.com/demo/text_classifier
|
19 |
-
4. Gesture Recognizer: https://mediapipe-studio.webapps.google.com/demo/gesture_recognizer
|
20 |
-
5. Hand Landmark Detection: https://mediapipe-studio.webapps.google.com/demo/hand_landmarker
|
21 |
-
6. Audio Classifier: https://mediapipe-studio.webapps.google.com/demo/audio_classifier
|
22 |
-
|
23 |
-
Get started with just Javascript!!
|
24 |
-
|
25 |
-
Getting Started: https://google.github.io/mediapipe/getting_started/javascript.html
|
26 |
-
|
27 |
-
Javascript Solutions - Ready to Demo:
|
28 |
-
1. Face Mesh: https://codepen.io/mediapipe/full/KKgVaPJ
|
29 |
-
2. Face Detection: https://codepen.io/mediapipe/full/dyOzvZM
|
30 |
-
3. Hands: https://codepen.io/mediapipe/full/RwGWYJw
|
31 |
-
4. Face, Hands, Body: https://codepen.io/mediapipe/full/LYRRYEw
|
32 |
-
5. Objectron: https://codepen.io/mediapipe/full/BaWvzdY
|
33 |
-
6. Full Skeletal Pose: https://codepen.io/mediapipe/full/jOMbvxw
|
34 |
-
7. Self Segmentation From Background: https://codepen.io/mediapipe/full/wvJyQpq
|
35 |
-
|
36 |
-
|
37 |
-
Demonstration in Action with Screenshots:
|
38 |
-
|
39 |
-
Self Segmentation From Background:
|
40 |
-

|
41 |
-
|
42 |
-
Full Skeletal Pose:
|
43 |
-

|
44 |
-
|
45 |
-
Hands - Both in 3D Projection even hidden surface vertices - Mahalo:
|
46 |
-

|
47 |
-
|
48 |
-
Holistic - Face, Hands, Body:
|
49 |
-

|
50 |
-
|
51 |
-
Face Detection:
|
52 |
-

|
53 |
-
|
54 |
-
Face Mesh Real Time - 30 Frames per second!
|
55 |
-

|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
""")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/utils/utils.py
DELETED
@@ -1,169 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
|
3 |
-
# Copyright 2019 Tomoki Hayashi
|
4 |
-
# MIT License (https://opensource.org/licenses/MIT)
|
5 |
-
|
6 |
-
"""Utility functions."""
|
7 |
-
|
8 |
-
import fnmatch
|
9 |
-
import logging
|
10 |
-
import os
|
11 |
-
import sys
|
12 |
-
|
13 |
-
import h5py
|
14 |
-
import numpy as np
|
15 |
-
|
16 |
-
|
17 |
-
def find_files(root_dir, query="*.wav", include_root_dir=True):
|
18 |
-
"""Find files recursively.
|
19 |
-
|
20 |
-
Args:
|
21 |
-
root_dir (str): Root root_dir to find.
|
22 |
-
query (str): Query to find.
|
23 |
-
include_root_dir (bool): If False, root_dir name is not included.
|
24 |
-
|
25 |
-
Returns:
|
26 |
-
list: List of found filenames.
|
27 |
-
|
28 |
-
"""
|
29 |
-
files = []
|
30 |
-
for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
|
31 |
-
for filename in fnmatch.filter(filenames, query):
|
32 |
-
files.append(os.path.join(root, filename))
|
33 |
-
if not include_root_dir:
|
34 |
-
files = [file_.replace(root_dir + "/", "") for file_ in files]
|
35 |
-
|
36 |
-
return files
|
37 |
-
|
38 |
-
|
39 |
-
def read_hdf5(hdf5_name, hdf5_path):
|
40 |
-
"""Read hdf5 dataset.
|
41 |
-
|
42 |
-
Args:
|
43 |
-
hdf5_name (str): Filename of hdf5 file.
|
44 |
-
hdf5_path (str): Dataset name in hdf5 file.
|
45 |
-
|
46 |
-
Return:
|
47 |
-
any: Dataset values.
|
48 |
-
|
49 |
-
"""
|
50 |
-
if not os.path.exists(hdf5_name):
|
51 |
-
logging.error(f"There is no such a hdf5 file ({hdf5_name}).")
|
52 |
-
sys.exit(1)
|
53 |
-
|
54 |
-
hdf5_file = h5py.File(hdf5_name, "r")
|
55 |
-
|
56 |
-
if hdf5_path not in hdf5_file:
|
57 |
-
logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})")
|
58 |
-
sys.exit(1)
|
59 |
-
|
60 |
-
hdf5_data = hdf5_file[hdf5_path][()]
|
61 |
-
hdf5_file.close()
|
62 |
-
|
63 |
-
return hdf5_data
|
64 |
-
|
65 |
-
|
66 |
-
def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True):
|
67 |
-
"""Write dataset to hdf5.
|
68 |
-
|
69 |
-
Args:
|
70 |
-
hdf5_name (str): Hdf5 dataset filename.
|
71 |
-
hdf5_path (str): Dataset path in hdf5.
|
72 |
-
write_data (ndarray): Data to write.
|
73 |
-
is_overwrite (bool): Whether to overwrite dataset.
|
74 |
-
|
75 |
-
"""
|
76 |
-
# convert to numpy array
|
77 |
-
write_data = np.array(write_data)
|
78 |
-
|
79 |
-
# check folder existence
|
80 |
-
folder_name, _ = os.path.split(hdf5_name)
|
81 |
-
if not os.path.exists(folder_name) and len(folder_name) != 0:
|
82 |
-
os.makedirs(folder_name)
|
83 |
-
|
84 |
-
# check hdf5 existence
|
85 |
-
if os.path.exists(hdf5_name):
|
86 |
-
# if already exists, open with r+ mode
|
87 |
-
hdf5_file = h5py.File(hdf5_name, "r+")
|
88 |
-
# check dataset existence
|
89 |
-
if hdf5_path in hdf5_file:
|
90 |
-
if is_overwrite:
|
91 |
-
logging.warning("Dataset in hdf5 file already exists. "
|
92 |
-
"recreate dataset in hdf5.")
|
93 |
-
hdf5_file.__delitem__(hdf5_path)
|
94 |
-
else:
|
95 |
-
logging.error("Dataset in hdf5 file already exists. "
|
96 |
-
"if you want to overwrite, please set is_overwrite = True.")
|
97 |
-
hdf5_file.close()
|
98 |
-
sys.exit(1)
|
99 |
-
else:
|
100 |
-
# if not exists, open with w mode
|
101 |
-
hdf5_file = h5py.File(hdf5_name, "w")
|
102 |
-
|
103 |
-
# write data to hdf5
|
104 |
-
hdf5_file.create_dataset(hdf5_path, data=write_data)
|
105 |
-
hdf5_file.flush()
|
106 |
-
hdf5_file.close()
|
107 |
-
|
108 |
-
|
109 |
-
class HDF5ScpLoader(object):
|
110 |
-
"""Loader class for a fests.scp file of hdf5 file.
|
111 |
-
|
112 |
-
Examples:
|
113 |
-
key1 /some/path/a.h5:feats
|
114 |
-
key2 /some/path/b.h5:feats
|
115 |
-
key3 /some/path/c.h5:feats
|
116 |
-
key4 /some/path/d.h5:feats
|
117 |
-
...
|
118 |
-
>>> loader = HDF5ScpLoader("hdf5.scp")
|
119 |
-
>>> array = loader["key1"]
|
120 |
-
|
121 |
-
key1 /some/path/a.h5
|
122 |
-
key2 /some/path/b.h5
|
123 |
-
key3 /some/path/c.h5
|
124 |
-
key4 /some/path/d.h5
|
125 |
-
...
|
126 |
-
>>> loader = HDF5ScpLoader("hdf5.scp", "feats")
|
127 |
-
>>> array = loader["key1"]
|
128 |
-
|
129 |
-
"""
|
130 |
-
|
131 |
-
def __init__(self, feats_scp, default_hdf5_path="feats"):
|
132 |
-
"""Initialize HDF5 scp loader.
|
133 |
-
|
134 |
-
Args:
|
135 |
-
feats_scp (str): Kaldi-style feats.scp file with hdf5 format.
|
136 |
-
default_hdf5_path (str): Path in hdf5 file. If the scp contain the info, not used.
|
137 |
-
|
138 |
-
"""
|
139 |
-
self.default_hdf5_path = default_hdf5_path
|
140 |
-
with open(feats_scp) as f:
|
141 |
-
lines = [line.replace("\n", "") for line in f.readlines()]
|
142 |
-
self.data = {}
|
143 |
-
for line in lines:
|
144 |
-
key, value = line.split()
|
145 |
-
self.data[key] = value
|
146 |
-
|
147 |
-
def get_path(self, key):
|
148 |
-
"""Get hdf5 file path for a given key."""
|
149 |
-
return self.data[key]
|
150 |
-
|
151 |
-
def __getitem__(self, key):
|
152 |
-
"""Get ndarray for a given key."""
|
153 |
-
p = self.data[key]
|
154 |
-
if ":" in p:
|
155 |
-
return read_hdf5(*p.split(":"))
|
156 |
-
else:
|
157 |
-
return read_hdf5(p, self.default_hdf5_path)
|
158 |
-
|
159 |
-
def __len__(self):
|
160 |
-
"""Return the length of the scp file."""
|
161 |
-
return len(self.data)
|
162 |
-
|
163 |
-
def __iter__(self):
|
164 |
-
"""Return the iterator of the scp file."""
|
165 |
-
return iter(self.data)
|
166 |
-
|
167 |
-
def keys(self):
|
168 |
-
"""Return the keys of the scp file."""
|
169 |
-
return self.data.keys()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/lpaps.py
DELETED
@@ -1,152 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Based on https://github.com/CompVis/taming-transformers/blob/52720829/taming/modules/losses/lpips.py
|
3 |
-
Adapted for spectrograms by Vladimir Iashin (v-iashin)
|
4 |
-
"""
|
5 |
-
from collections import namedtuple
|
6 |
-
|
7 |
-
import numpy as np
|
8 |
-
import torch
|
9 |
-
import torch.nn as nn
|
10 |
-
|
11 |
-
import sys
|
12 |
-
sys.path.insert(0, '.') # nopep8
|
13 |
-
from ldm.modules.losses_audio.vggishish.model import VGGishish
|
14 |
-
from ldm.util import get_ckpt_path
|
15 |
-
|
16 |
-
|
17 |
-
class LPAPS(nn.Module):
|
18 |
-
# Learned perceptual metric
|
19 |
-
def __init__(self, use_dropout=True):
|
20 |
-
super().__init__()
|
21 |
-
self.scaling_layer = ScalingLayer()
|
22 |
-
self.chns = [64, 128, 256, 512, 512] # vggish16 features
|
23 |
-
self.net = vggishish16(pretrained=True, requires_grad=False)
|
24 |
-
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
|
25 |
-
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
|
26 |
-
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
|
27 |
-
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
|
28 |
-
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
|
29 |
-
self.load_from_pretrained()
|
30 |
-
for param in self.parameters():
|
31 |
-
param.requires_grad = False
|
32 |
-
|
33 |
-
def load_from_pretrained(self, name="vggishish_lpaps"):
|
34 |
-
ckpt = get_ckpt_path(name, "ldm/modules/autoencoder/lpaps")
|
35 |
-
self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
|
36 |
-
print("loaded pretrained LPAPS loss from {}".format(ckpt))
|
37 |
-
|
38 |
-
@classmethod
|
39 |
-
def from_pretrained(cls, name="vggishish_lpaps"):
|
40 |
-
if name != "vggishish_lpaps":
|
41 |
-
raise NotImplementedError
|
42 |
-
model = cls()
|
43 |
-
ckpt = get_ckpt_path(name)
|
44 |
-
model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
|
45 |
-
return model
|
46 |
-
|
47 |
-
def forward(self, input, target):
|
48 |
-
in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
|
49 |
-
outs0, outs1 = self.net(in0_input), self.net(in1_input)
|
50 |
-
feats0, feats1, diffs = {}, {}, {}
|
51 |
-
lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
|
52 |
-
for kk in range(len(self.chns)):
|
53 |
-
feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
|
54 |
-
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
|
55 |
-
|
56 |
-
res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
|
57 |
-
val = res[0]
|
58 |
-
for l in range(1, len(self.chns)):
|
59 |
-
val += res[l]
|
60 |
-
return val
|
61 |
-
|
62 |
-
class ScalingLayer(nn.Module):
|
63 |
-
def __init__(self):
|
64 |
-
super(ScalingLayer, self).__init__()
|
65 |
-
# we are gonna use get_ckpt_path to donwload the stats as well
|
66 |
-
stat_path = get_ckpt_path('vggishish_mean_std_melspec_10s_22050hz', 'ldm/modules/autoencoder/lpaps')
|
67 |
-
# if for images we normalize on the channel dim, in spectrogram we will norm on frequency dimension
|
68 |
-
means, stds = np.loadtxt(stat_path, dtype=np.float32).T
|
69 |
-
# the normalization in means and stds are given for [0, 1], but specvqgan expects [-1, 1]:
|
70 |
-
means = 2 * means - 1
|
71 |
-
stds = 2 * stds
|
72 |
-
# input is expected to be (B, 1, F, T)
|
73 |
-
self.register_buffer('shift', torch.from_numpy(means)[None, None, :, None])
|
74 |
-
self.register_buffer('scale', torch.from_numpy(stds)[None, None, :, None])
|
75 |
-
|
76 |
-
def forward(self, inp):
|
77 |
-
return (inp - self.shift) / self.scale
|
78 |
-
|
79 |
-
|
80 |
-
class NetLinLayer(nn.Module):
|
81 |
-
""" A single linear layer which does a 1x1 conv """
|
82 |
-
def __init__(self, chn_in, chn_out=1, use_dropout=False):
|
83 |
-
super(NetLinLayer, self).__init__()
|
84 |
-
layers = [nn.Dropout(), ] if (use_dropout) else []
|
85 |
-
layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
|
86 |
-
self.model = nn.Sequential(*layers)
|
87 |
-
|
88 |
-
class vggishish16(torch.nn.Module):
|
89 |
-
def __init__(self, requires_grad=False, pretrained=True):
|
90 |
-
super().__init__()
|
91 |
-
vgg_pretrained_features = self.vggishish16(pretrained=pretrained).features
|
92 |
-
self.slice1 = torch.nn.Sequential()
|
93 |
-
self.slice2 = torch.nn.Sequential()
|
94 |
-
self.slice3 = torch.nn.Sequential()
|
95 |
-
self.slice4 = torch.nn.Sequential()
|
96 |
-
self.slice5 = torch.nn.Sequential()
|
97 |
-
self.N_slices = 5
|
98 |
-
for x in range(4):
|
99 |
-
self.slice1.add_module(str(x), vgg_pretrained_features[x])
|
100 |
-
for x in range(4, 9):
|
101 |
-
self.slice2.add_module(str(x), vgg_pretrained_features[x])
|
102 |
-
for x in range(9, 16):
|
103 |
-
self.slice3.add_module(str(x), vgg_pretrained_features[x])
|
104 |
-
for x in range(16, 23):
|
105 |
-
self.slice4.add_module(str(x), vgg_pretrained_features[x])
|
106 |
-
for x in range(23, 30):
|
107 |
-
self.slice5.add_module(str(x), vgg_pretrained_features[x])
|
108 |
-
if not requires_grad:
|
109 |
-
for param in self.parameters():
|
110 |
-
param.requires_grad = False
|
111 |
-
|
112 |
-
def forward(self, X):
|
113 |
-
h = self.slice1(X)
|
114 |
-
h_relu1_2 = h
|
115 |
-
h = self.slice2(h)
|
116 |
-
h_relu2_2 = h
|
117 |
-
h = self.slice3(h)
|
118 |
-
h_relu3_3 = h
|
119 |
-
h = self.slice4(h)
|
120 |
-
h_relu4_3 = h
|
121 |
-
h = self.slice5(h)
|
122 |
-
h_relu5_3 = h
|
123 |
-
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
|
124 |
-
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
|
125 |
-
return out
|
126 |
-
|
127 |
-
def vggishish16(self, pretrained: bool = True) -> VGGishish:
|
128 |
-
# loading vggishish pretrained on vggsound
|
129 |
-
num_classes_vggsound = 309
|
130 |
-
conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
|
131 |
-
model = VGGishish(conv_layers, use_bn=False, num_classes=num_classes_vggsound)
|
132 |
-
if pretrained:
|
133 |
-
ckpt_path = get_ckpt_path('vggishish_lpaps', "ldm/modules/autoencoder/lpaps")
|
134 |
-
ckpt = torch.load(ckpt_path, map_location=torch.device("cpu"))
|
135 |
-
model.load_state_dict(ckpt, strict=False)
|
136 |
-
return model
|
137 |
-
|
138 |
-
def normalize_tensor(x, eps=1e-10):
|
139 |
-
norm_factor = torch.sqrt(torch.sum(x**2, dim=1, keepdim=True))
|
140 |
-
return x / (norm_factor+eps)
|
141 |
-
|
142 |
-
def spatial_average(x, keepdim=True):
|
143 |
-
return x.mean([2, 3], keepdim=keepdim)
|
144 |
-
|
145 |
-
|
146 |
-
if __name__ == '__main__':
|
147 |
-
inputs = torch.rand((16, 1, 80, 848))
|
148 |
-
reconstructions = torch.rand((16, 1, 80, 848))
|
149 |
-
lpips = LPAPS().eval()
|
150 |
-
loss_p = lpips(inputs.contiguous(), reconstructions.contiguous())
|
151 |
-
# (16, 1, 1, 1)
|
152 |
-
print(loss_p.shape)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/wavenet.py
DELETED
@@ -1,97 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
|
4 |
-
|
5 |
-
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
6 |
-
n_channels_int = n_channels[0]
|
7 |
-
in_act = input_a + input_b
|
8 |
-
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
9 |
-
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
10 |
-
acts = t_act * s_act
|
11 |
-
return acts
|
12 |
-
|
13 |
-
|
14 |
-
class WN(torch.nn.Module):
|
15 |
-
def __init__(self, hidden_size, kernel_size, dilation_rate, n_layers, c_cond=0,
|
16 |
-
p_dropout=0, share_cond_layers=False, is_BTC=False):
|
17 |
-
super(WN, self).__init__()
|
18 |
-
assert (kernel_size % 2 == 1)
|
19 |
-
assert (hidden_size % 2 == 0)
|
20 |
-
self.is_BTC = is_BTC
|
21 |
-
self.hidden_size = hidden_size
|
22 |
-
self.kernel_size = kernel_size
|
23 |
-
self.dilation_rate = dilation_rate
|
24 |
-
self.n_layers = n_layers
|
25 |
-
self.gin_channels = c_cond
|
26 |
-
self.p_dropout = p_dropout
|
27 |
-
self.share_cond_layers = share_cond_layers
|
28 |
-
|
29 |
-
self.in_layers = torch.nn.ModuleList()
|
30 |
-
self.res_skip_layers = torch.nn.ModuleList()
|
31 |
-
self.drop = nn.Dropout(p_dropout)
|
32 |
-
|
33 |
-
if c_cond != 0 and not share_cond_layers:
|
34 |
-
cond_layer = torch.nn.Conv1d(c_cond, 2 * hidden_size * n_layers, 1)
|
35 |
-
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
|
36 |
-
|
37 |
-
for i in range(n_layers):
|
38 |
-
dilation = dilation_rate ** i
|
39 |
-
padding = int((kernel_size * dilation - dilation) / 2)
|
40 |
-
in_layer = torch.nn.Conv1d(hidden_size, 2 * hidden_size, kernel_size,
|
41 |
-
dilation=dilation, padding=padding)
|
42 |
-
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
|
43 |
-
self.in_layers.append(in_layer)
|
44 |
-
|
45 |
-
# last one is not necessary
|
46 |
-
if i < n_layers - 1:
|
47 |
-
res_skip_channels = 2 * hidden_size
|
48 |
-
else:
|
49 |
-
res_skip_channels = hidden_size
|
50 |
-
|
51 |
-
res_skip_layer = torch.nn.Conv1d(hidden_size, res_skip_channels, 1)
|
52 |
-
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
|
53 |
-
self.res_skip_layers.append(res_skip_layer)
|
54 |
-
|
55 |
-
def forward(self, x, nonpadding=None, cond=None):
|
56 |
-
if self.is_BTC:
|
57 |
-
x = x.transpose(1, 2)
|
58 |
-
cond = cond.transpose(1, 2) if cond is not None else None
|
59 |
-
nonpadding = nonpadding.transpose(1, 2) if nonpadding is not None else None
|
60 |
-
if nonpadding is None:
|
61 |
-
nonpadding = 1
|
62 |
-
output = torch.zeros_like(x)
|
63 |
-
n_channels_tensor = torch.IntTensor([self.hidden_size])
|
64 |
-
|
65 |
-
if cond is not None and not self.share_cond_layers:
|
66 |
-
cond = self.cond_layer(cond)
|
67 |
-
|
68 |
-
for i in range(self.n_layers):
|
69 |
-
x_in = self.in_layers[i](x)
|
70 |
-
x_in = self.drop(x_in)
|
71 |
-
if cond is not None:
|
72 |
-
cond_offset = i * 2 * self.hidden_size
|
73 |
-
cond_l = cond[:, cond_offset:cond_offset + 2 * self.hidden_size, :]
|
74 |
-
else:
|
75 |
-
cond_l = torch.zeros_like(x_in)
|
76 |
-
|
77 |
-
acts = fused_add_tanh_sigmoid_multiply(x_in, cond_l, n_channels_tensor)
|
78 |
-
|
79 |
-
res_skip_acts = self.res_skip_layers[i](acts)
|
80 |
-
if i < self.n_layers - 1:
|
81 |
-
x = (x + res_skip_acts[:, :self.hidden_size, :]) * nonpadding
|
82 |
-
output = output + res_skip_acts[:, self.hidden_size:, :]
|
83 |
-
else:
|
84 |
-
output = output + res_skip_acts
|
85 |
-
output = output * nonpadding
|
86 |
-
if self.is_BTC:
|
87 |
-
output = output.transpose(1, 2)
|
88 |
-
return output
|
89 |
-
|
90 |
-
def remove_weight_norm(self):
|
91 |
-
def remove_weight_norm(m):
|
92 |
-
try:
|
93 |
-
nn.utils.remove_weight_norm(m)
|
94 |
-
except ValueError: # this module didn't have weight norm
|
95 |
-
return
|
96 |
-
|
97 |
-
self.apply(remove_weight_norm)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192.py
DELETED
@@ -1,2861 +0,0 @@
|
|
1 |
-
default_scope = 'mmpose'
|
2 |
-
default_hooks = dict(
|
3 |
-
timer=dict(type='IterTimerHook'),
|
4 |
-
logger=dict(type='LoggerHook', interval=50),
|
5 |
-
param_scheduler=dict(type='ParamSchedulerHook'),
|
6 |
-
checkpoint=dict(
|
7 |
-
type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
|
8 |
-
sampler_seed=dict(type='DistSamplerSeedHook'),
|
9 |
-
visualization=dict(type='PoseVisualizationHook', enable=False))
|
10 |
-
custom_hooks = [dict(type='SyncBuffersHook')]
|
11 |
-
env_cfg = dict(
|
12 |
-
cudnn_benchmark=False,
|
13 |
-
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
|
14 |
-
dist_cfg=dict(backend='nccl'))
|
15 |
-
vis_backends = [dict(type='LocalVisBackend')]
|
16 |
-
visualizer = dict(
|
17 |
-
type='PoseLocalVisualizer',
|
18 |
-
vis_backends=[dict(type='LocalVisBackend'),
|
19 |
-
dict(type='WandbVisBackend')],
|
20 |
-
name='visualizer')
|
21 |
-
log_processor = dict(
|
22 |
-
type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
|
23 |
-
log_level = 'INFO'
|
24 |
-
load_from = None
|
25 |
-
resume = False
|
26 |
-
backend_args = dict(backend='local')
|
27 |
-
train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10)
|
28 |
-
val_cfg = dict()
|
29 |
-
test_cfg = dict()
|
30 |
-
colors = dict(
|
31 |
-
sss=[255, 128, 0],
|
32 |
-
lss=[255, 0, 128],
|
33 |
-
sso=[128, 0, 255],
|
34 |
-
lso=[0, 128, 255],
|
35 |
-
vest=[0, 128, 128],
|
36 |
-
sling=[0, 0, 128],
|
37 |
-
shorts=[128, 128, 128],
|
38 |
-
trousers=[128, 0, 128],
|
39 |
-
skirt=[64, 128, 128],
|
40 |
-
ssd=[64, 64, 128],
|
41 |
-
lsd=[128, 64, 0],
|
42 |
-
vd=[128, 64, 255],
|
43 |
-
sd=[128, 64, 0])
|
44 |
-
dataset_info = dict(
|
45 |
-
dataset_name='deepfashion2',
|
46 |
-
paper_info=dict(
|
47 |
-
author=
|
48 |
-
'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
|
49 |
-
title=
|
50 |
-
'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
|
51 |
-
container=
|
52 |
-
'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
|
53 |
-
year='2019',
|
54 |
-
homepage='https://github.com/switchablenorms/DeepFashion2'),
|
55 |
-
keypoint_info=dict({
|
56 |
-
0:
|
57 |
-
dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
|
58 |
-
1:
|
59 |
-
dict(
|
60 |
-
name='sss_kpt2',
|
61 |
-
id=1,
|
62 |
-
color=[255, 128, 0],
|
63 |
-
type='',
|
64 |
-
swap='sss_kpt6'),
|
65 |
-
2:
|
66 |
-
dict(
|
67 |
-
name='sss_kpt3',
|
68 |
-
id=2,
|
69 |
-
color=[255, 128, 0],
|
70 |
-
type='',
|
71 |
-
swap='sss_kpt5'),
|
72 |
-
3:
|
73 |
-
dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
|
74 |
-
4:
|
75 |
-
dict(
|
76 |
-
name='sss_kpt5',
|
77 |
-
id=4,
|
78 |
-
color=[255, 128, 0],
|
79 |
-
type='',
|
80 |
-
swap='sss_kpt3'),
|
81 |
-
5:
|
82 |
-
dict(
|
83 |
-
name='sss_kpt6',
|
84 |
-
id=5,
|
85 |
-
color=[255, 128, 0],
|
86 |
-
type='',
|
87 |
-
swap='sss_kpt2'),
|
88 |
-
6:
|
89 |
-
dict(
|
90 |
-
name='sss_kpt7',
|
91 |
-
id=6,
|
92 |
-
color=[255, 128, 0],
|
93 |
-
type='',
|
94 |
-
swap='sss_kpt25'),
|
95 |
-
7:
|
96 |
-
dict(
|
97 |
-
name='sss_kpt8',
|
98 |
-
id=7,
|
99 |
-
color=[255, 128, 0],
|
100 |
-
type='',
|
101 |
-
swap='sss_kpt24'),
|
102 |
-
8:
|
103 |
-
dict(
|
104 |
-
name='sss_kpt9',
|
105 |
-
id=8,
|
106 |
-
color=[255, 128, 0],
|
107 |
-
type='',
|
108 |
-
swap='sss_kpt23'),
|
109 |
-
9:
|
110 |
-
dict(
|
111 |
-
name='sss_kpt10',
|
112 |
-
id=9,
|
113 |
-
color=[255, 128, 0],
|
114 |
-
type='',
|
115 |
-
swap='sss_kpt22'),
|
116 |
-
10:
|
117 |
-
dict(
|
118 |
-
name='sss_kpt11',
|
119 |
-
id=10,
|
120 |
-
color=[255, 128, 0],
|
121 |
-
type='',
|
122 |
-
swap='sss_kpt21'),
|
123 |
-
11:
|
124 |
-
dict(
|
125 |
-
name='sss_kpt12',
|
126 |
-
id=11,
|
127 |
-
color=[255, 128, 0],
|
128 |
-
type='',
|
129 |
-
swap='sss_kpt20'),
|
130 |
-
12:
|
131 |
-
dict(
|
132 |
-
name='sss_kpt13',
|
133 |
-
id=12,
|
134 |
-
color=[255, 128, 0],
|
135 |
-
type='',
|
136 |
-
swap='sss_kpt19'),
|
137 |
-
13:
|
138 |
-
dict(
|
139 |
-
name='sss_kpt14',
|
140 |
-
id=13,
|
141 |
-
color=[255, 128, 0],
|
142 |
-
type='',
|
143 |
-
swap='sss_kpt18'),
|
144 |
-
14:
|
145 |
-
dict(
|
146 |
-
name='sss_kpt15',
|
147 |
-
id=14,
|
148 |
-
color=[255, 128, 0],
|
149 |
-
type='',
|
150 |
-
swap='sss_kpt17'),
|
151 |
-
15:
|
152 |
-
dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
|
153 |
-
16:
|
154 |
-
dict(
|
155 |
-
name='sss_kpt17',
|
156 |
-
id=16,
|
157 |
-
color=[255, 128, 0],
|
158 |
-
type='',
|
159 |
-
swap='sss_kpt15'),
|
160 |
-
17:
|
161 |
-
dict(
|
162 |
-
name='sss_kpt18',
|
163 |
-
id=17,
|
164 |
-
color=[255, 128, 0],
|
165 |
-
type='',
|
166 |
-
swap='sss_kpt14'),
|
167 |
-
18:
|
168 |
-
dict(
|
169 |
-
name='sss_kpt19',
|
170 |
-
id=18,
|
171 |
-
color=[255, 128, 0],
|
172 |
-
type='',
|
173 |
-
swap='sss_kpt13'),
|
174 |
-
19:
|
175 |
-
dict(
|
176 |
-
name='sss_kpt20',
|
177 |
-
id=19,
|
178 |
-
color=[255, 128, 0],
|
179 |
-
type='',
|
180 |
-
swap='sss_kpt12'),
|
181 |
-
20:
|
182 |
-
dict(
|
183 |
-
name='sss_kpt21',
|
184 |
-
id=20,
|
185 |
-
color=[255, 128, 0],
|
186 |
-
type='',
|
187 |
-
swap='sss_kpt11'),
|
188 |
-
21:
|
189 |
-
dict(
|
190 |
-
name='sss_kpt22',
|
191 |
-
id=21,
|
192 |
-
color=[255, 128, 0],
|
193 |
-
type='',
|
194 |
-
swap='sss_kpt10'),
|
195 |
-
22:
|
196 |
-
dict(
|
197 |
-
name='sss_kpt23',
|
198 |
-
id=22,
|
199 |
-
color=[255, 128, 0],
|
200 |
-
type='',
|
201 |
-
swap='sss_kpt9'),
|
202 |
-
23:
|
203 |
-
dict(
|
204 |
-
name='sss_kpt24',
|
205 |
-
id=23,
|
206 |
-
color=[255, 128, 0],
|
207 |
-
type='',
|
208 |
-
swap='sss_kpt8'),
|
209 |
-
24:
|
210 |
-
dict(
|
211 |
-
name='sss_kpt25',
|
212 |
-
id=24,
|
213 |
-
color=[255, 128, 0],
|
214 |
-
type='',
|
215 |
-
swap='sss_kpt7'),
|
216 |
-
25:
|
217 |
-
dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
|
218 |
-
26:
|
219 |
-
dict(
|
220 |
-
name='lss_kpt2',
|
221 |
-
id=26,
|
222 |
-
color=[255, 0, 128],
|
223 |
-
type='',
|
224 |
-
swap='lss_kpt6'),
|
225 |
-
27:
|
226 |
-
dict(
|
227 |
-
name='lss_kpt3',
|
228 |
-
id=27,
|
229 |
-
color=[255, 0, 128],
|
230 |
-
type='',
|
231 |
-
swap='lss_kpt5'),
|
232 |
-
28:
|
233 |
-
dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
|
234 |
-
29:
|
235 |
-
dict(
|
236 |
-
name='lss_kpt5',
|
237 |
-
id=29,
|
238 |
-
color=[255, 0, 128],
|
239 |
-
type='',
|
240 |
-
swap='lss_kpt3'),
|
241 |
-
30:
|
242 |
-
dict(
|
243 |
-
name='lss_kpt6',
|
244 |
-
id=30,
|
245 |
-
color=[255, 0, 128],
|
246 |
-
type='',
|
247 |
-
swap='lss_kpt2'),
|
248 |
-
31:
|
249 |
-
dict(
|
250 |
-
name='lss_kpt7',
|
251 |
-
id=31,
|
252 |
-
color=[255, 0, 128],
|
253 |
-
type='',
|
254 |
-
swap='lss_kpt33'),
|
255 |
-
32:
|
256 |
-
dict(
|
257 |
-
name='lss_kpt8',
|
258 |
-
id=32,
|
259 |
-
color=[255, 0, 128],
|
260 |
-
type='',
|
261 |
-
swap='lss_kpt32'),
|
262 |
-
33:
|
263 |
-
dict(
|
264 |
-
name='lss_kpt9',
|
265 |
-
id=33,
|
266 |
-
color=[255, 0, 128],
|
267 |
-
type='',
|
268 |
-
swap='lss_kpt31'),
|
269 |
-
34:
|
270 |
-
dict(
|
271 |
-
name='lss_kpt10',
|
272 |
-
id=34,
|
273 |
-
color=[255, 0, 128],
|
274 |
-
type='',
|
275 |
-
swap='lss_kpt30'),
|
276 |
-
35:
|
277 |
-
dict(
|
278 |
-
name='lss_kpt11',
|
279 |
-
id=35,
|
280 |
-
color=[255, 0, 128],
|
281 |
-
type='',
|
282 |
-
swap='lss_kpt29'),
|
283 |
-
36:
|
284 |
-
dict(
|
285 |
-
name='lss_kpt12',
|
286 |
-
id=36,
|
287 |
-
color=[255, 0, 128],
|
288 |
-
type='',
|
289 |
-
swap='lss_kpt28'),
|
290 |
-
37:
|
291 |
-
dict(
|
292 |
-
name='lss_kpt13',
|
293 |
-
id=37,
|
294 |
-
color=[255, 0, 128],
|
295 |
-
type='',
|
296 |
-
swap='lss_kpt27'),
|
297 |
-
38:
|
298 |
-
dict(
|
299 |
-
name='lss_kpt14',
|
300 |
-
id=38,
|
301 |
-
color=[255, 0, 128],
|
302 |
-
type='',
|
303 |
-
swap='lss_kpt26'),
|
304 |
-
39:
|
305 |
-
dict(
|
306 |
-
name='lss_kpt15',
|
307 |
-
id=39,
|
308 |
-
color=[255, 0, 128],
|
309 |
-
type='',
|
310 |
-
swap='lss_kpt25'),
|
311 |
-
40:
|
312 |
-
dict(
|
313 |
-
name='lss_kpt16',
|
314 |
-
id=40,
|
315 |
-
color=[255, 0, 128],
|
316 |
-
type='',
|
317 |
-
swap='lss_kpt24'),
|
318 |
-
41:
|
319 |
-
dict(
|
320 |
-
name='lss_kpt17',
|
321 |
-
id=41,
|
322 |
-
color=[255, 0, 128],
|
323 |
-
type='',
|
324 |
-
swap='lss_kpt23'),
|
325 |
-
42:
|
326 |
-
dict(
|
327 |
-
name='lss_kpt18',
|
328 |
-
id=42,
|
329 |
-
color=[255, 0, 128],
|
330 |
-
type='',
|
331 |
-
swap='lss_kpt22'),
|
332 |
-
43:
|
333 |
-
dict(
|
334 |
-
name='lss_kpt19',
|
335 |
-
id=43,
|
336 |
-
color=[255, 0, 128],
|
337 |
-
type='',
|
338 |
-
swap='lss_kpt21'),
|
339 |
-
44:
|
340 |
-
dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
|
341 |
-
45:
|
342 |
-
dict(
|
343 |
-
name='lss_kpt21',
|
344 |
-
id=45,
|
345 |
-
color=[255, 0, 128],
|
346 |
-
type='',
|
347 |
-
swap='lss_kpt19'),
|
348 |
-
46:
|
349 |
-
dict(
|
350 |
-
name='lss_kpt22',
|
351 |
-
id=46,
|
352 |
-
color=[255, 0, 128],
|
353 |
-
type='',
|
354 |
-
swap='lss_kpt18'),
|
355 |
-
47:
|
356 |
-
dict(
|
357 |
-
name='lss_kpt23',
|
358 |
-
id=47,
|
359 |
-
color=[255, 0, 128],
|
360 |
-
type='',
|
361 |
-
swap='lss_kpt17'),
|
362 |
-
48:
|
363 |
-
dict(
|
364 |
-
name='lss_kpt24',
|
365 |
-
id=48,
|
366 |
-
color=[255, 0, 128],
|
367 |
-
type='',
|
368 |
-
swap='lss_kpt16'),
|
369 |
-
49:
|
370 |
-
dict(
|
371 |
-
name='lss_kpt25',
|
372 |
-
id=49,
|
373 |
-
color=[255, 0, 128],
|
374 |
-
type='',
|
375 |
-
swap='lss_kpt15'),
|
376 |
-
50:
|
377 |
-
dict(
|
378 |
-
name='lss_kpt26',
|
379 |
-
id=50,
|
380 |
-
color=[255, 0, 128],
|
381 |
-
type='',
|
382 |
-
swap='lss_kpt14'),
|
383 |
-
51:
|
384 |
-
dict(
|
385 |
-
name='lss_kpt27',
|
386 |
-
id=51,
|
387 |
-
color=[255, 0, 128],
|
388 |
-
type='',
|
389 |
-
swap='lss_kpt13'),
|
390 |
-
52:
|
391 |
-
dict(
|
392 |
-
name='lss_kpt28',
|
393 |
-
id=52,
|
394 |
-
color=[255, 0, 128],
|
395 |
-
type='',
|
396 |
-
swap='lss_kpt12'),
|
397 |
-
53:
|
398 |
-
dict(
|
399 |
-
name='lss_kpt29',
|
400 |
-
id=53,
|
401 |
-
color=[255, 0, 128],
|
402 |
-
type='',
|
403 |
-
swap='lss_kpt11'),
|
404 |
-
54:
|
405 |
-
dict(
|
406 |
-
name='lss_kpt30',
|
407 |
-
id=54,
|
408 |
-
color=[255, 0, 128],
|
409 |
-
type='',
|
410 |
-
swap='lss_kpt10'),
|
411 |
-
55:
|
412 |
-
dict(
|
413 |
-
name='lss_kpt31',
|
414 |
-
id=55,
|
415 |
-
color=[255, 0, 128],
|
416 |
-
type='',
|
417 |
-
swap='lss_kpt9'),
|
418 |
-
56:
|
419 |
-
dict(
|
420 |
-
name='lss_kpt32',
|
421 |
-
id=56,
|
422 |
-
color=[255, 0, 128],
|
423 |
-
type='',
|
424 |
-
swap='lss_kpt8'),
|
425 |
-
57:
|
426 |
-
dict(
|
427 |
-
name='lss_kpt33',
|
428 |
-
id=57,
|
429 |
-
color=[255, 0, 128],
|
430 |
-
type='',
|
431 |
-
swap='lss_kpt7'),
|
432 |
-
58:
|
433 |
-
dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
|
434 |
-
59:
|
435 |
-
dict(
|
436 |
-
name='sso_kpt2',
|
437 |
-
id=59,
|
438 |
-
color=[128, 0, 255],
|
439 |
-
type='',
|
440 |
-
swap='sso_kpt26'),
|
441 |
-
60:
|
442 |
-
dict(
|
443 |
-
name='sso_kpt3',
|
444 |
-
id=60,
|
445 |
-
color=[128, 0, 255],
|
446 |
-
type='',
|
447 |
-
swap='sso_kpt5'),
|
448 |
-
61:
|
449 |
-
dict(
|
450 |
-
name='sso_kpt4',
|
451 |
-
id=61,
|
452 |
-
color=[128, 0, 255],
|
453 |
-
type='',
|
454 |
-
swap='sso_kpt6'),
|
455 |
-
62:
|
456 |
-
dict(
|
457 |
-
name='sso_kpt5',
|
458 |
-
id=62,
|
459 |
-
color=[128, 0, 255],
|
460 |
-
type='',
|
461 |
-
swap='sso_kpt3'),
|
462 |
-
63:
|
463 |
-
dict(
|
464 |
-
name='sso_kpt6',
|
465 |
-
id=63,
|
466 |
-
color=[128, 0, 255],
|
467 |
-
type='',
|
468 |
-
swap='sso_kpt4'),
|
469 |
-
64:
|
470 |
-
dict(
|
471 |
-
name='sso_kpt7',
|
472 |
-
id=64,
|
473 |
-
color=[128, 0, 255],
|
474 |
-
type='',
|
475 |
-
swap='sso_kpt25'),
|
476 |
-
65:
|
477 |
-
dict(
|
478 |
-
name='sso_kpt8',
|
479 |
-
id=65,
|
480 |
-
color=[128, 0, 255],
|
481 |
-
type='',
|
482 |
-
swap='sso_kpt24'),
|
483 |
-
66:
|
484 |
-
dict(
|
485 |
-
name='sso_kpt9',
|
486 |
-
id=66,
|
487 |
-
color=[128, 0, 255],
|
488 |
-
type='',
|
489 |
-
swap='sso_kpt23'),
|
490 |
-
67:
|
491 |
-
dict(
|
492 |
-
name='sso_kpt10',
|
493 |
-
id=67,
|
494 |
-
color=[128, 0, 255],
|
495 |
-
type='',
|
496 |
-
swap='sso_kpt22'),
|
497 |
-
68:
|
498 |
-
dict(
|
499 |
-
name='sso_kpt11',
|
500 |
-
id=68,
|
501 |
-
color=[128, 0, 255],
|
502 |
-
type='',
|
503 |
-
swap='sso_kpt21'),
|
504 |
-
69:
|
505 |
-
dict(
|
506 |
-
name='sso_kpt12',
|
507 |
-
id=69,
|
508 |
-
color=[128, 0, 255],
|
509 |
-
type='',
|
510 |
-
swap='sso_kpt20'),
|
511 |
-
70:
|
512 |
-
dict(
|
513 |
-
name='sso_kpt13',
|
514 |
-
id=70,
|
515 |
-
color=[128, 0, 255],
|
516 |
-
type='',
|
517 |
-
swap='sso_kpt19'),
|
518 |
-
71:
|
519 |
-
dict(
|
520 |
-
name='sso_kpt14',
|
521 |
-
id=71,
|
522 |
-
color=[128, 0, 255],
|
523 |
-
type='',
|
524 |
-
swap='sso_kpt18'),
|
525 |
-
72:
|
526 |
-
dict(
|
527 |
-
name='sso_kpt15',
|
528 |
-
id=72,
|
529 |
-
color=[128, 0, 255],
|
530 |
-
type='',
|
531 |
-
swap='sso_kpt17'),
|
532 |
-
73:
|
533 |
-
dict(
|
534 |
-
name='sso_kpt16',
|
535 |
-
id=73,
|
536 |
-
color=[128, 0, 255],
|
537 |
-
type='',
|
538 |
-
swap='sso_kpt29'),
|
539 |
-
74:
|
540 |
-
dict(
|
541 |
-
name='sso_kpt17',
|
542 |
-
id=74,
|
543 |
-
color=[128, 0, 255],
|
544 |
-
type='',
|
545 |
-
swap='sso_kpt15'),
|
546 |
-
75:
|
547 |
-
dict(
|
548 |
-
name='sso_kpt18',
|
549 |
-
id=75,
|
550 |
-
color=[128, 0, 255],
|
551 |
-
type='',
|
552 |
-
swap='sso_kpt14'),
|
553 |
-
76:
|
554 |
-
dict(
|
555 |
-
name='sso_kpt19',
|
556 |
-
id=76,
|
557 |
-
color=[128, 0, 255],
|
558 |
-
type='',
|
559 |
-
swap='sso_kpt13'),
|
560 |
-
77:
|
561 |
-
dict(
|
562 |
-
name='sso_kpt20',
|
563 |
-
id=77,
|
564 |
-
color=[128, 0, 255],
|
565 |
-
type='',
|
566 |
-
swap='sso_kpt12'),
|
567 |
-
78:
|
568 |
-
dict(
|
569 |
-
name='sso_kpt21',
|
570 |
-
id=78,
|
571 |
-
color=[128, 0, 255],
|
572 |
-
type='',
|
573 |
-
swap='sso_kpt11'),
|
574 |
-
79:
|
575 |
-
dict(
|
576 |
-
name='sso_kpt22',
|
577 |
-
id=79,
|
578 |
-
color=[128, 0, 255],
|
579 |
-
type='',
|
580 |
-
swap='sso_kpt10'),
|
581 |
-
80:
|
582 |
-
dict(
|
583 |
-
name='sso_kpt23',
|
584 |
-
id=80,
|
585 |
-
color=[128, 0, 255],
|
586 |
-
type='',
|
587 |
-
swap='sso_kpt9'),
|
588 |
-
81:
|
589 |
-
dict(
|
590 |
-
name='sso_kpt24',
|
591 |
-
id=81,
|
592 |
-
color=[128, 0, 255],
|
593 |
-
type='',
|
594 |
-
swap='sso_kpt8'),
|
595 |
-
82:
|
596 |
-
dict(
|
597 |
-
name='sso_kpt25',
|
598 |
-
id=82,
|
599 |
-
color=[128, 0, 255],
|
600 |
-
type='',
|
601 |
-
swap='sso_kpt7'),
|
602 |
-
83:
|
603 |
-
dict(
|
604 |
-
name='sso_kpt26',
|
605 |
-
id=83,
|
606 |
-
color=[128, 0, 255],
|
607 |
-
type='',
|
608 |
-
swap='sso_kpt2'),
|
609 |
-
84:
|
610 |
-
dict(
|
611 |
-
name='sso_kpt27',
|
612 |
-
id=84,
|
613 |
-
color=[128, 0, 255],
|
614 |
-
type='',
|
615 |
-
swap='sso_kpt30'),
|
616 |
-
85:
|
617 |
-
dict(
|
618 |
-
name='sso_kpt28',
|
619 |
-
id=85,
|
620 |
-
color=[128, 0, 255],
|
621 |
-
type='',
|
622 |
-
swap='sso_kpt31'),
|
623 |
-
86:
|
624 |
-
dict(
|
625 |
-
name='sso_kpt29',
|
626 |
-
id=86,
|
627 |
-
color=[128, 0, 255],
|
628 |
-
type='',
|
629 |
-
swap='sso_kpt16'),
|
630 |
-
87:
|
631 |
-
dict(
|
632 |
-
name='sso_kpt30',
|
633 |
-
id=87,
|
634 |
-
color=[128, 0, 255],
|
635 |
-
type='',
|
636 |
-
swap='sso_kpt27'),
|
637 |
-
88:
|
638 |
-
dict(
|
639 |
-
name='sso_kpt31',
|
640 |
-
id=88,
|
641 |
-
color=[128, 0, 255],
|
642 |
-
type='',
|
643 |
-
swap='sso_kpt28'),
|
644 |
-
89:
|
645 |
-
dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
|
646 |
-
90:
|
647 |
-
dict(
|
648 |
-
name='lso_kpt2',
|
649 |
-
id=90,
|
650 |
-
color=[0, 128, 255],
|
651 |
-
type='',
|
652 |
-
swap='lso_kpt6'),
|
653 |
-
91:
|
654 |
-
dict(
|
655 |
-
name='lso_kpt3',
|
656 |
-
id=91,
|
657 |
-
color=[0, 128, 255],
|
658 |
-
type='',
|
659 |
-
swap='lso_kpt5'),
|
660 |
-
92:
|
661 |
-
dict(
|
662 |
-
name='lso_kpt4',
|
663 |
-
id=92,
|
664 |
-
color=[0, 128, 255],
|
665 |
-
type='',
|
666 |
-
swap='lso_kpt34'),
|
667 |
-
93:
|
668 |
-
dict(
|
669 |
-
name='lso_kpt5',
|
670 |
-
id=93,
|
671 |
-
color=[0, 128, 255],
|
672 |
-
type='',
|
673 |
-
swap='lso_kpt3'),
|
674 |
-
94:
|
675 |
-
dict(
|
676 |
-
name='lso_kpt6',
|
677 |
-
id=94,
|
678 |
-
color=[0, 128, 255],
|
679 |
-
type='',
|
680 |
-
swap='lso_kpt2'),
|
681 |
-
95:
|
682 |
-
dict(
|
683 |
-
name='lso_kpt7',
|
684 |
-
id=95,
|
685 |
-
color=[0, 128, 255],
|
686 |
-
type='',
|
687 |
-
swap='lso_kpt33'),
|
688 |
-
96:
|
689 |
-
dict(
|
690 |
-
name='lso_kpt8',
|
691 |
-
id=96,
|
692 |
-
color=[0, 128, 255],
|
693 |
-
type='',
|
694 |
-
swap='lso_kpt32'),
|
695 |
-
97:
|
696 |
-
dict(
|
697 |
-
name='lso_kpt9',
|
698 |
-
id=97,
|
699 |
-
color=[0, 128, 255],
|
700 |
-
type='',
|
701 |
-
swap='lso_kpt31'),
|
702 |
-
98:
|
703 |
-
dict(
|
704 |
-
name='lso_kpt10',
|
705 |
-
id=98,
|
706 |
-
color=[0, 128, 255],
|
707 |
-
type='',
|
708 |
-
swap='lso_kpt30'),
|
709 |
-
99:
|
710 |
-
dict(
|
711 |
-
name='lso_kpt11',
|
712 |
-
id=99,
|
713 |
-
color=[0, 128, 255],
|
714 |
-
type='',
|
715 |
-
swap='lso_kpt29'),
|
716 |
-
100:
|
717 |
-
dict(
|
718 |
-
name='lso_kpt12',
|
719 |
-
id=100,
|
720 |
-
color=[0, 128, 255],
|
721 |
-
type='',
|
722 |
-
swap='lso_kpt28'),
|
723 |
-
101:
|
724 |
-
dict(
|
725 |
-
name='lso_kpt13',
|
726 |
-
id=101,
|
727 |
-
color=[0, 128, 255],
|
728 |
-
type='',
|
729 |
-
swap='lso_kpt27'),
|
730 |
-
102:
|
731 |
-
dict(
|
732 |
-
name='lso_kpt14',
|
733 |
-
id=102,
|
734 |
-
color=[0, 128, 255],
|
735 |
-
type='',
|
736 |
-
swap='lso_kpt26'),
|
737 |
-
103:
|
738 |
-
dict(
|
739 |
-
name='lso_kpt15',
|
740 |
-
id=103,
|
741 |
-
color=[0, 128, 255],
|
742 |
-
type='',
|
743 |
-
swap='lso_kpt25'),
|
744 |
-
104:
|
745 |
-
dict(
|
746 |
-
name='lso_kpt16',
|
747 |
-
id=104,
|
748 |
-
color=[0, 128, 255],
|
749 |
-
type='',
|
750 |
-
swap='lso_kpt24'),
|
751 |
-
105:
|
752 |
-
dict(
|
753 |
-
name='lso_kpt17',
|
754 |
-
id=105,
|
755 |
-
color=[0, 128, 255],
|
756 |
-
type='',
|
757 |
-
swap='lso_kpt23'),
|
758 |
-
106:
|
759 |
-
dict(
|
760 |
-
name='lso_kpt18',
|
761 |
-
id=106,
|
762 |
-
color=[0, 128, 255],
|
763 |
-
type='',
|
764 |
-
swap='lso_kpt22'),
|
765 |
-
107:
|
766 |
-
dict(
|
767 |
-
name='lso_kpt19',
|
768 |
-
id=107,
|
769 |
-
color=[0, 128, 255],
|
770 |
-
type='',
|
771 |
-
swap='lso_kpt21'),
|
772 |
-
108:
|
773 |
-
dict(
|
774 |
-
name='lso_kpt20',
|
775 |
-
id=108,
|
776 |
-
color=[0, 128, 255],
|
777 |
-
type='',
|
778 |
-
swap='lso_kpt37'),
|
779 |
-
109:
|
780 |
-
dict(
|
781 |
-
name='lso_kpt21',
|
782 |
-
id=109,
|
783 |
-
color=[0, 128, 255],
|
784 |
-
type='',
|
785 |
-
swap='lso_kpt19'),
|
786 |
-
110:
|
787 |
-
dict(
|
788 |
-
name='lso_kpt22',
|
789 |
-
id=110,
|
790 |
-
color=[0, 128, 255],
|
791 |
-
type='',
|
792 |
-
swap='lso_kpt18'),
|
793 |
-
111:
|
794 |
-
dict(
|
795 |
-
name='lso_kpt23',
|
796 |
-
id=111,
|
797 |
-
color=[0, 128, 255],
|
798 |
-
type='',
|
799 |
-
swap='lso_kpt17'),
|
800 |
-
112:
|
801 |
-
dict(
|
802 |
-
name='lso_kpt24',
|
803 |
-
id=112,
|
804 |
-
color=[0, 128, 255],
|
805 |
-
type='',
|
806 |
-
swap='lso_kpt16'),
|
807 |
-
113:
|
808 |
-
dict(
|
809 |
-
name='lso_kpt25',
|
810 |
-
id=113,
|
811 |
-
color=[0, 128, 255],
|
812 |
-
type='',
|
813 |
-
swap='lso_kpt15'),
|
814 |
-
114:
|
815 |
-
dict(
|
816 |
-
name='lso_kpt26',
|
817 |
-
id=114,
|
818 |
-
color=[0, 128, 255],
|
819 |
-
type='',
|
820 |
-
swap='lso_kpt14'),
|
821 |
-
115:
|
822 |
-
dict(
|
823 |
-
name='lso_kpt27',
|
824 |
-
id=115,
|
825 |
-
color=[0, 128, 255],
|
826 |
-
type='',
|
827 |
-
swap='lso_kpt13'),
|
828 |
-
116:
|
829 |
-
dict(
|
830 |
-
name='lso_kpt28',
|
831 |
-
id=116,
|
832 |
-
color=[0, 128, 255],
|
833 |
-
type='',
|
834 |
-
swap='lso_kpt12'),
|
835 |
-
117:
|
836 |
-
dict(
|
837 |
-
name='lso_kpt29',
|
838 |
-
id=117,
|
839 |
-
color=[0, 128, 255],
|
840 |
-
type='',
|
841 |
-
swap='lso_kpt11'),
|
842 |
-
118:
|
843 |
-
dict(
|
844 |
-
name='lso_kpt30',
|
845 |
-
id=118,
|
846 |
-
color=[0, 128, 255],
|
847 |
-
type='',
|
848 |
-
swap='lso_kpt10'),
|
849 |
-
119:
|
850 |
-
dict(
|
851 |
-
name='lso_kpt31',
|
852 |
-
id=119,
|
853 |
-
color=[0, 128, 255],
|
854 |
-
type='',
|
855 |
-
swap='lso_kpt9'),
|
856 |
-
120:
|
857 |
-
dict(
|
858 |
-
name='lso_kpt32',
|
859 |
-
id=120,
|
860 |
-
color=[0, 128, 255],
|
861 |
-
type='',
|
862 |
-
swap='lso_kpt8'),
|
863 |
-
121:
|
864 |
-
dict(
|
865 |
-
name='lso_kpt33',
|
866 |
-
id=121,
|
867 |
-
color=[0, 128, 255],
|
868 |
-
type='',
|
869 |
-
swap='lso_kpt7'),
|
870 |
-
122:
|
871 |
-
dict(
|
872 |
-
name='lso_kpt34',
|
873 |
-
id=122,
|
874 |
-
color=[0, 128, 255],
|
875 |
-
type='',
|
876 |
-
swap='lso_kpt4'),
|
877 |
-
123:
|
878 |
-
dict(
|
879 |
-
name='lso_kpt35',
|
880 |
-
id=123,
|
881 |
-
color=[0, 128, 255],
|
882 |
-
type='',
|
883 |
-
swap='lso_kpt38'),
|
884 |
-
124:
|
885 |
-
dict(
|
886 |
-
name='lso_kpt36',
|
887 |
-
id=124,
|
888 |
-
color=[0, 128, 255],
|
889 |
-
type='',
|
890 |
-
swap='lso_kpt39'),
|
891 |
-
125:
|
892 |
-
dict(
|
893 |
-
name='lso_kpt37',
|
894 |
-
id=125,
|
895 |
-
color=[0, 128, 255],
|
896 |
-
type='',
|
897 |
-
swap='lso_kpt20'),
|
898 |
-
126:
|
899 |
-
dict(
|
900 |
-
name='lso_kpt38',
|
901 |
-
id=126,
|
902 |
-
color=[0, 128, 255],
|
903 |
-
type='',
|
904 |
-
swap='lso_kpt35'),
|
905 |
-
127:
|
906 |
-
dict(
|
907 |
-
name='lso_kpt39',
|
908 |
-
id=127,
|
909 |
-
color=[0, 128, 255],
|
910 |
-
type='',
|
911 |
-
swap='lso_kpt36'),
|
912 |
-
128:
|
913 |
-
dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
|
914 |
-
129:
|
915 |
-
dict(
|
916 |
-
name='vest_kpt2',
|
917 |
-
id=129,
|
918 |
-
color=[0, 128, 128],
|
919 |
-
type='',
|
920 |
-
swap='vest_kpt6'),
|
921 |
-
130:
|
922 |
-
dict(
|
923 |
-
name='vest_kpt3',
|
924 |
-
id=130,
|
925 |
-
color=[0, 128, 128],
|
926 |
-
type='',
|
927 |
-
swap='vest_kpt5'),
|
928 |
-
131:
|
929 |
-
dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
|
930 |
-
132:
|
931 |
-
dict(
|
932 |
-
name='vest_kpt5',
|
933 |
-
id=132,
|
934 |
-
color=[0, 128, 128],
|
935 |
-
type='',
|
936 |
-
swap='vest_kpt3'),
|
937 |
-
133:
|
938 |
-
dict(
|
939 |
-
name='vest_kpt6',
|
940 |
-
id=133,
|
941 |
-
color=[0, 128, 128],
|
942 |
-
type='',
|
943 |
-
swap='vest_kpt2'),
|
944 |
-
134:
|
945 |
-
dict(
|
946 |
-
name='vest_kpt7',
|
947 |
-
id=134,
|
948 |
-
color=[0, 128, 128],
|
949 |
-
type='',
|
950 |
-
swap='vest_kpt15'),
|
951 |
-
135:
|
952 |
-
dict(
|
953 |
-
name='vest_kpt8',
|
954 |
-
id=135,
|
955 |
-
color=[0, 128, 128],
|
956 |
-
type='',
|
957 |
-
swap='vest_kpt14'),
|
958 |
-
136:
|
959 |
-
dict(
|
960 |
-
name='vest_kpt9',
|
961 |
-
id=136,
|
962 |
-
color=[0, 128, 128],
|
963 |
-
type='',
|
964 |
-
swap='vest_kpt13'),
|
965 |
-
137:
|
966 |
-
dict(
|
967 |
-
name='vest_kpt10',
|
968 |
-
id=137,
|
969 |
-
color=[0, 128, 128],
|
970 |
-
type='',
|
971 |
-
swap='vest_kpt12'),
|
972 |
-
138:
|
973 |
-
dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
|
974 |
-
139:
|
975 |
-
dict(
|
976 |
-
name='vest_kpt12',
|
977 |
-
id=139,
|
978 |
-
color=[0, 128, 128],
|
979 |
-
type='',
|
980 |
-
swap='vest_kpt10'),
|
981 |
-
140:
|
982 |
-
dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
|
983 |
-
141:
|
984 |
-
dict(
|
985 |
-
name='vest_kpt14',
|
986 |
-
id=141,
|
987 |
-
color=[0, 128, 128],
|
988 |
-
type='',
|
989 |
-
swap='vest_kpt8'),
|
990 |
-
142:
|
991 |
-
dict(
|
992 |
-
name='vest_kpt15',
|
993 |
-
id=142,
|
994 |
-
color=[0, 128, 128],
|
995 |
-
type='',
|
996 |
-
swap='vest_kpt7'),
|
997 |
-
143:
|
998 |
-
dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
|
999 |
-
144:
|
1000 |
-
dict(
|
1001 |
-
name='sling_kpt2',
|
1002 |
-
id=144,
|
1003 |
-
color=[0, 0, 128],
|
1004 |
-
type='',
|
1005 |
-
swap='sling_kpt6'),
|
1006 |
-
145:
|
1007 |
-
dict(
|
1008 |
-
name='sling_kpt3',
|
1009 |
-
id=145,
|
1010 |
-
color=[0, 0, 128],
|
1011 |
-
type='',
|
1012 |
-
swap='sling_kpt5'),
|
1013 |
-
146:
|
1014 |
-
dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
|
1015 |
-
147:
|
1016 |
-
dict(
|
1017 |
-
name='sling_kpt5',
|
1018 |
-
id=147,
|
1019 |
-
color=[0, 0, 128],
|
1020 |
-
type='',
|
1021 |
-
swap='sling_kpt3'),
|
1022 |
-
148:
|
1023 |
-
dict(
|
1024 |
-
name='sling_kpt6',
|
1025 |
-
id=148,
|
1026 |
-
color=[0, 0, 128],
|
1027 |
-
type='',
|
1028 |
-
swap='sling_kpt2'),
|
1029 |
-
149:
|
1030 |
-
dict(
|
1031 |
-
name='sling_kpt7',
|
1032 |
-
id=149,
|
1033 |
-
color=[0, 0, 128],
|
1034 |
-
type='',
|
1035 |
-
swap='sling_kpt15'),
|
1036 |
-
150:
|
1037 |
-
dict(
|
1038 |
-
name='sling_kpt8',
|
1039 |
-
id=150,
|
1040 |
-
color=[0, 0, 128],
|
1041 |
-
type='',
|
1042 |
-
swap='sling_kpt14'),
|
1043 |
-
151:
|
1044 |
-
dict(
|
1045 |
-
name='sling_kpt9',
|
1046 |
-
id=151,
|
1047 |
-
color=[0, 0, 128],
|
1048 |
-
type='',
|
1049 |
-
swap='sling_kpt13'),
|
1050 |
-
152:
|
1051 |
-
dict(
|
1052 |
-
name='sling_kpt10',
|
1053 |
-
id=152,
|
1054 |
-
color=[0, 0, 128],
|
1055 |
-
type='',
|
1056 |
-
swap='sling_kpt12'),
|
1057 |
-
153:
|
1058 |
-
dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
|
1059 |
-
154:
|
1060 |
-
dict(
|
1061 |
-
name='sling_kpt12',
|
1062 |
-
id=154,
|
1063 |
-
color=[0, 0, 128],
|
1064 |
-
type='',
|
1065 |
-
swap='sling_kpt10'),
|
1066 |
-
155:
|
1067 |
-
dict(
|
1068 |
-
name='sling_kpt13',
|
1069 |
-
id=155,
|
1070 |
-
color=[0, 0, 128],
|
1071 |
-
type='',
|
1072 |
-
swap='sling_kpt9'),
|
1073 |
-
156:
|
1074 |
-
dict(
|
1075 |
-
name='sling_kpt14',
|
1076 |
-
id=156,
|
1077 |
-
color=[0, 0, 128],
|
1078 |
-
type='',
|
1079 |
-
swap='sling_kpt8'),
|
1080 |
-
157:
|
1081 |
-
dict(
|
1082 |
-
name='sling_kpt15',
|
1083 |
-
id=157,
|
1084 |
-
color=[0, 0, 128],
|
1085 |
-
type='',
|
1086 |
-
swap='sling_kpt7'),
|
1087 |
-
158:
|
1088 |
-
dict(
|
1089 |
-
name='shorts_kpt1',
|
1090 |
-
id=158,
|
1091 |
-
color=[128, 128, 128],
|
1092 |
-
type='',
|
1093 |
-
swap='shorts_kpt3'),
|
1094 |
-
159:
|
1095 |
-
dict(
|
1096 |
-
name='shorts_kpt2',
|
1097 |
-
id=159,
|
1098 |
-
color=[128, 128, 128],
|
1099 |
-
type='',
|
1100 |
-
swap=''),
|
1101 |
-
160:
|
1102 |
-
dict(
|
1103 |
-
name='shorts_kpt3',
|
1104 |
-
id=160,
|
1105 |
-
color=[128, 128, 128],
|
1106 |
-
type='',
|
1107 |
-
swap='shorts_kpt1'),
|
1108 |
-
161:
|
1109 |
-
dict(
|
1110 |
-
name='shorts_kpt4',
|
1111 |
-
id=161,
|
1112 |
-
color=[128, 128, 128],
|
1113 |
-
type='',
|
1114 |
-
swap='shorts_kpt10'),
|
1115 |
-
162:
|
1116 |
-
dict(
|
1117 |
-
name='shorts_kpt5',
|
1118 |
-
id=162,
|
1119 |
-
color=[128, 128, 128],
|
1120 |
-
type='',
|
1121 |
-
swap='shorts_kpt9'),
|
1122 |
-
163:
|
1123 |
-
dict(
|
1124 |
-
name='shorts_kpt6',
|
1125 |
-
id=163,
|
1126 |
-
color=[128, 128, 128],
|
1127 |
-
type='',
|
1128 |
-
swap='shorts_kpt8'),
|
1129 |
-
164:
|
1130 |
-
dict(
|
1131 |
-
name='shorts_kpt7',
|
1132 |
-
id=164,
|
1133 |
-
color=[128, 128, 128],
|
1134 |
-
type='',
|
1135 |
-
swap=''),
|
1136 |
-
165:
|
1137 |
-
dict(
|
1138 |
-
name='shorts_kpt8',
|
1139 |
-
id=165,
|
1140 |
-
color=[128, 128, 128],
|
1141 |
-
type='',
|
1142 |
-
swap='shorts_kpt6'),
|
1143 |
-
166:
|
1144 |
-
dict(
|
1145 |
-
name='shorts_kpt9',
|
1146 |
-
id=166,
|
1147 |
-
color=[128, 128, 128],
|
1148 |
-
type='',
|
1149 |
-
swap='shorts_kpt5'),
|
1150 |
-
167:
|
1151 |
-
dict(
|
1152 |
-
name='shorts_kpt10',
|
1153 |
-
id=167,
|
1154 |
-
color=[128, 128, 128],
|
1155 |
-
type='',
|
1156 |
-
swap='shorts_kpt4'),
|
1157 |
-
168:
|
1158 |
-
dict(
|
1159 |
-
name='trousers_kpt1',
|
1160 |
-
id=168,
|
1161 |
-
color=[128, 0, 128],
|
1162 |
-
type='',
|
1163 |
-
swap='trousers_kpt3'),
|
1164 |
-
169:
|
1165 |
-
dict(
|
1166 |
-
name='trousers_kpt2',
|
1167 |
-
id=169,
|
1168 |
-
color=[128, 0, 128],
|
1169 |
-
type='',
|
1170 |
-
swap=''),
|
1171 |
-
170:
|
1172 |
-
dict(
|
1173 |
-
name='trousers_kpt3',
|
1174 |
-
id=170,
|
1175 |
-
color=[128, 0, 128],
|
1176 |
-
type='',
|
1177 |
-
swap='trousers_kpt1'),
|
1178 |
-
171:
|
1179 |
-
dict(
|
1180 |
-
name='trousers_kpt4',
|
1181 |
-
id=171,
|
1182 |
-
color=[128, 0, 128],
|
1183 |
-
type='',
|
1184 |
-
swap='trousers_kpt14'),
|
1185 |
-
172:
|
1186 |
-
dict(
|
1187 |
-
name='trousers_kpt5',
|
1188 |
-
id=172,
|
1189 |
-
color=[128, 0, 128],
|
1190 |
-
type='',
|
1191 |
-
swap='trousers_kpt13'),
|
1192 |
-
173:
|
1193 |
-
dict(
|
1194 |
-
name='trousers_kpt6',
|
1195 |
-
id=173,
|
1196 |
-
color=[128, 0, 128],
|
1197 |
-
type='',
|
1198 |
-
swap='trousers_kpt12'),
|
1199 |
-
174:
|
1200 |
-
dict(
|
1201 |
-
name='trousers_kpt7',
|
1202 |
-
id=174,
|
1203 |
-
color=[128, 0, 128],
|
1204 |
-
type='',
|
1205 |
-
swap='trousers_kpt11'),
|
1206 |
-
175:
|
1207 |
-
dict(
|
1208 |
-
name='trousers_kpt8',
|
1209 |
-
id=175,
|
1210 |
-
color=[128, 0, 128],
|
1211 |
-
type='',
|
1212 |
-
swap='trousers_kpt10'),
|
1213 |
-
176:
|
1214 |
-
dict(
|
1215 |
-
name='trousers_kpt9',
|
1216 |
-
id=176,
|
1217 |
-
color=[128, 0, 128],
|
1218 |
-
type='',
|
1219 |
-
swap=''),
|
1220 |
-
177:
|
1221 |
-
dict(
|
1222 |
-
name='trousers_kpt10',
|
1223 |
-
id=177,
|
1224 |
-
color=[128, 0, 128],
|
1225 |
-
type='',
|
1226 |
-
swap='trousers_kpt8'),
|
1227 |
-
178:
|
1228 |
-
dict(
|
1229 |
-
name='trousers_kpt11',
|
1230 |
-
id=178,
|
1231 |
-
color=[128, 0, 128],
|
1232 |
-
type='',
|
1233 |
-
swap='trousers_kpt7'),
|
1234 |
-
179:
|
1235 |
-
dict(
|
1236 |
-
name='trousers_kpt12',
|
1237 |
-
id=179,
|
1238 |
-
color=[128, 0, 128],
|
1239 |
-
type='',
|
1240 |
-
swap='trousers_kpt6'),
|
1241 |
-
180:
|
1242 |
-
dict(
|
1243 |
-
name='trousers_kpt13',
|
1244 |
-
id=180,
|
1245 |
-
color=[128, 0, 128],
|
1246 |
-
type='',
|
1247 |
-
swap='trousers_kpt5'),
|
1248 |
-
181:
|
1249 |
-
dict(
|
1250 |
-
name='trousers_kpt14',
|
1251 |
-
id=181,
|
1252 |
-
color=[128, 0, 128],
|
1253 |
-
type='',
|
1254 |
-
swap='trousers_kpt4'),
|
1255 |
-
182:
|
1256 |
-
dict(
|
1257 |
-
name='skirt_kpt1',
|
1258 |
-
id=182,
|
1259 |
-
color=[64, 128, 128],
|
1260 |
-
type='',
|
1261 |
-
swap='skirt_kpt3'),
|
1262 |
-
183:
|
1263 |
-
dict(
|
1264 |
-
name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
|
1265 |
-
184:
|
1266 |
-
dict(
|
1267 |
-
name='skirt_kpt3',
|
1268 |
-
id=184,
|
1269 |
-
color=[64, 128, 128],
|
1270 |
-
type='',
|
1271 |
-
swap='skirt_kpt1'),
|
1272 |
-
185:
|
1273 |
-
dict(
|
1274 |
-
name='skirt_kpt4',
|
1275 |
-
id=185,
|
1276 |
-
color=[64, 128, 128],
|
1277 |
-
type='',
|
1278 |
-
swap='skirt_kpt8'),
|
1279 |
-
186:
|
1280 |
-
dict(
|
1281 |
-
name='skirt_kpt5',
|
1282 |
-
id=186,
|
1283 |
-
color=[64, 128, 128],
|
1284 |
-
type='',
|
1285 |
-
swap='skirt_kpt7'),
|
1286 |
-
187:
|
1287 |
-
dict(
|
1288 |
-
name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
|
1289 |
-
188:
|
1290 |
-
dict(
|
1291 |
-
name='skirt_kpt7',
|
1292 |
-
id=188,
|
1293 |
-
color=[64, 128, 128],
|
1294 |
-
type='',
|
1295 |
-
swap='skirt_kpt5'),
|
1296 |
-
189:
|
1297 |
-
dict(
|
1298 |
-
name='skirt_kpt8',
|
1299 |
-
id=189,
|
1300 |
-
color=[64, 128, 128],
|
1301 |
-
type='',
|
1302 |
-
swap='skirt_kpt4'),
|
1303 |
-
190:
|
1304 |
-
dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
|
1305 |
-
191:
|
1306 |
-
dict(
|
1307 |
-
name='ssd_kpt2',
|
1308 |
-
id=191,
|
1309 |
-
color=[64, 64, 128],
|
1310 |
-
type='',
|
1311 |
-
swap='ssd_kpt6'),
|
1312 |
-
192:
|
1313 |
-
dict(
|
1314 |
-
name='ssd_kpt3',
|
1315 |
-
id=192,
|
1316 |
-
color=[64, 64, 128],
|
1317 |
-
type='',
|
1318 |
-
swap='ssd_kpt5'),
|
1319 |
-
193:
|
1320 |
-
dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
|
1321 |
-
194:
|
1322 |
-
dict(
|
1323 |
-
name='ssd_kpt5',
|
1324 |
-
id=194,
|
1325 |
-
color=[64, 64, 128],
|
1326 |
-
type='',
|
1327 |
-
swap='ssd_kpt3'),
|
1328 |
-
195:
|
1329 |
-
dict(
|
1330 |
-
name='ssd_kpt6',
|
1331 |
-
id=195,
|
1332 |
-
color=[64, 64, 128],
|
1333 |
-
type='',
|
1334 |
-
swap='ssd_kpt2'),
|
1335 |
-
196:
|
1336 |
-
dict(
|
1337 |
-
name='ssd_kpt7',
|
1338 |
-
id=196,
|
1339 |
-
color=[64, 64, 128],
|
1340 |
-
type='',
|
1341 |
-
swap='ssd_kpt29'),
|
1342 |
-
197:
|
1343 |
-
dict(
|
1344 |
-
name='ssd_kpt8',
|
1345 |
-
id=197,
|
1346 |
-
color=[64, 64, 128],
|
1347 |
-
type='',
|
1348 |
-
swap='ssd_kpt28'),
|
1349 |
-
198:
|
1350 |
-
dict(
|
1351 |
-
name='ssd_kpt9',
|
1352 |
-
id=198,
|
1353 |
-
color=[64, 64, 128],
|
1354 |
-
type='',
|
1355 |
-
swap='ssd_kpt27'),
|
1356 |
-
199:
|
1357 |
-
dict(
|
1358 |
-
name='ssd_kpt10',
|
1359 |
-
id=199,
|
1360 |
-
color=[64, 64, 128],
|
1361 |
-
type='',
|
1362 |
-
swap='ssd_kpt26'),
|
1363 |
-
200:
|
1364 |
-
dict(
|
1365 |
-
name='ssd_kpt11',
|
1366 |
-
id=200,
|
1367 |
-
color=[64, 64, 128],
|
1368 |
-
type='',
|
1369 |
-
swap='ssd_kpt25'),
|
1370 |
-
201:
|
1371 |
-
dict(
|
1372 |
-
name='ssd_kpt12',
|
1373 |
-
id=201,
|
1374 |
-
color=[64, 64, 128],
|
1375 |
-
type='',
|
1376 |
-
swap='ssd_kpt24'),
|
1377 |
-
202:
|
1378 |
-
dict(
|
1379 |
-
name='ssd_kpt13',
|
1380 |
-
id=202,
|
1381 |
-
color=[64, 64, 128],
|
1382 |
-
type='',
|
1383 |
-
swap='ssd_kpt23'),
|
1384 |
-
203:
|
1385 |
-
dict(
|
1386 |
-
name='ssd_kpt14',
|
1387 |
-
id=203,
|
1388 |
-
color=[64, 64, 128],
|
1389 |
-
type='',
|
1390 |
-
swap='ssd_kpt22'),
|
1391 |
-
204:
|
1392 |
-
dict(
|
1393 |
-
name='ssd_kpt15',
|
1394 |
-
id=204,
|
1395 |
-
color=[64, 64, 128],
|
1396 |
-
type='',
|
1397 |
-
swap='ssd_kpt21'),
|
1398 |
-
205:
|
1399 |
-
dict(
|
1400 |
-
name='ssd_kpt16',
|
1401 |
-
id=205,
|
1402 |
-
color=[64, 64, 128],
|
1403 |
-
type='',
|
1404 |
-
swap='ssd_kpt20'),
|
1405 |
-
206:
|
1406 |
-
dict(
|
1407 |
-
name='ssd_kpt17',
|
1408 |
-
id=206,
|
1409 |
-
color=[64, 64, 128],
|
1410 |
-
type='',
|
1411 |
-
swap='ssd_kpt19'),
|
1412 |
-
207:
|
1413 |
-
dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
|
1414 |
-
208:
|
1415 |
-
dict(
|
1416 |
-
name='ssd_kpt19',
|
1417 |
-
id=208,
|
1418 |
-
color=[64, 64, 128],
|
1419 |
-
type='',
|
1420 |
-
swap='ssd_kpt17'),
|
1421 |
-
209:
|
1422 |
-
dict(
|
1423 |
-
name='ssd_kpt20',
|
1424 |
-
id=209,
|
1425 |
-
color=[64, 64, 128],
|
1426 |
-
type='',
|
1427 |
-
swap='ssd_kpt16'),
|
1428 |
-
210:
|
1429 |
-
dict(
|
1430 |
-
name='ssd_kpt21',
|
1431 |
-
id=210,
|
1432 |
-
color=[64, 64, 128],
|
1433 |
-
type='',
|
1434 |
-
swap='ssd_kpt15'),
|
1435 |
-
211:
|
1436 |
-
dict(
|
1437 |
-
name='ssd_kpt22',
|
1438 |
-
id=211,
|
1439 |
-
color=[64, 64, 128],
|
1440 |
-
type='',
|
1441 |
-
swap='ssd_kpt14'),
|
1442 |
-
212:
|
1443 |
-
dict(
|
1444 |
-
name='ssd_kpt23',
|
1445 |
-
id=212,
|
1446 |
-
color=[64, 64, 128],
|
1447 |
-
type='',
|
1448 |
-
swap='ssd_kpt13'),
|
1449 |
-
213:
|
1450 |
-
dict(
|
1451 |
-
name='ssd_kpt24',
|
1452 |
-
id=213,
|
1453 |
-
color=[64, 64, 128],
|
1454 |
-
type='',
|
1455 |
-
swap='ssd_kpt12'),
|
1456 |
-
214:
|
1457 |
-
dict(
|
1458 |
-
name='ssd_kpt25',
|
1459 |
-
id=214,
|
1460 |
-
color=[64, 64, 128],
|
1461 |
-
type='',
|
1462 |
-
swap='ssd_kpt11'),
|
1463 |
-
215:
|
1464 |
-
dict(
|
1465 |
-
name='ssd_kpt26',
|
1466 |
-
id=215,
|
1467 |
-
color=[64, 64, 128],
|
1468 |
-
type='',
|
1469 |
-
swap='ssd_kpt10'),
|
1470 |
-
216:
|
1471 |
-
dict(
|
1472 |
-
name='ssd_kpt27',
|
1473 |
-
id=216,
|
1474 |
-
color=[64, 64, 128],
|
1475 |
-
type='',
|
1476 |
-
swap='ssd_kpt9'),
|
1477 |
-
217:
|
1478 |
-
dict(
|
1479 |
-
name='ssd_kpt28',
|
1480 |
-
id=217,
|
1481 |
-
color=[64, 64, 128],
|
1482 |
-
type='',
|
1483 |
-
swap='ssd_kpt8'),
|
1484 |
-
218:
|
1485 |
-
dict(
|
1486 |
-
name='ssd_kpt29',
|
1487 |
-
id=218,
|
1488 |
-
color=[64, 64, 128],
|
1489 |
-
type='',
|
1490 |
-
swap='ssd_kpt7'),
|
1491 |
-
219:
|
1492 |
-
dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
|
1493 |
-
220:
|
1494 |
-
dict(
|
1495 |
-
name='lsd_kpt2',
|
1496 |
-
id=220,
|
1497 |
-
color=[128, 64, 0],
|
1498 |
-
type='',
|
1499 |
-
swap='lsd_kpt6'),
|
1500 |
-
221:
|
1501 |
-
dict(
|
1502 |
-
name='lsd_kpt3',
|
1503 |
-
id=221,
|
1504 |
-
color=[128, 64, 0],
|
1505 |
-
type='',
|
1506 |
-
swap='lsd_kpt5'),
|
1507 |
-
222:
|
1508 |
-
dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
|
1509 |
-
223:
|
1510 |
-
dict(
|
1511 |
-
name='lsd_kpt5',
|
1512 |
-
id=223,
|
1513 |
-
color=[128, 64, 0],
|
1514 |
-
type='',
|
1515 |
-
swap='lsd_kpt3'),
|
1516 |
-
224:
|
1517 |
-
dict(
|
1518 |
-
name='lsd_kpt6',
|
1519 |
-
id=224,
|
1520 |
-
color=[128, 64, 0],
|
1521 |
-
type='',
|
1522 |
-
swap='lsd_kpt2'),
|
1523 |
-
225:
|
1524 |
-
dict(
|
1525 |
-
name='lsd_kpt7',
|
1526 |
-
id=225,
|
1527 |
-
color=[128, 64, 0],
|
1528 |
-
type='',
|
1529 |
-
swap='lsd_kpt37'),
|
1530 |
-
226:
|
1531 |
-
dict(
|
1532 |
-
name='lsd_kpt8',
|
1533 |
-
id=226,
|
1534 |
-
color=[128, 64, 0],
|
1535 |
-
type='',
|
1536 |
-
swap='lsd_kpt36'),
|
1537 |
-
227:
|
1538 |
-
dict(
|
1539 |
-
name='lsd_kpt9',
|
1540 |
-
id=227,
|
1541 |
-
color=[128, 64, 0],
|
1542 |
-
type='',
|
1543 |
-
swap='lsd_kpt35'),
|
1544 |
-
228:
|
1545 |
-
dict(
|
1546 |
-
name='lsd_kpt10',
|
1547 |
-
id=228,
|
1548 |
-
color=[128, 64, 0],
|
1549 |
-
type='',
|
1550 |
-
swap='lsd_kpt34'),
|
1551 |
-
229:
|
1552 |
-
dict(
|
1553 |
-
name='lsd_kpt11',
|
1554 |
-
id=229,
|
1555 |
-
color=[128, 64, 0],
|
1556 |
-
type='',
|
1557 |
-
swap='lsd_kpt33'),
|
1558 |
-
230:
|
1559 |
-
dict(
|
1560 |
-
name='lsd_kpt12',
|
1561 |
-
id=230,
|
1562 |
-
color=[128, 64, 0],
|
1563 |
-
type='',
|
1564 |
-
swap='lsd_kpt32'),
|
1565 |
-
231:
|
1566 |
-
dict(
|
1567 |
-
name='lsd_kpt13',
|
1568 |
-
id=231,
|
1569 |
-
color=[128, 64, 0],
|
1570 |
-
type='',
|
1571 |
-
swap='lsd_kpt31'),
|
1572 |
-
232:
|
1573 |
-
dict(
|
1574 |
-
name='lsd_kpt14',
|
1575 |
-
id=232,
|
1576 |
-
color=[128, 64, 0],
|
1577 |
-
type='',
|
1578 |
-
swap='lsd_kpt30'),
|
1579 |
-
233:
|
1580 |
-
dict(
|
1581 |
-
name='lsd_kpt15',
|
1582 |
-
id=233,
|
1583 |
-
color=[128, 64, 0],
|
1584 |
-
type='',
|
1585 |
-
swap='lsd_kpt29'),
|
1586 |
-
234:
|
1587 |
-
dict(
|
1588 |
-
name='lsd_kpt16',
|
1589 |
-
id=234,
|
1590 |
-
color=[128, 64, 0],
|
1591 |
-
type='',
|
1592 |
-
swap='lsd_kpt28'),
|
1593 |
-
235:
|
1594 |
-
dict(
|
1595 |
-
name='lsd_kpt17',
|
1596 |
-
id=235,
|
1597 |
-
color=[128, 64, 0],
|
1598 |
-
type='',
|
1599 |
-
swap='lsd_kpt27'),
|
1600 |
-
236:
|
1601 |
-
dict(
|
1602 |
-
name='lsd_kpt18',
|
1603 |
-
id=236,
|
1604 |
-
color=[128, 64, 0],
|
1605 |
-
type='',
|
1606 |
-
swap='lsd_kpt26'),
|
1607 |
-
237:
|
1608 |
-
dict(
|
1609 |
-
name='lsd_kpt19',
|
1610 |
-
id=237,
|
1611 |
-
color=[128, 64, 0],
|
1612 |
-
type='',
|
1613 |
-
swap='lsd_kpt25'),
|
1614 |
-
238:
|
1615 |
-
dict(
|
1616 |
-
name='lsd_kpt20',
|
1617 |
-
id=238,
|
1618 |
-
color=[128, 64, 0],
|
1619 |
-
type='',
|
1620 |
-
swap='lsd_kpt24'),
|
1621 |
-
239:
|
1622 |
-
dict(
|
1623 |
-
name='lsd_kpt21',
|
1624 |
-
id=239,
|
1625 |
-
color=[128, 64, 0],
|
1626 |
-
type='',
|
1627 |
-
swap='lsd_kpt23'),
|
1628 |
-
240:
|
1629 |
-
dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
|
1630 |
-
241:
|
1631 |
-
dict(
|
1632 |
-
name='lsd_kpt23',
|
1633 |
-
id=241,
|
1634 |
-
color=[128, 64, 0],
|
1635 |
-
type='',
|
1636 |
-
swap='lsd_kpt21'),
|
1637 |
-
242:
|
1638 |
-
dict(
|
1639 |
-
name='lsd_kpt24',
|
1640 |
-
id=242,
|
1641 |
-
color=[128, 64, 0],
|
1642 |
-
type='',
|
1643 |
-
swap='lsd_kpt20'),
|
1644 |
-
243:
|
1645 |
-
dict(
|
1646 |
-
name='lsd_kpt25',
|
1647 |
-
id=243,
|
1648 |
-
color=[128, 64, 0],
|
1649 |
-
type='',
|
1650 |
-
swap='lsd_kpt19'),
|
1651 |
-
244:
|
1652 |
-
dict(
|
1653 |
-
name='lsd_kpt26',
|
1654 |
-
id=244,
|
1655 |
-
color=[128, 64, 0],
|
1656 |
-
type='',
|
1657 |
-
swap='lsd_kpt18'),
|
1658 |
-
245:
|
1659 |
-
dict(
|
1660 |
-
name='lsd_kpt27',
|
1661 |
-
id=245,
|
1662 |
-
color=[128, 64, 0],
|
1663 |
-
type='',
|
1664 |
-
swap='lsd_kpt17'),
|
1665 |
-
246:
|
1666 |
-
dict(
|
1667 |
-
name='lsd_kpt28',
|
1668 |
-
id=246,
|
1669 |
-
color=[128, 64, 0],
|
1670 |
-
type='',
|
1671 |
-
swap='lsd_kpt16'),
|
1672 |
-
247:
|
1673 |
-
dict(
|
1674 |
-
name='lsd_kpt29',
|
1675 |
-
id=247,
|
1676 |
-
color=[128, 64, 0],
|
1677 |
-
type='',
|
1678 |
-
swap='lsd_kpt15'),
|
1679 |
-
248:
|
1680 |
-
dict(
|
1681 |
-
name='lsd_kpt30',
|
1682 |
-
id=248,
|
1683 |
-
color=[128, 64, 0],
|
1684 |
-
type='',
|
1685 |
-
swap='lsd_kpt14'),
|
1686 |
-
249:
|
1687 |
-
dict(
|
1688 |
-
name='lsd_kpt31',
|
1689 |
-
id=249,
|
1690 |
-
color=[128, 64, 0],
|
1691 |
-
type='',
|
1692 |
-
swap='lsd_kpt13'),
|
1693 |
-
250:
|
1694 |
-
dict(
|
1695 |
-
name='lsd_kpt32',
|
1696 |
-
id=250,
|
1697 |
-
color=[128, 64, 0],
|
1698 |
-
type='',
|
1699 |
-
swap='lsd_kpt12'),
|
1700 |
-
251:
|
1701 |
-
dict(
|
1702 |
-
name='lsd_kpt33',
|
1703 |
-
id=251,
|
1704 |
-
color=[128, 64, 0],
|
1705 |
-
type='',
|
1706 |
-
swap='lsd_kpt11'),
|
1707 |
-
252:
|
1708 |
-
dict(
|
1709 |
-
name='lsd_kpt34',
|
1710 |
-
id=252,
|
1711 |
-
color=[128, 64, 0],
|
1712 |
-
type='',
|
1713 |
-
swap='lsd_kpt10'),
|
1714 |
-
253:
|
1715 |
-
dict(
|
1716 |
-
name='lsd_kpt35',
|
1717 |
-
id=253,
|
1718 |
-
color=[128, 64, 0],
|
1719 |
-
type='',
|
1720 |
-
swap='lsd_kpt9'),
|
1721 |
-
254:
|
1722 |
-
dict(
|
1723 |
-
name='lsd_kpt36',
|
1724 |
-
id=254,
|
1725 |
-
color=[128, 64, 0],
|
1726 |
-
type='',
|
1727 |
-
swap='lsd_kpt8'),
|
1728 |
-
255:
|
1729 |
-
dict(
|
1730 |
-
name='lsd_kpt37',
|
1731 |
-
id=255,
|
1732 |
-
color=[128, 64, 0],
|
1733 |
-
type='',
|
1734 |
-
swap='lsd_kpt7'),
|
1735 |
-
256:
|
1736 |
-
dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
|
1737 |
-
257:
|
1738 |
-
dict(
|
1739 |
-
name='vd_kpt2',
|
1740 |
-
id=257,
|
1741 |
-
color=[128, 64, 255],
|
1742 |
-
type='',
|
1743 |
-
swap='vd_kpt6'),
|
1744 |
-
258:
|
1745 |
-
dict(
|
1746 |
-
name='vd_kpt3',
|
1747 |
-
id=258,
|
1748 |
-
color=[128, 64, 255],
|
1749 |
-
type='',
|
1750 |
-
swap='vd_kpt5'),
|
1751 |
-
259:
|
1752 |
-
dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
|
1753 |
-
260:
|
1754 |
-
dict(
|
1755 |
-
name='vd_kpt5',
|
1756 |
-
id=260,
|
1757 |
-
color=[128, 64, 255],
|
1758 |
-
type='',
|
1759 |
-
swap='vd_kpt3'),
|
1760 |
-
261:
|
1761 |
-
dict(
|
1762 |
-
name='vd_kpt6',
|
1763 |
-
id=261,
|
1764 |
-
color=[128, 64, 255],
|
1765 |
-
type='',
|
1766 |
-
swap='vd_kpt2'),
|
1767 |
-
262:
|
1768 |
-
dict(
|
1769 |
-
name='vd_kpt7',
|
1770 |
-
id=262,
|
1771 |
-
color=[128, 64, 255],
|
1772 |
-
type='',
|
1773 |
-
swap='vd_kpt19'),
|
1774 |
-
263:
|
1775 |
-
dict(
|
1776 |
-
name='vd_kpt8',
|
1777 |
-
id=263,
|
1778 |
-
color=[128, 64, 255],
|
1779 |
-
type='',
|
1780 |
-
swap='vd_kpt18'),
|
1781 |
-
264:
|
1782 |
-
dict(
|
1783 |
-
name='vd_kpt9',
|
1784 |
-
id=264,
|
1785 |
-
color=[128, 64, 255],
|
1786 |
-
type='',
|
1787 |
-
swap='vd_kpt17'),
|
1788 |
-
265:
|
1789 |
-
dict(
|
1790 |
-
name='vd_kpt10',
|
1791 |
-
id=265,
|
1792 |
-
color=[128, 64, 255],
|
1793 |
-
type='',
|
1794 |
-
swap='vd_kpt16'),
|
1795 |
-
266:
|
1796 |
-
dict(
|
1797 |
-
name='vd_kpt11',
|
1798 |
-
id=266,
|
1799 |
-
color=[128, 64, 255],
|
1800 |
-
type='',
|
1801 |
-
swap='vd_kpt15'),
|
1802 |
-
267:
|
1803 |
-
dict(
|
1804 |
-
name='vd_kpt12',
|
1805 |
-
id=267,
|
1806 |
-
color=[128, 64, 255],
|
1807 |
-
type='',
|
1808 |
-
swap='vd_kpt14'),
|
1809 |
-
268:
|
1810 |
-
dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
|
1811 |
-
269:
|
1812 |
-
dict(
|
1813 |
-
name='vd_kpt14',
|
1814 |
-
id=269,
|
1815 |
-
color=[128, 64, 255],
|
1816 |
-
type='',
|
1817 |
-
swap='vd_kpt12'),
|
1818 |
-
270:
|
1819 |
-
dict(
|
1820 |
-
name='vd_kpt15',
|
1821 |
-
id=270,
|
1822 |
-
color=[128, 64, 255],
|
1823 |
-
type='',
|
1824 |
-
swap='vd_kpt11'),
|
1825 |
-
271:
|
1826 |
-
dict(
|
1827 |
-
name='vd_kpt16',
|
1828 |
-
id=271,
|
1829 |
-
color=[128, 64, 255],
|
1830 |
-
type='',
|
1831 |
-
swap='vd_kpt10'),
|
1832 |
-
272:
|
1833 |
-
dict(
|
1834 |
-
name='vd_kpt17',
|
1835 |
-
id=272,
|
1836 |
-
color=[128, 64, 255],
|
1837 |
-
type='',
|
1838 |
-
swap='vd_kpt9'),
|
1839 |
-
273:
|
1840 |
-
dict(
|
1841 |
-
name='vd_kpt18',
|
1842 |
-
id=273,
|
1843 |
-
color=[128, 64, 255],
|
1844 |
-
type='',
|
1845 |
-
swap='vd_kpt8'),
|
1846 |
-
274:
|
1847 |
-
dict(
|
1848 |
-
name='vd_kpt19',
|
1849 |
-
id=274,
|
1850 |
-
color=[128, 64, 255],
|
1851 |
-
type='',
|
1852 |
-
swap='vd_kpt7'),
|
1853 |
-
275:
|
1854 |
-
dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
|
1855 |
-
276:
|
1856 |
-
dict(
|
1857 |
-
name='sd_kpt2',
|
1858 |
-
id=276,
|
1859 |
-
color=[128, 64, 0],
|
1860 |
-
type='',
|
1861 |
-
swap='sd_kpt6'),
|
1862 |
-
277:
|
1863 |
-
dict(
|
1864 |
-
name='sd_kpt3',
|
1865 |
-
id=277,
|
1866 |
-
color=[128, 64, 0],
|
1867 |
-
type='',
|
1868 |
-
swap='sd_kpt5'),
|
1869 |
-
278:
|
1870 |
-
dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
|
1871 |
-
279:
|
1872 |
-
dict(
|
1873 |
-
name='sd_kpt5',
|
1874 |
-
id=279,
|
1875 |
-
color=[128, 64, 0],
|
1876 |
-
type='',
|
1877 |
-
swap='sd_kpt3'),
|
1878 |
-
280:
|
1879 |
-
dict(
|
1880 |
-
name='sd_kpt6',
|
1881 |
-
id=280,
|
1882 |
-
color=[128, 64, 0],
|
1883 |
-
type='',
|
1884 |
-
swap='sd_kpt2'),
|
1885 |
-
281:
|
1886 |
-
dict(
|
1887 |
-
name='sd_kpt7',
|
1888 |
-
id=281,
|
1889 |
-
color=[128, 64, 0],
|
1890 |
-
type='',
|
1891 |
-
swap='sd_kpt19'),
|
1892 |
-
282:
|
1893 |
-
dict(
|
1894 |
-
name='sd_kpt8',
|
1895 |
-
id=282,
|
1896 |
-
color=[128, 64, 0],
|
1897 |
-
type='',
|
1898 |
-
swap='sd_kpt18'),
|
1899 |
-
283:
|
1900 |
-
dict(
|
1901 |
-
name='sd_kpt9',
|
1902 |
-
id=283,
|
1903 |
-
color=[128, 64, 0],
|
1904 |
-
type='',
|
1905 |
-
swap='sd_kpt17'),
|
1906 |
-
284:
|
1907 |
-
dict(
|
1908 |
-
name='sd_kpt10',
|
1909 |
-
id=284,
|
1910 |
-
color=[128, 64, 0],
|
1911 |
-
type='',
|
1912 |
-
swap='sd_kpt16'),
|
1913 |
-
285:
|
1914 |
-
dict(
|
1915 |
-
name='sd_kpt11',
|
1916 |
-
id=285,
|
1917 |
-
color=[128, 64, 0],
|
1918 |
-
type='',
|
1919 |
-
swap='sd_kpt15'),
|
1920 |
-
286:
|
1921 |
-
dict(
|
1922 |
-
name='sd_kpt12',
|
1923 |
-
id=286,
|
1924 |
-
color=[128, 64, 0],
|
1925 |
-
type='',
|
1926 |
-
swap='sd_kpt14'),
|
1927 |
-
287:
|
1928 |
-
dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
|
1929 |
-
288:
|
1930 |
-
dict(
|
1931 |
-
name='sd_kpt14',
|
1932 |
-
id=288,
|
1933 |
-
color=[128, 64, 0],
|
1934 |
-
type='',
|
1935 |
-
swap='sd_kpt12'),
|
1936 |
-
289:
|
1937 |
-
dict(
|
1938 |
-
name='sd_kpt15',
|
1939 |
-
id=289,
|
1940 |
-
color=[128, 64, 0],
|
1941 |
-
type='',
|
1942 |
-
swap='sd_kpt11'),
|
1943 |
-
290:
|
1944 |
-
dict(
|
1945 |
-
name='sd_kpt16',
|
1946 |
-
id=290,
|
1947 |
-
color=[128, 64, 0],
|
1948 |
-
type='',
|
1949 |
-
swap='sd_kpt10'),
|
1950 |
-
291:
|
1951 |
-
dict(
|
1952 |
-
name='sd_kpt17',
|
1953 |
-
id=291,
|
1954 |
-
color=[128, 64, 0],
|
1955 |
-
type='',
|
1956 |
-
swap='sd_kpt9'),
|
1957 |
-
292:
|
1958 |
-
dict(
|
1959 |
-
name='sd_kpt18',
|
1960 |
-
id=292,
|
1961 |
-
color=[128, 64, 0],
|
1962 |
-
type='',
|
1963 |
-
swap='sd_kpt8'),
|
1964 |
-
293:
|
1965 |
-
dict(
|
1966 |
-
name='sd_kpt19',
|
1967 |
-
id=293,
|
1968 |
-
color=[128, 64, 0],
|
1969 |
-
type='',
|
1970 |
-
swap='sd_kpt7')
|
1971 |
-
}),
|
1972 |
-
skeleton_info=dict({
|
1973 |
-
0:
|
1974 |
-
dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
|
1975 |
-
1:
|
1976 |
-
dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
|
1977 |
-
2:
|
1978 |
-
dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
|
1979 |
-
3:
|
1980 |
-
dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
|
1981 |
-
4:
|
1982 |
-
dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
|
1983 |
-
5:
|
1984 |
-
dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
|
1985 |
-
6:
|
1986 |
-
dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
|
1987 |
-
7:
|
1988 |
-
dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
|
1989 |
-
8:
|
1990 |
-
dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
|
1991 |
-
9:
|
1992 |
-
dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
|
1993 |
-
10:
|
1994 |
-
dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
|
1995 |
-
11:
|
1996 |
-
dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
|
1997 |
-
12:
|
1998 |
-
dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
|
1999 |
-
13:
|
2000 |
-
dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
|
2001 |
-
14:
|
2002 |
-
dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
|
2003 |
-
15:
|
2004 |
-
dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
|
2005 |
-
16:
|
2006 |
-
dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
|
2007 |
-
17:
|
2008 |
-
dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
|
2009 |
-
18:
|
2010 |
-
dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
|
2011 |
-
19:
|
2012 |
-
dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
|
2013 |
-
20:
|
2014 |
-
dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
|
2015 |
-
21:
|
2016 |
-
dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
|
2017 |
-
22:
|
2018 |
-
dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
|
2019 |
-
23:
|
2020 |
-
dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
|
2021 |
-
24:
|
2022 |
-
dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
|
2023 |
-
25:
|
2024 |
-
dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
|
2025 |
-
26:
|
2026 |
-
dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
|
2027 |
-
27:
|
2028 |
-
dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
|
2029 |
-
28:
|
2030 |
-
dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
|
2031 |
-
29:
|
2032 |
-
dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
|
2033 |
-
30:
|
2034 |
-
dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
|
2035 |
-
31:
|
2036 |
-
dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
|
2037 |
-
32:
|
2038 |
-
dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
|
2039 |
-
33:
|
2040 |
-
dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
|
2041 |
-
34:
|
2042 |
-
dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
|
2043 |
-
35:
|
2044 |
-
dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
|
2045 |
-
36:
|
2046 |
-
dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
|
2047 |
-
37:
|
2048 |
-
dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
|
2049 |
-
38:
|
2050 |
-
dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
|
2051 |
-
39:
|
2052 |
-
dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
|
2053 |
-
40:
|
2054 |
-
dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
|
2055 |
-
41:
|
2056 |
-
dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
|
2057 |
-
42:
|
2058 |
-
dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
|
2059 |
-
43:
|
2060 |
-
dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
|
2061 |
-
44:
|
2062 |
-
dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
|
2063 |
-
45:
|
2064 |
-
dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
|
2065 |
-
46:
|
2066 |
-
dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
|
2067 |
-
47:
|
2068 |
-
dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
|
2069 |
-
48:
|
2070 |
-
dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
|
2071 |
-
49:
|
2072 |
-
dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
|
2073 |
-
50:
|
2074 |
-
dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
|
2075 |
-
51:
|
2076 |
-
dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
|
2077 |
-
52:
|
2078 |
-
dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
|
2079 |
-
53:
|
2080 |
-
dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
|
2081 |
-
54:
|
2082 |
-
dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
|
2083 |
-
55:
|
2084 |
-
dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
|
2085 |
-
56:
|
2086 |
-
dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
|
2087 |
-
57:
|
2088 |
-
dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
|
2089 |
-
58:
|
2090 |
-
dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
|
2091 |
-
59:
|
2092 |
-
dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
|
2093 |
-
60:
|
2094 |
-
dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
|
2095 |
-
61:
|
2096 |
-
dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
|
2097 |
-
62:
|
2098 |
-
dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
|
2099 |
-
63:
|
2100 |
-
dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
|
2101 |
-
64:
|
2102 |
-
dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
|
2103 |
-
65:
|
2104 |
-
dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
|
2105 |
-
66:
|
2106 |
-
dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
|
2107 |
-
67:
|
2108 |
-
dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
|
2109 |
-
68:
|
2110 |
-
dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
|
2111 |
-
69:
|
2112 |
-
dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
|
2113 |
-
70:
|
2114 |
-
dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
|
2115 |
-
71:
|
2116 |
-
dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
|
2117 |
-
72:
|
2118 |
-
dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
|
2119 |
-
73:
|
2120 |
-
dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
|
2121 |
-
74:
|
2122 |
-
dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
|
2123 |
-
75:
|
2124 |
-
dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
|
2125 |
-
76:
|
2126 |
-
dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
|
2127 |
-
77:
|
2128 |
-
dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
|
2129 |
-
78:
|
2130 |
-
dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
|
2131 |
-
79:
|
2132 |
-
dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
|
2133 |
-
80:
|
2134 |
-
dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
|
2135 |
-
81:
|
2136 |
-
dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
|
2137 |
-
82:
|
2138 |
-
dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
|
2139 |
-
83:
|
2140 |
-
dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
|
2141 |
-
84:
|
2142 |
-
dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
|
2143 |
-
85:
|
2144 |
-
dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
|
2145 |
-
86:
|
2146 |
-
dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
|
2147 |
-
87:
|
2148 |
-
dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
|
2149 |
-
88:
|
2150 |
-
dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
|
2151 |
-
89:
|
2152 |
-
dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
|
2153 |
-
90:
|
2154 |
-
dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
|
2155 |
-
91:
|
2156 |
-
dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
|
2157 |
-
92:
|
2158 |
-
dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
|
2159 |
-
93:
|
2160 |
-
dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
|
2161 |
-
94:
|
2162 |
-
dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
|
2163 |
-
95:
|
2164 |
-
dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
|
2165 |
-
96:
|
2166 |
-
dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
|
2167 |
-
97:
|
2168 |
-
dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
|
2169 |
-
98:
|
2170 |
-
dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
|
2171 |
-
99:
|
2172 |
-
dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
|
2173 |
-
100:
|
2174 |
-
dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
|
2175 |
-
101:
|
2176 |
-
dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
|
2177 |
-
102:
|
2178 |
-
dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
|
2179 |
-
103:
|
2180 |
-
dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
|
2181 |
-
104:
|
2182 |
-
dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
|
2183 |
-
105:
|
2184 |
-
dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
|
2185 |
-
106:
|
2186 |
-
dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
|
2187 |
-
107:
|
2188 |
-
dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
|
2189 |
-
108:
|
2190 |
-
dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
|
2191 |
-
109:
|
2192 |
-
dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
|
2193 |
-
110:
|
2194 |
-
dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
|
2195 |
-
111:
|
2196 |
-
dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
|
2197 |
-
112:
|
2198 |
-
dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
|
2199 |
-
113:
|
2200 |
-
dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
|
2201 |
-
114:
|
2202 |
-
dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
|
2203 |
-
115:
|
2204 |
-
dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
|
2205 |
-
116:
|
2206 |
-
dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
|
2207 |
-
117:
|
2208 |
-
dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
|
2209 |
-
118:
|
2210 |
-
dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
|
2211 |
-
119:
|
2212 |
-
dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
|
2213 |
-
120:
|
2214 |
-
dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
|
2215 |
-
121:
|
2216 |
-
dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
|
2217 |
-
122:
|
2218 |
-
dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
|
2219 |
-
123:
|
2220 |
-
dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
|
2221 |
-
124:
|
2222 |
-
dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
|
2223 |
-
125:
|
2224 |
-
dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
|
2225 |
-
126:
|
2226 |
-
dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
|
2227 |
-
127:
|
2228 |
-
dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
|
2229 |
-
128:
|
2230 |
-
dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
|
2231 |
-
129:
|
2232 |
-
dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
|
2233 |
-
130:
|
2234 |
-
dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
|
2235 |
-
131:
|
2236 |
-
dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
|
2237 |
-
132:
|
2238 |
-
dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
|
2239 |
-
133:
|
2240 |
-
dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
|
2241 |
-
134:
|
2242 |
-
dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
|
2243 |
-
135:
|
2244 |
-
dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
|
2245 |
-
136:
|
2246 |
-
dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
|
2247 |
-
137:
|
2248 |
-
dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
|
2249 |
-
138:
|
2250 |
-
dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
|
2251 |
-
139:
|
2252 |
-
dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
|
2253 |
-
140:
|
2254 |
-
dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
|
2255 |
-
141:
|
2256 |
-
dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
|
2257 |
-
142:
|
2258 |
-
dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
|
2259 |
-
143:
|
2260 |
-
dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
|
2261 |
-
144:
|
2262 |
-
dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
|
2263 |
-
145:
|
2264 |
-
dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
|
2265 |
-
146:
|
2266 |
-
dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
|
2267 |
-
147:
|
2268 |
-
dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
|
2269 |
-
148:
|
2270 |
-
dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
|
2271 |
-
149:
|
2272 |
-
dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
|
2273 |
-
150:
|
2274 |
-
dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
|
2275 |
-
151:
|
2276 |
-
dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
|
2277 |
-
152:
|
2278 |
-
dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
|
2279 |
-
153:
|
2280 |
-
dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
|
2281 |
-
154:
|
2282 |
-
dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
|
2283 |
-
155:
|
2284 |
-
dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
|
2285 |
-
156:
|
2286 |
-
dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
|
2287 |
-
157:
|
2288 |
-
dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
|
2289 |
-
158:
|
2290 |
-
dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
|
2291 |
-
159:
|
2292 |
-
dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
|
2293 |
-
160:
|
2294 |
-
dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
|
2295 |
-
161:
|
2296 |
-
dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
|
2297 |
-
162:
|
2298 |
-
dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
|
2299 |
-
163:
|
2300 |
-
dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
|
2301 |
-
164:
|
2302 |
-
dict(
|
2303 |
-
link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128,
|
2304 |
-
128]),
|
2305 |
-
165:
|
2306 |
-
dict(
|
2307 |
-
link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128,
|
2308 |
-
128]),
|
2309 |
-
166:
|
2310 |
-
dict(
|
2311 |
-
link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128,
|
2312 |
-
128]),
|
2313 |
-
167:
|
2314 |
-
dict(
|
2315 |
-
link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128,
|
2316 |
-
128]),
|
2317 |
-
168:
|
2318 |
-
dict(
|
2319 |
-
link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128,
|
2320 |
-
128]),
|
2321 |
-
169:
|
2322 |
-
dict(
|
2323 |
-
link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128,
|
2324 |
-
128]),
|
2325 |
-
170:
|
2326 |
-
dict(
|
2327 |
-
link=('shorts_kpt9', 'shorts_kpt10'),
|
2328 |
-
id=170,
|
2329 |
-
color=[128, 128, 128]),
|
2330 |
-
171:
|
2331 |
-
dict(
|
2332 |
-
link=('shorts_kpt10', 'shorts_kpt3'),
|
2333 |
-
id=171,
|
2334 |
-
color=[128, 128, 128]),
|
2335 |
-
172:
|
2336 |
-
dict(
|
2337 |
-
link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128,
|
2338 |
-
128]),
|
2339 |
-
173:
|
2340 |
-
dict(
|
2341 |
-
link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128,
|
2342 |
-
128]),
|
2343 |
-
174:
|
2344 |
-
dict(
|
2345 |
-
link=('trousers_kpt1', 'trousers_kpt4'),
|
2346 |
-
id=174,
|
2347 |
-
color=[128, 0, 128]),
|
2348 |
-
175:
|
2349 |
-
dict(
|
2350 |
-
link=('trousers_kpt4', 'trousers_kpt5'),
|
2351 |
-
id=175,
|
2352 |
-
color=[128, 0, 128]),
|
2353 |
-
176:
|
2354 |
-
dict(
|
2355 |
-
link=('trousers_kpt5', 'trousers_kpt6'),
|
2356 |
-
id=176,
|
2357 |
-
color=[128, 0, 128]),
|
2358 |
-
177:
|
2359 |
-
dict(
|
2360 |
-
link=('trousers_kpt6', 'trousers_kpt7'),
|
2361 |
-
id=177,
|
2362 |
-
color=[128, 0, 128]),
|
2363 |
-
178:
|
2364 |
-
dict(
|
2365 |
-
link=('trousers_kpt7', 'trousers_kpt8'),
|
2366 |
-
id=178,
|
2367 |
-
color=[128, 0, 128]),
|
2368 |
-
179:
|
2369 |
-
dict(
|
2370 |
-
link=('trousers_kpt8', 'trousers_kpt9'),
|
2371 |
-
id=179,
|
2372 |
-
color=[128, 0, 128]),
|
2373 |
-
180:
|
2374 |
-
dict(
|
2375 |
-
link=('trousers_kpt9', 'trousers_kpt10'),
|
2376 |
-
id=180,
|
2377 |
-
color=[128, 0, 128]),
|
2378 |
-
181:
|
2379 |
-
dict(
|
2380 |
-
link=('trousers_kpt10', 'trousers_kpt11'),
|
2381 |
-
id=181,
|
2382 |
-
color=[128, 0, 128]),
|
2383 |
-
182:
|
2384 |
-
dict(
|
2385 |
-
link=('trousers_kpt11', 'trousers_kpt12'),
|
2386 |
-
id=182,
|
2387 |
-
color=[128, 0, 128]),
|
2388 |
-
183:
|
2389 |
-
dict(
|
2390 |
-
link=('trousers_kpt12', 'trousers_kpt13'),
|
2391 |
-
id=183,
|
2392 |
-
color=[128, 0, 128]),
|
2393 |
-
184:
|
2394 |
-
dict(
|
2395 |
-
link=('trousers_kpt13', 'trousers_kpt14'),
|
2396 |
-
id=184,
|
2397 |
-
color=[128, 0, 128]),
|
2398 |
-
185:
|
2399 |
-
dict(
|
2400 |
-
link=('trousers_kpt14', 'trousers_kpt3'),
|
2401 |
-
id=185,
|
2402 |
-
color=[128, 0, 128]),
|
2403 |
-
186:
|
2404 |
-
dict(
|
2405 |
-
link=('trousers_kpt3', 'trousers_kpt2'),
|
2406 |
-
id=186,
|
2407 |
-
color=[128, 0, 128]),
|
2408 |
-
187:
|
2409 |
-
dict(
|
2410 |
-
link=('trousers_kpt2', 'trousers_kpt1'),
|
2411 |
-
id=187,
|
2412 |
-
color=[128, 0, 128]),
|
2413 |
-
188:
|
2414 |
-
dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
|
2415 |
-
189:
|
2416 |
-
dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
|
2417 |
-
190:
|
2418 |
-
dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
|
2419 |
-
191:
|
2420 |
-
dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
|
2421 |
-
192:
|
2422 |
-
dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
|
2423 |
-
193:
|
2424 |
-
dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
|
2425 |
-
194:
|
2426 |
-
dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
|
2427 |
-
195:
|
2428 |
-
dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
|
2429 |
-
196:
|
2430 |
-
dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
|
2431 |
-
197:
|
2432 |
-
dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
|
2433 |
-
198:
|
2434 |
-
dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
|
2435 |
-
199:
|
2436 |
-
dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
|
2437 |
-
200:
|
2438 |
-
dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
|
2439 |
-
201:
|
2440 |
-
dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
|
2441 |
-
202:
|
2442 |
-
dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
|
2443 |
-
203:
|
2444 |
-
dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
|
2445 |
-
204:
|
2446 |
-
dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
|
2447 |
-
205:
|
2448 |
-
dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
|
2449 |
-
206:
|
2450 |
-
dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
|
2451 |
-
207:
|
2452 |
-
dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
|
2453 |
-
208:
|
2454 |
-
dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
|
2455 |
-
209:
|
2456 |
-
dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
|
2457 |
-
210:
|
2458 |
-
dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
|
2459 |
-
211:
|
2460 |
-
dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
|
2461 |
-
212:
|
2462 |
-
dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
|
2463 |
-
213:
|
2464 |
-
dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
|
2465 |
-
214:
|
2466 |
-
dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
|
2467 |
-
215:
|
2468 |
-
dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
|
2469 |
-
216:
|
2470 |
-
dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
|
2471 |
-
217:
|
2472 |
-
dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
|
2473 |
-
218:
|
2474 |
-
dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
|
2475 |
-
219:
|
2476 |
-
dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
|
2477 |
-
220:
|
2478 |
-
dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
|
2479 |
-
221:
|
2480 |
-
dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
|
2481 |
-
222:
|
2482 |
-
dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
|
2483 |
-
223:
|
2484 |
-
dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
|
2485 |
-
224:
|
2486 |
-
dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
|
2487 |
-
225:
|
2488 |
-
dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
|
2489 |
-
226:
|
2490 |
-
dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
|
2491 |
-
227:
|
2492 |
-
dict(link=('lsd_kpt2', 'lsd_kpt7'), id=228, color=[128, 64, 0]),
|
2493 |
-
228:
|
2494 |
-
dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
|
2495 |
-
229:
|
2496 |
-
dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
|
2497 |
-
230:
|
2498 |
-
dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
|
2499 |
-
231:
|
2500 |
-
dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
|
2501 |
-
232:
|
2502 |
-
dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
|
2503 |
-
233:
|
2504 |
-
dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
|
2505 |
-
234:
|
2506 |
-
dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
|
2507 |
-
235:
|
2508 |
-
dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
|
2509 |
-
236:
|
2510 |
-
dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
|
2511 |
-
237:
|
2512 |
-
dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
|
2513 |
-
238:
|
2514 |
-
dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
|
2515 |
-
239:
|
2516 |
-
dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
|
2517 |
-
240:
|
2518 |
-
dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
|
2519 |
-
241:
|
2520 |
-
dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
|
2521 |
-
242:
|
2522 |
-
dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
|
2523 |
-
243:
|
2524 |
-
dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
|
2525 |
-
244:
|
2526 |
-
dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
|
2527 |
-
245:
|
2528 |
-
dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
|
2529 |
-
246:
|
2530 |
-
dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
|
2531 |
-
247:
|
2532 |
-
dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
|
2533 |
-
248:
|
2534 |
-
dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
|
2535 |
-
249:
|
2536 |
-
dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
|
2537 |
-
250:
|
2538 |
-
dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
|
2539 |
-
251:
|
2540 |
-
dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
|
2541 |
-
252:
|
2542 |
-
dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
|
2543 |
-
253:
|
2544 |
-
dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
|
2545 |
-
254:
|
2546 |
-
dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
|
2547 |
-
255:
|
2548 |
-
dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
|
2549 |
-
256:
|
2550 |
-
dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
|
2551 |
-
257:
|
2552 |
-
dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
|
2553 |
-
258:
|
2554 |
-
dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
|
2555 |
-
259:
|
2556 |
-
dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
|
2557 |
-
260:
|
2558 |
-
dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
|
2559 |
-
261:
|
2560 |
-
dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
|
2561 |
-
262:
|
2562 |
-
dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
|
2563 |
-
263:
|
2564 |
-
dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
|
2565 |
-
264:
|
2566 |
-
dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
|
2567 |
-
265:
|
2568 |
-
dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
|
2569 |
-
266:
|
2570 |
-
dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
|
2571 |
-
267:
|
2572 |
-
dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
|
2573 |
-
268:
|
2574 |
-
dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
|
2575 |
-
269:
|
2576 |
-
dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
|
2577 |
-
270:
|
2578 |
-
dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
|
2579 |
-
271:
|
2580 |
-
dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
|
2581 |
-
272:
|
2582 |
-
dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
|
2583 |
-
273:
|
2584 |
-
dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
|
2585 |
-
274:
|
2586 |
-
dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
|
2587 |
-
275:
|
2588 |
-
dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
|
2589 |
-
276:
|
2590 |
-
dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
|
2591 |
-
277:
|
2592 |
-
dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
|
2593 |
-
278:
|
2594 |
-
dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
|
2595 |
-
279:
|
2596 |
-
dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
|
2597 |
-
280:
|
2598 |
-
dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
|
2599 |
-
281:
|
2600 |
-
dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
|
2601 |
-
282:
|
2602 |
-
dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
|
2603 |
-
283:
|
2604 |
-
dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
|
2605 |
-
284:
|
2606 |
-
dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
|
2607 |
-
285:
|
2608 |
-
dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
|
2609 |
-
286:
|
2610 |
-
dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
|
2611 |
-
287:
|
2612 |
-
dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
|
2613 |
-
288:
|
2614 |
-
dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
|
2615 |
-
289:
|
2616 |
-
dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
|
2617 |
-
290:
|
2618 |
-
dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
|
2619 |
-
291:
|
2620 |
-
dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
|
2621 |
-
292:
|
2622 |
-
dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
|
2623 |
-
293:
|
2624 |
-
dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
|
2625 |
-
294:
|
2626 |
-
dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
|
2627 |
-
295:
|
2628 |
-
dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
|
2629 |
-
296:
|
2630 |
-
dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
|
2631 |
-
297:
|
2632 |
-
dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
|
2633 |
-
298:
|
2634 |
-
dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
|
2635 |
-
299:
|
2636 |
-
dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
|
2637 |
-
300:
|
2638 |
-
dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
|
2639 |
-
301:
|
2640 |
-
dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
|
2641 |
-
302:
|
2642 |
-
dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
|
2643 |
-
303:
|
2644 |
-
dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
|
2645 |
-
}),
|
2646 |
-
joint_weights=[
|
2647 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2648 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2649 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2650 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2651 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2652 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2653 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2654 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2655 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2656 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2657 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2658 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2659 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2660 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2661 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2662 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2663 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2664 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2665 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2666 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
|
2667 |
-
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
|
2668 |
-
],
|
2669 |
-
sigmas=[])
|
2670 |
-
param_scheduler = [
|
2671 |
-
dict(
|
2672 |
-
type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
|
2673 |
-
dict(
|
2674 |
-
type='MultiStepLR',
|
2675 |
-
begin=0,
|
2676 |
-
end=60,
|
2677 |
-
milestones=[20, 40],
|
2678 |
-
gamma=0.1,
|
2679 |
-
by_epoch=True)
|
2680 |
-
]
|
2681 |
-
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
|
2682 |
-
auto_scale_lr = dict(base_batch_size=512)
|
2683 |
-
dataset_type = 'DeepFashion2Dataset'
|
2684 |
-
data_mode = 'topdown'
|
2685 |
-
data_root = 'data/deepfashion2/'
|
2686 |
-
codec = dict(
|
2687 |
-
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
|
2688 |
-
train_pipeline = [
|
2689 |
-
dict(type='LoadImage'),
|
2690 |
-
dict(type='GetBBoxCenterScale'),
|
2691 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2692 |
-
dict(
|
2693 |
-
type='RandomBBoxTransform',
|
2694 |
-
shift_prob=0,
|
2695 |
-
rotate_factor=60,
|
2696 |
-
scale_factor=(0.75, 1.25)),
|
2697 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2698 |
-
dict(
|
2699 |
-
type='GenerateTarget',
|
2700 |
-
encoder=dict(
|
2701 |
-
type='MSRAHeatmap',
|
2702 |
-
input_size=(192, 256),
|
2703 |
-
heatmap_size=(48, 64),
|
2704 |
-
sigma=2)),
|
2705 |
-
dict(type='PackPoseInputs')
|
2706 |
-
]
|
2707 |
-
val_pipeline = [
|
2708 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2709 |
-
dict(type='GetBBoxCenterScale'),
|
2710 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2711 |
-
dict(type='PackPoseInputs')
|
2712 |
-
]
|
2713 |
-
train_dataloader = dict(
|
2714 |
-
batch_size=64,
|
2715 |
-
num_workers=6,
|
2716 |
-
persistent_workers=True,
|
2717 |
-
sampler=dict(type='DefaultSampler', shuffle=True),
|
2718 |
-
dataset=dict(
|
2719 |
-
type='DeepFashion2Dataset',
|
2720 |
-
data_root='data/deepfashion2/',
|
2721 |
-
data_mode='topdown',
|
2722 |
-
ann_file='train/deepfashion2_long_sleeved_shirt.json',
|
2723 |
-
data_prefix=dict(img='train/image/'),
|
2724 |
-
pipeline=[
|
2725 |
-
dict(type='LoadImage'),
|
2726 |
-
dict(type='GetBBoxCenterScale'),
|
2727 |
-
dict(type='RandomFlip', direction='horizontal'),
|
2728 |
-
dict(
|
2729 |
-
type='RandomBBoxTransform',
|
2730 |
-
shift_prob=0,
|
2731 |
-
rotate_factor=60,
|
2732 |
-
scale_factor=(0.75, 1.25)),
|
2733 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2734 |
-
dict(
|
2735 |
-
type='GenerateTarget',
|
2736 |
-
encoder=dict(
|
2737 |
-
type='MSRAHeatmap',
|
2738 |
-
input_size=(192, 256),
|
2739 |
-
heatmap_size=(48, 64),
|
2740 |
-
sigma=2)),
|
2741 |
-
dict(type='PackPoseInputs')
|
2742 |
-
]))
|
2743 |
-
val_dataloader = dict(
|
2744 |
-
batch_size=32,
|
2745 |
-
num_workers=6,
|
2746 |
-
persistent_workers=True,
|
2747 |
-
drop_last=False,
|
2748 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2749 |
-
dataset=dict(
|
2750 |
-
type='DeepFashion2Dataset',
|
2751 |
-
data_root='data/deepfashion2/',
|
2752 |
-
data_mode='topdown',
|
2753 |
-
ann_file='validation/deepfashion2_long_sleeved_shirt.json',
|
2754 |
-
data_prefix=dict(img='validation/image/'),
|
2755 |
-
test_mode=True,
|
2756 |
-
pipeline=[
|
2757 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2758 |
-
dict(type='GetBBoxCenterScale'),
|
2759 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2760 |
-
dict(type='PackPoseInputs')
|
2761 |
-
]))
|
2762 |
-
test_dataloader = dict(
|
2763 |
-
batch_size=32,
|
2764 |
-
num_workers=6,
|
2765 |
-
persistent_workers=True,
|
2766 |
-
drop_last=False,
|
2767 |
-
sampler=dict(type='DefaultSampler', shuffle=False),
|
2768 |
-
dataset=dict(
|
2769 |
-
type='DeepFashion2Dataset',
|
2770 |
-
data_root='data/deepfashion2/',
|
2771 |
-
data_mode='topdown',
|
2772 |
-
ann_file='validation/deepfashion2_long_sleeved_shirt.json',
|
2773 |
-
data_prefix=dict(img='validation/image/'),
|
2774 |
-
test_mode=True,
|
2775 |
-
pipeline=[
|
2776 |
-
dict(type='LoadImage', backend_args=dict(backend='local')),
|
2777 |
-
dict(type='GetBBoxCenterScale'),
|
2778 |
-
dict(type='TopdownAffine', input_size=(192, 256)),
|
2779 |
-
dict(type='PackPoseInputs')
|
2780 |
-
]))
|
2781 |
-
channel_cfg = dict(
|
2782 |
-
num_output_channels=294,
|
2783 |
-
dataset_joints=294,
|
2784 |
-
dataset_channel=[[
|
2785 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2786 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2787 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2788 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2789 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2790 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2791 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2792 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2793 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2794 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2795 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2796 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2797 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2798 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2799 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2800 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2801 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2802 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2803 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2804 |
-
290, 291, 292, 293
|
2805 |
-
]],
|
2806 |
-
inference_channel=[
|
2807 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
2808 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
2809 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
2810 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
2811 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
2812 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
2813 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
2814 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
2815 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
2816 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
2817 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
2818 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
2819 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
2820 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
2821 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
2822 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
2823 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
2824 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
2825 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
2826 |
-
290, 291, 292, 293
|
2827 |
-
])
|
2828 |
-
model = dict(
|
2829 |
-
type='TopdownPoseEstimator',
|
2830 |
-
data_preprocessor=dict(
|
2831 |
-
type='PoseDataPreprocessor',
|
2832 |
-
mean=[123.675, 116.28, 103.53],
|
2833 |
-
std=[58.395, 57.12, 57.375],
|
2834 |
-
bgr_to_rgb=True),
|
2835 |
-
backbone=dict(
|
2836 |
-
type='ResNet',
|
2837 |
-
depth=50,
|
2838 |
-
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
|
2839 |
-
head=dict(
|
2840 |
-
type='HeatmapHead',
|
2841 |
-
in_channels=2048,
|
2842 |
-
out_channels=294,
|
2843 |
-
loss=dict(type='KeypointMSELoss', use_target_weight=True),
|
2844 |
-
decoder=dict(
|
2845 |
-
type='MSRAHeatmap',
|
2846 |
-
input_size=(192, 256),
|
2847 |
-
heatmap_size=(48, 64),
|
2848 |
-
sigma=2)),
|
2849 |
-
test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
|
2850 |
-
val_evaluator = [
|
2851 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2852 |
-
dict(type='AUC'),
|
2853 |
-
dict(type='EPE')
|
2854 |
-
]
|
2855 |
-
test_evaluator = [
|
2856 |
-
dict(type='PCKAccuracy', thr=0.2),
|
2857 |
-
dict(type='AUC'),
|
2858 |
-
dict(type='EPE')
|
2859 |
-
]
|
2860 |
-
launcher = 'pytorch'
|
2861 |
-
work_dir = './work_dirs/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Yqcloud.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import random
|
4 |
-
from aiohttp import ClientSession
|
5 |
-
|
6 |
-
from ..typing import AsyncResult, Messages
|
7 |
-
from .base_provider import AsyncGeneratorProvider, format_prompt
|
8 |
-
|
9 |
-
|
10 |
-
class Yqcloud(AsyncGeneratorProvider):
|
11 |
-
url = "https://chat9.yqcloud.top/"
|
12 |
-
working = True
|
13 |
-
supports_gpt_35_turbo = True
|
14 |
-
|
15 |
-
@staticmethod
|
16 |
-
async def create_async_generator(
|
17 |
-
model: str,
|
18 |
-
messages: Messages,
|
19 |
-
proxy: str = None,
|
20 |
-
**kwargs,
|
21 |
-
) -> AsyncResult:
|
22 |
-
async with ClientSession(
|
23 |
-
headers=_create_header()
|
24 |
-
) as session:
|
25 |
-
payload = _create_payload(messages, **kwargs)
|
26 |
-
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
|
27 |
-
response.raise_for_status()
|
28 |
-
async for chunk in response.content.iter_any():
|
29 |
-
if chunk:
|
30 |
-
chunk = chunk.decode()
|
31 |
-
if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
|
32 |
-
raise RuntimeError("IP address is blocked by abuse detection.")
|
33 |
-
yield chunk
|
34 |
-
|
35 |
-
|
36 |
-
def _create_header():
|
37 |
-
return {
|
38 |
-
"accept" : "application/json, text/plain, */*",
|
39 |
-
"content-type" : "application/json",
|
40 |
-
"origin" : "https://chat9.yqcloud.top",
|
41 |
-
}
|
42 |
-
|
43 |
-
|
44 |
-
def _create_payload(
|
45 |
-
messages: Messages,
|
46 |
-
system_message: str = "",
|
47 |
-
user_id: int = None,
|
48 |
-
**kwargs
|
49 |
-
):
|
50 |
-
if not user_id:
|
51 |
-
user_id = random.randint(1690000544336, 2093025544336)
|
52 |
-
return {
|
53 |
-
"prompt": format_prompt(messages),
|
54 |
-
"network": True,
|
55 |
-
"system": system_message,
|
56 |
-
"withoutContext": False,
|
57 |
-
"stream": True,
|
58 |
-
"userId": f"#/chat/{user_id}"
|
59 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adapting/TrendFlow/mypages/charts.py
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
from typing import List
|
2 |
-
from bokeh.models import ColumnDataSource
|
3 |
-
from bokeh.plotting import figure
|
4 |
-
from bokeh.transform import dodge
|
5 |
-
import numpy as np
|
6 |
-
|
7 |
-
COLORS = [
|
8 |
-
'#FE2D01',
|
9 |
-
'#016CFE',
|
10 |
-
'#FEB101',
|
11 |
-
'#FE018B',
|
12 |
-
'#AAB7B8',
|
13 |
-
'#212F3D'
|
14 |
-
]
|
15 |
-
|
16 |
-
'''
|
17 |
-
clusters = ['Cluster 1', 'C 2', 'C 3', 'Plums', 'Grapes', 'Strawberries']
|
18 |
-
years = ['number of papers', 'number of keyphrases', ]
|
19 |
-
|
20 |
-
data = {'clusters': clusters,
|
21 |
-
f'{years[0]}': [2, 1, 4, 3, 2, 4],
|
22 |
-
f'{years[1]}': [5, 3, 3, 2, 4, 6],
|
23 |
-
}
|
24 |
-
|
25 |
-
source = ColumnDataSource(data=data)
|
26 |
-
|
27 |
-
p = figure(x_range=clusters, title="Fruit counts by year",
|
28 |
-
toolbar_location=None, tools="")
|
29 |
-
|
30 |
-
p.vbar(x=dodge('clusters', -0.25, range=p.x_range), top=f'{years[0]}', width=0.2, source=source,
|
31 |
-
color="#c9d9d3", legend_label="2015")
|
32 |
-
|
33 |
-
p.vbar(x=dodge('clusters', 0.0, range=p.x_range), top=f'{years[1]}', width=0.2, source=source,
|
34 |
-
color="#718dbf", legend_label="2016")
|
35 |
-
|
36 |
-
|
37 |
-
p.x_range.range_padding = 0.1
|
38 |
-
p.xgrid.grid_line_color = None
|
39 |
-
p.legend.location = "top_left"
|
40 |
-
p.legend.orientation = "horizontal"
|
41 |
-
'''
|
42 |
-
|
43 |
-
|
44 |
-
def build_bar_charts(x_range: List, y_names: List[str], y_data = List[List]):
|
45 |
-
valid_y = lambda x: len(x) == len(x_range)
|
46 |
-
if not (len(y_names) == len(y_data) and all(map(valid_y,y_data))):
|
47 |
-
raise RuntimeError('The data shapes are not aligned.')
|
48 |
-
|
49 |
-
|
50 |
-
if len(y_names) % 2 == 0:
|
51 |
-
offsets = [-0.125 - 0.25*(i-1) for i in range(len(y_names)//2,0,-1)]
|
52 |
-
offsets += [0.125 + 0.25*(i) for i in range(len(y_names)//2)]
|
53 |
-
else:
|
54 |
-
offsets = [-0.25 * i for i in range(len(y_names)//2,0,-1)]
|
55 |
-
offsets.append(0)
|
56 |
-
offsets += [0.25* (i+1) for i in range(len(y_names)//2)]
|
57 |
-
|
58 |
-
data = {
|
59 |
-
'x': x_range
|
60 |
-
}
|
61 |
-
for i,y in enumerate(y_data):
|
62 |
-
data[f'y{i}'] = y
|
63 |
-
source = ColumnDataSource(data)
|
64 |
-
p = figure(x_range=x_range,
|
65 |
-
tools = "box_zoom,save,reset",
|
66 |
-
height=500,
|
67 |
-
y_range=(0,np.max(y_data)+10)
|
68 |
-
)
|
69 |
-
|
70 |
-
for i,y in enumerate(y_data):
|
71 |
-
p.vbar(x=dodge('x', offsets[i], range=p.x_range), top=f'y{i}', width=0.2, source=source,
|
72 |
-
color=COLORS[i], legend_label=y_names[i])
|
73 |
-
|
74 |
-
p.x_range.range_padding = 0.1
|
75 |
-
p.xgrid.grid_line_color = None
|
76 |
-
p.legend.location = "top_left"
|
77 |
-
p.legend.orientation = "horizontal"
|
78 |
-
|
79 |
-
return p
|
80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/agents/tasksolving_agent/__init__.py
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
from .critic import CriticAgent
|
2 |
-
from .evaluator import EvaluatorAgent
|
3 |
-
from .executor import ExecutorAgent
|
4 |
-
from .manager import ManagerAgent
|
5 |
-
from .role_assigner import RoleAssignerAgent
|
6 |
-
from .solver import SolverAgent
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlanMars/QYL-AI-Space/modules/models/tokenization_moss.py
DELETED
@@ -1,368 +0,0 @@
|
|
1 |
-
"""Tokenization classes for Moss"""
|
2 |
-
|
3 |
-
import json
|
4 |
-
import os
|
5 |
-
import numpy as np
|
6 |
-
import regex as re
|
7 |
-
|
8 |
-
from functools import lru_cache
|
9 |
-
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
|
10 |
-
|
11 |
-
from transformers.utils import is_tf_available, is_torch_available, logging
|
12 |
-
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
|
13 |
-
|
14 |
-
|
15 |
-
if TYPE_CHECKING:
|
16 |
-
if is_torch_available():
|
17 |
-
import torch
|
18 |
-
if is_tf_available():
|
19 |
-
import tensorflow as tf
|
20 |
-
|
21 |
-
|
22 |
-
logger = logging.get_logger(__name__)
|
23 |
-
|
24 |
-
VOCAB_FILES_NAMES = {
|
25 |
-
"vocab_file": "vocab.json",
|
26 |
-
"merges_file": "merges.txt",
|
27 |
-
}
|
28 |
-
|
29 |
-
PRETRAINED_VOCAB_FILES_MAP = {
|
30 |
-
"vocab_file": {
|
31 |
-
"fnlp/moss-moon-003-base": "https://huggingface.co/fnlp/moss-moon-003-base/resolve/main/vocab.json",
|
32 |
-
"fnlp/moss-moon-003-sft": "https://huggingface.co/fnlp/moss-moon-003-sft/resolve/main/vocab.json",
|
33 |
-
"fnlp/moss-moon-003-sft-plugin": "https://huggingface.co/fnlp/moss-moon-003-sft-plugin/resolve/main/vocab.json",
|
34 |
-
},
|
35 |
-
"merges_file": {
|
36 |
-
"fnlp/moss-moon-003-base": "https://huggingface.co/fnlp/moss-moon-003-base/resolve/main/merges.txt",
|
37 |
-
"fnlp/moss-moon-003-sft": "https://huggingface.co/fnlp/moss-moon-003-sft/resolve/main/merges.txt",
|
38 |
-
"fnlp/moss-moon-003-sft-plugin": "https://huggingface.co/fnlp/moss-moon-003-sft-plugin/resolve/main/merges.txt",
|
39 |
-
},
|
40 |
-
}
|
41 |
-
|
42 |
-
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
|
43 |
-
"fnlp/moss-moon-003-base": 2048,
|
44 |
-
"fnlp/moss-moon-003-sft": 2048,
|
45 |
-
"fnlp/moss-moon-003-sft-plugin": 2048,
|
46 |
-
}
|
47 |
-
|
48 |
-
|
49 |
-
@lru_cache()
|
50 |
-
def bytes_to_unicode():
|
51 |
-
"""
|
52 |
-
Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
|
53 |
-
characters the bpe code barfs on.
|
54 |
-
|
55 |
-
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
|
56 |
-
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
|
57 |
-
decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
|
58 |
-
tables between utf-8 bytes and unicode strings.
|
59 |
-
"""
|
60 |
-
bs = (
|
61 |
-
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
|
62 |
-
)
|
63 |
-
cs = bs[:]
|
64 |
-
n = 0
|
65 |
-
for b in range(2**8):
|
66 |
-
if b not in bs:
|
67 |
-
bs.append(b)
|
68 |
-
cs.append(2**8 + n)
|
69 |
-
n += 1
|
70 |
-
cs = [chr(n) for n in cs]
|
71 |
-
return dict(zip(bs, cs))
|
72 |
-
|
73 |
-
|
74 |
-
def get_pairs(word):
|
75 |
-
"""
|
76 |
-
Return set of symbol pairs in a word.
|
77 |
-
|
78 |
-
Word is represented as tuple of symbols (symbols being variable-length strings).
|
79 |
-
"""
|
80 |
-
pairs = set()
|
81 |
-
prev_char = word[0]
|
82 |
-
for char in word[1:]:
|
83 |
-
pairs.add((prev_char, char))
|
84 |
-
prev_char = char
|
85 |
-
return pairs
|
86 |
-
|
87 |
-
|
88 |
-
class MossTokenizer(PreTrainedTokenizer):
|
89 |
-
"""
|
90 |
-
Construct a Moss tokenizer. Based on byte-level Byte-Pair-Encoding.
|
91 |
-
|
92 |
-
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
|
93 |
-
be encoded differently whether it is at the beginning of the sentence (without space) or not:
|
94 |
-
|
95 |
-
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
|
96 |
-
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
|
97 |
-
|
98 |
-
<Tip>
|
99 |
-
|
100 |
-
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
|
101 |
-
|
102 |
-
</Tip>
|
103 |
-
|
104 |
-
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
|
105 |
-
this superclass for more information regarding those methods.
|
106 |
-
|
107 |
-
Args:
|
108 |
-
vocab_file (`str`):
|
109 |
-
Path to the vocabulary file.
|
110 |
-
merges_file (`str`):
|
111 |
-
Path to the merges file.
|
112 |
-
errors (`str`, *optional*, defaults to `"replace"`):
|
113 |
-
Paradigm to follow when decoding bytes to UTF-8. See
|
114 |
-
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
|
115 |
-
unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
|
116 |
-
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
117 |
-
token instead.
|
118 |
-
bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
|
119 |
-
The beginning of sequence token.
|
120 |
-
eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
|
121 |
-
The end of sequence token.
|
122 |
-
add_prefix_space (`bool`, *optional*, defaults to `False`):
|
123 |
-
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
|
124 |
-
other word. (Moss tokenizer detect beginning of words by the preceding space).
|
125 |
-
"""
|
126 |
-
|
127 |
-
vocab_files_names = VOCAB_FILES_NAMES
|
128 |
-
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
|
129 |
-
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
|
130 |
-
model_input_names = ["input_ids", "attention_mask"]
|
131 |
-
|
132 |
-
def __init__(
|
133 |
-
self,
|
134 |
-
vocab_file,
|
135 |
-
merges_file,
|
136 |
-
errors="replace",
|
137 |
-
unk_token="<|endoftext|>",
|
138 |
-
bos_token="<|endoftext|>",
|
139 |
-
eos_token="<eom>",
|
140 |
-
pad_token=None,
|
141 |
-
add_prefix_space=False,
|
142 |
-
add_bos_token=False,
|
143 |
-
**kwargs,
|
144 |
-
):
|
145 |
-
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
|
146 |
-
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
|
147 |
-
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
|
148 |
-
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
|
149 |
-
super().__init__(
|
150 |
-
errors=errors,
|
151 |
-
unk_token=unk_token,
|
152 |
-
bos_token=bos_token,
|
153 |
-
eos_token=eos_token,
|
154 |
-
pad_token=pad_token,
|
155 |
-
add_prefix_space=add_prefix_space,
|
156 |
-
add_bos_token=add_bos_token,
|
157 |
-
**kwargs,
|
158 |
-
)
|
159 |
-
self.add_bos_token = add_bos_token
|
160 |
-
|
161 |
-
with open(vocab_file, encoding="utf-8") as vocab_handle:
|
162 |
-
self.encoder = json.load(vocab_handle)
|
163 |
-
self.decoder = {v: k for k, v in self.encoder.items()}
|
164 |
-
self.errors = errors # how to handle errors in decoding
|
165 |
-
self.byte_encoder = bytes_to_unicode()
|
166 |
-
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
|
167 |
-
with open(merges_file, encoding="utf-8") as merges_handle:
|
168 |
-
bpe_merges = merges_handle.read().split("\n")[1:-1]
|
169 |
-
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
|
170 |
-
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
|
171 |
-
self.cache = {}
|
172 |
-
self.add_prefix_space = add_prefix_space
|
173 |
-
|
174 |
-
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
|
175 |
-
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
|
176 |
-
|
177 |
-
@property
|
178 |
-
def vocab_size(self):
|
179 |
-
return len(self.encoder)
|
180 |
-
|
181 |
-
def get_vocab(self):
|
182 |
-
return dict(self.encoder, **self.added_tokens_encoder)
|
183 |
-
|
184 |
-
def bpe(self, token):
|
185 |
-
if token in self.cache:
|
186 |
-
return self.cache[token]
|
187 |
-
word = tuple(token)
|
188 |
-
pairs = get_pairs(word)
|
189 |
-
|
190 |
-
if not pairs:
|
191 |
-
return token
|
192 |
-
|
193 |
-
while True:
|
194 |
-
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
|
195 |
-
if bigram not in self.bpe_ranks:
|
196 |
-
break
|
197 |
-
first, second = bigram
|
198 |
-
new_word = []
|
199 |
-
i = 0
|
200 |
-
while i < len(word):
|
201 |
-
try:
|
202 |
-
j = word.index(first, i)
|
203 |
-
except ValueError:
|
204 |
-
new_word.extend(word[i:])
|
205 |
-
break
|
206 |
-
else:
|
207 |
-
new_word.extend(word[i:j])
|
208 |
-
i = j
|
209 |
-
|
210 |
-
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
|
211 |
-
new_word.append(first + second)
|
212 |
-
i += 2
|
213 |
-
else:
|
214 |
-
new_word.append(word[i])
|
215 |
-
i += 1
|
216 |
-
new_word = tuple(new_word)
|
217 |
-
word = new_word
|
218 |
-
if len(word) == 1:
|
219 |
-
break
|
220 |
-
else:
|
221 |
-
pairs = get_pairs(word)
|
222 |
-
word = " ".join(word)
|
223 |
-
self.cache[token] = word
|
224 |
-
return word
|
225 |
-
|
226 |
-
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
|
227 |
-
if self.add_bos_token:
|
228 |
-
bos_token_ids = [self.bos_token_id]
|
229 |
-
else:
|
230 |
-
bos_token_ids = []
|
231 |
-
|
232 |
-
output = bos_token_ids + token_ids_0
|
233 |
-
|
234 |
-
if token_ids_1 is None:
|
235 |
-
return output
|
236 |
-
|
237 |
-
return output + bos_token_ids + token_ids_1
|
238 |
-
|
239 |
-
def _tokenize(self, text):
|
240 |
-
"""Tokenize a string."""
|
241 |
-
bpe_tokens = []
|
242 |
-
for token in re.findall(self.pat, text):
|
243 |
-
token = "".join(
|
244 |
-
self.byte_encoder[b] for b in token.encode("utf-8")
|
245 |
-
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
|
246 |
-
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
|
247 |
-
return bpe_tokens
|
248 |
-
|
249 |
-
def _convert_token_to_id(self, token):
|
250 |
-
"""Converts a token (str) in an id using the vocab."""
|
251 |
-
return self.encoder.get(token, self.encoder.get(self.unk_token))
|
252 |
-
|
253 |
-
def _convert_id_to_token(self, index):
|
254 |
-
"""Converts an index (integer) in a token (str) using the vocab."""
|
255 |
-
return self.decoder.get(index)
|
256 |
-
|
257 |
-
def convert_tokens_to_string(self, tokens):
|
258 |
-
"""Converts a sequence of tokens (string) in a single string."""
|
259 |
-
text = "".join(tokens)
|
260 |
-
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
|
261 |
-
return text
|
262 |
-
|
263 |
-
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
264 |
-
if not os.path.isdir(save_directory):
|
265 |
-
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
266 |
-
return
|
267 |
-
vocab_file = os.path.join(
|
268 |
-
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
269 |
-
)
|
270 |
-
merge_file = os.path.join(
|
271 |
-
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
|
272 |
-
)
|
273 |
-
|
274 |
-
with open(vocab_file, "w", encoding="utf-8") as f:
|
275 |
-
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
|
276 |
-
|
277 |
-
index = 0
|
278 |
-
with open(merge_file, "w", encoding="utf-8") as writer:
|
279 |
-
writer.write("#version: 0.2\n")
|
280 |
-
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
|
281 |
-
if index != token_index:
|
282 |
-
logger.warning(
|
283 |
-
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
|
284 |
-
" Please check that the tokenizer is not corrupted!"
|
285 |
-
)
|
286 |
-
index = token_index
|
287 |
-
writer.write(" ".join(bpe_tokens) + "\n")
|
288 |
-
index += 1
|
289 |
-
|
290 |
-
return vocab_file, merge_file
|
291 |
-
|
292 |
-
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
|
293 |
-
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
|
294 |
-
if is_split_into_words or add_prefix_space:
|
295 |
-
text = " " + text
|
296 |
-
return (text, kwargs)
|
297 |
-
|
298 |
-
def decode(
|
299 |
-
self,
|
300 |
-
token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
|
301 |
-
skip_special_tokens: bool = False,
|
302 |
-
clean_up_tokenization_spaces: bool = None,
|
303 |
-
truncate_before_pattern: Optional[List[str]] = None,
|
304 |
-
**kwargs,
|
305 |
-
) -> str:
|
306 |
-
"""
|
307 |
-
Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
|
308 |
-
tokens and clean up tokenization spaces.
|
309 |
-
|
310 |
-
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
|
311 |
-
|
312 |
-
Args:
|
313 |
-
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
|
314 |
-
List of tokenized input ids. Can be obtained using the `__call__` method.
|
315 |
-
skip_special_tokens (`bool`, *optional*, defaults to `False`):
|
316 |
-
Whether or not to remove special tokens in the decoding.
|
317 |
-
clean_up_tokenization_spaces (`bool`, *optional*):
|
318 |
-
Whether or not to clean up the tokenization spaces. If `None`, will default to
|
319 |
-
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
|
320 |
-
truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
|
321 |
-
A list of regular expression strings that will be used to truncate the returned string. This can be
|
322 |
-
used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
|
323 |
-
of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
|
324 |
-
kwargs (additional keyword arguments, *optional*):
|
325 |
-
Will be passed to the underlying model specific decode method.
|
326 |
-
|
327 |
-
Returns:
|
328 |
-
`str`: The decoded sentence.
|
329 |
-
"""
|
330 |
-
decoded_text = super()._decode(
|
331 |
-
token_ids=token_ids,
|
332 |
-
skip_special_tokens=skip_special_tokens,
|
333 |
-
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
|
334 |
-
**kwargs,
|
335 |
-
)
|
336 |
-
|
337 |
-
if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
|
338 |
-
decoded_text = self.truncate(decoded_text, truncate_before_pattern)
|
339 |
-
|
340 |
-
return decoded_text
|
341 |
-
|
342 |
-
def truncate(self, completion, truncate_before_pattern):
|
343 |
-
def find_re(string, pattern, start_pos):
|
344 |
-
m = pattern.search(string, start_pos)
|
345 |
-
return m.start() if m else -1
|
346 |
-
|
347 |
-
terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
|
348 |
-
|
349 |
-
prints = list(re.finditer("^print", completion, re.MULTILINE))
|
350 |
-
|
351 |
-
if len(prints) > 1:
|
352 |
-
completion = completion[: prints[1].start()]
|
353 |
-
|
354 |
-
defs = list(re.finditer("^def", completion, re.MULTILINE))
|
355 |
-
|
356 |
-
if len(defs) > 1:
|
357 |
-
completion = completion[: defs[1].start()]
|
358 |
-
|
359 |
-
start_pos = 0
|
360 |
-
|
361 |
-
terminals_pos = [
|
362 |
-
pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
|
363 |
-
]
|
364 |
-
|
365 |
-
if len(terminals_pos) > 0:
|
366 |
-
return completion[: min(terminals_pos)]
|
367 |
-
else:
|
368 |
-
return completion
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexWang/lama/saicinpainting/training/visualizers/__init__.py
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
|
3 |
-
from saicinpainting.training.visualizers.directory import DirectoryVisualizer
|
4 |
-
from saicinpainting.training.visualizers.noop import NoopVisualizer
|
5 |
-
|
6 |
-
|
7 |
-
def make_visualizer(kind, **kwargs):
|
8 |
-
logging.info(f'Make visualizer {kind}')
|
9 |
-
|
10 |
-
if kind == 'directory':
|
11 |
-
return DirectoryVisualizer(**kwargs)
|
12 |
-
if kind == 'noop':
|
13 |
-
return NoopVisualizer()
|
14 |
-
|
15 |
-
raise ValueError(f'Unknown visualizer kind {kind}')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexWelcing/MusicLM/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: MusicLM
|
3 |
-
emoji: 🚀
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: pink
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.17.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
duplicated_from: Gertie01/MusicLM
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aloento/9Nine-VITS/to_wave.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
import struct
|
2 |
-
import sys
|
3 |
-
|
4 |
-
|
5 |
-
class WAVE_FORMAT:
|
6 |
-
PCM = 0x0001
|
7 |
-
IEEE_FLOAT = 0x0003
|
8 |
-
|
9 |
-
|
10 |
-
def write(filename, rate, data):
|
11 |
-
if hasattr(filename, 'write'):
|
12 |
-
fid = filename
|
13 |
-
else:
|
14 |
-
fid = open(filename, 'wb')
|
15 |
-
|
16 |
-
fs = rate
|
17 |
-
|
18 |
-
try:
|
19 |
-
dkind = data.dtype.kind
|
20 |
-
if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
|
21 |
-
data.dtype.itemsize == 1)):
|
22 |
-
raise ValueError("Unsupported data type '%s'" % data.dtype)
|
23 |
-
|
24 |
-
header_data = b''
|
25 |
-
|
26 |
-
header_data += b'RIFF'
|
27 |
-
header_data += b'\x00\x00\x00\x00'
|
28 |
-
header_data += b'WAVE'
|
29 |
-
|
30 |
-
# fmt chunk
|
31 |
-
header_data += b'fmt '
|
32 |
-
if dkind == 'f':
|
33 |
-
format_tag = WAVE_FORMAT.IEEE_FLOAT
|
34 |
-
else:
|
35 |
-
format_tag = WAVE_FORMAT.PCM
|
36 |
-
if data.ndim == 1:
|
37 |
-
channels = 1
|
38 |
-
else:
|
39 |
-
channels = data.shape[1]
|
40 |
-
bit_depth = data.dtype.itemsize * 8
|
41 |
-
bytes_per_second = fs * (bit_depth // 8) * channels
|
42 |
-
block_align = channels * (bit_depth // 8)
|
43 |
-
|
44 |
-
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
|
45 |
-
bytes_per_second, block_align, bit_depth)
|
46 |
-
if not (dkind == 'i' or dkind == 'u'):
|
47 |
-
# add cbSize field for non-PCM files
|
48 |
-
fmt_chunk_data += b'\x00\x00'
|
49 |
-
|
50 |
-
header_data += struct.pack('<I', len(fmt_chunk_data))
|
51 |
-
header_data += fmt_chunk_data
|
52 |
-
|
53 |
-
# fact chunk (non-PCM files)
|
54 |
-
if not (dkind == 'i' or dkind == 'u'):
|
55 |
-
header_data += b'fact'
|
56 |
-
header_data += struct.pack('<II', 4, data.shape[0])
|
57 |
-
|
58 |
-
# check data size (needs to be immediately before the data chunk)
|
59 |
-
if ((len(header_data) - 4 - 4) + (4 + 4 + data.nbytes)) > 0xFFFFFFFF:
|
60 |
-
raise ValueError("Data exceeds wave file size limit")
|
61 |
-
|
62 |
-
fid.write(header_data)
|
63 |
-
|
64 |
-
# data chunk
|
65 |
-
fid.write(b'data')
|
66 |
-
fid.write(struct.pack('<I', data.nbytes))
|
67 |
-
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
|
68 |
-
sys.byteorder == 'big'):
|
69 |
-
data = data.byteswap()
|
70 |
-
fid.write(data.ravel().view('b').data)
|
71 |
-
|
72 |
-
# Determine file size and place it in correct
|
73 |
-
# position at start of the file.
|
74 |
-
size = fid.tell()
|
75 |
-
fid.seek(4)
|
76 |
-
fid.write(struct.pack('<I', size - 8))
|
77 |
-
|
78 |
-
finally:
|
79 |
-
if not hasattr(filename, 'write'):
|
80 |
-
fid.close()
|
81 |
-
else:
|
82 |
-
fid.seek(0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alpaca233/ChatPDF-GUI/gpt_reader/paper.py
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
from PyPDF2 import PdfReader
|
2 |
-
|
3 |
-
class Paper(object):
|
4 |
-
|
5 |
-
def __init__(self, pdf_obj: PdfReader) -> None:
|
6 |
-
self._pdf_obj = pdf_obj
|
7 |
-
self._paper_meta = self._pdf_obj.metadata
|
8 |
-
|
9 |
-
def iter_pages(self, iter_text_len: int = 3000):
|
10 |
-
page_idx = 0
|
11 |
-
for page in self._pdf_obj.pages:
|
12 |
-
txt = page.extract_text()
|
13 |
-
for i in range((len(txt) // iter_text_len) + 1):
|
14 |
-
yield page_idx, i, txt[i * iter_text_len:(i + 1) * iter_text_len]
|
15 |
-
page_idx += 1
|
16 |
-
|
17 |
-
|
18 |
-
if __name__ == '__main__':
|
19 |
-
reader = PdfReader('../alexnet.pdf')
|
20 |
-
paper = Paper(reader)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py
DELETED
@@ -1,54 +0,0 @@
|
|
1 |
-
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
|
2 |
-
|
3 |
-
model = dict(
|
4 |
-
pretrained='open-mmlab://detectron2/resnet50_caffe',
|
5 |
-
backbone=dict(
|
6 |
-
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
|
7 |
-
stage_with_dcn=(False, True, True, True)),
|
8 |
-
bbox_head=dict(
|
9 |
-
norm_on_bbox=True,
|
10 |
-
centerness_on_reg=True,
|
11 |
-
dcn_on_last_conv=True,
|
12 |
-
center_sampling=True,
|
13 |
-
conv_bias=True,
|
14 |
-
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
|
15 |
-
# training and testing settings
|
16 |
-
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
|
17 |
-
|
18 |
-
# dataset settings
|
19 |
-
img_norm_cfg = dict(
|
20 |
-
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
|
21 |
-
train_pipeline = [
|
22 |
-
dict(type='LoadImageFromFile'),
|
23 |
-
dict(type='LoadAnnotations', with_bbox=True),
|
24 |
-
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
|
25 |
-
dict(type='RandomFlip', flip_ratio=0.5),
|
26 |
-
dict(type='Normalize', **img_norm_cfg),
|
27 |
-
dict(type='Pad', size_divisor=32),
|
28 |
-
dict(type='DefaultFormatBundle'),
|
29 |
-
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
|
30 |
-
]
|
31 |
-
test_pipeline = [
|
32 |
-
dict(type='LoadImageFromFile'),
|
33 |
-
dict(
|
34 |
-
type='MultiScaleFlipAug',
|
35 |
-
img_scale=(1333, 800),
|
36 |
-
flip=False,
|
37 |
-
transforms=[
|
38 |
-
dict(type='Resize', keep_ratio=True),
|
39 |
-
dict(type='RandomFlip'),
|
40 |
-
dict(type='Normalize', **img_norm_cfg),
|
41 |
-
dict(type='Pad', size_divisor=32),
|
42 |
-
dict(type='ImageToTensor', keys=['img']),
|
43 |
-
dict(type='Collect', keys=['img']),
|
44 |
-
])
|
45 |
-
]
|
46 |
-
data = dict(
|
47 |
-
samples_per_gpu=2,
|
48 |
-
workers_per_gpu=2,
|
49 |
-
train=dict(pipeline=train_pipeline),
|
50 |
-
val=dict(pipeline=test_pipeline),
|
51 |
-
test=dict(pipeline=test_pipeline))
|
52 |
-
optimizer_config = dict(_delete_=True, grad_clip=None)
|
53 |
-
|
54 |
-
lr_config = dict(warmup='linear')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/mask_rcnn_r50_fpn.py',
|
3 |
-
'../_base_/datasets/coco_instance.py',
|
4 |
-
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
|
5 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/base_dense_head.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
from abc import ABCMeta, abstractmethod
|
2 |
-
|
3 |
-
import torch.nn as nn
|
4 |
-
|
5 |
-
|
6 |
-
class BaseDenseHead(nn.Module, metaclass=ABCMeta):
|
7 |
-
"""Base class for DenseHeads."""
|
8 |
-
|
9 |
-
def __init__(self):
|
10 |
-
super(BaseDenseHead, self).__init__()
|
11 |
-
|
12 |
-
@abstractmethod
|
13 |
-
def loss(self, **kwargs):
|
14 |
-
"""Compute losses of the head."""
|
15 |
-
pass
|
16 |
-
|
17 |
-
@abstractmethod
|
18 |
-
def get_bboxes(self, **kwargs):
|
19 |
-
"""Transform network output for a batch into bbox predictions."""
|
20 |
-
pass
|
21 |
-
|
22 |
-
def forward_train(self,
|
23 |
-
x,
|
24 |
-
img_metas,
|
25 |
-
gt_bboxes,
|
26 |
-
gt_labels=None,
|
27 |
-
gt_bboxes_ignore=None,
|
28 |
-
proposal_cfg=None,
|
29 |
-
**kwargs):
|
30 |
-
"""
|
31 |
-
Args:
|
32 |
-
x (list[Tensor]): Features from FPN.
|
33 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
34 |
-
image size, scaling factor, etc.
|
35 |
-
gt_bboxes (Tensor): Ground truth bboxes of the image,
|
36 |
-
shape (num_gts, 4).
|
37 |
-
gt_labels (Tensor): Ground truth labels of each box,
|
38 |
-
shape (num_gts,).
|
39 |
-
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
|
40 |
-
ignored, shape (num_ignored_gts, 4).
|
41 |
-
proposal_cfg (mmcv.Config): Test / postprocessing configuration,
|
42 |
-
if None, test_cfg would be used
|
43 |
-
|
44 |
-
Returns:
|
45 |
-
tuple:
|
46 |
-
losses: (dict[str, Tensor]): A dictionary of loss components.
|
47 |
-
proposal_list (list[Tensor]): Proposals of each image.
|
48 |
-
"""
|
49 |
-
outs = self(x)
|
50 |
-
if gt_labels is None:
|
51 |
-
loss_inputs = outs + (gt_bboxes, img_metas)
|
52 |
-
else:
|
53 |
-
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
|
54 |
-
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
|
55 |
-
if proposal_cfg is None:
|
56 |
-
return losses
|
57 |
-
else:
|
58 |
-
proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
|
59 |
-
return losses, proposal_list
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_480x480_80k_pascal_context_59.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './deeplabv3_r50-d8_480x480_80k_pascal_context_59.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/parameters.py
DELETED
@@ -1,369 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
This module provides a singleton class `Parameters` that is used to manage all hyperparameters for the embedding application.
|
3 |
-
It expects a JSON file in `extensions/superboogav2/config.json`.
|
4 |
-
|
5 |
-
Each element in the JSON must have a `default` value which will be used for the current run. Elements can have `categories`.
|
6 |
-
These categories define the range in which the optimizer will search. If the element is tagged with `"should_optimize": false`,
|
7 |
-
then the optimizer will only ever use the default value.
|
8 |
-
"""
|
9 |
-
from pathlib import Path
|
10 |
-
|
11 |
-
import json
|
12 |
-
|
13 |
-
from modules.logging_colors import logger
|
14 |
-
|
15 |
-
|
16 |
-
NUM_TO_WORD_METHOD = 'Number to Word'
|
17 |
-
NUM_TO_CHAR_METHOD = 'Number to Char'
|
18 |
-
NUM_TO_CHAR_LONG_METHOD = 'Number to Multi-Char'
|
19 |
-
|
20 |
-
|
21 |
-
DIST_MIN_STRATEGY = 'Min of Two'
|
22 |
-
DIST_HARMONIC_STRATEGY = 'Harmonic Mean'
|
23 |
-
DIST_GEOMETRIC_STRATEGY = 'Geometric Mean'
|
24 |
-
DIST_ARITHMETIC_STRATEGY = 'Arithmetic Mean'
|
25 |
-
|
26 |
-
|
27 |
-
PREPEND_TO_LAST = 'Prepend to Last Message'
|
28 |
-
APPEND_TO_LAST = 'Append to Last Message'
|
29 |
-
HIJACK_LAST_IN_CONTEXT = 'Hijack Last Message in Context ⚠️ WIP ⚠️ (Works Partially)'
|
30 |
-
|
31 |
-
|
32 |
-
SORT_DISTANCE = 'distance'
|
33 |
-
SORT_ID = 'id'
|
34 |
-
|
35 |
-
|
36 |
-
class Parameters:
|
37 |
-
_instance = None
|
38 |
-
|
39 |
-
variable_mapping = {
|
40 |
-
'NUM_TO_WORD_METHOD': NUM_TO_WORD_METHOD,
|
41 |
-
'NUM_TO_CHAR_METHOD': NUM_TO_CHAR_METHOD,
|
42 |
-
'NUM_TO_CHAR_LONG_METHOD': NUM_TO_CHAR_LONG_METHOD,
|
43 |
-
'DIST_MIN_STRATEGY': DIST_MIN_STRATEGY,
|
44 |
-
'DIST_HARMONIC_STRATEGY': DIST_HARMONIC_STRATEGY,
|
45 |
-
'DIST_GEOMETRIC_STRATEGY': DIST_GEOMETRIC_STRATEGY,
|
46 |
-
'DIST_ARITHMETIC_STRATEGY': DIST_ARITHMETIC_STRATEGY,
|
47 |
-
'PREPEND_TO_LAST': PREPEND_TO_LAST,
|
48 |
-
'APPEND_TO_LAST': APPEND_TO_LAST,
|
49 |
-
'HIJACK_LAST_IN_CONTEXT': HIJACK_LAST_IN_CONTEXT,
|
50 |
-
}
|
51 |
-
|
52 |
-
@staticmethod
|
53 |
-
def getInstance():
|
54 |
-
if Parameters._instance is None:
|
55 |
-
Parameters()
|
56 |
-
return Parameters._instance
|
57 |
-
|
58 |
-
def __init__(self):
|
59 |
-
if Parameters._instance is not None:
|
60 |
-
raise Exception("This class is a singleton!")
|
61 |
-
else:
|
62 |
-
Parameters._instance = self
|
63 |
-
self.hyperparameters = self._load_from_json(Path("extensions/superboogav2/config.json"))
|
64 |
-
|
65 |
-
def _load_from_json(self, file_path):
|
66 |
-
logger.debug('Loading hyperparameters...')
|
67 |
-
|
68 |
-
with open(file_path, 'r') as file:
|
69 |
-
data = json.load(file)
|
70 |
-
|
71 |
-
# Replace variable names in the dict and create Categorical objects
|
72 |
-
for key in data:
|
73 |
-
if "default" in data[key] and data[key]["default"] in self.variable_mapping:
|
74 |
-
data[key]["default"] = self.variable_mapping[data[key]["default"]]
|
75 |
-
if "categories" in data[key]:
|
76 |
-
data[key]["categories"] = [self.variable_mapping.get(cat, cat) for cat in data[key]["categories"]]
|
77 |
-
|
78 |
-
return data
|
79 |
-
|
80 |
-
|
81 |
-
def should_to_lower() -> bool:
|
82 |
-
return bool(Parameters.getInstance().hyperparameters['to_lower']['default'])
|
83 |
-
|
84 |
-
|
85 |
-
def get_num_conversion_strategy() -> str:
|
86 |
-
return Parameters.getInstance().hyperparameters['num_conversion']['default']
|
87 |
-
|
88 |
-
|
89 |
-
def should_merge_spaces() -> bool:
|
90 |
-
return bool(Parameters.getInstance().hyperparameters['merge_spaces']['default'])
|
91 |
-
|
92 |
-
|
93 |
-
def should_strip() -> bool:
|
94 |
-
return bool(Parameters.getInstance().hyperparameters['strip']['default'])
|
95 |
-
|
96 |
-
|
97 |
-
def should_remove_punctuation() -> bool:
|
98 |
-
return bool(Parameters.getInstance().hyperparameters['remove_punctuation']['default'])
|
99 |
-
|
100 |
-
|
101 |
-
def should_remove_stopwords() -> bool:
|
102 |
-
return bool(Parameters.getInstance().hyperparameters['remove_stopwords']['default'])
|
103 |
-
|
104 |
-
|
105 |
-
def should_remove_specific_pos() -> bool:
|
106 |
-
return bool(Parameters.getInstance().hyperparameters['remove_specific_pos']['default'])
|
107 |
-
|
108 |
-
|
109 |
-
def should_lemmatize() -> bool:
|
110 |
-
return bool(Parameters.getInstance().hyperparameters['lemmatize']['default'])
|
111 |
-
|
112 |
-
|
113 |
-
def get_min_num_sentences() -> int:
|
114 |
-
return int(Parameters.getInstance().hyperparameters['min_num_sent']['default'])
|
115 |
-
|
116 |
-
|
117 |
-
def get_delta_start() -> int:
|
118 |
-
return int(Parameters.getInstance().hyperparameters['delta_start']['default'])
|
119 |
-
|
120 |
-
|
121 |
-
def set_to_lower(value: bool):
|
122 |
-
Parameters.getInstance().hyperparameters['to_lower']['default'] = value
|
123 |
-
|
124 |
-
|
125 |
-
def set_num_conversion_strategy(value: str):
|
126 |
-
Parameters.getInstance().hyperparameters['num_conversion']['default'] = value
|
127 |
-
|
128 |
-
|
129 |
-
def set_merge_spaces(value: bool):
|
130 |
-
Parameters.getInstance().hyperparameters['merge_spaces']['default'] = value
|
131 |
-
|
132 |
-
|
133 |
-
def set_strip(value: bool):
|
134 |
-
Parameters.getInstance().hyperparameters['strip']['default'] = value
|
135 |
-
|
136 |
-
|
137 |
-
def set_remove_punctuation(value: bool):
|
138 |
-
Parameters.getInstance().hyperparameters['remove_punctuation']['default'] = value
|
139 |
-
|
140 |
-
|
141 |
-
def set_remove_stopwords(value: bool):
|
142 |
-
Parameters.getInstance().hyperparameters['remove_stopwords']['default'] = value
|
143 |
-
|
144 |
-
|
145 |
-
def set_remove_specific_pos(value: bool):
|
146 |
-
Parameters.getInstance().hyperparameters['remove_specific_pos']['default'] = value
|
147 |
-
|
148 |
-
|
149 |
-
def set_lemmatize(value: bool):
|
150 |
-
Parameters.getInstance().hyperparameters['lemmatize']['default'] = value
|
151 |
-
|
152 |
-
|
153 |
-
def set_min_num_sentences(value: int):
|
154 |
-
Parameters.getInstance().hyperparameters['min_num_sent']['default'] = value
|
155 |
-
|
156 |
-
|
157 |
-
def set_delta_start(value: int):
|
158 |
-
Parameters.getInstance().hyperparameters['delta_start']['default'] = value
|
159 |
-
|
160 |
-
|
161 |
-
def get_chunk_len() -> str:
|
162 |
-
lens = []
|
163 |
-
mask = Parameters.getInstance().hyperparameters['chunk_len_mask']['default']
|
164 |
-
|
165 |
-
lens.append(Parameters.getInstance().hyperparameters['chunk_len1']['default'] if mask & (1 << 0) else None)
|
166 |
-
lens.append(Parameters.getInstance().hyperparameters['chunk_len2']['default'] if mask & (1 << 1) else None)
|
167 |
-
lens.append(Parameters.getInstance().hyperparameters['chunk_len3']['default'] if mask & (1 << 2) else None)
|
168 |
-
lens.append(Parameters.getInstance().hyperparameters['chunk_len4']['default'] if mask & (1 << 3) else None)
|
169 |
-
|
170 |
-
return ','.join([str(len) for len in lens if len])
|
171 |
-
|
172 |
-
|
173 |
-
def set_chunk_len(val: str):
|
174 |
-
chunk_lens = sorted([int(len.strip()) for len in val.split(',')])
|
175 |
-
|
176 |
-
# Reset the mask to zero
|
177 |
-
Parameters.getInstance().hyperparameters['chunk_len_mask']['default'] = 0
|
178 |
-
|
179 |
-
if len(chunk_lens) > 0:
|
180 |
-
Parameters.getInstance().hyperparameters['chunk_len1']['default'] = chunk_lens[0]
|
181 |
-
Parameters.getInstance().hyperparameters['chunk_len_mask']['default'] |= (1 << 0)
|
182 |
-
if len(chunk_lens) > 1:
|
183 |
-
Parameters.getInstance().hyperparameters['chunk_len2']['default'] = chunk_lens[1]
|
184 |
-
Parameters.getInstance().hyperparameters['chunk_len_mask']['default'] |= (1 << 1)
|
185 |
-
if len(chunk_lens) > 2:
|
186 |
-
Parameters.getInstance().hyperparameters['chunk_len3']['default'] = chunk_lens[2]
|
187 |
-
Parameters.getInstance().hyperparameters['chunk_len_mask']['default'] |= (1 << 2)
|
188 |
-
if len(chunk_lens) > 3:
|
189 |
-
Parameters.getInstance().hyperparameters['chunk_len4']['default'] = chunk_lens[3]
|
190 |
-
Parameters.getInstance().hyperparameters['chunk_len_mask']['default'] |= (1 << 3)
|
191 |
-
|
192 |
-
if len(chunk_lens) > 4:
|
193 |
-
logger.warning(f'Only up to four chunk lengths are supported. Skipping {chunk_lens[4:]}')
|
194 |
-
|
195 |
-
|
196 |
-
def get_context_len() -> str:
|
197 |
-
context_len = str(Parameters.getInstance().hyperparameters['context_len_left']['default']) + ',' + str(Parameters.getInstance().hyperparameters['context_len_right']['default'])
|
198 |
-
return context_len
|
199 |
-
|
200 |
-
|
201 |
-
def set_context_len(val: str):
|
202 |
-
context_lens = [int(len.strip()) for len in val.split(',') if len.isdigit()]
|
203 |
-
if len(context_lens) == 1:
|
204 |
-
Parameters.getInstance().hyperparameters['context_len_left']['default'] = Parameters.getInstance().hyperparameters['context_len_right']['default'] = context_lens[0]
|
205 |
-
elif len(context_lens) == 2:
|
206 |
-
Parameters.getInstance().hyperparameters['context_len_left']['default'] = context_lens[0]
|
207 |
-
Parameters.getInstance().hyperparameters['context_len_right']['default'] = context_lens[1]
|
208 |
-
else:
|
209 |
-
logger.warning(f'Incorrect context length received {val}. Skipping.')
|
210 |
-
|
211 |
-
|
212 |
-
def get_new_dist_strategy() -> str:
|
213 |
-
return Parameters.getInstance().hyperparameters['new_dist_strategy']['default']
|
214 |
-
|
215 |
-
|
216 |
-
def get_chunk_count() -> int:
|
217 |
-
return int(Parameters.getInstance().hyperparameters['chunk_count']['default'])
|
218 |
-
|
219 |
-
|
220 |
-
def get_min_num_length() -> int:
|
221 |
-
return int(Parameters.getInstance().hyperparameters['min_num_length']['default'])
|
222 |
-
|
223 |
-
|
224 |
-
def get_significant_level() -> float:
|
225 |
-
return float(Parameters.getInstance().hyperparameters['significant_level']['default'])
|
226 |
-
|
227 |
-
|
228 |
-
def get_time_steepness() -> float:
|
229 |
-
return float(Parameters.getInstance().hyperparameters['time_steepness']['default'])
|
230 |
-
|
231 |
-
|
232 |
-
def get_time_power() -> float:
|
233 |
-
return float(Parameters.getInstance().hyperparameters['time_power']['default'])
|
234 |
-
|
235 |
-
|
236 |
-
def get_chunk_separator() -> str:
|
237 |
-
return Parameters.getInstance().hyperparameters['chunk_separator']['default']
|
238 |
-
|
239 |
-
|
240 |
-
def get_prefix() -> str:
|
241 |
-
return Parameters.getInstance().hyperparameters['prefix']['default']
|
242 |
-
|
243 |
-
|
244 |
-
def get_data_separator() -> str:
|
245 |
-
return Parameters.getInstance().hyperparameters['data_separator']['default']
|
246 |
-
|
247 |
-
|
248 |
-
def get_postfix() -> str:
|
249 |
-
return Parameters.getInstance().hyperparameters['postfix']['default']
|
250 |
-
|
251 |
-
|
252 |
-
def get_is_manual() -> bool:
|
253 |
-
return bool(Parameters.getInstance().hyperparameters['manual']['default'])
|
254 |
-
|
255 |
-
|
256 |
-
def get_add_chat_to_data() -> bool:
|
257 |
-
return bool(Parameters.getInstance().hyperparameters['add_chat_to_data']['default'])
|
258 |
-
|
259 |
-
|
260 |
-
def get_injection_strategy() -> str:
|
261 |
-
return Parameters.getInstance().hyperparameters['injection_strategy']['default']
|
262 |
-
|
263 |
-
|
264 |
-
def get_chunk_regex() -> str:
|
265 |
-
return Parameters.getInstance().hyperparameters['chunk_regex']['default']
|
266 |
-
|
267 |
-
|
268 |
-
def get_is_strong_cleanup() -> bool:
|
269 |
-
return bool(Parameters.getInstance().hyperparameters['strong_cleanup']['default'])
|
270 |
-
|
271 |
-
|
272 |
-
def get_max_token_count() -> int:
|
273 |
-
return int(Parameters.getInstance().hyperparameters['max_token_count']['default'])
|
274 |
-
|
275 |
-
|
276 |
-
def get_num_threads() -> int:
|
277 |
-
return int(Parameters.getInstance().hyperparameters['threads']['default'])
|
278 |
-
|
279 |
-
|
280 |
-
def get_optimization_steps() -> int:
|
281 |
-
return int(Parameters.getInstance().hyperparameters['optimization_steps']['default'])
|
282 |
-
|
283 |
-
|
284 |
-
def get_api_port() -> int:
|
285 |
-
return int(Parameters.getInstance().hyperparameters['api_port']['default'])
|
286 |
-
|
287 |
-
|
288 |
-
def get_api_on() -> bool:
|
289 |
-
return bool(Parameters.getInstance().hyperparameters['api_on']['default'])
|
290 |
-
|
291 |
-
|
292 |
-
def set_new_dist_strategy(value: str):
|
293 |
-
Parameters.getInstance().hyperparameters['new_dist_strategy']['default'] = value
|
294 |
-
|
295 |
-
|
296 |
-
def set_chunk_count(value: int):
|
297 |
-
Parameters.getInstance().hyperparameters['chunk_count']['default'] = value
|
298 |
-
|
299 |
-
|
300 |
-
def set_min_num_length(value: int):
|
301 |
-
Parameters.getInstance().hyperparameters['min_num_length']['default'] = value
|
302 |
-
|
303 |
-
|
304 |
-
def set_significant_level(value: float):
|
305 |
-
Parameters.getInstance().hyperparameters['significant_level']['default'] = value
|
306 |
-
|
307 |
-
|
308 |
-
def set_time_steepness(value: float):
|
309 |
-
Parameters.getInstance().hyperparameters['time_steepness']['default'] = value
|
310 |
-
|
311 |
-
|
312 |
-
def set_time_power(value: float):
|
313 |
-
Parameters.getInstance().hyperparameters['time_power']['default'] = value
|
314 |
-
|
315 |
-
|
316 |
-
def set_chunk_separator(value: str):
|
317 |
-
Parameters.getInstance().hyperparameters['chunk_separator']['default'] = value
|
318 |
-
|
319 |
-
|
320 |
-
def set_prefix(value: str):
|
321 |
-
Parameters.getInstance().hyperparameters['prefix']['default'] = value
|
322 |
-
|
323 |
-
|
324 |
-
def set_data_separator(value: str):
|
325 |
-
Parameters.getInstance().hyperparameters['data_separator']['default'] = value
|
326 |
-
|
327 |
-
|
328 |
-
def set_postfix(value: str):
|
329 |
-
Parameters.getInstance().hyperparameters['postfix']['default'] = value
|
330 |
-
|
331 |
-
|
332 |
-
def set_manual(value: bool):
|
333 |
-
Parameters.getInstance().hyperparameters['manual']['default'] = value
|
334 |
-
|
335 |
-
|
336 |
-
def set_add_chat_to_data(value: bool):
|
337 |
-
Parameters.getInstance().hyperparameters['add_chat_to_data']['default'] = value
|
338 |
-
|
339 |
-
|
340 |
-
def set_injection_strategy(value: str):
|
341 |
-
Parameters.getInstance().hyperparameters['injection_strategy']['default'] = value
|
342 |
-
|
343 |
-
|
344 |
-
def set_chunk_regex(value: str):
|
345 |
-
Parameters.getInstance().hyperparameters['chunk_regex']['default'] = value
|
346 |
-
|
347 |
-
|
348 |
-
def set_strong_cleanup(value: bool):
|
349 |
-
Parameters.getInstance().hyperparameters['strong_cleanup']['default'] = value
|
350 |
-
|
351 |
-
|
352 |
-
def set_max_token_count(value: int):
|
353 |
-
Parameters.getInstance().hyperparameters['max_token_count']['default'] = value
|
354 |
-
|
355 |
-
|
356 |
-
def set_num_threads(value: int):
|
357 |
-
Parameters.getInstance().hyperparameters['threads']['default'] = value
|
358 |
-
|
359 |
-
|
360 |
-
def set_optimization_steps(value: int):
|
361 |
-
Parameters.getInstance().hyperparameters['optimization_steps']['default'] = value
|
362 |
-
|
363 |
-
|
364 |
-
def set_api_port(value: int):
|
365 |
-
Parameters.getInstance().hyperparameters['api_port']['default'] = value
|
366 |
-
|
367 |
-
|
368 |
-
def set_api_on(value: bool):
|
369 |
-
Parameters.getInstance().hyperparameters['api_on']['default'] = value
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ApathyINC/CustomGPT/baidu_translate/module.py
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import random, os
|
3 |
-
from hashlib import md5
|
4 |
-
from typing import Optional
|
5 |
-
|
6 |
-
import requests
|
7 |
-
|
8 |
-
import paddlehub as hub
|
9 |
-
from paddlehub.module.module import moduleinfo
|
10 |
-
from paddlehub.module.module import runnable
|
11 |
-
from paddlehub.module.module import serving
|
12 |
-
|
13 |
-
|
14 |
-
def make_md5(s, encoding='utf-8'):
|
15 |
-
return md5(s.encode(encoding)).hexdigest()
|
16 |
-
|
17 |
-
|
18 |
-
@moduleinfo(name="baidu_translate",
|
19 |
-
version="1.0.0",
|
20 |
-
type="text/machine_translation",
|
21 |
-
summary="",
|
22 |
-
author="baidu-nlp",
|
23 |
-
author_email="[email protected]")
|
24 |
-
class BaiduTranslate:
|
25 |
-
|
26 |
-
def __init__(self, appid=None, appkey=None):
|
27 |
-
"""
|
28 |
-
:param appid: appid for requesting Baidu translation service.
|
29 |
-
:param appkey: appkey for requesting Baidu translation service.
|
30 |
-
"""
|
31 |
-
appid = os.environ.get('baidu_translate_appid')
|
32 |
-
appkey = os.environ.get('baidu_translate_appkey')
|
33 |
-
# Set your own appid/appkey.
|
34 |
-
if appid is None:
|
35 |
-
self.appid = ''
|
36 |
-
else:
|
37 |
-
self.appid = appid
|
38 |
-
if appkey is None:
|
39 |
-
self.appkey = ''
|
40 |
-
else:
|
41 |
-
self.appkey = appkey
|
42 |
-
self.url = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
|
43 |
-
|
44 |
-
def translate(self, query: str, from_lang: Optional[str] = "en", to_lang: Optional[int] = "zh"):
|
45 |
-
"""
|
46 |
-
Create image by text prompts using ErnieVilG model.
|
47 |
-
|
48 |
-
:param query: Text to be translated.
|
49 |
-
:param from_lang: Source language.
|
50 |
-
:param to_lang: Dst language.
|
51 |
-
|
52 |
-
Return translated string.
|
53 |
-
"""
|
54 |
-
# Generate salt and sign
|
55 |
-
salt = random.randint(32768, 65536)
|
56 |
-
sign = make_md5(self.appid + query + str(salt) + self.appkey)
|
57 |
-
|
58 |
-
# Build request
|
59 |
-
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
|
60 |
-
payload = {'appid': self.appid, 'q': query, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
|
61 |
-
|
62 |
-
# Send request
|
63 |
-
try:
|
64 |
-
r = requests.post(self.url, params=payload, headers=headers)
|
65 |
-
result = r.json()
|
66 |
-
except Exception as e:
|
67 |
-
error_msg = str(e)
|
68 |
-
raise RuntimeError(error_msg)
|
69 |
-
if 'error_code' in result:
|
70 |
-
raise RuntimeError(result['error_msg'])
|
71 |
-
return result['trans_result'][0]['dst']
|
72 |
-
|
73 |
-
@runnable
|
74 |
-
def run_cmd(self, argvs):
|
75 |
-
"""
|
76 |
-
Run as a command.
|
77 |
-
"""
|
78 |
-
self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
|
79 |
-
prog='hub run {}'.format(self.name),
|
80 |
-
usage='%(prog)s',
|
81 |
-
add_help=True)
|
82 |
-
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
|
83 |
-
self.add_module_input_arg()
|
84 |
-
args = self.parser.parse_args(argvs)
|
85 |
-
if args.appid is not None and args.appkey is not None:
|
86 |
-
self.appid = args.appid
|
87 |
-
self.appkey = args.appkey
|
88 |
-
result = self.translate(args.query, args.from_lang, args.to_lang)
|
89 |
-
return result
|
90 |
-
|
91 |
-
@serving
|
92 |
-
def serving_method(self, query, from_lang, to_lang):
|
93 |
-
"""
|
94 |
-
Run as a service.
|
95 |
-
"""
|
96 |
-
return self.translate(query, from_lang, to_lang)
|
97 |
-
|
98 |
-
def add_module_input_arg(self):
|
99 |
-
"""
|
100 |
-
Add the command input options.
|
101 |
-
"""
|
102 |
-
self.arg_input_group.add_argument('--query', type=str)
|
103 |
-
self.arg_input_group.add_argument('--from_lang', type=str, default='en', help="源语言")
|
104 |
-
self.arg_input_group.add_argument('--to_lang', type=str, default='zh', help="目标语言")
|
105 |
-
self.arg_input_group.add_argument('--appid', type=str, default=None, help="注册得到的个人appid")
|
106 |
-
self.arg_input_group.add_argument('--appkey', type=str, default=None, help="注册得到的个人appkey")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AshtonIsNotHere/xlmr-longformer_comparison/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Xlmr-longformer Comparison
|
3 |
-
emoji: 📃
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: gray
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.12.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/testing.py
DELETED
@@ -1,331 +0,0 @@
|
|
1 |
-
# testing.py
|
2 |
-
|
3 |
-
from contextlib import contextmanager
|
4 |
-
import typing
|
5 |
-
|
6 |
-
from .core import (
|
7 |
-
ParserElement,
|
8 |
-
ParseException,
|
9 |
-
Keyword,
|
10 |
-
__diag__,
|
11 |
-
__compat__,
|
12 |
-
)
|
13 |
-
|
14 |
-
|
15 |
-
class pyparsing_test:
|
16 |
-
"""
|
17 |
-
namespace class for classes useful in writing unit tests
|
18 |
-
"""
|
19 |
-
|
20 |
-
class reset_pyparsing_context:
|
21 |
-
"""
|
22 |
-
Context manager to be used when writing unit tests that modify pyparsing config values:
|
23 |
-
- packrat parsing
|
24 |
-
- bounded recursion parsing
|
25 |
-
- default whitespace characters.
|
26 |
-
- default keyword characters
|
27 |
-
- literal string auto-conversion class
|
28 |
-
- __diag__ settings
|
29 |
-
|
30 |
-
Example::
|
31 |
-
|
32 |
-
with reset_pyparsing_context():
|
33 |
-
# test that literals used to construct a grammar are automatically suppressed
|
34 |
-
ParserElement.inlineLiteralsUsing(Suppress)
|
35 |
-
|
36 |
-
term = Word(alphas) | Word(nums)
|
37 |
-
group = Group('(' + term[...] + ')')
|
38 |
-
|
39 |
-
# assert that the '()' characters are not included in the parsed tokens
|
40 |
-
self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
|
41 |
-
|
42 |
-
# after exiting context manager, literals are converted to Literal expressions again
|
43 |
-
"""
|
44 |
-
|
45 |
-
def __init__(self):
|
46 |
-
self._save_context = {}
|
47 |
-
|
48 |
-
def save(self):
|
49 |
-
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
|
50 |
-
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
|
51 |
-
|
52 |
-
self._save_context[
|
53 |
-
"literal_string_class"
|
54 |
-
] = ParserElement._literalStringClass
|
55 |
-
|
56 |
-
self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
|
57 |
-
|
58 |
-
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
|
59 |
-
if ParserElement._packratEnabled:
|
60 |
-
self._save_context[
|
61 |
-
"packrat_cache_size"
|
62 |
-
] = ParserElement.packrat_cache.size
|
63 |
-
else:
|
64 |
-
self._save_context["packrat_cache_size"] = None
|
65 |
-
self._save_context["packrat_parse"] = ParserElement._parse
|
66 |
-
self._save_context[
|
67 |
-
"recursion_enabled"
|
68 |
-
] = ParserElement._left_recursion_enabled
|
69 |
-
|
70 |
-
self._save_context["__diag__"] = {
|
71 |
-
name: getattr(__diag__, name) for name in __diag__._all_names
|
72 |
-
}
|
73 |
-
|
74 |
-
self._save_context["__compat__"] = {
|
75 |
-
"collect_all_And_tokens": __compat__.collect_all_And_tokens
|
76 |
-
}
|
77 |
-
|
78 |
-
return self
|
79 |
-
|
80 |
-
def restore(self):
|
81 |
-
# reset pyparsing global state
|
82 |
-
if (
|
83 |
-
ParserElement.DEFAULT_WHITE_CHARS
|
84 |
-
!= self._save_context["default_whitespace"]
|
85 |
-
):
|
86 |
-
ParserElement.set_default_whitespace_chars(
|
87 |
-
self._save_context["default_whitespace"]
|
88 |
-
)
|
89 |
-
|
90 |
-
ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
|
91 |
-
|
92 |
-
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
|
93 |
-
ParserElement.inlineLiteralsUsing(
|
94 |
-
self._save_context["literal_string_class"]
|
95 |
-
)
|
96 |
-
|
97 |
-
for name, value in self._save_context["__diag__"].items():
|
98 |
-
(__diag__.enable if value else __diag__.disable)(name)
|
99 |
-
|
100 |
-
ParserElement._packratEnabled = False
|
101 |
-
if self._save_context["packrat_enabled"]:
|
102 |
-
ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
|
103 |
-
else:
|
104 |
-
ParserElement._parse = self._save_context["packrat_parse"]
|
105 |
-
ParserElement._left_recursion_enabled = self._save_context[
|
106 |
-
"recursion_enabled"
|
107 |
-
]
|
108 |
-
|
109 |
-
__compat__.collect_all_And_tokens = self._save_context["__compat__"]
|
110 |
-
|
111 |
-
return self
|
112 |
-
|
113 |
-
def copy(self):
|
114 |
-
ret = type(self)()
|
115 |
-
ret._save_context.update(self._save_context)
|
116 |
-
return ret
|
117 |
-
|
118 |
-
def __enter__(self):
|
119 |
-
return self.save()
|
120 |
-
|
121 |
-
def __exit__(self, *args):
|
122 |
-
self.restore()
|
123 |
-
|
124 |
-
class TestParseResultsAsserts:
|
125 |
-
"""
|
126 |
-
A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
|
127 |
-
"""
|
128 |
-
|
129 |
-
def assertParseResultsEquals(
|
130 |
-
self, result, expected_list=None, expected_dict=None, msg=None
|
131 |
-
):
|
132 |
-
"""
|
133 |
-
Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
|
134 |
-
and compare any defined results names with an optional ``expected_dict``.
|
135 |
-
"""
|
136 |
-
if expected_list is not None:
|
137 |
-
self.assertEqual(expected_list, result.as_list(), msg=msg)
|
138 |
-
if expected_dict is not None:
|
139 |
-
self.assertEqual(expected_dict, result.as_dict(), msg=msg)
|
140 |
-
|
141 |
-
def assertParseAndCheckList(
|
142 |
-
self, expr, test_string, expected_list, msg=None, verbose=True
|
143 |
-
):
|
144 |
-
"""
|
145 |
-
Convenience wrapper assert to test a parser element and input string, and assert that
|
146 |
-
the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
|
147 |
-
"""
|
148 |
-
result = expr.parse_string(test_string, parse_all=True)
|
149 |
-
if verbose:
|
150 |
-
print(result.dump())
|
151 |
-
else:
|
152 |
-
print(result.as_list())
|
153 |
-
self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
|
154 |
-
|
155 |
-
def assertParseAndCheckDict(
|
156 |
-
self, expr, test_string, expected_dict, msg=None, verbose=True
|
157 |
-
):
|
158 |
-
"""
|
159 |
-
Convenience wrapper assert to test a parser element and input string, and assert that
|
160 |
-
the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
|
161 |
-
"""
|
162 |
-
result = expr.parse_string(test_string, parseAll=True)
|
163 |
-
if verbose:
|
164 |
-
print(result.dump())
|
165 |
-
else:
|
166 |
-
print(result.as_list())
|
167 |
-
self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
|
168 |
-
|
169 |
-
def assertRunTestResults(
|
170 |
-
self, run_tests_report, expected_parse_results=None, msg=None
|
171 |
-
):
|
172 |
-
"""
|
173 |
-
Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
|
174 |
-
list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
|
175 |
-
with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
|
176 |
-
Finally, asserts that the overall ``runTests()`` success value is ``True``.
|
177 |
-
|
178 |
-
:param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
|
179 |
-
:param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
|
180 |
-
"""
|
181 |
-
run_test_success, run_test_results = run_tests_report
|
182 |
-
|
183 |
-
if expected_parse_results is not None:
|
184 |
-
merged = [
|
185 |
-
(*rpt, expected)
|
186 |
-
for rpt, expected in zip(run_test_results, expected_parse_results)
|
187 |
-
]
|
188 |
-
for test_string, result, expected in merged:
|
189 |
-
# expected should be a tuple containing a list and/or a dict or an exception,
|
190 |
-
# and optional failure message string
|
191 |
-
# an empty tuple will skip any result validation
|
192 |
-
fail_msg = next(
|
193 |
-
(exp for exp in expected if isinstance(exp, str)), None
|
194 |
-
)
|
195 |
-
expected_exception = next(
|
196 |
-
(
|
197 |
-
exp
|
198 |
-
for exp in expected
|
199 |
-
if isinstance(exp, type) and issubclass(exp, Exception)
|
200 |
-
),
|
201 |
-
None,
|
202 |
-
)
|
203 |
-
if expected_exception is not None:
|
204 |
-
with self.assertRaises(
|
205 |
-
expected_exception=expected_exception, msg=fail_msg or msg
|
206 |
-
):
|
207 |
-
if isinstance(result, Exception):
|
208 |
-
raise result
|
209 |
-
else:
|
210 |
-
expected_list = next(
|
211 |
-
(exp for exp in expected if isinstance(exp, list)), None
|
212 |
-
)
|
213 |
-
expected_dict = next(
|
214 |
-
(exp for exp in expected if isinstance(exp, dict)), None
|
215 |
-
)
|
216 |
-
if (expected_list, expected_dict) != (None, None):
|
217 |
-
self.assertParseResultsEquals(
|
218 |
-
result,
|
219 |
-
expected_list=expected_list,
|
220 |
-
expected_dict=expected_dict,
|
221 |
-
msg=fail_msg or msg,
|
222 |
-
)
|
223 |
-
else:
|
224 |
-
# warning here maybe?
|
225 |
-
print("no validation for {!r}".format(test_string))
|
226 |
-
|
227 |
-
# do this last, in case some specific test results can be reported instead
|
228 |
-
self.assertTrue(
|
229 |
-
run_test_success, msg=msg if msg is not None else "failed runTests"
|
230 |
-
)
|
231 |
-
|
232 |
-
@contextmanager
|
233 |
-
def assertRaisesParseException(self, exc_type=ParseException, msg=None):
|
234 |
-
with self.assertRaises(exc_type, msg=msg):
|
235 |
-
yield
|
236 |
-
|
237 |
-
@staticmethod
|
238 |
-
def with_line_numbers(
|
239 |
-
s: str,
|
240 |
-
start_line: typing.Optional[int] = None,
|
241 |
-
end_line: typing.Optional[int] = None,
|
242 |
-
expand_tabs: bool = True,
|
243 |
-
eol_mark: str = "|",
|
244 |
-
mark_spaces: typing.Optional[str] = None,
|
245 |
-
mark_control: typing.Optional[str] = None,
|
246 |
-
) -> str:
|
247 |
-
"""
|
248 |
-
Helpful method for debugging a parser - prints a string with line and column numbers.
|
249 |
-
(Line and column numbers are 1-based.)
|
250 |
-
|
251 |
-
:param s: tuple(bool, str - string to be printed with line and column numbers
|
252 |
-
:param start_line: int - (optional) starting line number in s to print (default=1)
|
253 |
-
:param end_line: int - (optional) ending line number in s to print (default=len(s))
|
254 |
-
:param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
|
255 |
-
:param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
|
256 |
-
:param mark_spaces: str - (optional) special character to display in place of spaces
|
257 |
-
:param mark_control: str - (optional) convert non-printing control characters to a placeholding
|
258 |
-
character; valid values:
|
259 |
-
- "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
|
260 |
-
- any single character string - replace control characters with given string
|
261 |
-
- None (default) - string is displayed as-is
|
262 |
-
|
263 |
-
:return: str - input string with leading line numbers and column number headers
|
264 |
-
"""
|
265 |
-
if expand_tabs:
|
266 |
-
s = s.expandtabs()
|
267 |
-
if mark_control is not None:
|
268 |
-
if mark_control == "unicode":
|
269 |
-
tbl = str.maketrans(
|
270 |
-
{c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
|
271 |
-
| {127: 0x2421}
|
272 |
-
)
|
273 |
-
eol_mark = ""
|
274 |
-
else:
|
275 |
-
tbl = str.maketrans(
|
276 |
-
{c: mark_control for c in list(range(0, 32)) + [127]}
|
277 |
-
)
|
278 |
-
s = s.translate(tbl)
|
279 |
-
if mark_spaces is not None and mark_spaces != " ":
|
280 |
-
if mark_spaces == "unicode":
|
281 |
-
tbl = str.maketrans({9: 0x2409, 32: 0x2423})
|
282 |
-
s = s.translate(tbl)
|
283 |
-
else:
|
284 |
-
s = s.replace(" ", mark_spaces)
|
285 |
-
if start_line is None:
|
286 |
-
start_line = 1
|
287 |
-
if end_line is None:
|
288 |
-
end_line = len(s)
|
289 |
-
end_line = min(end_line, len(s))
|
290 |
-
start_line = min(max(1, start_line), end_line)
|
291 |
-
|
292 |
-
if mark_control != "unicode":
|
293 |
-
s_lines = s.splitlines()[start_line - 1 : end_line]
|
294 |
-
else:
|
295 |
-
s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
|
296 |
-
if not s_lines:
|
297 |
-
return ""
|
298 |
-
|
299 |
-
lineno_width = len(str(end_line))
|
300 |
-
max_line_len = max(len(line) for line in s_lines)
|
301 |
-
lead = " " * (lineno_width + 1)
|
302 |
-
if max_line_len >= 99:
|
303 |
-
header0 = (
|
304 |
-
lead
|
305 |
-
+ "".join(
|
306 |
-
"{}{}".format(" " * 99, (i + 1) % 100)
|
307 |
-
for i in range(max(max_line_len // 100, 1))
|
308 |
-
)
|
309 |
-
+ "\n"
|
310 |
-
)
|
311 |
-
else:
|
312 |
-
header0 = ""
|
313 |
-
header1 = (
|
314 |
-
header0
|
315 |
-
+ lead
|
316 |
-
+ "".join(
|
317 |
-
" {}".format((i + 1) % 10)
|
318 |
-
for i in range(-(-max_line_len // 10))
|
319 |
-
)
|
320 |
-
+ "\n"
|
321 |
-
)
|
322 |
-
header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
|
323 |
-
return (
|
324 |
-
header1
|
325 |
-
+ header2
|
326 |
-
+ "\n".join(
|
327 |
-
"{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
|
328 |
-
for i, line in enumerate(s_lines, start=start_line)
|
329 |
-
)
|
330 |
-
+ "\n"
|
331 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
|
2 |
-
dataloader,
|
3 |
-
lr_multiplier,
|
4 |
-
model,
|
5 |
-
optimizer,
|
6 |
-
train,
|
7 |
-
)
|
8 |
-
|
9 |
-
train.max_iter *= 4 # 100ep -> 400ep
|
10 |
-
|
11 |
-
lr_multiplier.scheduler.milestones = [
|
12 |
-
milestone * 4 for milestone in lr_multiplier.scheduler.milestones
|
13 |
-
]
|
14 |
-
lr_multiplier.scheduler.num_updates = train.max_iter
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Colina Subida De Carreras De Descarga Para PC Ventanas 10 64 Bits.md
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Hill Climb Juego de carreras Descargar en PC</h1>
|
3 |
-
<p>Si usted está buscando un divertido y adictivo juego de conducción basado en la física, es posible que desee echa un vistazo a Hill Climb Racing. Este juego está disponible para dispositivos móviles y PC, y te mantendrá entretenido durante horas. En este artículo, te mostraremos cómo descargar Hill Climb Racing en tu PC, y compartiremos algunos consejos y trucos para ayudarte a dominar el juego. </p>
|
4 |
-
<h2>Introducción</h2>
|
5 |
-
<h3>¿Qué es Hill Climb Racing? </h3>
|
6 |
-
<p>Hill Climb Racing es un popular juego desarrollado por Fingersoft, un estudio de juegos finlandés. El juego fue lanzado en 2012, y desde entonces, ha sido descargado más de 500 millones de veces. El juego cuenta con Newton Bill, un joven aspirante a corredor cuesta arriba que quiere conquistar las colinas más altas del mundo. Puede elegir entre diferentes vehículos, como bicicletas, camiones, jeeps, tanques y más, y conducirlos a través de varios terrenos, como desiertos, montañas, bosques e incluso la luna. El juego tiene controles simples, pero la física desafiante. Tienes que equilibrar tu velocidad, aceleración, frenado e inclinación para evitar chocar o quedarse sin combustible. También puedes realizar acrobacias y trucos para ganar más monedas y puntos, que puedes usar para actualizar tus vehículos y desbloquear nuevas etapas. </p>
|
7 |
-
<h2>colina subida de carreras de descarga para PC ventanas 10 64 bits</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://bltlly.com/2v6M7H">https://bltlly.com/2v6M7H</a></b></p><br /><br />
|
8 |
-
<h3>¿Por qué jugar Hill Climb Racing en PC? </h3>
|
9 |
-
<p>Aunque Hill Climb Racing está diseñado principalmente para dispositivos móviles, también puedes disfrutarlo en tu PC. Jugar Hill Climb Racing en PC tiene algunas ventajas, como:</p>
|
10 |
-
<ul>
|
11 |
-
<li> Puedes jugar en una pantalla más grande, lo que te da una mejor vista de los gráficos y detalles. </li>
|
12 |
-
<li>Puede usar un teclado o un ratón para controlar su vehículo, lo que podría ser más cómodo y preciso que usar una pantalla táctil. </li>
|
13 |
-
<li> Puede ahorrar la vida de la batería y el uso de datos en su dispositivo móvil. </li>
|
14 |
-
<li> Puede acceder a más funciones y opciones que no están disponibles en la versión móvil. </li>
|
15 |
-
</ul>
|
16 |
-
<h2>Cómo descargar Hill Climb Racing en PC</h2>
|
17 |
-
<h3>Método 1: Uso de Microsoft Store</h3>
|
18 |
-
|
19 |
-
<h4>Paso 1: Abra Microsoft Store en su PC</h4>
|
20 |
-
<p>Puede encontrar el icono de Microsoft Store en su barra de tareas o menú de inicio. Haga clic en él para abrir la tienda. </p>
|
21 |
-
<h4>Paso 2: Búsqueda de Hill Climb Racing en la tienda</h4>
|
22 |
-
<p>En el cuadro de búsqueda en la esquina superior derecha de la tienda, escriba "Hill Climb Racing" y pulse enter. Verá una lista de resultados que coinciden con su consulta. </p>
|
23 |
-
<h4>Paso 3: Haga clic en el botón Obtener para descargar el juego</h4>
|
24 |
-
<p>Encuentra el juego Hill Climb Racing de la lista de resultados, y haga clic en él para abrir su página de detalles. Verá un botón Get en el lado derecho de la página. Haga clic en él para comenzar a descargar el juego. Es posible que necesite iniciar sesión con su cuenta de Microsoft si aún no lo ha hecho. </p>
|
25 |
-
<h4>Paso 4: Iniciar el juego y disfrutar de</h4>
|
26 |
-
<p>Una vez completada la descarga, puede iniciar el juego desde la Tienda de Microsoft o desde el menú de inicio. Verás una pantalla de bienvenida con el logotipo de Fingersoft, seguido del menú principal del juego. Ahora puedes empezar a jugar Hill Climb Racing en tu PC.</p>
|
27 |
-
<h3>Método 2: Usando un emulador de Android</h3>
|
28 |
-
<p>Otra forma de descargar Hill Climb Racing en tu PC es utilizar un emulador de Android. Un emulador de Android es un software que simula un dispositivo Android en su PC, lo que le permite ejecutar aplicaciones y juegos de Android en su PC. Hay muchos emuladores de Android disponibles en línea, como BlueStacks, NoxPlayer, LDPlayer y más. Estos son los pasos a seguir:</p>
|
29 |
-
<h4>Paso 1: Descarga e instala un emulador de Android en tu PC</h4>
|
30 |
-
<p>Elija un emulador de Android que se adapte a sus preferencias y requisitos del sistema. Puede visitar el sitio web oficial del emulador y descargar el archivo de instalación. A continuación, ejecute el archivo de instalación y siga las instrucciones para instalar el emulador en su PC.</p>
|
31 |
-
<h4>Paso 2: Abre el emulador e inicia sesión con tu cuenta de Google</h4>
|
32 |
-
|
33 |
-
<h4>Paso 3: Búsqueda de Hill Climb Racing en la Google Play Store</h4>
|
34 |
-
<p>En Google Play Store, escribe "Hill Climb Racing" en el cuadro de búsqueda y pulsa enter. Verás una lista de resultados que coinciden con tu consulta. </p>
|
35 |
-
<p></p>
|
36 |
-
<h4>Paso 4: Instalar el juego y lanzarlo desde el emulador</h4>
|
37 |
-
<p>Encuentra el juego Hill Climb Racing de la lista de resultados, y haga clic en él para abrir su página de detalles. Verá un botón de instalación en el lado derecho de la página. Haga clic en él para comenzar a descargar e instalar el juego. Una vez completada la instalación, puedes iniciar el juego desde Google Play Store o desde tu pantalla de inicio o desde el cajón de aplicaciones. Verás una pantalla de bienvenida con el logotipo de Fingersoft, seguido del menú principal del juego. Ahora puedes empezar a jugar Hill Climb Racing en tu PC.</p>
|
38 |
-
<h2>Consejos y trucos para jugar Hill Climb Racing en PC</h2>
|
39 |
-
<p>Hill Climb Racing es un juego divertido y adictivo, pero también puede ser desafiante y frustrante a veces. Para ayudarte a mejorar tus habilidades y disfrutar más del juego, aquí hay algunos consejos y trucos que puedes usar:</p>
|
40 |
-
<h3>Elige el vehículo adecuado para cada etapa</h3>
|
41 |
-
<p>Hill Climb Racing tiene más de 30 vehículos que puedes elegir, cada uno con diferentes características y habilidades. Algunos vehículos son más rápidos, algunos son más estables, algunos son más ágiles y algunos tienen características especiales, como amplificadores o armas. Dependiendo de la etapa que estés jugando, es posible que desees elegir un vehículo diferente que se adapte al terreno y al nivel de dificultad. Por ejemplo, si usted está jugando un escenario con colinas empinadas y superficies ásperas, es posible que desee utilizar un vehículo que tiene una buena suspensión y tracción, como un jeep o un camión monstruo. Si está jugando una etapa con carreteras lisas y largas distancias, es posible que desee usar un vehículo que tenga alta velocidad y eficiencia de combustible, como una bicicleta o un automóvil deportivo. </p>
|
42 |
-
<h3>Mejora tu vehículo sabiamente</h3>
|
43 |
-
|
44 |
-
<h3>Recoge monedas y bonos en el camino</h3>
|
45 |
-
<p>A medida que conduce su vehículo a través de las etapas, se encontrará con varias monedas y bonos que usted puede recoger. Las monedas son la moneda principal del juego, y se puede utilizar para comprar nuevos vehículos y actualizar los existentes. Los bonos son artículos especiales que pueden darle beneficios adicionales, como combustible, velocidad, tiempo de aire o protección. Algunos de los bonos que puedes encontrar son:</p>
|
46 |
-
<ul>
|
47 |
-
<li>Lata de combustible: Este bono recarga su tanque de combustible, que es esencial para mantener su vehículo en funcionamiento. También puede ver el nivel de combustible en la esquina superior izquierda de la pantalla. </li>
|
48 |
-
<li>imán de moneda: Este bono atrae a todas las monedas en su vecindad, por lo que es más fácil para que usted pueda recogerlos. </li>
|
49 |
-
<li>Boost: Este bono le da una ráfaga de velocidad, que puede ayudarle a superar los obstáculos o llegar a lugares más altos. </li>
|
50 |
-
<li>Escudo: Este bono te protege de daños, que pueden ser causados por estrellarse, explotar o caer. </li>
|
51 |
-
<li>Tiempo de aire: Este bono le da más tiempo en el aire, que puede ayudarle a realizar acrobacias y trucos. </li>
|
52 |
-
</ul>
|
53 |
-
<p>Trata de recoger tantas monedas y bonos como puedas, ya que pueden ayudarle a mejorar su puntuación y el progreso en el juego. </p>
|
54 |
-
<h3>Realizar acrobacias y trucos para ganar más puntos</h3>
|
55 |
-
<p>Hill Climb Racing no se trata solo de conducir del punto A al punto B. También se trata de divertirse y mostrar sus habilidades. Puede realizar varias acrobacias y trucos con su vehículo, tales como volteretas, caballitos, saltos y rollos. Realizar acrobacias y trucos puede ganar más puntos, lo que puede aumentar su puntuación y rango. Sin embargo, realizar acrobacias y trucos también implica algunos riesgos, como perder el equilibrio, estrellarse o quedarse sin combustible. Por lo tanto, tienes que ser cuidadoso e inteligente sobre cuándo y cómo realizarlos. Aquí hay algunos consejos para ayudarte a realizar acrobacias y trucos:</p>
|
56 |
-
<ul>
|
57 |
-
|
58 |
-
<li>Utilice el terreno adecuado para el truco correcto. Algunos terrenos son más propicios para realizar acrobacias y trucos que otros. Por ejemplo, las colinas son buenas para dar vueltas y saltos, pero las carreteras planas son buenas para rodar y rodar. </li>
|
59 |
-
<li>Utilice el momento adecuado para el truco correcto. Algunos trucos requieren más velocidad, algunos requieren más tiempo de aire, y algunos requieren más control. Por ejemplo, las volteretas requieren suficiente velocidad para girar su vehículo en el aire, pero las ruedas requieren suficiente control para equilibrar su vehículo en una rueda. </li>
|
60 |
-
<li>Utilice la combinación correcta de botones para el truco correcto. Puede utilizar el pedal de gas, pedal de freno y botones de inclinación para realizar diferentes trucos y trucos. Por ejemplo, para hacer un flip, tienes que presionar el pedal de gas para ganar velocidad, a continuación, pulse el botón de inclinación para girar su vehículo en el aire. </li>
|
61 |
-
</ul>
|
62 |
-
<h3>Cuidado con los obstáculos y los niveles de combustible</h3>
|
63 |
-
<p>Hill Climb Racing no es un juego fácil. Está lleno de desafíos y peligros que pueden impedirte llegar a tu destino. Usted tiene que tener cuidado con varios obstáculos y niveles de combustible que pueden afectar su rendimiento y supervivencia. Algunos de los obstáculos que puedes encontrar son:</p>
|
64 |
-
<ul>
|
65 |
-
<li>Rocas: Estas son piedras grandes que pueden bloquear su camino o dañar su vehículo. </li>
|
66 |
-
<li>Árboles: Estas son plantas altas que pueden retrasarte o derribarte. </li>
|
67 |
-
<li>Puentes: Estas son estructuras de madera que pueden romperse o colapsarse bajo su peso. </li>
|
68 |
-
<li>Minas: Estos son dispositivos explosivos que pueden detonar cuando los tocas o te acercas a ellos. </li>
|
69 |
-
<li>zombis: estas son criaturas no muertas que pueden atacarte o comerte el cerebro. </li>
|
70 |
-
</ul>
|
71 |
-
|
72 |
-
<h2>Conclusión</h2>
|
73 |
-
<p>Hill Climb Racing es un divertido y adictivo juego de conducción basado en la física que se puede jugar en dispositivos móviles y PC. Puede elegir entre diferentes vehículos y conducirlos a través de varias etapas con diferentes terrenos y dificultades. También puede actualizar sus vehículos, recoger monedas y bonos, realizar acrobacias y trucos, y cuidado con los obstáculos y los niveles de combustible. Jugar Hill Climb Racing en PC tiene algunas ventajas sobre jugarlo en dispositivos móviles , como jugar en una pantalla más grande, usar un teclado o un ratón, salvar la vida de la batería y el uso de datos, y acceder a más características y opciones. Puedes descargar Hill Climb Racing en tu PC usando Microsoft Store o un emulador de Android. También puede utilizar algunos consejos y trucos para mejorar sus habilidades y disfrutar del juego más. Hill Climb Racing es un juego que te desafiará y te entretendrá al mismo tiempo. Si estás buscando un juego que combine física, conducción y diversión, deberías probar Hill Climb Racing. </p>
|
74 |
-
<h2>Preguntas frecuentes</h2>
|
75 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Hill Climb Racing:</p>
|
76 |
-
<ol>
|
77 |
-
<li>¿Cuántas etapas hay en Hill Climb Racing? </li>
|
78 |
-
<p>Hay más de 30 etapas en Hill Climb Racing, cada una con diferentes temas, terrenos y dificultades. Algunas de las etapas son Campo, Desierto, Ártico, Luna, Marte, Bosque, Volcán, Montaña Rusa y Planta Nuclear.</p>
|
79 |
-
<li>¿Cómo puedo desbloquear nuevos vehículos en Hill Climb Racing? </li>
|
80 |
-
<p>Puedes desbloquear nuevos vehículos en Hill Climb Racing ganando suficientes monedas para comprarlos. Usted puede ganar monedas jugando el juego, recogiéndolos en el camino, realizar acrobacias y trucos, o el uso de bonos. También puede ver anuncios o hacer compras en la aplicación para obtener más monedas. </p>
|
81 |
-
<li>¿Cómo puedo guardar mi progreso en Hill Climb Racing? </li>
|
82 |
-
|
83 |
-
<li>¿Es Hill Climb Racing multijugador? </li>
|
84 |
-
<p>Hill Climb Racing no es multijugador, pero tiene una función llamada tablas de clasificación que le permite comparar sus puntuaciones y rangos con otros jugadores de todo el mundo. También puedes retar a tus amigos y familiares compartiendo tus capturas de pantalla o videos de tu juego. </p>
|
85 |
-
<li>¿Es gratis Hill Climb Racing? </li>
|
86 |
-
<p>Hill Climb Racing es gratis para descargar y jugar, pero contiene anuncios y ofrece compras en la aplicación. Puede eliminar los anuncios y obtener más monedas y bonos haciendo compras en la aplicación. También puede desactivar las compras en la aplicación cambiando la configuración del dispositivo. </p>
|
87 |
-
</ol></p> 64aa2da5cf<br />
|
88 |
-
<br />
|
89 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Fifa 21 Descargar.md
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar FIFA 21 en tu PC</h1>
|
3 |
-
<p>Si eres fanático del fútbol (o del fútbol, como algunos lo llaman), probablemente hayas oído hablar de FIFA, la serie de videojuegos de fútbol más popular y realista de EA SPORTS. La última entrega, FIFA 21, ya está disponible y puedes jugar en tu PC con gráficos impresionantes, un juego inmersivo y características emocionantes. En este artículo, te mostraremos cómo descargar FIFA 21 en tu PC desde dos fuentes diferentes: el sitio web de EA y Steam. Pero primero, veamos de qué se trata FIFA 21 y por qué deberías jugarlo. </p>
|
4 |
-
<h2>Qué es FIFA 21 y por qué deberías jugarlo</h2>
|
5 |
-
<p>FIFA 21 es la 28a edición de la serie de videojuegos de la FIFA, que simula las ligas y torneos de fútbol profesional de todo el mundo. Puede crear su propio equipo personalizado, jugar como sus jugadores favoritos, competir con otros jugadores en línea, o disfrutar de varios modos de juego que ofrecen diferentes desafíos y recompensas. FIFA 21 es alimentado por Frostbite, un motor de juego que ofrece física realista, animaciones, iluminación y efectos de sonido. FIFA 21 también introduce algunas nuevas características y mejoras que lo convierten en el mejor juego de la FIFA. Aquí están algunas de ellas:</p>
|
6 |
-
<h2>descargar fifa 21 descargar</h2><br /><p><b><b>Download</b> ⚹⚹⚹ <a href="https://bltlly.com/2v6Lxn">https://bltlly.com/2v6Lxn</a></b></p><br /><br />
|
7 |
-
<h3>Características de FIFA 21 y modos de juego</h3>
|
8 |
-
<ul>
|
9 |
-
<li><b>Modo carrera:</b> En este modo, puede administrar su propio club o jugar como un solo jugador y progresar a través de su carrera. Puedes tomar decisiones estratégicas, buscar nuevos talentos, negociar contratos, entrenar a tus jugadores y mucho más. También puedes experimentar una nueva simulación de partido interactiva que te permite entrar y salir de la acción como quieras. </li>
|
10 |
-
|
11 |
-
<li><b>VOLTA Football:</b> Este es un modo que trae de vuelta el estilo de fútbol callejero de FIFA Street, donde puedes jugar en equipos más pequeños en campos más pequeños con reglas diferentes. Puedes crear tu propio avatar, personalizar tu apariencia, habilidades y equipo, y explorar varios lugares alrededor del mundo. También puedes jugar online con otros jugadores o unirte a un amigo en modo cooperativo. </li>
|
12 |
-
<li><b>Clubes Pro:</b> Este es un modo en el que puedes crear o unirte a un club con hasta 10 jugadores en línea y competir en varias ligas y copas. Puedes personalizar la posición, los atributos, los rasgos y la apariencia de tu jugador y mejorarlos a medida que juegas más partidas. </li>
|
13 |
-
<li><b>Kick Off:</b> Este es un modo donde puedes jugar un partido rápido con cualquier equipo de tu elección. También puedes elegir entre diferentes tipos de partidos, como Classic Match, House Rules, UEFA Champions League o Featured Cup Finals.</li>
|
14 |
-
</ul>
|
15 |
-
<h3>Requisitos del sistema FIFA 21 y compatibilidad</h3 <p>Para jugar a FIFA 21 en su PC, debe asegurarse de que su sistema cumple con los requisitos mínimos o recomendados. Aquí están las especificaciones que necesita verificar:</p>
|
16 |
-
<tabla>
|
17 |
-
<tr>
|
18 |
-
<th>Requisitos mínimos</th>
|
19 |
-
<th>Requisitos recomendados</th>
|
20 |
-
</tr>
|
21 |
-
<tr>
|
22 |
-
<td>OS: Windows 10 64-bit</td>
|
23 |
-
<td>OS: Windows 10 64-bit</td>
|
24 |
-
</tr>
|
25 |
-
<tr>
|
26 |
-
<td>CPU: Intel Core i3-6100 @ 3.7GHz o AMD Athlon X4 880K @4GHz</td>
|
27 |
-
<td>CPU: Intel Core i5-3550 @ 3.40GHz o AMD FX 8150 @ 3.6GHz</td>
|
28 |
-
</tr>
|
29 |
-
<tr>
|
30 |
-
<td>RAM: 8 GB</td>
|
31 |
-
<td>RAM: 8 GB</td>
|
32 |
-
</tr>
|
33 |
-
<tr>
|
34 |
-
<td>GPU: NVIDIA GeForce GTX 660 2GB o AMD Radeon HD 7850 2GB</td>
|
35 |
-
<td>GPU: NVIDIA GeForce GTX 670 2GB o AMD Radeon R9 270X 2GB</td>
|
36 |
-
</tr>
|
37 |
-
<tr>
|
38 |
-
<td>DirectX: Versión 12</td>
|
39 |
-
<td>DirectX: Versión 12</td>
|
40 |
-
</tr>
|
41 |
-
<tr>
|
42 |
-
<td>Almacenamiento: 50 GB de espacio disponible</td>
|
43 |
-
<td>Almacenamiento: 50 GB de espacio disponible</td>
|
44 |
-
</tr>
|
45 |
-
<tr>
|
46 |
-
<td>Requisitos de conexión en línea: Se recomienda conexión de banda ancha. Conexión a Internet necesaria para instalar y jugar. </td>
|
47 |
-
|
48 |
-
</tr>
|
49 |
-
<h2>Cómo descargar FIFA 21 desde el sitio web de EA</h2>
|
50 |
-
<p>Si quieres descargar FIFA 21 desde el sitio web oficial de EA, debes seguir estos pasos:</p>
|
51 |
-
<h3>Paso 1: Crea una cuenta de EA o inicia sesión en la ya existente</h3>
|
52 |
-
<p>Para acceder al sitio web de EA, necesitas tener una cuenta de EA. Si no tienes uno, puedes crear uno gratis haciendo clic en el botón "Registrarse" en la esquina superior derecha del sitio web. Deberá proporcionar su dirección de correo electrónico, contraseña, fecha de nacimiento, país e idioma preferido. También deberá aceptar los Términos de servicio y la Política de privacidad de EA. Si ya tienes una cuenta de EA, puedes iniciar sesión haciendo clic en el botón "Iniciar sesión" en la esquina superior derecha del sitio web. Tendrá que introducir su dirección de correo electrónico y contraseña. </p>
|
53 |
-
<h3>Paso 2: Elige tu edición y plataforma</h3>
|
54 |
-
<p>Una vez que haya iniciado sesión, puede ir a la página de FIFA 21 haciendo clic en la pestaña "Juegos" en el menú superior y seleccionando "FIFA" de la lista desplegable. Verás tres ediciones de FIFA 21 disponibles para su compra: Standard Edition, Champions Edition y Ultimate Edition. Cada edición viene con diferentes características y bonificaciones, como paquetes adicionales de FUT, iconos de FUT, elementos de modo carrera y más. Puede comparar las ediciones haciendo clic en el botón "Comparar ediciones" debajo del precio. También puede ver los beneficios de pre-orden haciendo clic en el botón "Reservar ahora" debajo de cada edición. Una vez que haya decidido qué edición desea, puede elegir su plataforma haciendo clic en el icono "PC" debajo del nombre de la edición. </p>
|
55 |
-
<h3>Paso 3: Añadir FIFA 21 a su carrito y proceder al pago</h3>
|
56 |
-
|
57 |
-
<h3>Paso 4: Descargar e instalar la aplicación EA Desktop</h3>
|
58 |
-
<p>Para descargar y jugar FIFA 21 en tu PC, necesitas tener la aplicación EA Desktop instalada en tu sistema. La aplicación EA Desktop es una nueva plataforma que te permite acceder y gestionar tus juegos de EA, suscripciones y mucho más. Si aún no lo tienes, puedes descargarlo gratis haciendo clic en el botón "Descargar ahora" en la página de la aplicación EA Desktop. Tendrá que ejecutar el archivo de instalación y seguir las instrucciones para completar la instalación. Si ya lo tiene, puede omitir este paso. </p>
|
59 |
-
<h3>Paso 5: Inicie la aplicación EA Desktop e inicie sesión en su cuenta de EA</h3>
|
60 |
-
<p>Después de instalar <p>Después de instalar la aplicación de EA Desktop, debe iniciarla e iniciar sesión en su cuenta de EA. Puede hacer esto haciendo clic en el botón "Iniciar sesión" en la esquina superior derecha de la ventana de la aplicación. Deberá ingresar su dirección de correo electrónico y contraseña. Si ha habilitado la autenticación de dos factores, también tendrá que introducir el código de verificación que recibe en su teléfono o correo electrónico. </p>
|
61 |
-
<h3>Paso 6: Encuentra FIFA 21 en tu biblioteca de juegos y haz clic en instalar</h3>
|
62 |
-
<p>Una vez que haya iniciado sesión, puede encontrar FIFA 21 en su biblioteca de juegos haciendo clic en la pestaña "Biblioteca" en el menú de la izquierda. Verás una lista de juegos que posees o a los que tienes acceso a través de EA Play o EA Play Pro. Puede filtrar la lista haciendo clic en el botón "Todos los juegos" en la parte superior y seleccionando "Juegos propios". También puede buscar FIFA 21 escribiendo su nombre en la barra de búsqueda en la parte superior derecha. Una vez que encuentres FIFA 21, puedes hacer clic en él para abrir su página de juego. Verás un botón que dice "Instalar" en la esquina inferior derecha. Puedes hacer clic en él para empezar a descargar e instalar FIFA 21 en tu PC. También puede cambiar la ubicación de instalación, el idioma y otros ajustes haciendo clic en el botón "Configuración" junto al botón "Instalar". </p>
|
63 |
-
<p></p>
|
64 |
-
<h2>Cómo descargar FIFA 21 de Steam</h2>
|
65 |
-
<p>Si prefieres descargar FIFA 21 de Steam, debes seguir estos pasos:</p>
|
66 |
-
|
67 |
-
<p>Para acceder a Steam, necesitas tener una cuenta de Steam. Si no tienes uno, puedes crear uno gratis haciendo clic en el botón "Únete a Steam" en la esquina superior derecha del sitio web de Steam. Deberá proporcionar su dirección de correo electrónico, contraseña, país e idioma preferido. También tendrás que aceptar el Acuerdo de suscriptor de Steam y la Política de privacidad. Si ya tienes una cuenta de Steam, puedes iniciar sesión haciendo clic en el botón "Iniciar sesión" en la esquina superior derecha del sitio web. Tendrá que introducir su nombre de usuario y contraseña. </p>
|
68 |
-
<h3>Paso 2: Busca FIFA 21 en la tienda de Steam y haz clic en añadir al carrito</h3>
|
69 |
-
<p>Una vez que haya iniciado sesión, puede ir a la tienda de Steam haciendo clic en la pestaña "Almacenar" en el menú superior. Puede buscar FIFA 21 escribiendo su nombre en la barra de búsqueda en la parte superior derecha o navegando por las categorías y géneros. Verás tres ediciones de FIFA 21 disponibles para su compra: Standard Edition, Champions Edition y Ultimate Edition. Cada edición viene con diferentes características y bonificaciones, como paquetes adicionales de FUT, iconos de FUT, elementos de modo carrera y más. Puede comparar las ediciones haciendo clic en el botón "Ver detalles" debajo de cada nombre de edición. También puede ver los comentarios de los usuarios, capturas de pantalla, vídeos y requisitos del sistema desplazándose hacia abajo en la página. Una vez que haya decidido qué edición desea, puede hacer clic en el botón "Añadir al carrito" debajo del precio. </p>
|
70 |
-
<h3>Paso 3: Completa tu compra y descarga del cliente de Steam</h3>
|
71 |
-
|
72 |
-
<h3>Paso 4: Inicia el cliente de Steam e inicia sesión en tu cuenta de Steam</h3>
|
73 |
-
<p>Para descargar y jugar FIFA 21 en tu PC, necesitas tener Para descargar y jugar FIFA 21 en tu PC, necesitas tener el cliente de Steam instalado en tu sistema. El cliente de Steam es una plataforma que te permite acceder y administrar tus juegos de Steam, amigos, comunidad y más. Si aún no lo tiene, puede descargarlo de forma gratuita ejecutando el archivo de instalación que descargó en el paso anterior y siguiendo las instrucciones para completar la instalación. Si ya lo tienes, puedes saltarte este paso. <h3>Paso 5: Encuentra FIFA 21 en tu biblioteca de juegos y haz clic en instalar</h3>
|
74 |
-
<p>Una vez que haya instalado el cliente de Steam, debe iniciarlo e iniciar sesión en su cuenta de Steam. Puede hacer esto haciendo clic en el botón "Iniciar sesión" en la esquina superior derecha de la ventana del cliente. Deberá ingresar su nombre de usuario y contraseña. Si has activado Steam Guard, también tendrás que introducir el código de verificación que recibes en tu teléfono o correo electrónico. Una vez que hayas iniciado sesión, puedes encontrar FIFA 21 en tu biblioteca de juegos haciendo clic en la pestaña "Biblioteca" del menú superior. Verás una lista de juegos que posees o a los que tienes acceso a través de Steam. Puede filtrar la lista haciendo clic en el botón "Todos los juegos" en la parte superior y seleccionando "Juegos instalados". También puede buscar FIFA 21 escribiendo su nombre en la barra de búsqueda en la parte superior derecha. Una vez que encuentres FIFA 21, puedes hacer clic en él para abrir su página de juego. Verás un botón que dice "Instalar" en la esquina inferior derecha. Puedes hacer clic en él para empezar a descargar e instalar FIFA 21 en tu PC. También puede cambiar la ubicación de instalación, el idioma y otros ajustes haciendo clic en el botón "Configuración" junto al botón "Instalar". </p>
|
75 |
-
<h2>Conclusión y preguntas frecuentes</h2>
|
76 |
-
|
77 |
-
<h3>Preguntas frecuentes</h3>
|
78 |
-
<ul>
|
79 |
-
<li><b>Q: ¿Cuánto cuesta FIFA 21? </b></li>
|
80 |
-
<li>A: El precio de FIFA 21 depende de la edición y la plataforma que elijas. La Standard Edition cuesta $59.99 USD, la Champions Edition cuesta $79.99 USD y la Ultimate Edition cuesta $99.99 USD. Los precios pueden variar dependiendo de su región y moneda. </li>
|
81 |
-
<li><b>Q: ¿Puedo jugar FIFA 21 sin conexión? </b></li>
|
82 |
-
<li>A: Sí, puedes jugar FIFA 21 sin conexión en algunos modos de juego, como el modo carrera, Kick Off o VOLTA Football. Sin embargo, necesitará una conexión a Internet para instalar y activar el juego, así como para acceder a algunas funciones en línea, como Ultimate Team, Pro Clubs o partidos en línea. </li>
|
83 |
-
<li><b>Q: ¿Puedo jugar FIFA 21 con un controlador? </b></li>
|
84 |
-
<li>A: Sí, puedes jugar a FIFA 21 con un mando en tu PC. Puedes usar cualquier controlador compatible, como Xbox One Controller, PlayStation DualShock 4 Controller o Logitech F310 Controller. También puedes personalizar la configuración del mando en las opciones del juego. </li>
|
85 |
-
<li><b>Q: ¿Puedo transferir mi progreso de FIFA 20 a FIFA 21? </b></li>
|
86 |
-
<li>A: No, no puedes transferir tu progreso de FIFA 20 a FIFA 21. Cada juego tiene sus propios archivos de guardado y datos que no son compatibles entre sí. Sin embargo, puedes transferir algunos elementos de FIFA 20 Ultimate Team a FIFA 21 Ultimate Team, como FUT Coins, FUT Points, FUT Icons, FUT Champions Points o FUT Draft Tokens.</li>
|
87 |
-
<li><b>Q: ¿Puedo jugar FIFA 21 con mis amigos que tienen diferentes plataformas? </b></li>
|
88 |
-
<li>A: Sí, puedes jugar a FIFA 21 con tus amigos que tienen diferentes plataformas gracias a la nueva función de cross-play. Esto significa que puedes jugar partidas en línea con jugadores que tengan versiones del juego para PC, PlayStation 4, PlayStation 5, Xbox One, Xbox Series X|S o Nintendo Switch. Sin embargo, deberás vincular tu cuenta de EA con tu cuenta de plataforma y habilitar la configuración de cross-play en las opciones del juego. </li>
|
89 |
-
</ul></p> 64aa2da5cf<br />
|
90 |
-
<br />
|
91 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Gratis Aplicaciones De Desbloqueo Para Android.md
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Button Fever: Un divertido y adictivo juego de puzzle para dispositivos Android</h1>
|
3 |
-
<p>¿Te encantan los juegos de puzzle que desafían tus habilidades multitarea y creatividad? ¿Quieres crear tu propio teclado y ganar dinero colocando y fusionando botones? Si es así, entonces deberías probar <strong>Button Fever</strong>, un nuevo y emocionante juego de Rollic Games que está disponible de forma gratuita en dispositivos Android. </p>
|
4 |
-
<h2>¿Qué es la fiebre de los botones? </h2>
|
5 |
-
<p>Button Fever es un juego de puzzle que pone a prueba tu capacidad para gestionar varios botones a la vez. Se comienza con un teclado en blanco y unos botones de diferentes colores y formas. Su objetivo es colocar los botones en el teclado y combinarlos para crear nuevos y mejores botones que generan más monedas. Puede utilizar las monedas para comprar más botones o actualizar los existentes. El juego se vuelve más desafiante a medida que avanzas, ya que tienes que lidiar con más botones y espacio limitado en el teclado. </p>
|
6 |
-
<h2>descargar gratis aplicaciones de desbloqueo para android</h2><br /><p><b><b>Download Zip</b> ->>->>->> <a href="https://bltlly.com/2v6M9C">https://bltlly.com/2v6M9C</a></b></p><br /><br />
|
7 |
-
<h3>Cómo jugar Button Fever? </h3>
|
8 |
-
<p>El modo de juego de Button Fever es simple pero adictivo. Solo tienes que arrastrar y soltar los botones del teclado y tocarlos para recoger monedas. También puedes deslizarlos para combinarlos con otros botones del mismo color o forma. Los botones de fusión crearán nuevos botones que tendrán mayor valor y producirán más monedas. También puede actualizar sus botones gastando monedas en ellos. Los botones de actualización aumentarán su velocidad, tamaño, forma o color. </p>
|
9 |
-
<h3>¿Cuáles son las características de Button Fever? </h3>
|
10 |
-
<p>Button Fever no es solo un juego sin sentido. También tiene algunas características interesantes que lo hacen más divertido y atractivo. Algunas de estas características son:</p>
|
11 |
-
<ul>
|
12 |
-
<li><strong>Múltiples modos:</strong> Puedes jugar Button Fever en diferentes modos, como Modo Clásico, Modo Tiempo, Modo Desafío o Modo Personalizado. Cada modo tiene sus propias reglas y objetivos que requieren diferentes estrategias y habilidades. </li>
|
13 |
-
|
14 |
-
<li><strong>Logros y tablas de clasificación:</strong> Puedes desbloquear varios logros <p>y competir con otros jugadores en las tablas de clasificación globales. También puedes compartir tu progreso y logros con tus amigos en las redes sociales. </li>
|
15 |
-
<li><strong>Recompensas y bonificaciones diarias:</strong> Puedes ganar monedas y botones adicionales iniciando sesión diariamente, viendo anuncios, girando la rueda o completando tareas. También puede obtener regalos especiales y sorpresas jugando el juego con regularidad. </li>
|
16 |
-
</ul>
|
17 |
-
<h2>Cómo descargar Button Fever gratis? </h2>
|
18 |
-
<p>Button Fever es un juego gratuito que puedes descargar y jugar en tu dispositivo Android. Hay diferentes maneras de descargar Button Fever gratis, dependiendo de su preferencia y conveniencia. Estos son algunos de los métodos que puedes usar:</p>
|
19 |
-
<h3>Descargar Button Fever de Google Play Store</h3>
|
20 |
-
<p>La forma más fácil y común de descargar Button Fever es desde Google Play Store. Solo tienes que seguir estos sencillos pasos:</p>
|
21 |
-
<ol>
|
22 |
-
<li>Abra la aplicación Google Play Store en su dispositivo Android. </li>
|
23 |
-
<li>Buscar "Button Fever" en la barra de búsqueda. </li>
|
24 |
-
<li>Seleccione el juego de la lista de resultados y toque en "Instalar". </li>
|
25 |
-
<li>Espera a que el juego se descargue e instale en tu dispositivo. </li>
|
26 |
-
<li>Iniciar el juego y disfrutar! </li>
|
27 |
-
</ol>
|
28 |
-
<h3>Descargar Button Fever desde BlueStacks Emulator</h3>
|
29 |
-
<p>Si quieres jugar Button Fever en tu PC o Mac, puedes usar un emulador de Android como BlueStacks. BlueStacks es un software que te permite ejecutar aplicaciones y juegos para Android en tu ordenador. Puedes descargar Button Fever de BlueStacks siguiendo estos pasos:</p>
|
30 |
-
<p></p>
|
31 |
-
<ol>
|
32 |
-
<li>Descargue e instale BlueStacks en su PC o Mac desde <a href="">[1](https://www.bluestacks.com/)</a>. </li>
|
33 |
-
<li>Inicie BlueStacks e inicie sesión con su cuenta de Google. </li>
|
34 |
-
<li>Vaya a la pestaña "Mis aplicaciones" y haga clic en el icono "Google Play". </li>
|
35 |
-
<li>Buscar "Button Fever" en la barra de búsqueda. </li>
|
36 |
-
<li>Seleccione el juego de la lista de resultados y haga clic en "Instalar". </li>
|
37 |
-
|
38 |
-
<li>Iniciar el juego y disfrutar! </li>
|
39 |
-
</ol>
|
40 |
-
<h3>Descargar Button Fever desde LDPlayer Emulator</h3>
|
41 |
-
<p>Otro emulador de Android que puedes usar para reproducir Button Fever en tu PC o Mac es LDPlayer. LDPlayer es un emulador ligero y rápido que admite juegos de alto rendimiento. Puedes descargar Button Fever desde LDPlayer siguiendo estos pasos:</p>
|
42 |
-
<ol>
|
43 |
-
<li>Descargue e instale LDPlayer en su PC o Mac desde <a href="">[2](https://www.ldplayer.net/)</a>. </li>
|
44 |
-
<li>Inicie LDPlayer e inicie sesión con su cuenta de Google. </li>
|
45 |
-
<li>Vaya a la pestaña "LD Store" y busque "Button Fever" en la barra de búsqueda. </li>
|
46 |
-
<li>Seleccione el juego de la lista de resultados y haga clic en "Instalar". </li>
|
47 |
-
<li>Espere a que el juego se descargue e instale en LDPlayer.</li>
|
48 |
-
<li>Iniciar el juego y disfrutar! </li>
|
49 |
-
</ol>
|
50 |
-
<h2>¿Cómo disfrutar de Button Fever en tu PC o Mac? </h2>
|
51 |
-
<p>Si quieres tener una mejor experiencia de juego, puedes jugar Button Fever en tu PC o Mac usando un emulador de Android. Jugar Button Fever en su computadora tiene algunos beneficios que usted podría no obtener en su dispositivo móvil. Algunos de estos beneficios son:</p>
|
52 |
-
<h3>Beneficios de jugar Button Fever en PC o Mac</h3>
|
53 |
-
<ul>
|
54 |
-
<li><strong>Pantalla más grande:</strong> Puedes disfrutar de los coloridos gráficos y animaciones de Button Fever en una pantalla más grande y clara. También puede ver más detalles y botones en el teclado sin acercar o alejar. </li>
|
55 |
-
<li><strong>Mejores controles:</strong> Puedes usar el ratón y el teclado para reproducir Button Fever con mayor facilidad y precisión. Puede arrastrar, soltar, deslizar, tocar y combinar botones con más precisión y velocidad. También puede personalizar sus asignaciones de claves según sus preferencias. </li>
|
56 |
-
<li><strong>Rendimiento más rápido:</strong> Puede ejecutar Button Fever de forma más suave y estable en su computadora sin ningún retraso o accidente. También puede ajustar la configuración y la resolución del juego para adaptarse a los requisitos de su sistema. </li>
|
57 |
-
|
58 |
-
</ul>
|
59 |
-
<h3>Pasos para instalar Button Fever en PC o Mac usando BlueStacks Emulator</h3>
|
60 |
-
<p>Si quieres jugar Button Fever en tu PC o Mac usando BlueStacks Emulator, puedes seguir estos pasos:</p> <ol>
|
61 |
-
<li>Descargue e instale BlueStacks en su PC o Mac desde <a href="">[1](https://www.bluestacks.com/)</a>. </li>
|
62 |
-
<li>Inicie BlueStacks e inicie sesión con su cuenta de Google. </li>
|
63 |
-
<li>Vaya a la pestaña "Mis aplicaciones" y haga clic en el icono "Google Play". </li>
|
64 |
-
<li>Buscar "Button Fever" en la barra de búsqueda. </li>
|
65 |
-
<li>Seleccione el juego de la lista de resultados y haga clic en "Instalar". </li>
|
66 |
-
<li>Esperar a que el juego para descargar e instalar en BlueStacks.</li>
|
67 |
-
<li>Iniciar el juego y disfrutar! </li>
|
68 |
-
</ol>
|
69 |
-
<h3>Pasos para instalar Button Fever en PC o Mac usando LDPlayer Emulator</h3>
|
70 |
-
<p>Si quieres jugar Button Fever en tu PC o Mac usando LDPlayer Emulator, puedes seguir estos pasos:</p>
|
71 |
-
<ol>
|
72 |
-
<li>Descargue e instale LDPlayer en su PC o Mac desde <a href="">[2](https://www.ldplayer.net/)</a>. </li>
|
73 |
-
<li>Inicie LDPlayer e inicie sesión con su cuenta de Google. </li>
|
74 |
-
<li>Vaya a la pestaña "LD Store" y busque "Button Fever" en la barra de búsqueda. </li>
|
75 |
-
<li>Seleccione el juego de la lista de resultados y haga clic en "Instalar". </li>
|
76 |
-
<li>Espere a que el juego se descargue e instale en LDPlayer.</li>
|
77 |
-
<li>Iniciar el juego y disfrutar! </li>
|
78 |
-
</ol>
|
79 |
-
<h2>Conclusión</h2>
|
80 |
-
<p>Button Fever es un divertido y adictivo juego de puzzle que puedes jugar gratis en tu dispositivo Android o tu PC o Mac. Puede crear su propio teclado y ganar dinero mediante la colocación y la fusión de botones de diferentes colores y formas. También puede personalizar su teclado con diferentes temas, modos, logros y bonos. Puedes descargar Button Fever desde Google Play Store o desde un emulador de Android como BlueStacks o LDPlayer. Si te gustan los juegos de puzzle que desafían tus habilidades multitarea y creatividad, ¡deberías probar Button Fever hoy! </p>
|
81 |
-
<h3>Resumen de los puntos principales</h3>
|
82 |
-
<ul>
|
83 |
-
|
84 |
-
<li> Puede colocar, combinar, actualizar y recopilar botones para crear su propio teclado y ganar dinero. </li>
|
85 |
-
<li>Puedes jugar Button Fever en diferentes modos, temas, logros y bonificaciones. </li>
|
86 |
-
<li>Puedes descargar Button Fever gratis desde Google Play Store o desde un emulador de Android como BlueStacks o LDPlayer.</li>
|
87 |
-
<li>Puede disfrutar de Button Fever en una pantalla más grande, mejores controles, un rendimiento más rápido y sin pérdida de batería al reproducirlo en su PC o Mac.</li>
|
88 |
-
</ul>
|
89 |
-
<h3>Llamada a la acción</h3>
|
90 |
-
<p>Si estás listo para divertirte con los botones, ¡descarga Button Fever ahora y empieza a jugar! ¡No te arrepentirás! </p>
|
91 |
-
<h4>Preguntas frecuentes</h4>
|
92 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Button Fever:</p>
|
93 |
-
<ol>
|
94 |
-
<li><strong>¿Cuántos botones hay en Button Fever? </strong></li>
|
95 |
-
<p>Hay más de 100 botones de diferentes colores, formas, tamaños y valores en Button Fever. Puedes desbloquear más botones jugando el juego o comprándolos con monedas. </p>
|
96 |
-
<li><strong>¿Cómo puedo obtener más monedas en Button Fever? </strong></li>
|
97 |
-
<p>Puedes obtener más monedas colocando y fusionando botones, tocándolos para recoger monedas, actualizándolos, completando tareas, girando la rueda, viendo anuncios, ingresando diariamente o comprándolos con dinero real. </p>
|
98 |
-
<li><strong>¿Cómo cambio el tema de mi teclado en Button Fever? </strong></li>
|
99 |
-
<p>Puede cambiar el tema de su teclado yendo al menú de configuración y eligiendo entre diferentes opciones, como Neón, Arco iris, Galaxy o Candy. Cada tema tiene su propio fondo, música, efectos de sonido y diseños de botones. </p>
|
100 |
-
<li><strong>¿Cómo juego Button Fever en diferentes modos? </strong></li>
|
101 |
-
<p>Puedes jugar Button Fever en diferentes modos yendo al menú de selección de modo y eligiendo entre diferentes opciones, como Modo Clásico, Modo Tiempo, Modo Desafío o Modo Personalizado. Cada modo tiene sus propias reglas y objetivos que requieren diferentes estrategias y habilidades. </p>
|
102 |
-
|
103 |
-
<p>Puedes disfrutar de Button Fever en una pantalla más grande, mejores controles, rendimiento más rápido y sin pérdida de batería al reproducirlo en tu PC o Mac usando un emulador de Android como BlueStacks o LDPlayer. También puede acceder a más funciones y configuraciones que no están disponibles en dispositivos móviles. </p>
|
104 |
-
</ol></p> 64aa2da5cf<br />
|
105 |
-
<br />
|
106 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tomli/_re.py
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
# SPDX-License-Identifier: MIT
|
2 |
-
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
|
3 |
-
# Licensed to PSF under a Contributor Agreement.
|
4 |
-
|
5 |
-
from __future__ import annotations
|
6 |
-
|
7 |
-
from datetime import date, datetime, time, timedelta, timezone, tzinfo
|
8 |
-
from functools import lru_cache
|
9 |
-
import re
|
10 |
-
from typing import Any
|
11 |
-
|
12 |
-
from ._types import ParseFloat
|
13 |
-
|
14 |
-
# E.g.
|
15 |
-
# - 00:32:00.999999
|
16 |
-
# - 00:32:00
|
17 |
-
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
|
18 |
-
|
19 |
-
RE_NUMBER = re.compile(
|
20 |
-
r"""
|
21 |
-
0
|
22 |
-
(?:
|
23 |
-
x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
|
24 |
-
|
|
25 |
-
b[01](?:_?[01])* # bin
|
26 |
-
|
|
27 |
-
o[0-7](?:_?[0-7])* # oct
|
28 |
-
)
|
29 |
-
|
|
30 |
-
[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
|
31 |
-
(?P<floatpart>
|
32 |
-
(?:\.[0-9](?:_?[0-9])*)? # optional fractional part
|
33 |
-
(?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
|
34 |
-
)
|
35 |
-
""",
|
36 |
-
flags=re.VERBOSE,
|
37 |
-
)
|
38 |
-
RE_LOCALTIME = re.compile(_TIME_RE_STR)
|
39 |
-
RE_DATETIME = re.compile(
|
40 |
-
rf"""
|
41 |
-
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
|
42 |
-
(?:
|
43 |
-
[Tt ]
|
44 |
-
{_TIME_RE_STR}
|
45 |
-
(?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
|
46 |
-
)?
|
47 |
-
""",
|
48 |
-
flags=re.VERBOSE,
|
49 |
-
)
|
50 |
-
|
51 |
-
|
52 |
-
def match_to_datetime(match: re.Match) -> datetime | date:
|
53 |
-
"""Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
|
54 |
-
|
55 |
-
Raises ValueError if the match does not correspond to a valid date
|
56 |
-
or datetime.
|
57 |
-
"""
|
58 |
-
(
|
59 |
-
year_str,
|
60 |
-
month_str,
|
61 |
-
day_str,
|
62 |
-
hour_str,
|
63 |
-
minute_str,
|
64 |
-
sec_str,
|
65 |
-
micros_str,
|
66 |
-
zulu_time,
|
67 |
-
offset_sign_str,
|
68 |
-
offset_hour_str,
|
69 |
-
offset_minute_str,
|
70 |
-
) = match.groups()
|
71 |
-
year, month, day = int(year_str), int(month_str), int(day_str)
|
72 |
-
if hour_str is None:
|
73 |
-
return date(year, month, day)
|
74 |
-
hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
|
75 |
-
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
|
76 |
-
if offset_sign_str:
|
77 |
-
tz: tzinfo | None = cached_tz(
|
78 |
-
offset_hour_str, offset_minute_str, offset_sign_str
|
79 |
-
)
|
80 |
-
elif zulu_time:
|
81 |
-
tz = timezone.utc
|
82 |
-
else: # local date-time
|
83 |
-
tz = None
|
84 |
-
return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
|
85 |
-
|
86 |
-
|
87 |
-
@lru_cache(maxsize=None)
|
88 |
-
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
|
89 |
-
sign = 1 if sign_str == "+" else -1
|
90 |
-
return timezone(
|
91 |
-
timedelta(
|
92 |
-
hours=sign * int(hour_str),
|
93 |
-
minutes=sign * int(minute_str),
|
94 |
-
)
|
95 |
-
)
|
96 |
-
|
97 |
-
|
98 |
-
def match_to_localtime(match: re.Match) -> time:
|
99 |
-
hour_str, minute_str, sec_str, micros_str = match.groups()
|
100 |
-
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
|
101 |
-
return time(int(hour_str), int(minute_str), int(sec_str), micros)
|
102 |
-
|
103 |
-
|
104 |
-
def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
|
105 |
-
if match.group("floatpart"):
|
106 |
-
return parse_float(match.group())
|
107 |
-
return int(match.group(), 0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Boadiwaa/Recipes/openai/_openai_scripts.py
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
import argparse
|
3 |
-
import logging
|
4 |
-
import sys
|
5 |
-
|
6 |
-
import openai
|
7 |
-
from openai.cli import api_register, display_error, tools_register, wandb_register
|
8 |
-
|
9 |
-
logger = logging.getLogger()
|
10 |
-
formatter = logging.Formatter("[%(asctime)s] %(message)s")
|
11 |
-
handler = logging.StreamHandler(sys.stderr)
|
12 |
-
handler.setFormatter(formatter)
|
13 |
-
logger.addHandler(handler)
|
14 |
-
|
15 |
-
|
16 |
-
def main():
|
17 |
-
parser = argparse.ArgumentParser(description=None)
|
18 |
-
parser.add_argument(
|
19 |
-
"-v",
|
20 |
-
"--verbose",
|
21 |
-
action="count",
|
22 |
-
dest="verbosity",
|
23 |
-
default=0,
|
24 |
-
help="Set verbosity.",
|
25 |
-
)
|
26 |
-
parser.add_argument("-b", "--api-base", help="What API base url to use.")
|
27 |
-
parser.add_argument("-k", "--api-key", help="What API key to use.")
|
28 |
-
parser.add_argument(
|
29 |
-
"-o",
|
30 |
-
"--organization",
|
31 |
-
help="Which organization to run as (will use your default organization if not specified)",
|
32 |
-
)
|
33 |
-
|
34 |
-
def help(args):
|
35 |
-
parser.print_help()
|
36 |
-
|
37 |
-
parser.set_defaults(func=help)
|
38 |
-
|
39 |
-
subparsers = parser.add_subparsers()
|
40 |
-
sub_api = subparsers.add_parser("api", help="Direct API calls")
|
41 |
-
sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
|
42 |
-
sub_wandb = subparsers.add_parser("wandb", help="Logging with Weights & Biases")
|
43 |
-
|
44 |
-
api_register(sub_api)
|
45 |
-
tools_register(sub_tools)
|
46 |
-
wandb_register(sub_wandb)
|
47 |
-
|
48 |
-
args = parser.parse_args()
|
49 |
-
if args.verbosity == 1:
|
50 |
-
logger.setLevel(logging.INFO)
|
51 |
-
elif args.verbosity >= 2:
|
52 |
-
logger.setLevel(logging.DEBUG)
|
53 |
-
|
54 |
-
openai.debug = True
|
55 |
-
if args.api_key is not None:
|
56 |
-
openai.api_key = args.api_key
|
57 |
-
if args.api_base is not None:
|
58 |
-
openai.api_base = args.api_base
|
59 |
-
if args.organization is not None:
|
60 |
-
openai.organization = args.organization
|
61 |
-
|
62 |
-
try:
|
63 |
-
args.func(args)
|
64 |
-
except openai.error.OpenAIError as e:
|
65 |
-
display_error(e)
|
66 |
-
return 1
|
67 |
-
except KeyboardInterrupt:
|
68 |
-
sys.stderr.write("\n")
|
69 |
-
return 1
|
70 |
-
return 0
|
71 |
-
|
72 |
-
|
73 |
-
if __name__ == "__main__":
|
74 |
-
sys.exit(main())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_model_zoo.py
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import logging
|
3 |
-
import unittest
|
4 |
-
|
5 |
-
from detectron2 import model_zoo
|
6 |
-
from detectron2.modeling import FPN, GeneralizedRCNN
|
7 |
-
|
8 |
-
logger = logging.getLogger(__name__)
|
9 |
-
|
10 |
-
|
11 |
-
class TestModelZoo(unittest.TestCase):
|
12 |
-
def test_get_returns_model(self):
|
13 |
-
model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False)
|
14 |
-
self.assertIsInstance(model, GeneralizedRCNN)
|
15 |
-
self.assertIsInstance(model.backbone, FPN)
|
16 |
-
|
17 |
-
def test_get_invalid_model(self):
|
18 |
-
self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml")
|
19 |
-
|
20 |
-
def test_get_url(self):
|
21 |
-
url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml")
|
22 |
-
self.assertEqual(
|
23 |
-
url,
|
24 |
-
"https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl", # noqa
|
25 |
-
)
|
26 |
-
|
27 |
-
|
28 |
-
if __name__ == "__main__":
|
29 |
-
unittest.main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/aabb.h
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
#pragma once
|
2 |
-
|
3 |
-
#include "diffvg.h"
|
4 |
-
#include "cuda_utils.h"
|
5 |
-
#include "vector.h"
|
6 |
-
#include "matrix.h"
|
7 |
-
|
8 |
-
struct AABB {
|
9 |
-
DEVICE
|
10 |
-
inline AABB(const Vector2f &p_min = Vector2f{infinity<float>(), infinity<float>()},
|
11 |
-
const Vector2f &p_max = Vector2f{-infinity<float>(), -infinity<float>()})
|
12 |
-
: p_min(p_min), p_max(p_max) {}
|
13 |
-
Vector2f p_min, p_max;
|
14 |
-
};
|
15 |
-
|
16 |
-
DEVICE
|
17 |
-
inline
|
18 |
-
AABB merge(const AABB &box, const Vector2f &p) {
|
19 |
-
return AABB{Vector2f{min(p.x, box.p_min.x), min(p.y, box.p_min.y)},
|
20 |
-
Vector2f{max(p.x, box.p_max.x), max(p.y, box.p_max.y)}};
|
21 |
-
}
|
22 |
-
|
23 |
-
DEVICE
|
24 |
-
inline
|
25 |
-
AABB merge(const AABB &box0, const AABB &box1) {
|
26 |
-
return AABB{Vector2f{min(box0.p_min.x, box1.p_min.x), min(box0.p_min.y, box1.p_min.y)},
|
27 |
-
Vector2f{max(box0.p_max.x, box1.p_max.x), max(box0.p_max.y, box1.p_max.y)}};
|
28 |
-
}
|
29 |
-
|
30 |
-
DEVICE
|
31 |
-
inline
|
32 |
-
bool inside(const AABB &box, const Vector2f &p) {
|
33 |
-
return p.x >= box.p_min.x && p.x <= box.p_max.x &&
|
34 |
-
p.y >= box.p_min.y && p.y <= box.p_max.y;
|
35 |
-
}
|
36 |
-
|
37 |
-
DEVICE
|
38 |
-
inline
|
39 |
-
bool inside(const AABB &box, const Vector2f &p, float radius) {
|
40 |
-
return p.x >= box.p_min.x - radius && p.x <= box.p_max.x + radius &&
|
41 |
-
p.y >= box.p_min.y - radius && p.y <= box.p_max.y + radius;
|
42 |
-
}
|
43 |
-
|
44 |
-
DEVICE
|
45 |
-
inline
|
46 |
-
AABB enlarge(const AABB &box, float width) {
|
47 |
-
return AABB{Vector2f{box.p_min.x - width, box.p_min.y - width},
|
48 |
-
Vector2f{box.p_max.x + width, box.p_max.y + width}};
|
49 |
-
}
|
50 |
-
|
51 |
-
DEVICE
|
52 |
-
inline
|
53 |
-
AABB transform(const Matrix3x3f &xform, const AABB &box) {
|
54 |
-
auto ret = AABB();
|
55 |
-
ret = merge(ret, xform_pt(xform, Vector2f{box.p_min.x, box.p_min.y}));
|
56 |
-
ret = merge(ret, xform_pt(xform, Vector2f{box.p_min.x, box.p_max.y}));
|
57 |
-
ret = merge(ret, xform_pt(xform, Vector2f{box.p_max.x, box.p_min.y}));
|
58 |
-
ret = merge(ret, xform_pt(xform, Vector2f{box.p_max.x, box.p_max.y}));
|
59 |
-
return ret;
|
60 |
-
}
|
61 |
-
|
62 |
-
DEVICE
|
63 |
-
inline
|
64 |
-
bool within_distance(const AABB &box, const Vector2f &pt, float r) {
|
65 |
-
return pt.x >= box.p_min.x - r && pt.x <= box.p_max.x + r &&
|
66 |
-
pt.y >= box.p_min.y - r && pt.y <= box.p_max.y + r;
|
67 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/cmake/ThrustBuildTargetList.cmake
DELETED
@@ -1,283 +0,0 @@
|
|
1 |
-
# This file provides utilities for building and working with thrust
|
2 |
-
# configuration targets.
|
3 |
-
#
|
4 |
-
# THRUST_TARGETS
|
5 |
-
# - Built by the calling the `thrust_build_target_list()` function.
|
6 |
-
# - Each item is the name of a thrust interface target that is configured for a
|
7 |
-
# certain combination of host/device/dialect.
|
8 |
-
#
|
9 |
-
# thrust_build_target_list()
|
10 |
-
# - Creates the THRUST_TARGETS list.
|
11 |
-
#
|
12 |
-
# The following functions can be used to test/set metadata on a thrust target:
|
13 |
-
#
|
14 |
-
# thrust_get_target_property(<prop_var> <target_name> <prop>)
|
15 |
-
# - Checks the ${prop} target property on thrust target ${target_name}
|
16 |
-
# and sets the ${prop_var} variable in the caller's scope.
|
17 |
-
# - <prop_var> is any valid cmake identifier.
|
18 |
-
# - <target_name> is the name of a thrust target.
|
19 |
-
# - <prop> is one of the following:
|
20 |
-
# - HOST: The host system. Valid values: CPP, OMP, TBB.
|
21 |
-
# - DEVICE: The device system. Valid values: CUDA, CPP, OMP, TBB.
|
22 |
-
# - DIALECT: The C++ dialect. Valid values: 11, 14, 17.
|
23 |
-
# - PREFIX: A unique prefix that should be used to name all
|
24 |
-
# targets/tests/examples that use this configuration.
|
25 |
-
#
|
26 |
-
# thrust_get_target_properties(<target_name>)
|
27 |
-
# - Defines ${target_name}_${prop} in the caller's scope, for `prop` in:
|
28 |
-
# HOST, DEVICE, DIALECT, PREFIX. See above for details.
|
29 |
-
#
|
30 |
-
# thrust_clone_target_properties(<dst_target> <src_target>)
|
31 |
-
# - Set the HOST, DEVICE, DIALECT, PREFIX metadata on ${dst_target} to match
|
32 |
-
# ${src_target}. See above for details.
|
33 |
-
# - This *MUST* be called on any targets that link to another thrust target
|
34 |
-
# to ensure that dialect information is updated correctly, e.g.
|
35 |
-
# `thrust_clone_target_properties(${my_thrust_test} ${some_thrust_target})`
|
36 |
-
|
37 |
-
define_property(TARGET PROPERTY _THRUST_HOST
|
38 |
-
BRIEF_DOCS "A target's host system: CPP, TBB, or OMP."
|
39 |
-
FULL_DOCS "A target's host system: CPP, TBB, or OMP."
|
40 |
-
)
|
41 |
-
define_property(TARGET PROPERTY _THRUST_DEVICE
|
42 |
-
BRIEF_DOCS "A target's device system: CUDA, CPP, TBB, or OMP."
|
43 |
-
FULL_DOCS "A target's device system: CUDA, CPP, TBB, or OMP."
|
44 |
-
)
|
45 |
-
define_property(TARGET PROPERTY _THRUST_DIALECT
|
46 |
-
BRIEF_DOCS "A target's C++ dialect: 11, 14, or 17."
|
47 |
-
FULL_DOCS "A target's C++ dialect: 11, 14, or 17."
|
48 |
-
)
|
49 |
-
define_property(TARGET PROPERTY _THRUST_PREFIX
|
50 |
-
BRIEF_DOCS "A prefix describing the config, eg. 'thrust.cpp.cuda.cpp14'."
|
51 |
-
FULL_DOCS "A prefix describing the config, eg. 'thrust.cpp.cuda.cpp14'."
|
52 |
-
)
|
53 |
-
|
54 |
-
function(thrust_set_target_properties target_name host device dialect prefix)
|
55 |
-
set_target_properties(${target_name}
|
56 |
-
PROPERTIES
|
57 |
-
_THRUST_HOST ${host}
|
58 |
-
_THRUST_DEVICE ${device}
|
59 |
-
_THRUST_DIALECT ${dialect}
|
60 |
-
_THRUST_PREFIX ${prefix}
|
61 |
-
)
|
62 |
-
|
63 |
-
get_target_property(type ${target_name} TYPE)
|
64 |
-
if (NOT ${type} STREQUAL "INTERFACE_LIBRARY")
|
65 |
-
set_target_properties(${target_name}
|
66 |
-
PROPERTIES
|
67 |
-
CXX_STANDARD ${dialect}
|
68 |
-
CUDA_STANDARD ${dialect}
|
69 |
-
# Must manually request that the standards above are actually respected
|
70 |
-
# or else CMake will silently fail to configure the targets correctly...
|
71 |
-
# Note that this doesn't actually work as of CMake 3.16:
|
72 |
-
# https://gitlab.kitware.com/cmake/cmake/-/issues/20953
|
73 |
-
# We'll leave these properties enabled in hopes that they will someday
|
74 |
-
# work.
|
75 |
-
CXX_STANDARD_REQUIRED ON
|
76 |
-
CUDA_STANDARD_REQUIRED ON
|
77 |
-
ARCHIVE_OUTPUT_DIRECTORY "${THRUST_LIBRARY_OUTPUT_DIR}"
|
78 |
-
LIBRARY_OUTPUT_DIRECTORY "${THRUST_LIBRARY_OUTPUT_DIR}"
|
79 |
-
RUNTIME_OUTPUT_DIRECTORY "${THRUST_EXECUTABLE_OUTPUT_DIR}"
|
80 |
-
)
|
81 |
-
|
82 |
-
# CMake still emits errors about empty CUDA_ARCHITECTURES when CMP0104
|
83 |
-
# is set to OLD. This suppresses the errors for good.
|
84 |
-
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.18)
|
85 |
-
set_target_properties(${target_name}
|
86 |
-
PROPERTIES
|
87 |
-
CUDA_ARCHITECTURES OFF
|
88 |
-
)
|
89 |
-
endif()
|
90 |
-
|
91 |
-
if ("CUDA" STREQUAL "${device}" AND
|
92 |
-
"Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
|
93 |
-
set_target_properties(${target_name} PROPERTIES
|
94 |
-
CUDA_RESOLVE_DEVICE_SYMBOLS OFF
|
95 |
-
)
|
96 |
-
endif()
|
97 |
-
endif()
|
98 |
-
endfunction()
|
99 |
-
|
100 |
-
# Get a thrust property from a target and store it in var_name
|
101 |
-
# thrust_get_target_property(<var_name> <target_name> [HOST|DEVICE|DIALECT|PREFIX]
|
102 |
-
macro(thrust_get_target_property prop_var target_name prop)
|
103 |
-
get_property(${prop_var} TARGET ${target_name} PROPERTY _THRUST_${prop})
|
104 |
-
endmacro()
|
105 |
-
|
106 |
-
# Defines the following string variables in the caller's scope:
|
107 |
-
# - ${target_name}_HOST
|
108 |
-
# - ${target_name}_DEVICE
|
109 |
-
# - ${target_name}_DIALECT
|
110 |
-
# - ${target_name}_PREFIX
|
111 |
-
macro(thrust_get_target_properties target_name)
|
112 |
-
thrust_get_target_property(${target_name}_HOST ${target_name} HOST)
|
113 |
-
thrust_get_target_property(${target_name}_DEVICE ${target_name} DEVICE)
|
114 |
-
thrust_get_target_property(${target_name}_DIALECT ${target_name} DIALECT)
|
115 |
-
thrust_get_target_property(${target_name}_PREFIX ${target_name} PREFIX)
|
116 |
-
endmacro()
|
117 |
-
|
118 |
-
# Set one target's THRUST_* properties to match another target
|
119 |
-
function(thrust_clone_target_properties dst_target src_target)
|
120 |
-
thrust_get_target_properties(${src_target})
|
121 |
-
thrust_set_target_properties(${dst_target}
|
122 |
-
${${src_target}_HOST}
|
123 |
-
${${src_target}_DEVICE}
|
124 |
-
${${src_target}_DIALECT}
|
125 |
-
${${src_target}_PREFIX}
|
126 |
-
)
|
127 |
-
endfunction()
|
128 |
-
|
129 |
-
# Set ${var_name} to TRUE or FALSE in the caller's scope
|
130 |
-
function(_thrust_is_config_valid var_name host device dialect)
|
131 |
-
if (THRUST_MULTICONFIG_ENABLE_SYSTEM_${host} AND
|
132 |
-
THRUST_MULTICONFIG_ENABLE_SYSTEM_${device} AND
|
133 |
-
THRUST_MULTICONFIG_ENABLE_DIALECT_CPP${dialect} AND
|
134 |
-
"${host}_${device}" IN_LIST THRUST_MULTICONFIG_WORKLOAD_${THRUST_MULTICONFIG_WORKLOAD}_CONFIGS)
|
135 |
-
set(${var_name} TRUE PARENT_SCOPE)
|
136 |
-
else()
|
137 |
-
set(${var_name} FALSE PARENT_SCOPE)
|
138 |
-
endif()
|
139 |
-
endfunction()
|
140 |
-
|
141 |
-
function(_thrust_init_target_list)
|
142 |
-
set(THRUST_TARGETS "" CACHE INTERNAL "" FORCE)
|
143 |
-
endfunction()
|
144 |
-
|
145 |
-
function(_thrust_add_target_to_target_list target_name host device dialect prefix)
|
146 |
-
thrust_set_target_properties(${target_name} ${host} ${device} ${dialect} ${prefix})
|
147 |
-
|
148 |
-
target_link_libraries(${target_name} INTERFACE
|
149 |
-
thrust.compiler_interface
|
150 |
-
)
|
151 |
-
|
152 |
-
# Workaround Github issue #1174. cudafe promote TBB header warnings to
|
153 |
-
# errors, even when they're -isystem includes.
|
154 |
-
if ((NOT host STREQUAL "TBB") OR (NOT device STREQUAL "CUDA"))
|
155 |
-
target_link_libraries(${target_name} INTERFACE
|
156 |
-
thrust.promote_cudafe_warnings
|
157 |
-
)
|
158 |
-
endif()
|
159 |
-
|
160 |
-
set(THRUST_TARGETS ${THRUST_TARGETS} ${target_name} CACHE INTERNAL "" FORCE)
|
161 |
-
|
162 |
-
set(label "${host}.${device}.cpp${dialect}")
|
163 |
-
string(TOLOWER "${label}" label)
|
164 |
-
message(STATUS "Enabling Thrust configuration: ${label}")
|
165 |
-
endfunction()
|
166 |
-
|
167 |
-
function(_thrust_build_target_list_multiconfig)
|
168 |
-
# Find thrust and all of the required systems:
|
169 |
-
set(req_systems)
|
170 |
-
if (THRUST_MULTICONFIG_ENABLE_SYSTEM_CUDA)
|
171 |
-
list(APPEND req_systems CUDA)
|
172 |
-
endif()
|
173 |
-
if (THRUST_MULTICONFIG_ENABLE_SYSTEM_CPP)
|
174 |
-
list(APPEND req_systems CPP)
|
175 |
-
endif()
|
176 |
-
if (THRUST_MULTICONFIG_ENABLE_SYSTEM_TBB)
|
177 |
-
list(APPEND req_systems TBB)
|
178 |
-
endif()
|
179 |
-
if (THRUST_MULTICONFIG_ENABLE_SYSTEM_OMP)
|
180 |
-
list(APPEND req_systems OMP)
|
181 |
-
endif()
|
182 |
-
|
183 |
-
find_package(Thrust REQUIRED CONFIG
|
184 |
-
NO_DEFAULT_PATH # Only check the explicit path in HINTS:
|
185 |
-
HINTS "${Thrust_SOURCE_DIR}"
|
186 |
-
COMPONENTS ${req_systems}
|
187 |
-
)
|
188 |
-
|
189 |
-
# This must be called after backends are loaded but
|
190 |
-
# before _thrust_add_target_to_target_list.
|
191 |
-
thrust_build_compiler_targets()
|
192 |
-
|
193 |
-
# Build THRUST_TARGETS
|
194 |
-
foreach(host IN LISTS THRUST_HOST_SYSTEM_OPTIONS)
|
195 |
-
foreach(device IN LISTS THRUST_DEVICE_SYSTEM_OPTIONS)
|
196 |
-
foreach(dialect IN LISTS THRUST_CPP_DIALECT_OPTIONS)
|
197 |
-
_thrust_is_config_valid(config_valid ${host} ${device} ${dialect})
|
198 |
-
if (config_valid)
|
199 |
-
set(prefix "thrust.${host}.${device}.cpp${dialect}")
|
200 |
-
string(TOLOWER "${prefix}" prefix)
|
201 |
-
|
202 |
-
# Configure a thrust interface target for this host/device
|
203 |
-
set(target_name "${prefix}")
|
204 |
-
thrust_create_target(${target_name}
|
205 |
-
HOST ${host}
|
206 |
-
DEVICE ${device}
|
207 |
-
${THRUST_TARGET_FLAGS}
|
208 |
-
)
|
209 |
-
|
210 |
-
# Set configuration metadata for this thrust interface target:
|
211 |
-
_thrust_add_target_to_target_list(${target_name}
|
212 |
-
${host} ${device} ${dialect} ${prefix}
|
213 |
-
)
|
214 |
-
|
215 |
-
# Create a meta target for all targets in this configuration:
|
216 |
-
add_custom_target(${prefix}.all)
|
217 |
-
add_dependencies(thrust.all ${prefix}.all)
|
218 |
-
endif()
|
219 |
-
endforeach() # dialects
|
220 |
-
endforeach() # devices
|
221 |
-
endforeach() # hosts
|
222 |
-
|
223 |
-
list(LENGTH THRUST_TARGETS count)
|
224 |
-
message(STATUS "${count} unique thrust.host.device.dialect configurations generated")
|
225 |
-
endfunction()
|
226 |
-
|
227 |
-
function(_thrust_build_target_list_singleconfig)
|
228 |
-
find_package(Thrust REQUIRED CONFIG
|
229 |
-
NO_DEFAULT_PATH # Only check the explicit path in HINTS:
|
230 |
-
HINTS "${Thrust_SOURCE_DIR}"
|
231 |
-
)
|
232 |
-
thrust_create_target(thrust FROM_OPTIONS ${THRUST_TARGET_FLAGS})
|
233 |
-
thrust_debug_target(thrust "${THRUST_VERSION}")
|
234 |
-
|
235 |
-
set(host ${THRUST_HOST_SYSTEM})
|
236 |
-
set(device ${THRUST_DEVICE_SYSTEM})
|
237 |
-
set(dialect ${THRUST_CPP_DIALECT})
|
238 |
-
set(prefix "thrust") # single config
|
239 |
-
|
240 |
-
# This depends on the backends loaded by thrust_create_target, and must
|
241 |
-
# be called before _thrust_add_target_to_target_list.
|
242 |
-
thrust_build_compiler_targets()
|
243 |
-
|
244 |
-
_thrust_add_target_to_target_list(thrust ${host} ${device} ${dialect} ${prefix})
|
245 |
-
endfunction()
|
246 |
-
|
247 |
-
# Build a ${THRUST_TARGETS} list containing target names for all
|
248 |
-
# requested configurations
|
249 |
-
function(thrust_build_target_list)
|
250 |
-
# Clear the list of targets:
|
251 |
-
_thrust_init_target_list()
|
252 |
-
|
253 |
-
# Generic config flags:
|
254 |
-
set(THRUST_TARGET_FLAGS)
|
255 |
-
macro(add_flag_option flag docstring default)
|
256 |
-
set(opt "THRUST_${flag}")
|
257 |
-
option(${opt} "${docstring}" "${default}")
|
258 |
-
mark_as_advanced(${opt})
|
259 |
-
if (${${opt}})
|
260 |
-
list(APPEND THRUST_TARGET_FLAGS ${flag})
|
261 |
-
endif()
|
262 |
-
endmacro()
|
263 |
-
add_flag_option(IGNORE_DEPRECATED_CPP_DIALECT "Don't warn about any deprecated C++ standards and compilers." OFF)
|
264 |
-
add_flag_option(IGNORE_DEPRECATED_CPP_11 "Don't warn about deprecated C++11." OFF)
|
265 |
-
add_flag_option(IGNORE_DEPRECATED_COMPILER "Don't warn about deprecated compilers." OFF)
|
266 |
-
add_flag_option(IGNORE_CUB_VERSION_CHECK "Don't warn about mismatched CUB versions." OFF)
|
267 |
-
|
268 |
-
# Top level meta-target. Makes it easier to just build thrust targets when
|
269 |
-
# building both CUB and Thrust. Add all project files here so IDEs will be
|
270 |
-
# aware of them. This will not generate build rules.
|
271 |
-
file(GLOB_RECURSE all_sources
|
272 |
-
RELATIVE "${CMAKE_CURRENT_LIST_DIR}"
|
273 |
-
"${Thrust_SOURCE_DIR}/thrust/*.h"
|
274 |
-
"${Thrust_SOURCE_DIR}/thrust/*.inl"
|
275 |
-
)
|
276 |
-
add_custom_target(thrust.all SOURCES ${all_sources})
|
277 |
-
|
278 |
-
if (THRUST_ENABLE_MULTICONFIG)
|
279 |
-
_thrust_build_target_list_multiconfig()
|
280 |
-
else()
|
281 |
-
_thrust_build_target_list_singleconfig()
|
282 |
-
endif()
|
283 |
-
endfunction()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubBuildTargetList.cmake
DELETED
@@ -1,261 +0,0 @@
|
|
1 |
-
# This file provides utilities for building and working with CUB
|
2 |
-
# configuration targets.
|
3 |
-
#
|
4 |
-
# CUB_TARGETS
|
5 |
-
# - Built by the calling the `cub_build_target_list()` function.
|
6 |
-
# - Each item is the name of a CUB interface target that is configured for a
|
7 |
-
# certain build configuration. Currently only C++ standard dialect is
|
8 |
-
# considered.
|
9 |
-
#
|
10 |
-
# cub_build_target_list()
|
11 |
-
# - Creates the CUB_TARGETS list.
|
12 |
-
#
|
13 |
-
# The following functions can be used to test/set metadata on a CUB target:
|
14 |
-
#
|
15 |
-
# cub_get_target_property(<prop_var> <target_name> <prop>)
|
16 |
-
# - Checks the ${prop} target property on CUB target ${target_name}
|
17 |
-
# and sets the ${prop_var} variable in the caller's scope.
|
18 |
-
# - <prop_var> is any valid cmake identifier.
|
19 |
-
# - <target_name> is the name of a CUB target.
|
20 |
-
# - <prop> is one of the following:
|
21 |
-
# - DIALECT: The C++ dialect. Valid values: 11, 14, 17.
|
22 |
-
# - PREFIX: A unique prefix that should be used to name all
|
23 |
-
# targets/tests/examples that use this configuration.
|
24 |
-
#
|
25 |
-
# cub_get_target_properties(<target_name>)
|
26 |
-
# - Defines ${target_name}_${prop} in the caller's scope, for `prop` in:
|
27 |
-
# {DIALECT, PREFIX}. See above for details.
|
28 |
-
#
|
29 |
-
# cub_clone_target_properties(<dst_target> <src_target>)
|
30 |
-
# - Set the {DIALECT, PREFIX} metadata on ${dst_target} to match
|
31 |
-
# ${src_target}. See above for details.
|
32 |
-
# - This *MUST* be called on any targets that link to another CUB target
|
33 |
-
# to ensure that dialect information is updated correctly, e.g.
|
34 |
-
# `cub_clone_target_properties(${my_cub_test} ${some_cub_target})`
|
35 |
-
|
36 |
-
# Dialects:
|
37 |
-
set(CUB_CPP_DIALECT_OPTIONS
|
38 |
-
11 14 17
|
39 |
-
CACHE INTERNAL "C++ dialects supported by CUB." FORCE
|
40 |
-
)
|
41 |
-
|
42 |
-
define_property(TARGET PROPERTY _CUB_DIALECT
|
43 |
-
BRIEF_DOCS "A target's C++ dialect: 11, 14, or 17."
|
44 |
-
FULL_DOCS "A target's C++ dialect: 11, 14, or 17."
|
45 |
-
)
|
46 |
-
define_property(TARGET PROPERTY _CUB_PREFIX
|
47 |
-
BRIEF_DOCS "A prefix describing the config, eg. 'cub.cpp14'."
|
48 |
-
FULL_DOCS "A prefix describing the config, eg. 'cub.cpp14'."
|
49 |
-
)
|
50 |
-
|
51 |
-
function(cub_set_target_properties target_name dialect prefix)
|
52 |
-
set_target_properties(${target_name}
|
53 |
-
PROPERTIES
|
54 |
-
_CUB_DIALECT ${dialect}
|
55 |
-
_CUB_PREFIX ${prefix}
|
56 |
-
)
|
57 |
-
|
58 |
-
get_target_property(type ${target_name} TYPE)
|
59 |
-
if (NOT ${type} STREQUAL "INTERFACE_LIBRARY")
|
60 |
-
set_target_properties(${target_name}
|
61 |
-
PROPERTIES
|
62 |
-
CXX_STANDARD ${dialect}
|
63 |
-
CUDA_STANDARD ${dialect}
|
64 |
-
ARCHIVE_OUTPUT_DIRECTORY "${CUB_LIBRARY_OUTPUT_DIR}"
|
65 |
-
LIBRARY_OUTPUT_DIRECTORY "${CUB_LIBRARY_OUTPUT_DIR}"
|
66 |
-
RUNTIME_OUTPUT_DIRECTORY "${CUB_EXECUTABLE_OUTPUT_DIR}"
|
67 |
-
)
|
68 |
-
|
69 |
-
# CMake still emits errors about empty CUDA_ARCHITECTURES when CMP0104
|
70 |
-
# is set to OLD. This suppresses the errors for good.
|
71 |
-
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.18)
|
72 |
-
set_target_properties(${target_name}
|
73 |
-
PROPERTIES
|
74 |
-
CUDA_ARCHITECTURES OFF
|
75 |
-
)
|
76 |
-
endif()
|
77 |
-
endif()
|
78 |
-
endfunction()
|
79 |
-
|
80 |
-
# Get a cub property from a target and store it in var_name
|
81 |
-
# cub_get_target_property(<var_name> <target_name> [DIALECT|PREFIX]
|
82 |
-
macro(cub_get_target_property prop_var target_name prop)
|
83 |
-
get_property(${prop_var} TARGET ${target_name} PROPERTY _CUB_${prop})
|
84 |
-
endmacro()
|
85 |
-
|
86 |
-
# Defines the following string variables in the caller's scope:
|
87 |
-
# - ${target_name}_DIALECT
|
88 |
-
# - ${target_name}_PREFIX
|
89 |
-
macro(cub_get_target_properties target_name)
|
90 |
-
cub_get_target_property(${target_name}_DIALECT ${target_name} DIALECT)
|
91 |
-
cub_get_target_property(${target_name}_PREFIX ${target_name} PREFIX)
|
92 |
-
endmacro()
|
93 |
-
|
94 |
-
# Set one target's _CUB_* properties to match another target
|
95 |
-
function(cub_clone_target_properties dst_target src_target)
|
96 |
-
cub_get_target_properties(${src_target})
|
97 |
-
cub_set_target_properties(${dst_target}
|
98 |
-
${${src_target}_DIALECT}
|
99 |
-
${${src_target}_PREFIX}
|
100 |
-
)
|
101 |
-
endfunction()
|
102 |
-
|
103 |
-
# Set ${var_name} to TRUE or FALSE in the caller's scope
|
104 |
-
function(_cub_is_config_valid var_name dialect)
|
105 |
-
if (CUB_ENABLE_DIALECT_CPP${dialect})
|
106 |
-
set(${var_name} TRUE PARENT_SCOPE)
|
107 |
-
else()
|
108 |
-
set(${var_name} FALSE PARENT_SCOPE)
|
109 |
-
endif()
|
110 |
-
endfunction()
|
111 |
-
|
112 |
-
function(_cub_init_target_list)
|
113 |
-
set(CUB_TARGETS "" CACHE INTERNAL "" FORCE)
|
114 |
-
endfunction()
|
115 |
-
|
116 |
-
function(_cub_add_target_to_target_list target_name dialect prefix)
|
117 |
-
cub_set_target_properties(${target_name} ${dialect} ${prefix})
|
118 |
-
|
119 |
-
target_link_libraries(${target_name} INTERFACE
|
120 |
-
CUB::CUB
|
121 |
-
cub.compiler_interface
|
122 |
-
)
|
123 |
-
|
124 |
-
if (TARGET cub.thrust)
|
125 |
-
target_link_libraries(${target_name} INTERFACE cub.thrust)
|
126 |
-
endif()
|
127 |
-
|
128 |
-
set(CUB_TARGETS ${CUB_TARGETS} ${target_name} CACHE INTERNAL "" FORCE)
|
129 |
-
|
130 |
-
set(label "cpp${dialect}")
|
131 |
-
string(TOLOWER "${label}" label)
|
132 |
-
message(STATUS "Enabling CUB configuration: ${label}")
|
133 |
-
endfunction()
|
134 |
-
|
135 |
-
# Build a ${CUB_TARGETS} list containing target names for all
|
136 |
-
# requested configurations
|
137 |
-
function(cub_build_target_list)
|
138 |
-
# Clear the list of targets:
|
139 |
-
_cub_init_target_list()
|
140 |
-
|
141 |
-
# Handle dialect options:
|
142 |
-
foreach (dialect IN LISTS CUB_CPP_DIALECT_OPTIONS)
|
143 |
-
if (CUB_IN_THRUST)
|
144 |
-
# Just use Thrust's settings:
|
145 |
-
if (THRUST_ENABLE_MULTICONFIG)
|
146 |
-
set(CUB_ENABLE_DIALECT_CPP${dialect}
|
147 |
-
${THRUST_MULTICONFIG_ENABLE_DIALECT_CPP${dialect}}
|
148 |
-
)
|
149 |
-
else()
|
150 |
-
set(val OFF)
|
151 |
-
if (dialect EQUAL ${THRUST_CPP_DIALECT})
|
152 |
-
set(val ON)
|
153 |
-
endif()
|
154 |
-
set(CUB_ENABLE_DIALECT_CPP${dialect} ${val})
|
155 |
-
endif()
|
156 |
-
else()
|
157 |
-
# Create CMake options:
|
158 |
-
set(default_value OFF)
|
159 |
-
if (dialect EQUAL 14) # Default to just 14 on:
|
160 |
-
set(default_value ON)
|
161 |
-
endif()
|
162 |
-
option(CUB_ENABLE_DIALECT_CPP${dialect}
|
163 |
-
"Generate C++${dialect} build configurations."
|
164 |
-
${default_value}
|
165 |
-
)
|
166 |
-
endif()
|
167 |
-
endforeach()
|
168 |
-
|
169 |
-
# CMake added C++17 support for CUDA targets in 3.18:
|
170 |
-
if (CUB_ENABLE_DIALECT_CPP17)
|
171 |
-
cmake_minimum_required(VERSION 3.18)
|
172 |
-
endif()
|
173 |
-
|
174 |
-
# Supported versions of MSVC do not distinguish between C++11 and C++14.
|
175 |
-
# Warn the user that they may be generating a ton of redundant targets.
|
176 |
-
if ("MSVC" STREQUAL "${CMAKE_CXX_COMPILER_ID}" AND
|
177 |
-
CUB_ENABLE_DIALECT_CPP11)
|
178 |
-
message(WARNING
|
179 |
-
"Supported versions of MSVC (2017+) do not distinguish between C++11 "
|
180 |
-
"and C++14. The requested C++11 targets will be built with C++14."
|
181 |
-
)
|
182 |
-
endif()
|
183 |
-
|
184 |
-
# Generic config flags:
|
185 |
-
macro(add_flag_option flag docstring default)
|
186 |
-
set(cub_opt "CUB_${flag}")
|
187 |
-
if (CUB_IN_THRUST)
|
188 |
-
set(thrust_opt "THRUST_${flag}")
|
189 |
-
# Use thrust's settings:
|
190 |
-
set(${cub_opt} ${${thrust_opt}})
|
191 |
-
else()
|
192 |
-
option(${cub_opt} "${docstring}" "${default}")
|
193 |
-
mark_as_advanced(${cub_opt})
|
194 |
-
endif()
|
195 |
-
endmacro()
|
196 |
-
add_flag_option(IGNORE_DEPRECATED_CPP_DIALECT "Don't warn about any deprecated C++ standards and compilers." OFF)
|
197 |
-
add_flag_option(IGNORE_DEPRECATED_CPP_11 "Don't warn about deprecated C++11." OFF)
|
198 |
-
add_flag_option(IGNORE_DEPRECATED_COMPILER "Don't warn about deprecated compilers." OFF)
|
199 |
-
|
200 |
-
# Build cub.compiler_interface with warning flags, etc
|
201 |
-
# This must be called before _cub_add_target_to_target_list.
|
202 |
-
cub_build_compiler_targets()
|
203 |
-
|
204 |
-
# Set up the CUB target while testing out our find_package scripts.
|
205 |
-
find_package(CUB REQUIRED CONFIG
|
206 |
-
NO_DEFAULT_PATH # Only check the explicit path in HINTS:
|
207 |
-
HINTS "${CUB_SOURCE_DIR}"
|
208 |
-
)
|
209 |
-
|
210 |
-
# TODO
|
211 |
-
# Some of the iterators and unittests depend on thrust. We should break the
|
212 |
-
# cyclical dependency by migrating CUB's Thrust bits into Thrust.
|
213 |
-
find_package(Thrust ${CUB_VERSION} EXACT CONFIG
|
214 |
-
HINTS "../../" # Check if we are in thrust/dependencies/cub
|
215 |
-
)
|
216 |
-
|
217 |
-
if (Thrust_FOUND)
|
218 |
-
thrust_set_CUB_target(CUB::CUB)
|
219 |
-
thrust_create_target(cub.thrust HOST CPP DEVICE CUDA)
|
220 |
-
else()
|
221 |
-
message(STATUS
|
222 |
-
"Thrust was not found. Set CMake variable 'Thrust_DIR' to the "
|
223 |
-
"thrust-config.cmake file of a Thrust ${CUB_VERSION} installation to "
|
224 |
-
"enable additional testing."
|
225 |
-
)
|
226 |
-
endif()
|
227 |
-
|
228 |
-
# Build CUB_TARGETS
|
229 |
-
foreach(dialect IN LISTS CUB_CPP_DIALECT_OPTIONS)
|
230 |
-
_cub_is_config_valid(config_valid ${dialect})
|
231 |
-
if (config_valid)
|
232 |
-
set(prefix "cub.cpp${dialect}")
|
233 |
-
string(TOLOWER "${prefix}" prefix)
|
234 |
-
set(target_name "${prefix}")
|
235 |
-
|
236 |
-
add_library(${target_name} INTERFACE)
|
237 |
-
|
238 |
-
# Set configuration metadata for this cub interface target:
|
239 |
-
_cub_add_target_to_target_list(${target_name} ${dialect} ${prefix})
|
240 |
-
endif()
|
241 |
-
endforeach() # dialects
|
242 |
-
|
243 |
-
list(LENGTH CUB_TARGETS count)
|
244 |
-
message(STATUS "${count} unique cub.dialect configurations generated")
|
245 |
-
|
246 |
-
# Top level meta-target. Makes it easier to just build CUB targets when
|
247 |
-
# building both CUB and Thrust. Add all project files here so IDEs will be
|
248 |
-
# aware of them. This will not generate build rules.
|
249 |
-
file(GLOB_RECURSE all_sources
|
250 |
-
RELATIVE "${CMAKE_CURRENT_LIST_DIR}"
|
251 |
-
"${CUB_SOURCE_DIR}/cub/*.cuh"
|
252 |
-
)
|
253 |
-
add_custom_target(cub.all SOURCES ${all_sources})
|
254 |
-
|
255 |
-
# Create meta targets for each config:
|
256 |
-
foreach(cub_target IN LISTS CUB_TARGETS)
|
257 |
-
cub_get_target_property(config_prefix ${cub_target} PREFIX)
|
258 |
-
add_custom_target(${config_prefix}.all)
|
259 |
-
add_dependencies(cub.all ${config_prefix}.all)
|
260 |
-
endforeach()
|
261 |
-
endfunction()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/dependencies/cub/test/mersenne.h
DELETED
@@ -1,162 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
A C-program for MT19937, with initialization improved 2002/1/26.
|
3 |
-
Coded by Takuji Nishimura and Makoto Matsumoto.
|
4 |
-
|
5 |
-
Before using, initialize the state by using init_genrand(seed)
|
6 |
-
or init_by_array(init_key, key_length).
|
7 |
-
|
8 |
-
Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
|
9 |
-
All rights reserved.
|
10 |
-
|
11 |
-
Redistribution and use in source and binary forms, with or without
|
12 |
-
modification, are permitted provided that the following conditions
|
13 |
-
are met:
|
14 |
-
|
15 |
-
1. Redistributions of source code must retain the above copyright
|
16 |
-
notice, this list of conditions and the following disclaimer.
|
17 |
-
|
18 |
-
2. Redistributions in binary form must reproduce the above copyright
|
19 |
-
notice, this list of conditions and the following disclaimer in the
|
20 |
-
documentation and/or other materials provided with the distribution.
|
21 |
-
|
22 |
-
3. The names of its contributors may not be used to endorse or promote
|
23 |
-
products derived from this software without specific prior written
|
24 |
-
permission.
|
25 |
-
|
26 |
-
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
27 |
-
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
28 |
-
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
29 |
-
A PARTICAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
30 |
-
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
31 |
-
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
32 |
-
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
33 |
-
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
34 |
-
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
35 |
-
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
36 |
-
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
37 |
-
|
38 |
-
|
39 |
-
Any feedback is very welcome.
|
40 |
-
http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
|
41 |
-
email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
|
42 |
-
*/
|
43 |
-
|
44 |
-
#include <stdio.h>
|
45 |
-
|
46 |
-
namespace mersenne {
|
47 |
-
|
48 |
-
/* Period parameters */
|
49 |
-
const unsigned int N = 624;
|
50 |
-
const unsigned int M = 397;
|
51 |
-
const unsigned int MATRIX_A = 0x9908b0df; /* constant vector a */
|
52 |
-
const unsigned int UPPER_MASK = 0x80000000; /* most significant w-r bits */
|
53 |
-
const unsigned int LOWER_MASK = 0x7fffffff; /* least significant r bits */
|
54 |
-
|
55 |
-
static unsigned int mt[N]; /* the array for the state vector */
|
56 |
-
static int mti = N + 1; /* mti==N+1 means mt[N] is not initialized */
|
57 |
-
|
58 |
-
/* initializes mt[N] with a seed */
|
59 |
-
void init_genrand(unsigned int s)
|
60 |
-
{
|
61 |
-
mt[0] = s & 0xffffffff;
|
62 |
-
for (mti = 1; mti < static_cast<int>(N); mti++)
|
63 |
-
{
|
64 |
-
mt[mti] = (1812433253 * (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti);
|
65 |
-
|
66 |
-
/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for mtiplier. */
|
67 |
-
/* In the previous versions, MSBs of the seed affect */
|
68 |
-
/* only MSBs of the array mt[]. */
|
69 |
-
/* 2002/01/09 modified by Makoto Matsumoto */
|
70 |
-
|
71 |
-
mt[mti] &= 0xffffffff;
|
72 |
-
/* for >32 bit machines */
|
73 |
-
}
|
74 |
-
}
|
75 |
-
|
76 |
-
/* initialize by an array with array-length */
|
77 |
-
/* init_key is the array for initializing keys */
|
78 |
-
/* key_length is its length */
|
79 |
-
/* slight change for C++, 2004/2/26 */
|
80 |
-
void init_by_array(unsigned int init_key[], int key_length)
|
81 |
-
{
|
82 |
-
int i, j, k;
|
83 |
-
init_genrand(19650218);
|
84 |
-
i = 1;
|
85 |
-
j = 0;
|
86 |
-
k = (static_cast<int>(N) > key_length
|
87 |
-
? static_cast<int>(N)
|
88 |
-
: key_length);
|
89 |
-
for (; k; k--)
|
90 |
-
{
|
91 |
-
mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525))
|
92 |
-
+ init_key[j] + j; /* non linear */
|
93 |
-
mt[i] &= 0xffffffff; /* for WORDSIZE > 32 machines */
|
94 |
-
i++;
|
95 |
-
j++;
|
96 |
-
if (i >= static_cast<int>(N))
|
97 |
-
{
|
98 |
-
mt[0] = mt[N - 1];
|
99 |
-
i = 1;
|
100 |
-
}
|
101 |
-
if (j >= key_length) j = 0;
|
102 |
-
}
|
103 |
-
for (k = N - 1; k; k--)
|
104 |
-
{
|
105 |
-
mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1566083941)) - i; /* non linear */
|
106 |
-
mt[i] &= 0xffffffff; /* for WORDSIZE > 32 machines */
|
107 |
-
i++;
|
108 |
-
if (i >= static_cast<int>(N))
|
109 |
-
{
|
110 |
-
mt[0] = mt[N - 1];
|
111 |
-
i = 1;
|
112 |
-
}
|
113 |
-
}
|
114 |
-
|
115 |
-
mt[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */
|
116 |
-
}
|
117 |
-
|
118 |
-
/* generates a random number on [0,0xffffffff]-interval */
|
119 |
-
unsigned int genrand_int32(void)
|
120 |
-
{
|
121 |
-
unsigned int y;
|
122 |
-
static unsigned int mag01[2] = { 0x0, MATRIX_A };
|
123 |
-
|
124 |
-
/* mag01[x] = x * MATRIX_A for x=0,1 */
|
125 |
-
|
126 |
-
if (mti >= static_cast<int>(N))
|
127 |
-
{ /* generate N words at one time */
|
128 |
-
int kk;
|
129 |
-
|
130 |
-
if (mti == N + 1) /* if init_genrand() has not been called, */
|
131 |
-
init_genrand(5489); /* a defat initial seed is used */
|
132 |
-
|
133 |
-
for (kk = 0; kk < static_cast<int>(N - M); kk++)
|
134 |
-
{
|
135 |
-
y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
|
136 |
-
mt[kk] = mt[kk + M] ^ (y >> 1) ^ mag01[y & 0x1];
|
137 |
-
}
|
138 |
-
for (; kk < static_cast<int>(N - 1); kk++)
|
139 |
-
{
|
140 |
-
y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
|
141 |
-
mt[kk] = mt[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1];
|
142 |
-
}
|
143 |
-
y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
|
144 |
-
mt[N - 1] = mt[M - 1] ^ (y >> 1) ^ mag01[y & 0x1];
|
145 |
-
|
146 |
-
mti = 0;
|
147 |
-
}
|
148 |
-
|
149 |
-
y = mt[mti++];
|
150 |
-
|
151 |
-
/* Tempering */
|
152 |
-
y ^= (y >> 11);
|
153 |
-
y ^= (y << 7) & 0x9d2c5680;
|
154 |
-
y ^= (y << 15) & 0xefc60000;
|
155 |
-
y ^= (y >> 18);
|
156 |
-
|
157 |
-
return y;
|
158 |
-
}
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
} // namespace mersenne
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/models/dense_heads/ssd_head.py
DELETED
@@ -1,265 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
from mmcv.cnn import xavier_init
|
5 |
-
from mmcv.runner import force_fp32
|
6 |
-
|
7 |
-
from mmdet.core import (build_anchor_generator, build_assigner,
|
8 |
-
build_bbox_coder, build_sampler, multi_apply)
|
9 |
-
from ..builder import HEADS
|
10 |
-
from ..losses import smooth_l1_loss
|
11 |
-
from .anchor_head import AnchorHead
|
12 |
-
|
13 |
-
|
14 |
-
# TODO: add loss evaluator for SSD
|
15 |
-
@HEADS.register_module()
|
16 |
-
class SSDHead(AnchorHead):
|
17 |
-
"""SSD head used in https://arxiv.org/abs/1512.02325.
|
18 |
-
|
19 |
-
Args:
|
20 |
-
num_classes (int): Number of categories excluding the background
|
21 |
-
category.
|
22 |
-
in_channels (int): Number of channels in the input feature map.
|
23 |
-
anchor_generator (dict): Config dict for anchor generator
|
24 |
-
bbox_coder (dict): Config of bounding box coder.
|
25 |
-
reg_decoded_bbox (bool): If true, the regression loss would be
|
26 |
-
applied directly on decoded bounding boxes, converting both
|
27 |
-
the predicted boxes and regression targets to absolute
|
28 |
-
coordinates format. Default False. It should be `True` when
|
29 |
-
using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
|
30 |
-
train_cfg (dict): Training config of anchor head.
|
31 |
-
test_cfg (dict): Testing config of anchor head.
|
32 |
-
""" # noqa: W605
|
33 |
-
|
34 |
-
def __init__(self,
|
35 |
-
num_classes=80,
|
36 |
-
in_channels=(512, 1024, 512, 256, 256, 256),
|
37 |
-
anchor_generator=dict(
|
38 |
-
type='SSDAnchorGenerator',
|
39 |
-
scale_major=False,
|
40 |
-
input_size=300,
|
41 |
-
strides=[8, 16, 32, 64, 100, 300],
|
42 |
-
ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
|
43 |
-
basesize_ratio_range=(0.1, 0.9)),
|
44 |
-
bbox_coder=dict(
|
45 |
-
type='DeltaXYWHBBoxCoder',
|
46 |
-
clip_border=True,
|
47 |
-
target_means=[.0, .0, .0, .0],
|
48 |
-
target_stds=[1.0, 1.0, 1.0, 1.0],
|
49 |
-
),
|
50 |
-
reg_decoded_bbox=False,
|
51 |
-
train_cfg=None,
|
52 |
-
test_cfg=None):
|
53 |
-
super(AnchorHead, self).__init__()
|
54 |
-
self.num_classes = num_classes
|
55 |
-
self.in_channels = in_channels
|
56 |
-
self.cls_out_channels = num_classes + 1 # add background class
|
57 |
-
self.anchor_generator = build_anchor_generator(anchor_generator)
|
58 |
-
num_anchors = self.anchor_generator.num_base_anchors
|
59 |
-
|
60 |
-
reg_convs = []
|
61 |
-
cls_convs = []
|
62 |
-
for i in range(len(in_channels)):
|
63 |
-
reg_convs.append(
|
64 |
-
nn.Conv2d(
|
65 |
-
in_channels[i],
|
66 |
-
num_anchors[i] * 4,
|
67 |
-
kernel_size=3,
|
68 |
-
padding=1))
|
69 |
-
cls_convs.append(
|
70 |
-
nn.Conv2d(
|
71 |
-
in_channels[i],
|
72 |
-
num_anchors[i] * (num_classes + 1),
|
73 |
-
kernel_size=3,
|
74 |
-
padding=1))
|
75 |
-
self.reg_convs = nn.ModuleList(reg_convs)
|
76 |
-
self.cls_convs = nn.ModuleList(cls_convs)
|
77 |
-
|
78 |
-
self.bbox_coder = build_bbox_coder(bbox_coder)
|
79 |
-
self.reg_decoded_bbox = reg_decoded_bbox
|
80 |
-
self.use_sigmoid_cls = False
|
81 |
-
self.cls_focal_loss = False
|
82 |
-
self.train_cfg = train_cfg
|
83 |
-
self.test_cfg = test_cfg
|
84 |
-
# set sampling=False for archor_target
|
85 |
-
self.sampling = False
|
86 |
-
if self.train_cfg:
|
87 |
-
self.assigner = build_assigner(self.train_cfg.assigner)
|
88 |
-
# SSD sampling=False so use PseudoSampler
|
89 |
-
sampler_cfg = dict(type='PseudoSampler')
|
90 |
-
self.sampler = build_sampler(sampler_cfg, context=self)
|
91 |
-
self.fp16_enabled = False
|
92 |
-
|
93 |
-
def init_weights(self):
|
94 |
-
"""Initialize weights of the head."""
|
95 |
-
for m in self.modules():
|
96 |
-
if isinstance(m, nn.Conv2d):
|
97 |
-
xavier_init(m, distribution='uniform', bias=0)
|
98 |
-
|
99 |
-
def forward(self, feats):
|
100 |
-
"""Forward features from the upstream network.
|
101 |
-
|
102 |
-
Args:
|
103 |
-
feats (tuple[Tensor]): Features from the upstream network, each is
|
104 |
-
a 4D-tensor.
|
105 |
-
|
106 |
-
Returns:
|
107 |
-
tuple:
|
108 |
-
cls_scores (list[Tensor]): Classification scores for all scale
|
109 |
-
levels, each is a 4D-tensor, the channels number is
|
110 |
-
num_anchors * num_classes.
|
111 |
-
bbox_preds (list[Tensor]): Box energies / deltas for all scale
|
112 |
-
levels, each is a 4D-tensor, the channels number is
|
113 |
-
num_anchors * 4.
|
114 |
-
"""
|
115 |
-
cls_scores = []
|
116 |
-
bbox_preds = []
|
117 |
-
for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
|
118 |
-
self.cls_convs):
|
119 |
-
cls_scores.append(cls_conv(feat))
|
120 |
-
bbox_preds.append(reg_conv(feat))
|
121 |
-
return cls_scores, bbox_preds
|
122 |
-
|
123 |
-
def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
|
124 |
-
bbox_targets, bbox_weights, num_total_samples):
|
125 |
-
"""Compute loss of a single image.
|
126 |
-
|
127 |
-
Args:
|
128 |
-
cls_score (Tensor): Box scores for eachimage
|
129 |
-
Has shape (num_total_anchors, num_classes).
|
130 |
-
bbox_pred (Tensor): Box energies / deltas for each image
|
131 |
-
level with shape (num_total_anchors, 4).
|
132 |
-
anchors (Tensor): Box reference for each scale level with shape
|
133 |
-
(num_total_anchors, 4).
|
134 |
-
labels (Tensor): Labels of each anchors with shape
|
135 |
-
(num_total_anchors,).
|
136 |
-
label_weights (Tensor): Label weights of each anchor with shape
|
137 |
-
(num_total_anchors,)
|
138 |
-
bbox_targets (Tensor): BBox regression targets of each anchor wight
|
139 |
-
shape (num_total_anchors, 4).
|
140 |
-
bbox_weights (Tensor): BBox regression loss weights of each anchor
|
141 |
-
with shape (num_total_anchors, 4).
|
142 |
-
num_total_samples (int): If sampling, num total samples equal to
|
143 |
-
the number of total anchors; Otherwise, it is the number of
|
144 |
-
positive anchors.
|
145 |
-
|
146 |
-
Returns:
|
147 |
-
dict[str, Tensor]: A dictionary of loss components.
|
148 |
-
"""
|
149 |
-
|
150 |
-
loss_cls_all = F.cross_entropy(
|
151 |
-
cls_score, labels, reduction='none') * label_weights
|
152 |
-
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
|
153 |
-
pos_inds = ((labels >= 0) &
|
154 |
-
(labels < self.num_classes)).nonzero().reshape(-1)
|
155 |
-
neg_inds = (labels == self.num_classes).nonzero().view(-1)
|
156 |
-
|
157 |
-
num_pos_samples = pos_inds.size(0)
|
158 |
-
num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
|
159 |
-
if num_neg_samples > neg_inds.size(0):
|
160 |
-
num_neg_samples = neg_inds.size(0)
|
161 |
-
topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
|
162 |
-
loss_cls_pos = loss_cls_all[pos_inds].sum()
|
163 |
-
loss_cls_neg = topk_loss_cls_neg.sum()
|
164 |
-
loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
|
165 |
-
|
166 |
-
if self.reg_decoded_bbox:
|
167 |
-
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
|
168 |
-
# is applied directly on the decoded bounding boxes, it
|
169 |
-
# decodes the already encoded coordinates to absolute format.
|
170 |
-
bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
|
171 |
-
|
172 |
-
loss_bbox = smooth_l1_loss(
|
173 |
-
bbox_pred,
|
174 |
-
bbox_targets,
|
175 |
-
bbox_weights,
|
176 |
-
beta=self.train_cfg.smoothl1_beta,
|
177 |
-
avg_factor=num_total_samples)
|
178 |
-
return loss_cls[None], loss_bbox
|
179 |
-
|
180 |
-
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
|
181 |
-
def loss(self,
|
182 |
-
cls_scores,
|
183 |
-
bbox_preds,
|
184 |
-
gt_bboxes,
|
185 |
-
gt_labels,
|
186 |
-
img_metas,
|
187 |
-
gt_bboxes_ignore=None):
|
188 |
-
"""Compute losses of the head.
|
189 |
-
|
190 |
-
Args:
|
191 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
192 |
-
Has shape (N, num_anchors * num_classes, H, W)
|
193 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
194 |
-
level with shape (N, num_anchors * 4, H, W)
|
195 |
-
gt_bboxes (list[Tensor]): each item are the truth boxes for each
|
196 |
-
image in [tl_x, tl_y, br_x, br_y] format.
|
197 |
-
gt_labels (list[Tensor]): class indices corresponding to each box
|
198 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
199 |
-
image size, scaling factor, etc.
|
200 |
-
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
|
201 |
-
boxes can be ignored when computing the loss.
|
202 |
-
|
203 |
-
Returns:
|
204 |
-
dict[str, Tensor]: A dictionary of loss components.
|
205 |
-
"""
|
206 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
207 |
-
assert len(featmap_sizes) == self.anchor_generator.num_levels
|
208 |
-
|
209 |
-
device = cls_scores[0].device
|
210 |
-
|
211 |
-
anchor_list, valid_flag_list = self.get_anchors(
|
212 |
-
featmap_sizes, img_metas, device=device)
|
213 |
-
cls_reg_targets = self.get_targets(
|
214 |
-
anchor_list,
|
215 |
-
valid_flag_list,
|
216 |
-
gt_bboxes,
|
217 |
-
img_metas,
|
218 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
219 |
-
gt_labels_list=gt_labels,
|
220 |
-
label_channels=1,
|
221 |
-
unmap_outputs=False)
|
222 |
-
if cls_reg_targets is None:
|
223 |
-
return None
|
224 |
-
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
|
225 |
-
num_total_pos, num_total_neg) = cls_reg_targets
|
226 |
-
|
227 |
-
num_images = len(img_metas)
|
228 |
-
all_cls_scores = torch.cat([
|
229 |
-
s.permute(0, 2, 3, 1).reshape(
|
230 |
-
num_images, -1, self.cls_out_channels) for s in cls_scores
|
231 |
-
], 1)
|
232 |
-
all_labels = torch.cat(labels_list, -1).view(num_images, -1)
|
233 |
-
all_label_weights = torch.cat(label_weights_list,
|
234 |
-
-1).view(num_images, -1)
|
235 |
-
all_bbox_preds = torch.cat([
|
236 |
-
b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
|
237 |
-
for b in bbox_preds
|
238 |
-
], -2)
|
239 |
-
all_bbox_targets = torch.cat(bbox_targets_list,
|
240 |
-
-2).view(num_images, -1, 4)
|
241 |
-
all_bbox_weights = torch.cat(bbox_weights_list,
|
242 |
-
-2).view(num_images, -1, 4)
|
243 |
-
|
244 |
-
# concat all level anchors to a single tensor
|
245 |
-
all_anchors = []
|
246 |
-
for i in range(num_images):
|
247 |
-
all_anchors.append(torch.cat(anchor_list[i]))
|
248 |
-
|
249 |
-
# check NaN and Inf
|
250 |
-
assert torch.isfinite(all_cls_scores).all().item(), \
|
251 |
-
'classification scores become infinite or NaN!'
|
252 |
-
assert torch.isfinite(all_bbox_preds).all().item(), \
|
253 |
-
'bbox predications become infinite or NaN!'
|
254 |
-
|
255 |
-
losses_cls, losses_bbox = multi_apply(
|
256 |
-
self.loss_single,
|
257 |
-
all_cls_scores,
|
258 |
-
all_bbox_preds,
|
259 |
-
all_anchors,
|
260 |
-
all_labels,
|
261 |
-
all_label_weights,
|
262 |
-
all_bbox_targets,
|
263 |
-
all_bbox_weights,
|
264 |
-
num_total_samples=num_total_pos)
|
265 |
-
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/walt/train.py
DELETED
@@ -1,188 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import copy
|
3 |
-
import os
|
4 |
-
import os.path as osp
|
5 |
-
import time
|
6 |
-
import warnings
|
7 |
-
|
8 |
-
import mmcv
|
9 |
-
import torch
|
10 |
-
from mmcv import Config, DictAction
|
11 |
-
from mmcv.runner import get_dist_info, init_dist
|
12 |
-
from mmcv.utils import get_git_hash
|
13 |
-
|
14 |
-
from mmdet import __version__
|
15 |
-
from mmdet.apis import set_random_seed
|
16 |
-
from code_local.apis import train_detector
|
17 |
-
from code_local.datasets import build_dataset
|
18 |
-
from mmdet.models import build_detector
|
19 |
-
from mmdet.utils import collect_env, get_root_logger
|
20 |
-
|
21 |
-
|
22 |
-
def parse_args():
|
23 |
-
parser = argparse.ArgumentParser(description='Train a detector')
|
24 |
-
parser.add_argument('config', help='train config file path')
|
25 |
-
parser.add_argument('--work-dir', help='the dir to save logs and models')
|
26 |
-
parser.add_argument(
|
27 |
-
'--resume-from', help='the checkpoint file to resume from')
|
28 |
-
parser.add_argument(
|
29 |
-
'--no-validate',
|
30 |
-
action='store_true',
|
31 |
-
help='whether not to evaluate the checkpoint during training')
|
32 |
-
group_gpus = parser.add_mutually_exclusive_group()
|
33 |
-
group_gpus.add_argument(
|
34 |
-
'--gpus',
|
35 |
-
type=int,
|
36 |
-
help='number of gpus to use '
|
37 |
-
'(only applicable to non-distributed training)')
|
38 |
-
group_gpus.add_argument(
|
39 |
-
'--gpu-ids',
|
40 |
-
type=int,
|
41 |
-
nargs='+',
|
42 |
-
help='ids of gpus to use '
|
43 |
-
'(only applicable to non-distributed training)')
|
44 |
-
parser.add_argument('--seed', type=int, default=None, help='random seed')
|
45 |
-
parser.add_argument(
|
46 |
-
'--deterministic',
|
47 |
-
action='store_true',
|
48 |
-
help='whether to set deterministic options for CUDNN backend.')
|
49 |
-
parser.add_argument(
|
50 |
-
'--options',
|
51 |
-
nargs='+',
|
52 |
-
action=DictAction,
|
53 |
-
help='override some settings in the used config, the key-value pair '
|
54 |
-
'in xxx=yyy format will be merged into config file (deprecate), '
|
55 |
-
'change to --cfg-options instead.')
|
56 |
-
parser.add_argument(
|
57 |
-
'--cfg-options',
|
58 |
-
nargs='+',
|
59 |
-
action=DictAction,
|
60 |
-
help='override some settings in the used config, the key-value pair '
|
61 |
-
'in xxx=yyy format will be merged into config file. If the value to '
|
62 |
-
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
|
63 |
-
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
|
64 |
-
'Note that the quotation marks are necessary and that no white space '
|
65 |
-
'is allowed.')
|
66 |
-
parser.add_argument(
|
67 |
-
'--launcher',
|
68 |
-
choices=['none', 'pytorch', 'slurm', 'mpi'],
|
69 |
-
default='none',
|
70 |
-
help='job launcher')
|
71 |
-
parser.add_argument('--local_rank', type=int, default=0)
|
72 |
-
args = parser.parse_args()
|
73 |
-
if 'LOCAL_RANK' not in os.environ:
|
74 |
-
os.environ['LOCAL_RANK'] = str(args.local_rank)
|
75 |
-
|
76 |
-
if args.options and args.cfg_options:
|
77 |
-
raise ValueError(
|
78 |
-
'--options and --cfg-options cannot be both '
|
79 |
-
'specified, --options is deprecated in favor of --cfg-options')
|
80 |
-
if args.options:
|
81 |
-
warnings.warn('--options is deprecated in favor of --cfg-options')
|
82 |
-
args.cfg_options = args.options
|
83 |
-
|
84 |
-
return args
|
85 |
-
|
86 |
-
|
87 |
-
def main():
|
88 |
-
args = parse_args()
|
89 |
-
|
90 |
-
cfg = Config.fromfile(args.config)
|
91 |
-
if args.cfg_options is not None:
|
92 |
-
cfg.merge_from_dict(args.cfg_options)
|
93 |
-
# import modules from string list.
|
94 |
-
if cfg.get('custom_imports', None):
|
95 |
-
from mmcv.utils import import_modules_from_strings
|
96 |
-
import_modules_from_strings(**cfg['custom_imports'])
|
97 |
-
# set cudnn_benchmark
|
98 |
-
if cfg.get('cudnn_benchmark', False):
|
99 |
-
torch.backends.cudnn.benchmark = True
|
100 |
-
|
101 |
-
# work_dir is determined in this priority: CLI > segment in file > filename
|
102 |
-
if args.work_dir is not None:
|
103 |
-
# update configs according to CLI args if args.work_dir is not None
|
104 |
-
cfg.work_dir = args.work_dir
|
105 |
-
elif cfg.get('work_dir', None) is None:
|
106 |
-
# use config filename as default work_dir if cfg.work_dir is None
|
107 |
-
cfg.work_dir = osp.join('./work_dirs',
|
108 |
-
osp.splitext(osp.basename(args.config))[0])
|
109 |
-
if args.resume_from is not None:
|
110 |
-
cfg.resume_from = args.resume_from
|
111 |
-
if args.gpu_ids is not None:
|
112 |
-
cfg.gpu_ids = args.gpu_ids
|
113 |
-
else:
|
114 |
-
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
|
115 |
-
|
116 |
-
# init distributed env first, since logger depends on the dist info.
|
117 |
-
if args.launcher == 'none':
|
118 |
-
distributed = False
|
119 |
-
else:
|
120 |
-
distributed = True
|
121 |
-
init_dist(args.launcher, **cfg.dist_params)
|
122 |
-
# re-set gpu_ids with distributed training mode
|
123 |
-
_, world_size = get_dist_info()
|
124 |
-
cfg.gpu_ids = range(world_size)
|
125 |
-
|
126 |
-
# create work_dir
|
127 |
-
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
|
128 |
-
# dump config
|
129 |
-
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
|
130 |
-
# init the logger before other steps
|
131 |
-
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
|
132 |
-
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
|
133 |
-
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
|
134 |
-
|
135 |
-
# init the meta dict to record some important information such as
|
136 |
-
# environment info and seed, which will be logged
|
137 |
-
meta = dict()
|
138 |
-
# log env info
|
139 |
-
env_info_dict = collect_env()
|
140 |
-
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
|
141 |
-
dash_line = '-' * 60 + '\n'
|
142 |
-
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
|
143 |
-
dash_line)
|
144 |
-
meta['env_info'] = env_info
|
145 |
-
meta['config'] = cfg.pretty_text
|
146 |
-
# log some basic info
|
147 |
-
logger.info(f'Distributed training: {distributed}')
|
148 |
-
logger.info(f'Config:\n{cfg.pretty_text}')
|
149 |
-
|
150 |
-
# set random seeds
|
151 |
-
if args.seed is not None:
|
152 |
-
logger.info(f'Set random seed to {args.seed}, '
|
153 |
-
f'deterministic: {args.deterministic}')
|
154 |
-
set_random_seed(args.seed, deterministic=args.deterministic)
|
155 |
-
cfg.seed = args.seed
|
156 |
-
meta['seed'] = args.seed
|
157 |
-
meta['exp_name'] = osp.basename(args.config)
|
158 |
-
|
159 |
-
model = build_detector(
|
160 |
-
cfg.model,
|
161 |
-
train_cfg=cfg.get('train_cfg'),
|
162 |
-
test_cfg=cfg.get('test_cfg'))
|
163 |
-
|
164 |
-
datasets = [build_dataset(cfg.data.train)]
|
165 |
-
if len(cfg.workflow) == 2:
|
166 |
-
val_dataset = copy.deepcopy(cfg.data.val)
|
167 |
-
val_dataset.pipeline = cfg.data.train.pipeline
|
168 |
-
datasets.append(build_dataset(val_dataset))
|
169 |
-
if cfg.checkpoint_config is not None:
|
170 |
-
# save mmdet version, config file content and class names in
|
171 |
-
# checkpoints as meta data
|
172 |
-
cfg.checkpoint_config.meta = dict(
|
173 |
-
mmdet_version=__version__ + get_git_hash()[:7],
|
174 |
-
CLASSES=datasets[0].CLASSES)
|
175 |
-
# add an attribute for visualization convenience
|
176 |
-
model.CLASSES = datasets[0].CLASSES
|
177 |
-
train_detector(
|
178 |
-
model,
|
179 |
-
datasets,
|
180 |
-
cfg,
|
181 |
-
distributed=distributed,
|
182 |
-
validate=(not args.no_validate),
|
183 |
-
timestamp=timestamp,
|
184 |
-
meta=meta)
|
185 |
-
|
186 |
-
|
187 |
-
if __name__ == '__main__':
|
188 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cecil8352/vits-models/text/symbols.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
'''
|
2 |
-
Defines the set of symbols used in text input to the model.
|
3 |
-
'''
|
4 |
-
|
5 |
-
'''# japanese_cleaners
|
6 |
-
_pad = '_'
|
7 |
-
_punctuation = ',.!?-'
|
8 |
-
_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
|
9 |
-
'''
|
10 |
-
|
11 |
-
'''# japanese_cleaners2
|
12 |
-
_pad = '_'
|
13 |
-
_punctuation = ',.!?-~…'
|
14 |
-
_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
|
15 |
-
'''
|
16 |
-
|
17 |
-
'''# korean_cleaners
|
18 |
-
_pad = '_'
|
19 |
-
_punctuation = ',.!?…~'
|
20 |
-
_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
|
21 |
-
'''
|
22 |
-
|
23 |
-
'''# chinese_cleaners
|
24 |
-
_pad = '_'
|
25 |
-
_punctuation = ',。!?—…'
|
26 |
-
_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
|
27 |
-
'''
|
28 |
-
|
29 |
-
# zh_ja_mixture_cleaners
|
30 |
-
_pad = '_'
|
31 |
-
_punctuation = ',.!?-~…'
|
32 |
-
_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
|
33 |
-
|
34 |
-
|
35 |
-
# Export all symbols:
|
36 |
-
symbols = [_pad] + list(_punctuation) + list(_letters)
|
37 |
-
|
38 |
-
# Special symbol ids
|
39 |
-
SPACE_ID = symbols.index(" ")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/community.py
DELETED
@@ -1,354 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Data structures to interact with Discussions and Pull Requests on the Hub.
|
3 |
-
|
4 |
-
See [the Discussions and Pull Requests guide](https://huggingface.co/docs/hub/repositories-pull-requests-discussions)
|
5 |
-
for more information on Pull Requests, Discussions, and the community tab.
|
6 |
-
"""
|
7 |
-
from dataclasses import dataclass
|
8 |
-
from datetime import datetime
|
9 |
-
from typing import List, Optional
|
10 |
-
|
11 |
-
from .constants import REPO_TYPE_MODEL
|
12 |
-
from .utils import parse_datetime
|
13 |
-
from .utils._typing import Literal
|
14 |
-
|
15 |
-
|
16 |
-
DiscussionStatus = Literal["open", "closed", "merged", "draft"]
|
17 |
-
|
18 |
-
|
19 |
-
@dataclass
|
20 |
-
class Discussion:
|
21 |
-
"""
|
22 |
-
A Discussion or Pull Request on the Hub.
|
23 |
-
|
24 |
-
This dataclass is not intended to be instantiated directly.
|
25 |
-
|
26 |
-
Attributes:
|
27 |
-
title (`str`):
|
28 |
-
The title of the Discussion / Pull Request
|
29 |
-
status (`str`):
|
30 |
-
The status of the Discussion / Pull Request.
|
31 |
-
It must be one of:
|
32 |
-
* `"open"`
|
33 |
-
* `"closed"`
|
34 |
-
* `"merged"` (only for Pull Requests )
|
35 |
-
* `"draft"` (only for Pull Requests )
|
36 |
-
num (`int`):
|
37 |
-
The number of the Discussion / Pull Request.
|
38 |
-
repo_id (`str`):
|
39 |
-
The id (`"{namespace}/{repo_name}"`) of the repo on which
|
40 |
-
the Discussion / Pull Request was open.
|
41 |
-
repo_type (`str`):
|
42 |
-
The type of the repo on which the Discussion / Pull Request was open.
|
43 |
-
Possible values are: `"model"`, `"dataset"`, `"space"`.
|
44 |
-
author (`str`):
|
45 |
-
The username of the Discussion / Pull Request author.
|
46 |
-
Can be `"deleted"` if the user has been deleted since.
|
47 |
-
is_pull_request (`bool`):
|
48 |
-
Whether or not this is a Pull Request.
|
49 |
-
created_at (`datetime`):
|
50 |
-
The `datetime` of creation of the Discussion / Pull Request.
|
51 |
-
endpoint (`str`):
|
52 |
-
Endpoint of the Hub. Default is https://huggingface.co.
|
53 |
-
git_reference (`str`, *optional*):
|
54 |
-
(property) Git reference to which changes can be pushed if this is a Pull Request, `None` otherwise.
|
55 |
-
url (`str`):
|
56 |
-
(property) URL of the discussion on the Hub.
|
57 |
-
"""
|
58 |
-
|
59 |
-
title: str
|
60 |
-
status: DiscussionStatus
|
61 |
-
num: int
|
62 |
-
repo_id: str
|
63 |
-
repo_type: str
|
64 |
-
author: str
|
65 |
-
is_pull_request: bool
|
66 |
-
created_at: datetime
|
67 |
-
endpoint: str
|
68 |
-
|
69 |
-
@property
|
70 |
-
def git_reference(self) -> Optional[str]:
|
71 |
-
"""
|
72 |
-
If this is a Pull Request , returns the git reference to which changes can be pushed.
|
73 |
-
Returns `None` otherwise.
|
74 |
-
"""
|
75 |
-
if self.is_pull_request:
|
76 |
-
return f"refs/pr/{self.num}"
|
77 |
-
return None
|
78 |
-
|
79 |
-
@property
|
80 |
-
def url(self) -> str:
|
81 |
-
"""Returns the URL of the discussion on the Hub."""
|
82 |
-
if self.repo_type is None or self.repo_type == REPO_TYPE_MODEL:
|
83 |
-
return f"{self.endpoint}/{self.repo_id}/discussions/{self.num}"
|
84 |
-
return f"{self.endpoint}/{self.repo_type}s/{self.repo_id}/discussions/{self.num}"
|
85 |
-
|
86 |
-
|
87 |
-
@dataclass
|
88 |
-
class DiscussionWithDetails(Discussion):
|
89 |
-
"""
|
90 |
-
Subclass of [`Discussion`].
|
91 |
-
|
92 |
-
Attributes:
|
93 |
-
title (`str`):
|
94 |
-
The title of the Discussion / Pull Request
|
95 |
-
status (`str`):
|
96 |
-
The status of the Discussion / Pull Request.
|
97 |
-
It can be one of:
|
98 |
-
* `"open"`
|
99 |
-
* `"closed"`
|
100 |
-
* `"merged"` (only for Pull Requests )
|
101 |
-
* `"draft"` (only for Pull Requests )
|
102 |
-
num (`int`):
|
103 |
-
The number of the Discussion / Pull Request.
|
104 |
-
repo_id (`str`):
|
105 |
-
The id (`"{namespace}/{repo_name}"`) of the repo on which
|
106 |
-
the Discussion / Pull Request was open.
|
107 |
-
repo_type (`str`):
|
108 |
-
The type of the repo on which the Discussion / Pull Request was open.
|
109 |
-
Possible values are: `"model"`, `"dataset"`, `"space"`.
|
110 |
-
author (`str`):
|
111 |
-
The username of the Discussion / Pull Request author.
|
112 |
-
Can be `"deleted"` if the user has been deleted since.
|
113 |
-
is_pull_request (`bool`):
|
114 |
-
Whether or not this is a Pull Request.
|
115 |
-
created_at (`datetime`):
|
116 |
-
The `datetime` of creation of the Discussion / Pull Request.
|
117 |
-
events (`list` of [`DiscussionEvent`])
|
118 |
-
The list of [`DiscussionEvents`] in this Discussion or Pull Request.
|
119 |
-
conflicting_files (`list` of `str`, *optional*):
|
120 |
-
A list of conflicting files if this is a Pull Request.
|
121 |
-
`None` if `self.is_pull_request` is `False`.
|
122 |
-
target_branch (`str`, *optional*):
|
123 |
-
The branch into which changes are to be merged if this is a
|
124 |
-
Pull Request . `None` if `self.is_pull_request` is `False`.
|
125 |
-
merge_commit_oid (`str`, *optional*):
|
126 |
-
If this is a merged Pull Request , this is set to the OID / SHA of
|
127 |
-
the merge commit, `None` otherwise.
|
128 |
-
diff (`str`, *optional*):
|
129 |
-
The git diff if this is a Pull Request , `None` otherwise.
|
130 |
-
endpoint (`str`):
|
131 |
-
Endpoint of the Hub. Default is https://huggingface.co.
|
132 |
-
git_reference (`str`, *optional*):
|
133 |
-
(property) Git reference to which changes can be pushed if this is a Pull Request, `None` otherwise.
|
134 |
-
url (`str`):
|
135 |
-
(property) URL of the discussion on the Hub.
|
136 |
-
"""
|
137 |
-
|
138 |
-
events: List["DiscussionEvent"]
|
139 |
-
conflicting_files: Optional[List[str]]
|
140 |
-
target_branch: Optional[str]
|
141 |
-
merge_commit_oid: Optional[str]
|
142 |
-
diff: Optional[str]
|
143 |
-
|
144 |
-
|
145 |
-
@dataclass
|
146 |
-
class DiscussionEvent:
|
147 |
-
"""
|
148 |
-
An event in a Discussion or Pull Request.
|
149 |
-
|
150 |
-
Use concrete classes:
|
151 |
-
* [`DiscussionComment`]
|
152 |
-
* [`DiscussionStatusChange`]
|
153 |
-
* [`DiscussionCommit`]
|
154 |
-
* [`DiscussionTitleChange`]
|
155 |
-
|
156 |
-
Attributes:
|
157 |
-
id (`str`):
|
158 |
-
The ID of the event. An hexadecimal string.
|
159 |
-
type (`str`):
|
160 |
-
The type of the event.
|
161 |
-
created_at (`datetime`):
|
162 |
-
A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
|
163 |
-
object holding the creation timestamp for the event.
|
164 |
-
author (`str`):
|
165 |
-
The username of the Discussion / Pull Request author.
|
166 |
-
Can be `"deleted"` if the user has been deleted since.
|
167 |
-
"""
|
168 |
-
|
169 |
-
id: str
|
170 |
-
type: str
|
171 |
-
created_at: datetime
|
172 |
-
author: str
|
173 |
-
|
174 |
-
_event: dict
|
175 |
-
"""Stores the original event data, in case we need to access it later."""
|
176 |
-
|
177 |
-
|
178 |
-
@dataclass
|
179 |
-
class DiscussionComment(DiscussionEvent):
|
180 |
-
"""A comment in a Discussion / Pull Request.
|
181 |
-
|
182 |
-
Subclass of [`DiscussionEvent`].
|
183 |
-
|
184 |
-
|
185 |
-
Attributes:
|
186 |
-
id (`str`):
|
187 |
-
The ID of the event. An hexadecimal string.
|
188 |
-
type (`str`):
|
189 |
-
The type of the event.
|
190 |
-
created_at (`datetime`):
|
191 |
-
A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
|
192 |
-
object holding the creation timestamp for the event.
|
193 |
-
author (`str`):
|
194 |
-
The username of the Discussion / Pull Request author.
|
195 |
-
Can be `"deleted"` if the user has been deleted since.
|
196 |
-
content (`str`):
|
197 |
-
The raw markdown content of the comment. Mentions, links and images are not rendered.
|
198 |
-
edited (`bool`):
|
199 |
-
Whether or not this comment has been edited.
|
200 |
-
hidden (`bool`):
|
201 |
-
Whether or not this comment has been hidden.
|
202 |
-
"""
|
203 |
-
|
204 |
-
content: str
|
205 |
-
edited: bool
|
206 |
-
hidden: bool
|
207 |
-
|
208 |
-
@property
|
209 |
-
def rendered(self) -> str:
|
210 |
-
"""The rendered comment, as a HTML string"""
|
211 |
-
return self._event["data"]["latest"]["html"]
|
212 |
-
|
213 |
-
@property
|
214 |
-
def last_edited_at(self) -> datetime:
|
215 |
-
"""The last edit time, as a `datetime` object."""
|
216 |
-
return parse_datetime(self._event["data"]["latest"]["updatedAt"])
|
217 |
-
|
218 |
-
@property
|
219 |
-
def last_edited_by(self) -> str:
|
220 |
-
"""The last edit time, as a `datetime` object."""
|
221 |
-
return self._event["data"]["latest"].get("author", {}).get("name", "deleted")
|
222 |
-
|
223 |
-
@property
|
224 |
-
def edit_history(self) -> List[dict]:
|
225 |
-
"""The edit history of the comment"""
|
226 |
-
return self._event["data"]["history"]
|
227 |
-
|
228 |
-
@property
|
229 |
-
def number_of_edits(self) -> int:
|
230 |
-
return len(self.edit_history)
|
231 |
-
|
232 |
-
|
233 |
-
@dataclass
|
234 |
-
class DiscussionStatusChange(DiscussionEvent):
|
235 |
-
"""A change of status in a Discussion / Pull Request.
|
236 |
-
|
237 |
-
Subclass of [`DiscussionEvent`].
|
238 |
-
|
239 |
-
Attributes:
|
240 |
-
id (`str`):
|
241 |
-
The ID of the event. An hexadecimal string.
|
242 |
-
type (`str`):
|
243 |
-
The type of the event.
|
244 |
-
created_at (`datetime`):
|
245 |
-
A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
|
246 |
-
object holding the creation timestamp for the event.
|
247 |
-
author (`str`):
|
248 |
-
The username of the Discussion / Pull Request author.
|
249 |
-
Can be `"deleted"` if the user has been deleted since.
|
250 |
-
new_status (`str`):
|
251 |
-
The status of the Discussion / Pull Request after the change.
|
252 |
-
It can be one of:
|
253 |
-
* `"open"`
|
254 |
-
* `"closed"`
|
255 |
-
* `"merged"` (only for Pull Requests )
|
256 |
-
"""
|
257 |
-
|
258 |
-
new_status: str
|
259 |
-
|
260 |
-
|
261 |
-
@dataclass
|
262 |
-
class DiscussionCommit(DiscussionEvent):
|
263 |
-
"""A commit in a Pull Request.
|
264 |
-
|
265 |
-
Subclass of [`DiscussionEvent`].
|
266 |
-
|
267 |
-
Attributes:
|
268 |
-
id (`str`):
|
269 |
-
The ID of the event. An hexadecimal string.
|
270 |
-
type (`str`):
|
271 |
-
The type of the event.
|
272 |
-
created_at (`datetime`):
|
273 |
-
A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
|
274 |
-
object holding the creation timestamp for the event.
|
275 |
-
author (`str`):
|
276 |
-
The username of the Discussion / Pull Request author.
|
277 |
-
Can be `"deleted"` if the user has been deleted since.
|
278 |
-
summary (`str`):
|
279 |
-
The summary of the commit.
|
280 |
-
oid (`str`):
|
281 |
-
The OID / SHA of the commit, as a hexadecimal string.
|
282 |
-
"""
|
283 |
-
|
284 |
-
summary: str
|
285 |
-
oid: str
|
286 |
-
|
287 |
-
|
288 |
-
@dataclass
|
289 |
-
class DiscussionTitleChange(DiscussionEvent):
|
290 |
-
"""A rename event in a Discussion / Pull Request.
|
291 |
-
|
292 |
-
Subclass of [`DiscussionEvent`].
|
293 |
-
|
294 |
-
Attributes:
|
295 |
-
id (`str`):
|
296 |
-
The ID of the event. An hexadecimal string.
|
297 |
-
type (`str`):
|
298 |
-
The type of the event.
|
299 |
-
created_at (`datetime`):
|
300 |
-
A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
|
301 |
-
object holding the creation timestamp for the event.
|
302 |
-
author (`str`):
|
303 |
-
The username of the Discussion / Pull Request author.
|
304 |
-
Can be `"deleted"` if the user has been deleted since.
|
305 |
-
old_title (`str`):
|
306 |
-
The previous title for the Discussion / Pull Request.
|
307 |
-
new_title (`str`):
|
308 |
-
The new title.
|
309 |
-
"""
|
310 |
-
|
311 |
-
old_title: str
|
312 |
-
new_title: str
|
313 |
-
|
314 |
-
|
315 |
-
def deserialize_event(event: dict) -> DiscussionEvent:
|
316 |
-
"""Instantiates a [`DiscussionEvent`] from a dict"""
|
317 |
-
event_id: str = event["id"]
|
318 |
-
event_type: str = event["type"]
|
319 |
-
created_at = parse_datetime(event["createdAt"])
|
320 |
-
|
321 |
-
common_args = dict(
|
322 |
-
id=event_id,
|
323 |
-
type=event_type,
|
324 |
-
created_at=created_at,
|
325 |
-
author=event.get("author", {}).get("name", "deleted"),
|
326 |
-
_event=event,
|
327 |
-
)
|
328 |
-
|
329 |
-
if event_type == "comment":
|
330 |
-
return DiscussionComment(
|
331 |
-
**common_args,
|
332 |
-
edited=event["data"]["edited"],
|
333 |
-
hidden=event["data"]["hidden"],
|
334 |
-
content=event["data"]["latest"]["raw"],
|
335 |
-
)
|
336 |
-
if event_type == "status-change":
|
337 |
-
return DiscussionStatusChange(
|
338 |
-
**common_args,
|
339 |
-
new_status=event["data"]["status"],
|
340 |
-
)
|
341 |
-
if event_type == "commit":
|
342 |
-
return DiscussionCommit(
|
343 |
-
**common_args,
|
344 |
-
summary=event["data"]["subject"],
|
345 |
-
oid=event["data"]["oid"],
|
346 |
-
)
|
347 |
-
if event_type == "title-change":
|
348 |
-
return DiscussionTitleChange(
|
349 |
-
**common_args,
|
350 |
-
old_title=event["data"]["from"],
|
351 |
-
new_title=event["data"]["to"],
|
352 |
-
)
|
353 |
-
|
354 |
-
return DiscussionEvent(**common_args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dagfinn1962/prodia2/transform.py
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
import requests
|
2 |
-
|
3 |
-
url = "https://api.prodia.com/v1/transform"
|
4 |
-
|
5 |
-
headers = {
|
6 |
-
"accept": "application/json",
|
7 |
-
"content-type": "application/json",
|
8 |
-
"X-Prodia-Key": "69e66898-010d-4cd1-9e22-090f73ad007b"
|
9 |
-
}
|
10 |
-
|
11 |
-
response = requests.post(url, headers=headers)
|
12 |
-
|
13 |
-
print(response.text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Daimon/translation_demo/tokenization_small100.py
DELETED
@@ -1,364 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 Idiap Research Institute, http://www.idiap.ch/
|
2 |
-
# Written by Alireza Mohammadshahi <[email protected]>
|
3 |
-
# This is a modified version of https://github.com/huggingface/transformers/blob/main/src/transformers/models/m2m_100/tokenization_m2m_100.py
|
4 |
-
# which owns by Fariseq Authors and The HuggingFace Inc. team.
|
5 |
-
#
|
6 |
-
#
|
7 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
8 |
-
# you may not use this file except in compliance with the License.
|
9 |
-
# You may obtain a copy of the License at
|
10 |
-
#
|
11 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
12 |
-
#
|
13 |
-
# Unless required by applicable law or agreed to in writing, software
|
14 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
15 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
16 |
-
# See the License for the specific language governing permissions and
|
17 |
-
# limitations under the License.
|
18 |
-
"""Tokenization classes for SMALL100."""
|
19 |
-
import json
|
20 |
-
import os
|
21 |
-
from pathlib import Path
|
22 |
-
from shutil import copyfile
|
23 |
-
from typing import Any, Dict, List, Optional, Tuple, Union
|
24 |
-
|
25 |
-
import sentencepiece
|
26 |
-
|
27 |
-
from transformers.tokenization_utils import BatchEncoding, PreTrainedTokenizer
|
28 |
-
from transformers.utils import logging
|
29 |
-
|
30 |
-
|
31 |
-
logger = logging.get_logger(__name__)
|
32 |
-
|
33 |
-
SPIECE_UNDERLINE = "▁"
|
34 |
-
|
35 |
-
VOCAB_FILES_NAMES = {
|
36 |
-
"vocab_file": "vocab.json",
|
37 |
-
"spm_file": "sentencepiece.bpe.model",
|
38 |
-
"tokenizer_config_file": "tokenizer_config.json",
|
39 |
-
}
|
40 |
-
|
41 |
-
PRETRAINED_VOCAB_FILES_MAP = {
|
42 |
-
"vocab_file": {
|
43 |
-
"alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/vocab.json",
|
44 |
-
},
|
45 |
-
"spm_file": {
|
46 |
-
"alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/sentencepiece.bpe.model",
|
47 |
-
},
|
48 |
-
"tokenizer_config_file": {
|
49 |
-
"alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/tokenizer_config.json",
|
50 |
-
},
|
51 |
-
}
|
52 |
-
|
53 |
-
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
|
54 |
-
"alirezamsh/small100": 1024,
|
55 |
-
}
|
56 |
-
|
57 |
-
# fmt: off
|
58 |
-
FAIRSEQ_LANGUAGE_CODES = {
|
59 |
-
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"]
|
60 |
-
}
|
61 |
-
# fmt: on
|
62 |
-
|
63 |
-
|
64 |
-
class SMALL100Tokenizer(PreTrainedTokenizer):
|
65 |
-
"""
|
66 |
-
Construct an SMALL100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
|
67 |
-
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
|
68 |
-
this superclass for more information regarding those methods.
|
69 |
-
Args:
|
70 |
-
vocab_file (`str`):
|
71 |
-
Path to the vocabulary file.
|
72 |
-
spm_file (`str`):
|
73 |
-
Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
|
74 |
-
contains the vocabulary.
|
75 |
-
tgt_lang (`str`, *optional*):
|
76 |
-
A string representing the target language.
|
77 |
-
eos_token (`str`, *optional*, defaults to `"</s>"`):
|
78 |
-
The end of sequence token.
|
79 |
-
sep_token (`str`, *optional*, defaults to `"</s>"`):
|
80 |
-
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
|
81 |
-
sequence classification or for a text and a question for question answering. It is also used as the last
|
82 |
-
token of a sequence built with special tokens.
|
83 |
-
unk_token (`str`, *optional*, defaults to `"<unk>"`):
|
84 |
-
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
85 |
-
token instead.
|
86 |
-
pad_token (`str`, *optional*, defaults to `"<pad>"`):
|
87 |
-
The token used for padding, for example when batching sequences of different lengths.
|
88 |
-
language_codes (`str`, *optional*):
|
89 |
-
What language codes to use. Should be `"m2m100"`.
|
90 |
-
sp_model_kwargs (`dict`, *optional*):
|
91 |
-
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
|
92 |
-
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
|
93 |
-
to set:
|
94 |
-
- `enable_sampling`: Enable subword regularization.
|
95 |
-
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
|
96 |
-
- `nbest_size = {0,1}`: No sampling is performed.
|
97 |
-
- `nbest_size > 1`: samples from the nbest_size results.
|
98 |
-
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
|
99 |
-
using forward-filtering-and-backward-sampling algorithm.
|
100 |
-
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
|
101 |
-
BPE-dropout.
|
102 |
-
Examples:
|
103 |
-
```python
|
104 |
-
>>> from tokenization_small100 import SMALL100Tokenizer
|
105 |
-
>>> tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100", tgt_lang="ro")
|
106 |
-
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
|
107 |
-
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
|
108 |
-
>>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
|
109 |
-
>>> model(**model_inputs) # should work
|
110 |
-
```"""
|
111 |
-
|
112 |
-
vocab_files_names = VOCAB_FILES_NAMES
|
113 |
-
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
|
114 |
-
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
|
115 |
-
model_input_names = ["input_ids", "attention_mask"]
|
116 |
-
|
117 |
-
prefix_tokens: List[int] = []
|
118 |
-
suffix_tokens: List[int] = []
|
119 |
-
|
120 |
-
def __init__(
|
121 |
-
self,
|
122 |
-
vocab_file,
|
123 |
-
spm_file,
|
124 |
-
tgt_lang=None,
|
125 |
-
bos_token="<s>",
|
126 |
-
eos_token="</s>",
|
127 |
-
sep_token="</s>",
|
128 |
-
pad_token="<pad>",
|
129 |
-
unk_token="<unk>",
|
130 |
-
language_codes="m2m100",
|
131 |
-
sp_model_kwargs: Optional[Dict[str, Any]] = None,
|
132 |
-
num_madeup_words=8,
|
133 |
-
**kwargs,
|
134 |
-
) -> None:
|
135 |
-
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
|
136 |
-
|
137 |
-
self.language_codes = language_codes
|
138 |
-
fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
|
139 |
-
self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
|
140 |
-
|
141 |
-
kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
|
142 |
-
kwargs["additional_special_tokens"] += [
|
143 |
-
self.get_lang_token(lang_code)
|
144 |
-
for lang_code in fairseq_language_code
|
145 |
-
if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
|
146 |
-
]
|
147 |
-
|
148 |
-
super().__init__(
|
149 |
-
tgt_lang=tgt_lang,
|
150 |
-
bos_token=bos_token,
|
151 |
-
eos_token=eos_token,
|
152 |
-
sep_token=sep_token,
|
153 |
-
unk_token=unk_token,
|
154 |
-
pad_token=pad_token,
|
155 |
-
language_codes=language_codes,
|
156 |
-
sp_model_kwargs=self.sp_model_kwargs,
|
157 |
-
num_madeup_words=num_madeup_words,
|
158 |
-
**kwargs,
|
159 |
-
)
|
160 |
-
|
161 |
-
self.vocab_file = vocab_file
|
162 |
-
self.encoder = load_json(vocab_file)
|
163 |
-
self.decoder = {v: k for k, v in self.encoder.items()}
|
164 |
-
self.spm_file = spm_file
|
165 |
-
self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
|
166 |
-
|
167 |
-
self.encoder_size = len(self.encoder)
|
168 |
-
|
169 |
-
self.lang_token_to_id = {
|
170 |
-
self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
|
171 |
-
}
|
172 |
-
self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
|
173 |
-
self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
|
174 |
-
|
175 |
-
self._tgt_lang = tgt_lang if tgt_lang is not None else "en"
|
176 |
-
self.cur_lang_id = self.get_lang_id(self._tgt_lang)
|
177 |
-
self.set_lang_special_tokens(self._tgt_lang)
|
178 |
-
|
179 |
-
self.num_madeup_words = num_madeup_words
|
180 |
-
|
181 |
-
@property
|
182 |
-
def vocab_size(self) -> int:
|
183 |
-
return len(self.encoder) + len(self.lang_token_to_id) + self.num_madeup_words
|
184 |
-
|
185 |
-
@property
|
186 |
-
def tgt_lang(self) -> str:
|
187 |
-
return self._tgt_lang
|
188 |
-
|
189 |
-
@tgt_lang.setter
|
190 |
-
def tgt_lang(self, new_tgt_lang: str) -> None:
|
191 |
-
self._tgt_lang = new_tgt_lang
|
192 |
-
self.set_lang_special_tokens(self._tgt_lang)
|
193 |
-
|
194 |
-
def _tokenize(self, text: str) -> List[str]:
|
195 |
-
return self.sp_model.encode(text, out_type=str)
|
196 |
-
|
197 |
-
def _convert_token_to_id(self, token):
|
198 |
-
if token in self.lang_token_to_id:
|
199 |
-
return self.lang_token_to_id[token]
|
200 |
-
return self.encoder.get(token, self.encoder[self.unk_token])
|
201 |
-
|
202 |
-
def _convert_id_to_token(self, index: int) -> str:
|
203 |
-
"""Converts an index (integer) in a token (str) using the decoder."""
|
204 |
-
if index in self.id_to_lang_token:
|
205 |
-
return self.id_to_lang_token[index]
|
206 |
-
return self.decoder.get(index, self.unk_token)
|
207 |
-
|
208 |
-
def convert_tokens_to_string(self, tokens: List[str]) -> str:
|
209 |
-
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
|
210 |
-
return self.sp_model.decode(tokens)
|
211 |
-
|
212 |
-
def get_special_tokens_mask(
|
213 |
-
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
|
214 |
-
) -> List[int]:
|
215 |
-
"""
|
216 |
-
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
|
217 |
-
special tokens using the tokenizer `prepare_for_model` method.
|
218 |
-
Args:
|
219 |
-
token_ids_0 (`List[int]`):
|
220 |
-
List of IDs.
|
221 |
-
token_ids_1 (`List[int]`, *optional*):
|
222 |
-
Optional second list of IDs for sequence pairs.
|
223 |
-
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
|
224 |
-
Whether or not the token list is already formatted with special tokens for the model.
|
225 |
-
Returns:
|
226 |
-
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
227 |
-
"""
|
228 |
-
|
229 |
-
if already_has_special_tokens:
|
230 |
-
return super().get_special_tokens_mask(
|
231 |
-
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
|
232 |
-
)
|
233 |
-
|
234 |
-
prefix_ones = [1] * len(self.prefix_tokens)
|
235 |
-
suffix_ones = [1] * len(self.suffix_tokens)
|
236 |
-
if token_ids_1 is None:
|
237 |
-
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
|
238 |
-
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
|
239 |
-
|
240 |
-
def build_inputs_with_special_tokens(
|
241 |
-
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
242 |
-
) -> List[int]:
|
243 |
-
"""
|
244 |
-
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
245 |
-
adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:
|
246 |
-
- `input_ids` (for encoder) `X [eos, src_lang_code]`
|
247 |
-
- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
|
248 |
-
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
|
249 |
-
separator.
|
250 |
-
Args:
|
251 |
-
token_ids_0 (`List[int]`):
|
252 |
-
List of IDs to which the special tokens will be added.
|
253 |
-
token_ids_1 (`List[int]`, *optional*):
|
254 |
-
Optional second list of IDs for sequence pairs.
|
255 |
-
Returns:
|
256 |
-
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
257 |
-
"""
|
258 |
-
if token_ids_1 is None:
|
259 |
-
if self.prefix_tokens is None:
|
260 |
-
return token_ids_0 + self.suffix_tokens
|
261 |
-
else:
|
262 |
-
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
|
263 |
-
# We don't expect to process pairs, but leave the pair logic for API consistency
|
264 |
-
if self.prefix_tokens is None:
|
265 |
-
return token_ids_0 + token_ids_1 + self.suffix_tokens
|
266 |
-
else:
|
267 |
-
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
|
268 |
-
|
269 |
-
def get_vocab(self) -> Dict:
|
270 |
-
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
|
271 |
-
vocab.update(self.added_tokens_encoder)
|
272 |
-
return vocab
|
273 |
-
|
274 |
-
def __getstate__(self) -> Dict:
|
275 |
-
state = self.__dict__.copy()
|
276 |
-
state["sp_model"] = None
|
277 |
-
return state
|
278 |
-
|
279 |
-
def __setstate__(self, d: Dict) -> None:
|
280 |
-
self.__dict__ = d
|
281 |
-
|
282 |
-
# for backward compatibility
|
283 |
-
if not hasattr(self, "sp_model_kwargs"):
|
284 |
-
self.sp_model_kwargs = {}
|
285 |
-
|
286 |
-
self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
|
287 |
-
|
288 |
-
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
289 |
-
save_dir = Path(save_directory)
|
290 |
-
if not save_dir.is_dir():
|
291 |
-
raise OSError(f"{save_directory} should be a directory")
|
292 |
-
vocab_save_path = save_dir / (
|
293 |
-
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
|
294 |
-
)
|
295 |
-
spm_save_path = save_dir / (
|
296 |
-
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
|
297 |
-
)
|
298 |
-
|
299 |
-
save_json(self.encoder, vocab_save_path)
|
300 |
-
|
301 |
-
if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
|
302 |
-
copyfile(self.spm_file, spm_save_path)
|
303 |
-
elif not os.path.isfile(self.spm_file):
|
304 |
-
with open(spm_save_path, "wb") as fi:
|
305 |
-
content_spiece_model = self.sp_model.serialized_model_proto()
|
306 |
-
fi.write(content_spiece_model)
|
307 |
-
|
308 |
-
return (str(vocab_save_path), str(spm_save_path))
|
309 |
-
|
310 |
-
def prepare_seq2seq_batch(
|
311 |
-
self,
|
312 |
-
src_texts: List[str],
|
313 |
-
tgt_texts: Optional[List[str]] = None,
|
314 |
-
tgt_lang: str = "ro",
|
315 |
-
**kwargs,
|
316 |
-
) -> BatchEncoding:
|
317 |
-
self.tgt_lang = tgt_lang
|
318 |
-
self.set_lang_special_tokens(self.tgt_lang)
|
319 |
-
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
|
320 |
-
|
321 |
-
def _build_translation_inputs(self, raw_inputs, tgt_lang: Optional[str], **extra_kwargs):
|
322 |
-
"""Used by translation pipeline, to prepare inputs for the generate function"""
|
323 |
-
if tgt_lang is None:
|
324 |
-
raise ValueError("Translation requires a `tgt_lang` for this model")
|
325 |
-
self.tgt_lang = tgt_lang
|
326 |
-
inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
|
327 |
-
return inputs
|
328 |
-
|
329 |
-
def _switch_to_input_mode(self):
|
330 |
-
self.set_lang_special_tokens(self.tgt_lang)
|
331 |
-
|
332 |
-
def _switch_to_target_mode(self):
|
333 |
-
self.prefix_tokens = None
|
334 |
-
self.suffix_tokens = [self.eos_token_id]
|
335 |
-
|
336 |
-
def set_lang_special_tokens(self, src_lang: str) -> None:
|
337 |
-
"""Reset the special tokens to the tgt lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
|
338 |
-
lang_token = self.get_lang_token(src_lang)
|
339 |
-
self.cur_lang_id = self.lang_token_to_id[lang_token]
|
340 |
-
self.prefix_tokens = [self.cur_lang_id]
|
341 |
-
self.suffix_tokens = [self.eos_token_id]
|
342 |
-
|
343 |
-
def get_lang_token(self, lang: str) -> str:
|
344 |
-
return self.lang_code_to_token[lang]
|
345 |
-
|
346 |
-
def get_lang_id(self, lang: str) -> int:
|
347 |
-
lang_token = self.get_lang_token(lang)
|
348 |
-
return self.lang_token_to_id[lang_token]
|
349 |
-
|
350 |
-
|
351 |
-
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
|
352 |
-
spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
|
353 |
-
spm.Load(str(path))
|
354 |
-
return spm
|
355 |
-
|
356 |
-
|
357 |
-
def load_json(path: str) -> Union[Dict, List]:
|
358 |
-
with open(path, "r") as f:
|
359 |
-
return json.load(f)
|
360 |
-
|
361 |
-
|
362 |
-
def save_json(data, path: str) -> None:
|
363 |
-
with open(path, "w") as f:
|
364 |
-
json.dump(data, f, indent=2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|