parquet-converter committed
Commit 0afeb05 · 1 Parent(s): 55c8914

Update parquet files (step 85 of 397)
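The converter bot's own code is not part of this commit, so purely as an illustration of the kind of step the commit message describes: a minimal sketch of converting a data file to Parquet, assuming pandas and pyarrow and a hypothetical JSON-lines input file (none of these names come from the diff).

```python
# Minimal sketch of a parquet conversion step (illustrative only; the
# actual parquet-converter logic is not shown in this commit).
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

def convert_to_parquet(src_path: str, dst_path: str) -> None:
    # Load the source records (hypothetical JSON-lines input).
    df = pd.read_json(src_path, lines=True)
    # Convert to an Arrow table and write it out as a Parquet file.
    table = pa.Table.from_pandas(df)
    pq.write_table(table, dst_path, compression="snappy")

convert_to_parquet("train.jsonl", "train.parquet")
```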

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. spaces/0xSpleef/openchat-openchat_8192/README.md +0 -12
  2. spaces/0xtanmoysamanta/espnet-kan-bayashi_ljspeech_vits/README.md +0 -13
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Microsoft Office 32-bit Version Online or Offline.md +0 -36
  4. spaces/1gistliPinn/ChatGPT4/Examples/Facebook Password Hacker V4 0 Free Download.md +0 -48
  5. spaces/1phancelerku/anime-remove-background/AP KGBV Teaching Jobs 2023 How to Apply for Principal PGT CRT PET Posts in KGBV Schools.md +0 -185
  6. spaces/1phancelerku/anime-remove-background/Candy Crush Soda Saga A Free and Fun Game for PC Windows 7 Users.md +0 -129
  7. spaces/1phancelerku/anime-remove-background/Download Guardian Tales JP and Experience a Classic Adventure with Pixel Art and Puzzles.md +0 -176
  8. spaces/AIConsultant/MusicGen/audiocraft/metrics/__init__.py +0 -14
  9. spaces/AIWaves/Debate/src/agents/Action/base_action.py +0 -48
  10. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb16-120e_deepfashion2_long_sleeved_outwear_256x192.py +0 -172
  11. spaces/Abhilashvj/planogram-compliance/data/scripts/get_imagenet.sh +0 -51
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetAllChildrenSizers.js +0 -14
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RemoveChildMethods.js +0 -39
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/InTouching.js +0 -2
  15. spaces/Aki004/herta-so-vits/vdecoder/__init__.py +0 -0
  16. spaces/AlekseyKorshuk/rugpt3/README.md +0 -37
  17. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/model_irse.py +0 -84
  18. spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/upfirdn2d.cpp +0 -103
  19. spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/global_config.py +0 -12
  20. spaces/Anar0140/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/style.css +0 -28
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/text2image.md +0 -277
  22. spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/hrfpn.py +0 -102
  23. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py +0 -9
  24. spaces/AngoHF/ANGO-Leaderboard/assets/__init__.py +0 -0
  25. spaces/AnishKumbhar/ChatBot/README.md +0 -13
  26. spaces/Ankush05/Newcode/README.md +0 -12
  27. spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/diffusionmodules/util.py +0 -270
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__init__.py +0 -18
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/url.py +0 -435
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/cmd.py +0 -436
  31. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py +0 -191
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/build.py +0 -24
  33. spaces/BetterAPI/BetterChat/src/lib/utils/sum.ts +0 -3
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/compat.py +0 -350
  35. spaces/BigSalmon/BackTranslation/app.py +0 -117
  36. spaces/CVPR/LIVE/thrust/thrust/detail/static_assert.h +0 -92
  37. spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/iterator/is_discard_iterator.h +0 -40
  38. spaces/CVPR/Text2Human/app.py +0 -158
  39. spaces/CVPR/WALT/mmdet/core/bbox/match_costs/builder.py +0 -8
  40. spaces/CVPR/WALT/mmdet/models/dense_heads/dense_test_mixins.py +0 -100
  41. spaces/CVPR/regionclip-demo/detectron2/structures/rotated_boxes.py +0 -505
  42. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py +0 -1
  43. spaces/CjangCjengh/Shanghainese-TTS/monotonic_align/core.py +0 -35
  44. spaces/CodingBillionaire/bark-voice-cloning/hubert/__init__.py +0 -0
  45. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/blocks.py +0 -342
  46. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/io_.py +0 -216
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-4ccfb72c.css +0 -1
  48. spaces/Dagfinn1962/stablediffusion-articlera/theme.css +0 -1
  49. spaces/Djacon/emotion_detection/files/js/summarizer.js +0 -213
  50. spaces/DrSong/ChatGLM-6B-ChatBot/README.md +0 -13
spaces/0xSpleef/openchat-openchat_8192/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Openchat-openchat 8192
- emoji: 🌍
- colorFrom: red
- colorTo: pink
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
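The front matter above tells the Hub to launch the file named by `app_file` with the Gradio SDK. As a hedged illustration only (the deleted Space's real `app.py` is not in this diff), a minimal Gradio app consistent with that configuration might look like:

```python
# Hypothetical minimal app.py for a Gradio Space; the deleted Space's
# actual application code is not shown in this diff.
import gradio as gr

def greet(name: str) -> str:
    # Echo a greeting back to the user.
    return f"Hello, {name}!"

# Build a simple text-in / text-out interface around the function.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()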
spaces/0xtanmoysamanta/espnet-kan-bayashi_ljspeech_vits/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Espnet-kan-bayashi Ljspeech Vits
- emoji: 🐨
- colorFrom: yellow
- colorTo: gray
- sdk: gradio
- sdk_version: 3.24.1
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Microsoft Office 32-bit Version Online or Offline.md DELETED
@@ -1,36 +0,0 @@
-
- <h1>How to Install Microsoft Office 32-bit Version on Your PC</h1>
- <p>Microsoft Office is a popular suite of productivity applications that includes Word, Excel, PowerPoint, Outlook, and more. You can install Microsoft Office on your PC either online or offline, depending on your preference and internet connection. However, before you install Microsoft Office, you need to choose between the 64-bit or 32-bit version of the software. In this article, we will explain how to install Microsoft Office 32-bit version on your PC and why you might want to do so.</p>
- <h2>microsoft office install 32 bit</h2><br /><p><b><b>DOWNLOAD</b> &#9881; <a href="https://byltly.com/2uKxVM">https://byltly.com/2uKxVM</a></b></p><br /><br />
- <h2>What is the difference between 64-bit and 32-bit versions of Microsoft Office?</h2>
- <p>The main difference between 64-bit and 32-bit versions of Microsoft Office is the amount of memory they can use. The 64-bit version can access more memory than the 32-bit version, which can improve the performance and stability of the software when working with large files and data sets. However, the 64-bit version also requires more disk space and may not be compatible with some older add-ins or customizations.</p>
- <h2>Why choose the 32-bit version of Microsoft Office?</h2>
- <p>There are some reasons why you might want to choose the 32-bit version of Microsoft Office over the 64-bit version. For example, you might want to choose the 32-bit version if:</p>
- <ul>
- <li>Your PC has a 32-bit processor or operating system. You can check this by going to Settings > System > About and looking at the System type.</li>
- <li>You have a 64-bit processor or operating system but less than 4 GB of RAM. The 64-bit version may not run well on low-memory devices.</li>
- <li>You use add-ins or customizations that are only compatible with the 32-bit version of Microsoft Office. You can check this by contacting the add-in or customization provider.</li>
- <li>You have other software that requires the 32-bit version of Microsoft Office to work properly. For example, some database applications may need the 32-bit version of Access.</li>
- </ul>
- <h2>How to install Microsoft Office 32-bit version online?</h2>
- <p>If you have a stable internet connection and a Microsoft account, you can install Microsoft Office 32-bit version online by following these steps:</p>
- <ol>
- <li>Go to <a href="https://www.office.com">https://www.office.com</a> and sign in with your Microsoft account.</li>
- <li>From the home page, select Install Office > Other install options.</li>
- <li>Under Language and install options, select Additional install options.</li>
- <li>Under Version, choose 32-bit and then select Install.</li>
- <li>The installation file will be downloaded to your PC. Run it and follow the instructions on the screen to complete the installation.</li>
- </ol>
- <h2>How to install Microsoft Office 32-bit version offline?</h2>
- <p>If you don't have a stable internet connection or prefer to install Microsoft Office offline, you can use the offline installer by following these steps:</p>
- <p></p>
- <ol>
- <li>Go to <a href="https://www.microsoft.com/en-us/download/details.aspx?id=49117">https://www.microsoft.com/en-us/download/details.aspx?id=49117</a> and download the offline installer for your language and region.</li>
- <li>The offline installer will be downloaded as an ISO file. You can either burn it to a DVD or mount it as a virtual drive on your PC.</li>
- <li>Select the Microsoft 365 folder from the virtual drive and then double-click either Setup32.exe to install the 32-bit version of Microsoft Office.</li>
- <li>Follow the instructions on the screen to complete the installation.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Microsoft Office is a powerful and versatile suite of productivity applications that you can install on your PC either online or offline. However, before you install Microsoft Office, you need to choose between the 64-bit or 32-bit version of the software depending on your device specifications and compatibility needs. In this article, we explained how to install Microsoft Office 32-bit version on your PC and why you might want to do so. We hope this article was helpful and informative for you.</p> ddb901b051<br />
- <br />
- <br />
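For the "System type" check the deleted article above describes (Settings > System > About), a quick scripted equivalent is shown below. This snippet is illustrative and not part of the original file; it reports the bitness of the running Python build and the underlying processor.

```python
# Illustrative bitness check, mirroring the article's
# "Settings > System > About > System type" step.
import platform

bits, _ = platform.architecture()  # bitness of the running Python build
machine = platform.machine()       # processor, e.g. "AMD64" or "x86"
print(f"Python build: {bits}, processor: {machine}")
```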
spaces/1gistliPinn/ChatGPT4/Examples/Facebook Password Hacker V4 0 Free Download.md DELETED
@@ -1,48 +0,0 @@
- <h2>facebook password hacker v4 0 free download</h2><br /><p><b><b>Download Zip</b> ->>->>->> <a href="https://imgfil.com/2uy0uu">https://imgfil.com/2uy0uu</a></b></p><br /><br />
-
- This hack tool includes access to all the information on your Facebook profile. You can use it to read the messages from all your Facebook friends and read the messages in the social network.
-
- Below, I will tell you how you can download the APK file.
-
- Download the APK of FaceBook Password Hacker for Android for free.
-
- The FaceBook Password Hacker app is very easy to use. You will be able to hack the passwords of your Facebook account in the next 30 seconds.
-
- After you have logged in to your Facebook account, the app will allow you to hack any Facebook account within a few seconds. You can hack multiple accounts simultaneously.
-
- With this hack tool, you will be able to hack the password of all the Facebook accounts on your Android phone.
-
- There are many reasons why you can use this app to hack the password of your Facebook account. If you need to access a file or document on Facebook which is locked by Facebook, then you can use this Facebook Password Hacker app to hack your password and open the file.
-
- This Facebook password hacker app is not designed to hack the account of a Facebook admin or Facebook staff. You cannot access the data of other Facebook accounts.
-
- Facebook password hacker is a powerful tool that can help you to hack all your Facebook account on your Android phone.
-
- If you have a Facebook account on your Android phone, then you can use this Facebook Password Hacker to hack any Facebook account. You can use this FaceBook Password hacker app to hack Facebook accounts that you can use on your Android phone. You can hack Facebook accounts on your Android phone which are shared with you.
-
- With the FaceBook Password Hacker app, you can hack Facebook accounts. The app allows you to access your Facebook account, and all the data on your Facebook account is provided to you.
-
- You can hack Facebook accounts by email or phone. You can get the phone number from your Facebook friends, or you can also access the phone numbers of your Facebook friends directly.
-
- App Details:
-
- Name: FaceBook Password Hacker
-
- Version: 1.4.3
-
- Developer: JAN
-
-
- File Size: 38 MB
-
- Requires Android: 3.0 and up
-
- Overview:
-
- You can use this FaceBook Password Hacker app to hack Facebook account.
-
- This FaceBook password hacker app is 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1phancelerku/anime-remove-background/AP KGBV Teaching Jobs 2023 How to Apply for Principal PGT CRT PET Posts in KGBV Schools.md DELETED
@@ -1,185 +0,0 @@
- <br />
- <h1>APKGVB Notification 2023: Everything You Need to Know</h1>
- <p>If you are interested in working as a teacher or studying in a residential school for girls in Andhra Pradesh, then you should not miss the APKGVB notification 2023. This notification is released by the Samagra Shiksha, Government of Andhra Pradesh, to invite online applications for filling up of vacant posts of teaching staff and students in all the Kasturba Gandhi Balika Vidyalayas (KGBVs) located across the state. In this article, we will tell you everything you need to know about the APKGVB notification 2023, including what is APKGVB, what is the recruitment and admission process, how to apply online, and more.</p>
- <h2>apkgbv notification 2023</h2><br /><p><b><b>Download</b> >>>>> <a href="https://jinyurl.com/2uNQpT">https://jinyurl.com/2uNQpT</a></b></p><br /><br />
- <h2>What is APKGVB?</h2>
- <p>APKGVB stands for Andhra Pradesh Kasturba Gandhi Balika Vidyalaya. It is a scheme launched by the Government of India in August 2004, under the Sarva Shiksha Abhiyan (SSA), to provide quality education to girls from disadvantaged sections of society. The scheme aims to set up residential schools at upper primary level for girls belonging to SC, ST, OBC, minority communities and families below the poverty line (BPL) in educationally backward blocks. The scheme was later extended to cover girls in secondary level as well.</p>
- <h3>The objectives and features of APKGVB</h3>
- <p>The main objectives of APKGVB are:</p>
- <ul>
- <li>To ensure access and quality education to girls from marginalized groups by providing them with a safe and supportive environment.</li>
- <li>To reduce gender disparities in education and enhance the self-esteem and confidence of girls.</li>
- <li>To promote girls' participation in co-curricular activities, life skills education, vocational training and career guidance.</li>
- <li>To facilitate the transition of girls from school to higher education or work.</li>
- </ul>
- <p>Some of the key features of APKGVB are:</p>
- <ul>
- <li>The schools are run by local bodies or NGOs with financial assistance from the central and state governments.</li>
- <li>The schools provide free boarding, lodging, uniforms, textbooks, stationery and health care to the students.</li>
- <li>The schools follow the state curriculum and use local language as the medium of instruction.</li>
- <li>The schools have qualified and trained female teachers and staff.</li>
- <li>The schools have well-equipped classrooms, libraries, laboratories, computer rooms, playgrounds and other facilities.</li>
- <li>The schools have a child-friendly and gender-sensitive environment that respects the diversity and dignity of girls.</li>
- </ul>
- <h3>The benefits and achievements of APKGVB</h3>
- <p>APKGVB has been successful in achieving its goals and bringing positive changes in the lives of girls. Some of the benefits and achievements of APKGVB are:</p>
- <ul>
- <li>APKGVB has increased the enrollment, retention and completion rates of girls in upper primary and secondary education.</li>
- <li>APKGVB has improved the learning outcomes and academic performance of girls.</li>
- <li>APKGVB has reduced the dropout rate, absenteeism rate and child marriage rate among girls.</li>
- <li>APKGVB has empowered girls with knowledge, skills, values and attitudes that enable them to face challenges and opportunities in life.</li>
- <li>APKGVB has created awareness among parents, community members and stakeholders about the importance of girls' education.</li>
- </ul>
- <h2>What is the APKGVB notification 2023?</h2> <p>The APKGVB notification 2023 is a document that contains all the information regarding the recruitment of teaching staff and the admission of students in the KGBVs for the academic year 2023-24. The notification is released by the Samagra Shiksha, Government of Andhra Pradesh, on its official website apkgbv.apcfss.in. The notification covers two aspects:</p>
- <p>apkgbv recruitment 2023 apply online<br />
- apkgbv merit list 2023 district wise<br />
- apkgbv contract recruitment 2023<br />
- apkgbv application form 2023<br />
- apkgbv vacancies 2023<br />
- apkgbv principal pgt crt pet jobs 2023<br />
- apkgbv exam date 2023<br />
- apkgbv syllabus 2023 pdf download<br />
- apkgbv admit card 2023<br />
- apkgbv result 2023<br />
- apkgbv salary details 2023<br />
- apkgbv eligibility criteria 2023<br />
- apkgbv selection process 2023<br />
- apkgbv previous papers 2023<br />
- apkgbv answer key 2023<br />
- apkgbv cut off marks 2023<br />
- apkgbv counselling schedule 2023<br />
- apkgbv joining letter 2023<br />
- apkgbv latest news and updates 2023<br />
- apkgbv official website link 2023<br />
- apkgbv kasturba gandhi balika vidyalaya recruitment 2023<br />
- apkgbv andhra pradesh sarva shiksha abhiyan recruitment 2023<br />
- apkgbv teaching and non teaching posts recruitment 2023<br />
- apkgbv online application portal link 2023<br />
- apkgbv notification pdf download 2023<br />
- apkgbv district wise vacancy list 2023<br />
- apkgbv reservation roster points breakup 2023<br />
- apkgbv online grievance registration link 2023<br />
- apkgbv tentative merit list release date 2023<br />
- apkgbv final merit list release date 2023<br />
- apkgbv interview date and venue details 2023<br />
- apkgbv document verification process and checklist 2023<br />
- apkgbv how to apply step by step guide 2023<br />
- apkgbv application fee payment mode and amount details 2023<br />
- apkgbv age limit and relaxation details 2023<br />
- apkgbv educational qualification and experience details 2023<br />
- apkgbv exam pattern and marking scheme details 2023<br />
- apkgbv exam center and code details 2023<br />
- apkgbv how to download admit card step by step guide 2023<br />
- apkgbv how to check result step by step guide 2023<br />
- apkgbv how to download merit list step by step guide 2023<br />
- apkgbv how to calculate cut off marks formula and factors details 2023<br />
- apkgbv how to raise objections against answer key step by step guide 2023<br />
- apkgbv how to download joining letter step by step guide 2023<br />
- apkgbv frequently asked questions and answers details 2023</p>
- <h3>The recruitment process for teaching staff in KGBVs</h3>
- <p>The Samagra Shiksha invites online applications from eligible women candidates for filling up of 1358 vacant posts of teaching staff in all the KGBVs across the state. The posts include Principal, Post Graduate Teachers (PGT), Contract Residential Teachers (CRT) and Physical Education Teachers (PET). The recruitment is done on a contractual basis for a period of one year or till regular recruitment is made, whichever is earlier.</p>
- <h4>Eligibility criteria and application fee</h4>
- <p>The candidates who wish to apply for the teaching staff recruitment must fulfill the following eligibility criteria:</p>
- <table>
- <tr><th>Post</th><th>Qualification</th><th>Age Limit</th><th>Application Fee</th></tr>
- <tr><td>Principal</td><td>Post Graduation Degree with B.Ed. from a recognized university with at least 50% marks in aggregate for OCs, 45% for BCs and 40% for SC/ST/Differently abled persons.</td><td>Not more than 45 years as on 01.07.2023</td><td>Rs. 500/-</td></tr>
- <tr><td>PGT</td><td>Post Graduation Degree in the relevant subject with B.Ed. from a recognized university with at least 50% marks in aggregate for OCs, 45% for BCs and 40% for SC/ST/Differently abled persons.</td><td>Not more than 44 years as on 01.07.2023</td><td>Rs. 500/-</td></tr>
- <tr><td>CRT</td><td>Graduation Degree in the relevant subject with B.Ed. from a recognized university with at least 50% marks in aggregate for OCs, 45% for BCs and 40% for SC/ST/Differently abled persons.</td><td>Not more than 39 years as on 01.07.2023</td><td>Rs. 500/-</td></tr>
- <tr><td>PET</td><td>Intermediate with D.P.Ed./B.P.Ed./M.P.Ed. from a recognized board or university.</td><td>Not more than 39 years as on 01.07.2023</td><td>Rs. 250/-</td></tr>
- </table>
- <h4>Timeline and selection procedure</h4>
- <p>The candidates who are interested and eligible can apply online through the official website apkgbv.apcfss.in from May 30, 2023 to June 05, 2023. The candidates have to pay the application fee through online mode only using debit card/credit card/net banking etc. The candidates have to upload their scanned copies of photograph, signature and relevant documents while applying online.</p>
- <p>The selection of the candidates will be done on the basis of merit list prepared by the State Office at the ratio of 1:3 for each post. The merit list will be based on the academic qualifications, professional qualifications and experience of the candidates as per the weightage given below:</p>
- <table>
- <tr><th>Post</th><th>Acedemic Qualifications (Max Marks)</th><th>Professional Qualifications (Max Marks)</th><th>Experience (Max Marks)</th></tr>
- <tr><td>Principal</td><td>30 (10 marks each for SSC, Intermediate and Graduation)</td><td>20 (10 marks each for Post Graduation and B.Ed.)</td><td>50 (10 marks each for one year of experience as Principal/PGT/CRT/PET in any residential school)</td></tr>
- <tr><td>PGT</td><td>30 (10 marks each for SSC, Intermediate and Graduation)</td><td>20 (10 marks each for Post Graduation and B.Ed.)</td><td>50 (10 marks each for one year of experience as PGT/CRT/PET in any residential school)</td></tr>
- <tr><td>CRT</td><td>30 (10 marks each for SSC, Intermediate and Graduation)</td><td>20 (10 marks each for B.Ed.)</td><td>50 (10 marks each for one year of experience as CRT/PET in any residential school)</td></tr>
- <tr><td>PET</td><td>30 (10 marks each for SSC, Intermediate and D.P.Ed./B.P.Ed./M.P.Ed.)</td><td>20 (10 marks each for Graduation)</td><td>50 (10 marks each for one year of experience as PET in any residential school)</td></tr>
- </table>
- <p>The candidates who are shortlisted in the merit list will be called for certificate verification and demo/interview at the district level. The final selection will be based on the performance of the candidates in the demo/interview and the availability of vacancies.</p>
- <h4>Vacancy details and salary structure</h4>
- <p>The vacancy details for the teaching staff recruitment are as follows:</p>
- <table>
- <tr><th>Post</th><th>No. of Vacancies</th><th>Salary per month</th></tr>
- <tr><td>Principal</td><td>44</td><td>Rs. 40,000/-</td></tr>
- <tr><td>PGT</td><td>313</td><td>Rs. 31,000/-</td></tr>
- <tr><td>CRT</td><td>897</td><td>Rs. 21,000/-</td></tr>
- <tr><td>PET</td><td>104</td><td>Rs. 12,000/-</td></tr>
- </table>
- <p>The salary structure for the teaching staff is subject to revision as per the norms of the Samagra Shiksha.</p>
- <h3>The admission process for students in KGBVs</h3>
- <p>The Samagra Shiksha also invites online applications from eligible girl students for admission into Class VI to X in all the KGBVs across the state. The admission is done on a merit-cum-reservation basis for a total of 36,720 seats available in 918 KGBVs.</p>
- <h4>Eligibility criteria and application fee</h4>
- <p>The girl students who wish to apply for the admission in KGBVs must fulfill the following eligibility criteria:</p>
- <ul>
- <li>The girl student must belong to SC, ST, OBC, minority community or BPL family.</li>
- <li>The girl student must have passed Class V to IX from any recognized school in Andhra Pradesh.</li>
- <li>The girl student must not be enrolled in any other residential school or hostel.</li>
- <li>The girl student must not be suffering from any contagious disease or disability.</li>
- <li>The girl student must be willing to stay in the KGBV hostel and follow its rules and regulations.</li>
- </ul>
- <p>The girl students who are eligible can apply online through the official website apkgbv.apcfss.in without paying any application fee.</p>
- <h4>Timeline and selection procedure</h4>
- <p>The girl students who are interested and eligible can apply online through the official website apkgbv.apcfss.in from June 10, 2023 to June 20, 2023. The girl students have to upload their scanned copies of photograph, signature and relevant documents while applying online.</p>
- <p>The selection of the girl students will be done on the basis of merit list prepared by the District Project Office at the ratio of 1:2 for each seat. The merit list will be based on the marks obtained by the girl students in their previous class. The merit list will be displayed on the notice board of the concerned KGBV and on the official website apkgbv.apcfss.in by June 25, 2023.</p>
- <p>The girl students who are shortlisted in the merit list will be called for certificate verification and counseling at the district level. The final selection will be based on the verification of documents and the availability of seats.</p>
- <h4>Reservation policy and seat allotment</h4>
- <p>The reservation policy for the admission of girl students in KGBVs is as follows:</p>
- <ul>
- <li>15% of seats are reserved for SCs, 6% for STs, 29% for BCs, 15% for minorities and 3% for differently abled persons.</li>
- <li>33% of seats are reserved for girls from BPL families irrespective of their caste or community.</li>
- <li>In case of non-availability of eligible candidates in any category, the seats will be filled up by eligible candidates from other categories as per merit.</li>
- <li>In case of non-availability of eligible candidates from any district, the seats will be filled up by eligible candidates from other districts as per merit.</li>
- <li>In case of non-availability of eligible candidates from any state, the seats will be filled up by eligible candidates from other states as per merit.</li>
- <li>The seat allotment will be done by the District Project Office based on the preferences given by the girl students during counseling.</li>
- <li>The girl students who are allotted seats in KGBVs have to report to their respective schools by June 30, 2023 with their original certificates and other documents.</li>
- </ul>
- <h2>How to apply for APKGVB notification 2023?</h2>
- <p>If you are interested in applying for the APKGVB notification 2023, either as a teaching staff or as a student, you have to follow the steps given below:</p>
- <h3>The steps to apply online for teaching staff recruitment</h3>
- <ol>
- <li>Visit the official website apkgbv.apcfss.in and click on the link "Online Application for Teaching Staff Recruitment 2023".</li>
- <li>Read the instructions carefully and click on the "Proceed" button.</li>
- <li>Fill in the basic details such as name, date of birth, gender, mobile number, email id, etc. and click on the "Submit" button.</li>
- <li>You will receive an OTP on your registered mobile number and email id. Enter the OTP and click on the "Verify" button.</li>
- <li>You will get a registration number and password. Note them down for future reference.</li>
- <li>Login with your registration number and password and fill in the personal details, educational details, experience details, etc. and click on the "Save" button.</li>
- <li>Upload your scanned photograph, signature and relevant documents in the prescribed format and size and click on the "Upload" button.</li>
- <li>Pay the application fee through online mode using debit card/credit card/net banking etc. and click on the "Pay" button.</li>
- <li>Take a printout of the application form and fee receipt for future reference.</li>
- </ol>
- <h3>The steps to apply online for student admission</h3>
- <ol>
- <li>Visit the official website apkgbv.apcfss.in and click on the link "Online Application for Student Admission 2023".</li>
- <li>Read the instructions carefully and click on the "Proceed" button.</li>
- <li>Fill in the basic details such as name, date of birth, gender, caste, community, BPL status, etc. and click on the "Submit" button.</li>
- <li>You will receive an OTP on your registered mobile number. Enter the OTP and click on the "Verify" button.</li>
- <li>You will get a registration number and password. Note them down for future reference.</li>
- <li>Login with your registration number and password and fill in the personal details, educational details, preferences of schools, etc. and click on the "Save" button.</li>
- <li>Upload your scanned photograph, signature and relevant documents in the prescribed format and size and click on the "Upload" button.</li>
- <li>Take a printout of the application form for future reference.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>The APKGVB notification 2023 is a great opportunity for women candidates who want to pursue a career as a teacher and for girl students who want to get quality education in a residential school. The notification provides all the details about the eligibility criteria, application process, selection process, vacancy details, reservation policy, etc. for both teaching staff recruitment and student admission. The candidates who are interested and eligible can apply online through the official website apkgbv.apcfss.in before the last date. The candidates who are selected will be able to work or study in one of the best KGBVs in Andhra Pradesh.</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some of the frequently asked questions about the APKGVB notification 2023:</p>
- <h4>Q: When will the APKGVB notification 2023 be released?</h4>
- <p>A: The APKGVB notification 2023 is expected to be released by May 2023 on the official website apkgbv.apcfss.in.</p>
- <h4>Q: How many vacancies are there for teaching staff recruitment in KGBVs?</h4>
- <p>A: There are 1358 vacancies for teaching staff recruitment in KGBVs, including 44 for Principal, 313 for PGT, 897 for CRT and 104 for PET.</p>
- <h4>Q: How many seats are there for student admission in KGBVs?</h4>
- <p>A: There are 36,720 seats for student admission in KGBVs, including 9180 seats for Class VI, 9180 seats for Class VII, 9180 seats for Class VIII, 9180 seats for Class IX and 9180 seats for Class X.</p>
- <h4>Q: What is the application fee for teaching staff recruitment in KGBVs?</h4>
- <p>A: The application fee for teaching staff recruitment in KGBVs is Rs. 500/- for Principal, PGT and CRT posts and Rs. 250/- for PET post.</p>
- <h4>Q: What is the application fee for student admission in KGBVs?</h4>
- <p>A: There is no application fee for student admission in KGBVs. The girl students can apply online for free.</p>
- <h4>Q: How can I contact the Samagra Shiksha for any queries or grievances regarding the APKGVB notification 2023?</h4>
- <p>A: You can contact the Samagra Shiksha through the following modes:</p>
- <ul>
- <li>Email: [email protected]</li>
- <li>Phone: 040-23317140, 040-23317141</li>
- <li>Address: Samagra Shiksha, Government of Andhra Pradesh, 5th Floor, Anjaneya Towers, Ibrahimpatnam, Vijayawada - 521456</li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
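The weightage tables in the deleted article above imply a 100-mark merit score (30 academic + 20 professional + 50 experience). As a rough sketch under stated assumptions only — the article does not specify how partial marks are awarded, so this assumes full marks per completed qualification and 10 marks per year of experience capped at 50:

```python
# Illustrative merit-score calculation for the CRT post, based on the
# weightage table in the deleted article; exact mark-scaling rules are
# not given there, so full marks per completed item are assumed.
def crt_merit(ssc: bool, inter: bool, graduation: bool,
              bed: bool, years_experience: int) -> int:
    academic = 10 * sum([ssc, inter, graduation])   # max 30
    professional = 20 if bed else 0                 # max 20
    experience = min(10 * years_experience, 50)     # max 50
    return academic + professional + experience     # max 100

print(crt_merit(True, True, True, True, 3))  # 30 + 20 + 30 -> 80
```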
spaces/1phancelerku/anime-remove-background/Candy Crush Soda Saga A Free and Fun Game for PC Windows 7 Users.md DELETED
@@ -1,129 +0,0 @@
-
- <h1>Candy Crush Soda Saga Download for PC Windows 7 Free</h1>
- <p>If you are looking for a fun and addictive puzzle game that will keep you entertained for hours, you might want to try Candy Crush Soda Saga. This game is a sequel to the popular Candy Crush Saga, and it offers more divine matching combinations, challenging game modes, and fizzy fun. In this article, we will show you how to download and install Candy Crush Soda Saga on your PC Windows 7 for free. We will also share some of the benefits of playing this game, as well as some tips and tricks to help you master it.</p>
- <h2>What is Candy Crush Soda Saga?</h2>
- <p>Candy Crush Soda Saga is a match-3 puzzle game developed by King, a leading company in casual gaming. The game was released in 2014 as a spin-off of Candy Crush Saga, one of the most successful mobile games of all time. The game has over 100 million downloads on Google Play Store alone, and it has received positive reviews from critics and players alike.</p>
- <h2>candy crush soda saga download for pc windows 7 free</h2><br /><p><b><b>Download Zip</b> &#10031;&#10031;&#10031; <a href="https://jinyurl.com/2uNRqC">https://jinyurl.com/2uNRqC</a></b></p><br /><br />
- <h3>A fun and addictive match-3 puzzle game</h3>
- <p>The gameplay of Candy Crush Soda Saga is similar to that of Candy Crush Saga. You have to match three or more candies of the same color to clear them from the board. You can also create special candies by matching four or more candies in different shapes, such as striped, wrapped, or fish candies. These special candies can have various effects, such as clearing a whole row, column, or area of candies.</p>
- <p>The game has different objectives depending on the level type. For example, in soda levels, you have to switch the bottles and match candies to release purple soda and save the candy bears. In frosting levels, you have to match candies to smash the ice and set the candy bears free. In honey levels, you have to match candies next to honeycomb to release the trapped candy bears. In jam levels, you have to spread the jam across the board.</p>
- <p>candy crush soda saga pc windows 7 free download<br />
- download candy crush soda saga for windows 7 pc free<br />
- candy crush soda saga windows 7 free pc game download<br />
- how to download candy crush soda saga on windows 7 pc for free<br />
- candy crush soda saga for pc windows 7 free full version download<br />
- candy crush soda saga download free for windows 7 laptop pc<br />
- download and install candy crush soda saga on windows 7 pc free<br />
- candy crush soda saga free online game download for windows 7 pc<br />
- candy crush soda saga windows 7 pc offline download free<br />
- candy crush soda saga microsoft store download for windows 7 pc free<br />
- candy crush soda saga apk download for windows 7 pc free<br />
- candy crush soda saga mod apk download for windows 7 pc free<br />
- candy crush soda saga hack download for windows 7 pc free<br />
- candy crush soda saga cheats download for windows 7 pc free<br />
- candy crush soda saga unlimited lives download for windows 7 pc free<br />
- candy crush soda saga latest version download for windows 7 pc free<br />
- candy crush soda saga update download for windows 7 pc free<br />
- candy crush soda saga level editor download for windows 7 pc free<br />
- candy crush soda saga level unlocker download for windows 7 pc free<br />
- candy crush soda saga booster generator download for windows 7 pc free<br />
- candy crush soda saga tips and tricks download for windows 7 pc free<br />
- candy crush soda saga walkthrough guide download for windows 7 pc free<br />
- candy crush soda saga gameplay video download for windows 7 pc free<br />
- candy crush soda saga soundtrack music download for windows 7 pc free<br />
- candy crush soda saga wallpaper hd download for windows 7 pc free<br />
- candy crush soda saga theme pack download for windows 7 pc free<br />
- candy crush soda saga icons pack download for windows 7 pc free<br />
- candy crush soda saga fonts pack download for windows 7 pc free<br />
- candy crush soda saga screensaver download for windows 7 pc free<br />
- candy crush soda saga cursor pack download for windows 7 pc free<br />
- play candy crush soda saga online without downloading on windows 7 pc free<br />
- play candy crush soda saga offline without downloading on windows 7 pc free<br />
- play candy crush soda saga with friends without downloading on windows 7 pc free<br />
- play candy crush soda saga with facebook without downloading on windows 7 pc free<br />
- play candy crush soda saga with keyboard without downloading on windows 7 pc free<br />
- play candy crush soda saga with mouse without downloading on windows 7 pc free<br />
- play candy crush soda saga with controller without downloading on windows 7 pc free<br />
- play candy crush soda saga with touch screen without downloading on windows 7 pc free<br />
- play candy crush soda saga with voice commands without downloading on windows 7 pc free<br />
- play candy crush soda saga with vr headset without downloading on windows 7 pc free<br />
- best site to download candy crush soda saga for windows 7 pc free<br />
- best app to download candy crush soda saga for windows 7 pc free<br />
- best software to download candy crush soda saga for windows 7 pc free<br />
- best tool to download candy crush soda saga for windows 7 pc free<br />
- best way to download candy crush soda saga for windows 7 pc free<br />
- fastest way to download candy crush soda saga for windows 7 pc free<br />
- easiest way to download candy crush soda saga for windows 7 pc free<br />
- safest way to download candy crush soda saga for windows 7 pc free<br />
- legal way to download candy crush soda saga for windows 7 pc free</p>
- <p>The game has over 10,000 levels to play, each with different layouts, obstacles, and challenges. You have a limited number of moves or time to complete each level. If you run out of moves or time before reaching the goal, you will lose a life. You can earn up to five lives at a time, which regenerate over time or can be purchased with real money. You can also use boosters, such as lollipop hammers or color bombs, to help <p>you clear the board or make special moves. You can also earn stars and gold bars by completing levels, which can be used to unlock new episodes, buy boosters, or access special features.</p>
- <h3>A sequel to the popular Candy Crush Saga</h3>
- <p>Candy Crush Soda Saga is a sequel to Candy Crush Saga, which was launched in 2012 and became a global phenomenon. Candy Crush Saga is based on the classic game Candy Crush, which was created by King in 2011. The game has been downloaded over 2.7 billion times and has more than 270 million monthly active users. The game has also inspired several spin-offs, such as Candy Crush Jelly Saga, Candy Crush Friends Saga, and Candy Crush All Stars.</p>
- <p>Candy Crush Soda Saga follows the adventures of Kimmy, the sister of Tiffi, the main character of Candy Crush Saga. Kimmy is looking for her lost sister and travels through the Candy Kingdom, meeting new friends and foes along the way. The game introduces new characters, such as Mr. Toffee, Yeti, Bubblegum Troll, and Percy the Penguin. The game also features new graphics, animations, sound effects, and music that enhance the candy-themed experience.</p>
- <h3>A game with different modes, levels, and challenges</h3>
- <p>Candy Crush Soda Saga is a game that offers a lot of variety and fun for players of all ages and skill levels. The game has different modes that test your abilities and creativity. For example, in Live Events mode, you can compete with other players in real-time for prizes and glory. In Quests mode, you can complete daily tasks and earn rewards. In Team mode, you can join or create a team with other players and chat, share lives, and play together.</p>
- <p>The game also has different levels that challenge your strategy and logic. For example, in Boss levels, you have to face off against powerful enemies that have special abilities and tricks. In Super Hard levels, you have to overcome extra difficult obstacles and puzzles. In Treasure Hunt levels, you have to find hidden treasures and collect them.</p>
- <p>The game also has different challenges that add more excitement and fun to the gameplay. For example, in Bubblegum Hill challenge, you have to climb a mountain of bubblegum and collect as many gold crowns as possible. In Soda Squad challenge, you have to work with your team to fill a soda meter and win rewards. In Rainbow Rapids challenge, you have to match candies on rainbow-colored tiles and create rainbow streaks.</p>
- <h2>How to download and install Candy Crush Soda Saga on PC Windows 7?</h2>
- <p>If you want to enjoy Candy Crush Soda Saga on a bigger screen and with better performance, you can download and install it on your PC Windows 7 for free. There are two main options for doing this: downloading from the Microsoft Store or downloading from a third-party platform.</p>
- <h3>Option 1: Download from the Microsoft Store</h3>
- <p>The Microsoft Store is the official app store for Windows devices. It offers a wide range of apps and games that are compatible with Windows 7 or later versions. You can download Candy Crush Soda Saga from the Microsoft Store by following these steps:</p>
- <h4>Step 1: Open the Microsoft Store app</h4>
- <p>To open the Microsoft Store app, you can click on the Start button on the bottom left corner of your screen and type "Microsoft Store" in the search box. Alternatively, you can press the Windows key + S on your keyboard and type "Microsoft Store" in the search box.</p>
- <h4>Step 2: Search for Candy Crush Soda Saga</h4>
- <p>To search for Candy Crush Soda Saga in the Microsoft Store app, you can click on the magnifying glass icon on the top right corner of the app window and type "Candy Crush Soda Saga" in the search box. Alternatively, you can press Ctrl + F on your keyboard and type "Candy Crush Soda Saga" in the search box.</p>
- <h4>Step 3: Click on Get or Install to download the game</h4>
- <p>To download Candy Crush Soda Saga from the Microsoft Store app, you can click on the Get or Install button next to the game's name and icon. This will start downloading the game to your PC Windows 7. You may need to sign in with your Microsoft account or create one if you don't have one already.</p>
- <h4>Step 4: Launch the game from the Start menu or the desktop shortcut</h4>
- <p>To launch Candy Crush Soda Saga from your PC Windows 7, you can click on the Start button on the bottom left corner of your screen and scroll down to find the game's name and icon under "C". Alternatively, you can press the Windows key + Q on your keyboard and type "Candy Crush Soda Saga" in the search box. You can also find a desktop shortcut for the game on your desktop and double-click on it to launch the game.</p>
- <h3>Option 2: Download from a third-party platform</h3>
- <p>If you prefer to download Candy Crush Soda Saga from a different source than the Microsoft Store, you can use a third-party platform that offers PC games. Some of the most popular platforms are Steam, Epic Games, GOG, and itch.io. You can download Candy Crush Soda Saga from any of these platforms by following these steps:</p>
- <h4>Step 1: Choose a platform such as Steam, Epic Games, GOG, or itch.io</h4>
- <p>To choose a platform to download Candy Crush Soda Saga from, you can visit their official websites and compare their features, prices, and reviews. You can also check if they have any discounts, deals, or free games available. Some of the factors to consider when choosing a platform are:</p>
- <ul>
- <li>The compatibility of the platform with your PC Windows 7</li>
- <li>The security and reliability of the platform and its payment methods</li>
- <li>The availability and quality of customer support and community forums</li>
- <li>The variety and exclusivity of games and genres offered by the platform</li>
- <li>The ease of use and customization of the platform's interface and settings</li>
- </ul>
- <h4>Step 2: Create an account and log in to the platform</h4>
- <p>To create an account and log in to the platform of your choice, you can follow the instructions on their website or app. You may need to provide some personal information, such as your name, email address, password, and payment details. You may also need to verify your account through email or phone. Once you have created an account and logged in to the platform, you can access its features and browse its games.</p>
- <h4>Step 3: Search for Candy Crush Soda Saga and purchase or download the game</h4>
- <p>To search for Candy Crush Soda Saga on the platform of your choice, you can use the search bar or filter options to find the game's name and icon. You can also check the game's description, screenshots, videos, ratings, reviews, and system requirements. Depending on the platform, you may need to purchase or download the game before playing it. Some platforms may offer free trials or demos of the game. You can also check if there are any updates or patches available for the game.</p>
- <h4>Step 4: Launch the game from the platform's library or launcher</h4>
- <p>To launch Candy Crush Soda Saga from the platform of your choice, you can go to your library or launcher and find the game's name and icon. You can also create a desktop shortcut for the game if you want. You can then click on Play or Launch to start playing the game. You may need to log in to your account or connect to the internet to play the game.</p>
- <h2>What are the benefits of playing Candy Crush Soda Saga?</h2>
- <p>Candy Crush Soda Saga is not only a fun and addictive game, but also a beneficial one. Playing this game can have positive effects on your mental and emotional well-being. Here are some of the benefits of playing Candy Crush Soda Saga:</p>
- <h3>It can improve your cognitive skills and memory</h3>
- <p>Playing Candy Crush Soda Saga can stimulate your brain and enhance your cognitive skills, such as attention, concentration, problem-solving, logic, spatial awareness, pattern recognition, and memory. These skills are essential for learning, working, and everyday life. By matching candies and creating special combinations, you can train your brain to process information faster and more efficiently. By completing levels and advancing through episodes, you can challenge your brain to remember details and strategies.</p>
- <h3>It can reduce stress and boredom</h3>
- <p>Playing Candy Crush Soda Saga can also help you relax and unwind from stress and boredom. The game has colorful graphics, cheerful music, cute characters, and satisfying sound effects that can create a positive mood and atmosphere. The game also has simple rules and easy controls that can make you feel comfortable and confident. The game also has different modes and levels that can keep you entertained and engaged for hours. The game also has a rewarding system that can make you feel accomplished and motivated. By playing Candy Crush Soda Saga, you can escape from the worries and pressures of reality and enjoy a sweet and refreshing adventure.</p>
- <h3>It can provide social interaction and entertainment</h3>
- <p>Playing Candy Crush Soda Saga can also help you connect and interact with other people who share your passion for the game. The game has a social feature that allows you to link your Facebook account and see your friends' progress and scores. You can also send and receive lives, boosters, and messages from your friends. You can also join or create a team with other players and chat, share lives, and play together. You can also compete with other players in live events or leaderboards and show off your skills and achievements. By playing Candy Crush Soda Saga, you can have fun and make new friends at the same time.</p>
- <h2>What are some tips and tricks for playing Candy Crush Soda Saga?</h2>
- <p>Candy Crush Soda Saga is a game that requires strategy and skill to master. If you want to improve your performance and progress faster in the game, you might want to follow some tips and tricks that can help you beat the levels and challenges. Here are some of them:</p>
- <h3>Focus on clearing soda bottles and raising the soda level</h3>
- <p>In soda levels, the main objective is to switch the bottles and match candies to release purple soda and save the candy bears. The more soda you release, the higher the soda level will rise. The higher the soda level, the easier it will be to match candies and clear the board. Therefore, you should focus on clearing soda bottles as soon as possible and raising the soda level as high as possible. You should also try to match candies near the bottom of the board, as this will create more cascades and opportunities to clear more bottles.</p>
- <h3>Use special candies and combos to clear obstacles and ice</h3>
- <p>In frosting levels, the main objective is to match candies to smash the ice and set the candy bears free. The ice can be thick or thin, depending on the level. The thicker the ice, the more times you have to match candies next to it to break it. Therefore, you should use special candies and combos to clear obstacles and ice faster and more efficiently. Special candies are created by matching four or more candies in different shapes, such as striped, wrapped, or fish candies. Combos are created by matching two or more special candies together, such as striped + striped, striped + wrapped, or wrapped + wrapped. These special candies and combos can have various effects, such as clearing a whole row, column, or area of candies.</p>
- <h3>Keep an eye on the bubble bears and don't let them float away</h3>
- <p>In honey levels, the main objective is to match candies next to honeycomb to release the trapped candy bears. The honeycomb can be thick or thin, depending on the level. The thicker the honeycomb, the more times you have to match candies next to it to break it. However, there is another challenge in these levels: the bubble bears. These are candy bears that are surrounded by bubbles and float up when you match candies below them. If they reach the top of the board, they will disappear and you will lose them. Therefore, you should keep an eye on the bubble bears and don't let them float away. You should try to match candies next to them or use special candies or combos to pop their bubbles.</p>
- <h3>Plan your moves ahead and save your boosters for hard levels</h3>
- <p>In jam levels, the main objective is to spread the jam across the board. The jam is a sticky substance that covers some of the candies or tiles on the board. To spread the jam, you have to match candies on top of it or use special candies or combos to splash it. However, you have a limited number of moves or time to spread the jam to all the tiles on the board. Therefore, you should plan your moves ahead and save your boosters for hard levels. Boosters are items that can help you clear the board or make special moves. You can earn boosters by completing levels, quests, events, or challenges. You can also buy boosters with real money. Some of the boosters are lollipop hammers, color bombs, striped brushes, free switches, and extra moves.</p>
- <h2>Conclusion</h2>
- <p>Candy Crush Soda Saga is a game that can offer you hours of fun and enjoyment. It is a game that can improve your cognitive skills and memory, reduce your stress and boredom, and provide you with social interaction and entertainment. It is also a game that can challenge your strategy and logic with different modes, levels, and obstacles. If you want to play this game on your PC Windows 7 for free, you can download it from the Microsoft Store or from a third-party platform. You can also follow some tips and tricks to help you master the game and beat the levels. So what are you waiting for? Download Candy Crush Soda Saga today and join Kimmy on her sweet and fizzy adventure!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Candy Crush Soda Saga:</p>
- <h3>Q: How do I sync my progress across different devices?</h3>
- <p>A: To sync your progress across different devices, you need to link your game to your Facebook account or your King account. You can do this by tapping on the settings icon on the main screen and choosing "Connect" or "Log in". Once you have linked your game to your account, you can access your progress on any device that has the game installed.</p>
- <h3>Q: How do I get more lives?</h3>
- <p>A: To get more lives, you have several options. You can wait for your lives to regenerate over time, which takes about 30 minutes per life. You can ask your friends for lives, which they can send you through Facebook or the game's app. You can join or create a team and share lives with your teammates. You can also buy lives with real money or gold bars.</p>
- <h3>Q: How do I get more gold bars?</h3>
- <p>A: To get more gold bars, you have several options. You can earn gold bars by completing levels, quests, events, or challenges. You can also buy gold bars with real money or redeem them with gift cards or coupons. You can also get gold bars from your friends or teammates as gifts.</p>
- <h3>Q: How do I unlock new episodes?</h3>
- <p>A: To unlock new episodes, you need to complete all the levels in the previous episode. You may also need to pay a certain amount of gold bars or ask your friends for tickets to unlock the next episode. Some episodes may also have special requirements or conditions to unlock them.</p>
- <h3>Q: How do I contact customer support?</h3>
- <p>A: To contact customer support, you can visit the official website of King and go to the "Help Center" section. There you can find answers to common questions, report a problem, give feedback, or chat with an agent.</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Guardian Tales JP and Experience a Classic Adventure with Pixel Art and Puzzles.md DELETED
@@ -1,176 +0,0 @@
1
-
2
- <h1>How to Download and Play Guardian Tales JP: Tips and Tricks for Beginners</h1>
3
- <p>Guardian Tales is a pixel RPG game that combines gacha elements, puzzle-solving, and action combat. You can collect over 50 heroes and 100 weapons, each with their own unique abilities and skills. You can also explore various worlds, dungeons, and bosses, as well as challenge other players in real-time PvP battles.</p>
4
- <h2>guardian tales jp download</h2><br /><p><b><b>Download</b> ->>->>->> <a href="https://jinyurl.com/2uNMRn">https://jinyurl.com/2uNMRn</a></b></p><br /><br />
5
- <p>But did you know that there is a Japanese version of Guardian Tales that has some exclusive features and content? For example, the Japanese version has different voice actors, collab events, costumes, and banners than the global version. If you are a fan of Japanese culture and anime, you might want to try out Guardian Tales JP.</p>
6
- <p>In this article, we will show you how to download and play Guardian Tales JP on your Android devices or PC using an emulator. We will also share some tips and tricks for beginners who want to start their adventure in Kanterbury, the world of Guardian Tales.</p>
7
- <h2>How to Download Guardian Tales JP on Android Devices</h2>
8
- <p>If you have an Android device, you can download Guardian Tales JP from the Google Play Store. However, you will need to change your region settings to Japan first. Here are the steps to do so:</p>
9
- <ol>
10
- <li>Open the Google Play Store app on your device.</li>
11
- <li>Tap on the menu icon (three horizontal lines) on the top left corner.</li>
12
- <li>Tap on Account.</li>
13
- <li>Tap on Country and profiles.</li>
14
- <li>Select Japan as your country. You might need to add a payment method from Japan to do this.</li>
15
- <li>Accept the Terms of Service and wait for the changes to take effect.</li>
16
- <li>Search for "Guardian Tales" in the Play Store. You should see the Japanese version of the game with the title "ガーディアンテイルズ".</li>
17
- <li>Tap on Install and wait for the game to download.</li>
18
- </ol>
19
- <p>Congratulations! You have successfully downloaded Guardian Tales JP on your Android device. You can now launch the game and enjoy its features.</p>
20
- <h2>How to Download Guardian Tales JP on PC Using an Emulator</h2>
21
- <p>If you want to play Guardian Tales JP on your PC, you will need to use an Android emulator. An emulator is a software that allows you to run Android apps on your computer. There are many emulators available online, but we recommend using BlueStacks or MuMu Player as they are popular and reliable options.</p>
22
- <p>Here are the steps to download Guardian Tales JP on PC using an emulator:</p>
73
- <ol>
74
- <li>Download and install an Android emulator on your PC. You can choose from BlueStacks or MuMu Player.</li>
75
- <li>Open the emulator and complete Google sign-in to access the Play Store.</li>
76
- <li>Change your region settings to Japan following the same steps as above.</li>
77
- <li>Search for "Guardian Tales" in the Play Store. You should see the Japanese version of the game with the title "ガーディアンテイルズ".</li>
78
- <li>Click to install Guardian Tales JP from the search results.</li>
79
- <li>Once installation completes, click the game icon to start the game.</li>
80
- <li>Enjoy playing Guardian Tales JP on your PC with the emulator.</li>
81
- </ol>
82
- <p>You can also customize your keyboard and mouse controls using the Advanced Keymapping feature in BlueStacks or MuMu Player. This will allow you to play Guardian Tales JP more comfortably and efficiently on your PC.</p>
83
- <h2>Tips and Tricks for Beginners</h2>
84
- <p>Now that you have downloaded Guardian Tales JP, you might be wondering how to play it well. Don't worry, we have some tips and tricks for beginners who want to have a smooth start in the game. Here are some of them:</p>
85
- <h3>Choose a Good Starter Hero and Reroll if Needed</h3>
86
- <p>When you start the game, you will be able to choose one of four starter heroes: Knight, Warrior, Mage, or Archer. Each hero has their own strengths and weaknesses, as well as different roles and playstyles. You can check their stats and skills before making your choice.</p>
87
- <p>However, if you are not satisfied with your starter hero, you can reroll for a better one. Rerolling means resetting your game data and starting over until you get the hero you want. To reroll in Guardian Tales JP, you need to do the following:</p>
88
- <ol>
89
- <li>Complete the tutorial and the first chapter of the story mode.</li>
90
- <li>Collect the free gems and tickets from the mailbox and events.</li>
91
- <li>Go to the summon menu and use your gems and tickets to pull for heroes and weapons.</li>
92
- <li>If you get a good hero or weapon, keep playing. If not, go to the settings menu and tap on "Delete Account".</li>
93
- <li>Confirm your decision and restart the game.</li>
94
- <li>Repeat the process until you get your desired hero or weapon.</li>
95
- </ol>
96
- <p>The best heroes to aim for are those with a rarity of 3 stars, as they have higher stats and skills than lower rarity heroes. Some of the most popular 3-star heroes are Marina, Bari, Nari, Oghma, Bianca, and Eugene. You can also check the tier list for more information on the best heroes and weapons in the game.</p>
97
- <h3>Complete the Story Mode and Side Quests for Rewards</h3>
98
- <p>One of the main features of Guardian Tales is its story mode, which consists of 10 chapters with different themes and settings. The story mode is not only fun and engaging, but also rewarding. You can earn gems, gold, experience, items, and even new heroes by completing the story mode.</p>
99
- <p>However, don't just rush through the main quests. You should also pay attention to the side quests, which are marked with a yellow exclamation point on the map. Side quests are optional missions that give you more insight into the characters and the world of Guardian Tales. They also reward you with more gems, gold, experience, items, and sometimes even costumes for your heroes.</p>
100
- <p>Therefore, try to complete as many side quests as possible while progressing through the story mode. You can also replay the story mode stages on higher difficulties for more rewards and challenges.</p>
101
- <h3>Join a Guild and Participate in Raids and Events</h3>
102
- <p>Another way to enjoy Guardian Tales is to join a guild and participate in raids and events. A guild is a group of players who can chat, cooperate, and compete with each other. You can join an existing guild or create your own guild with your friends.</p>
103
- <p>By joining a guild, you can access various benefits such as guild buffs, guild shop, guild attendance rewards, and guild missions. You can also participate in guild raids, which are special battles that require teamwork and strategy. Guild raids reward you with raid coins, which you can use to buy exclusive items from the raid shop.</p>
104
- <p>Besides guild raids, you can also participate in various events that are held regularly in Guardian Tales. Events are limited-time missions that offer unique rewards such as gems, gold, items, costumes, heroes, and weapons. Some events are also collab events that feature characters from other popular games or anime series. For example, there was a collab event with Re:Zero in 2021 that allowed players to obtain Rem, Ram, Emilia, Subaru, Beatrice, and Roswaal as playable heroes.</p>
105
- <p>Therefore, try to join a guild and participate in raids and events as much as possible. They will not only make your game more fun and social but also help you progress faster and easier.</p>
106
- <h3>Upgrade Your Heroes, Weapons, and Accessories</h3>
107
- <p>As you play Guardian Tales JP, you will need to upgrade your heroes, weapons, and accessories to make them stronger and more effective. There are several ways to do this, such as leveling up, awakening, evolution, limit breaking, and enhancement.</p>
108
- <p>Leveling up is the simplest way to increase your heroes' and weapons' stats. You can level up your heroes by using experience points (XP) that you earn from battles or items. You can level up your weapons by using weapon XP that you earn from dismantling other weapons or items.</p>
109
- <p>Awakening is a process that unlocks new skills and abilities for your heroes and weapons. You can awaken your heroes by using awakening stones that you obtain from the awakening dungeon or events. You can awaken your weapons by using magic metal that you obtain from dismantling other weapons or events.</p>
110
- <p>Evolution is a process that increases the rarity and potential of your heroes and weapons. You can evolve your heroes by using hero crystals that you obtain from summoning or events. You can evolve your weapons by using weapon hammers that you obtain from the evolution dungeon or events.</p>
111
- <p>Limit breaking is a process that increases the maximum level and stats of your heroes and weapons. You can limit break your heroes by using hero shards that you obtain from summoning or events. You can limit break your weapons by using weapon shards that you obtain from summoning or events.</p>
112
- <p>Enhancement is a process that adds extra effects and bonuses to your accessories. You can enhance your accessories by using enhancement stones that you obtain from the enhancement dungeon or events.</p>
113
- <p>Therefore, try to upgrade your heroes, weapons, and accessories as much as possible. They will make a huge difference in your performance and results in the game.</p>
114
- <h3>Explore the Floating Island and Customize Your Base</h3>
115
- <p>The last tip we have for beginners is to explore the floating island and customize your base. The floating island is a feature that allows you to create and decorate your own base with various buildings, facilities, and items. You can also invite your heroes and friends to visit your base and interact with them.</p>
116
- <p>The floating island is not only a place to relax and have fun but also a source of income and resources. You can collect gold, gems, items, and energy from the buildings and facilities in your base. You can also complete quests and missions related to the floating island for more rewards.</p>
117
- <p>To access the floating island, you need to tap on the island icon on the top right corner of the screen. You can then use the edit mode to place and move buildings, facilities, and items on your base. You can also use the visit mode to see what your base looks like and interact with your heroes and friends.</p>
118
- <p>Some of the buildings and facilities you can build on your base are:</p>
119
- <table>
120
- <tr>
121
- <th>Name</th>
122
- <th>Description</th>
123
- <th>Benefits</th>
124
- </tr>
125
- <tr>
126
- <td>Inn</td>
127
- <td>A place where your heroes can rest and recover.</td>
128
- <td>Increases hero XP over time.</td>
129
- </tr>
130
- <tr>
131
- <td>Tower</td>
132
- <td>A place where you can store and display your weapons.</td>
133
- <td>Increases weapon XP over time.</td>
134
- </tr>
135
- <tr>
136
- <td>Shop</td>
137
- <td>A place where you can buy and sell items.</td>
138
- <td>Generates gold over time.</td>
139
- </tr>
140
- <tr>
141
- <td>Cafe</td>
142
- <td>A place where you can serve drinks and snacks to your heroes.</td>
143
- <td>Increases hero affection over time.</td>
144
- </tr>
145
- <tr>
146
- <td>Mine</td>
147
- <td>A place where you can dig for minerals and gems.</td>
148
- <td>Generates gems over time.</td>
149
- </tr>
150
- <tr>
151
- <td>Factory</td>
152
- <td>A place where you can produce items and materials.</td>
153
- <td>Generates items over time.</td>
154
- </tr>
155
- <tr>
156
- <td>Battery</td>
- <td>A place where you can store and recharge energy.</td>
- <td>Generates energy over time.</td>
- </tr>
- </table>
- <p>Therefore, try to explore the floating island and customize your base as much as possible. They will not only make your game more enjoyable but also help you progress faster and easier.</p>
- <h2>Conclusion: Summary of the Main Points and a Call to Action</h2>
- <p>In conclusion, Guardian Tales JP is a pixel RPG game that has some exclusive features and content that are different from the global version. If you want to try it out, you can download it on your Android devices or PC using an emulator. You can also follow our tips and tricks for beginners who want to have a smooth start in the game. We hope this article has been helpful and informative for you.</p>
- <p>If you liked this article, please share it with your friends who are also interested in Guardian Tales JP. You can also leave a comment below and let us know what you think about the game. And if you want to learn more about Guardian Tales JP, you can visit the official website or follow the social media accounts of the game. Thank you for reading and have a great day!</p>
157
- <h2>FAQs: Five Common Questions and Answers About Guardian Tales JP</h2>
158
- <p>Here are some of the most frequently asked questions and answers about Guardian Tales JP. If you have any other questions, feel free to ask them in the comments section.</p>
159
- <h3>Q: Is Guardian Tales JP free to play?</h3>
160
- <p>A: Yes, Guardian Tales JP is free to play. You can download and play the game without spending any money. However, there are some optional in-game purchases that can enhance your gaming experience, such as gems, costumes, and packages. You can buy these with real money or earn them through various methods in the game.</p>
161
- <h3>Q: Can I play Guardian Tales JP with my friends?</h3>
162
- <p>A: Yes, you can play Guardian Tales JP with your friends. You can add them as friends in the game and chat with them, visit their bases, and send them gifts. You can also invite them to join your guild or team up with them in co-op mode, arena mode, or colosseum mode. Playing with your friends can make the game more fun and rewarding.</p>
163
- <h3>Q: How can I change the language of Guardian Tales JP?</h3>
164
- <p>A: Unfortunately, you cannot change the language of Guardian Tales JP. The game is only available in Japanese, and there is no option to switch to other languages. If you want to play the game in English or other languages, you will have to download the global version of Guardian Tales instead.</p>
165
- <h3>Q: How can I transfer my data from the global version to the Japanese version of Guardian Tales?</h3>
166
- <p>A: Unfortunately, you cannot transfer your data from the global version to the Japanese version of Guardian Tales. The two versions are separate and have different servers, accounts, and data. If you want to play the Japanese version of Guardian Tales, you will have to start from scratch.</p>
167
- <h3>Q: How can I contact the customer service of Guardian Tales JP?</h3>
168
- <p>A: If you have any problems or issues with Guardian Tales JP, you can contact the customer service of the game by following these steps:</p>
169
- <ol>
170
- <li>Go to the settings menu and tap on "Customer Service".</li>
171
- <li>Tap on "Contact Us" and fill out the form with your details and inquiry.</li>
172
- <li>Tap on "Send" and wait for a reply from the customer service team.</li>
173
- </ol>
174
- <p>You can also check the FAQ section for more information and solutions to common problems.</p>
 
spaces/AIConsultant/MusicGen/audiocraft/metrics/__init__.py DELETED
@@ -1,14 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- """Metrics like CLAP score, FAD, KLD, Visqol, Chroma similarity, etc.
7
- """
8
- # flake8: noqa
9
- from .clap_consistency import CLAPTextConsistencyMetric, TextConsistencyMetric
10
- from .chroma_cosinesim import ChromaCosineSimilarityMetric
11
- from .fad import FrechetAudioDistanceMetric
12
- from .kld import KLDivergenceMetric, PasstKLDivergenceMetric
13
- from .rvm import RelativeVolumeMel
14
- from .visqol import ViSQOL
 
spaces/AIWaves/Debate/src/agents/Action/base_action.py DELETED
@@ -1,48 +0,0 @@
1
- import os
- 
- from Memory import Memory
- # NOTE: `extract` is used below to pull the <title>/<python> blocks out of a
- # response; it is assumed to live in the project's utils module.
- from utils import extract
2
- class Action:
3
- """
4
- The basic action unit of agent
5
- """
6
- def __init__(self,**kwargs):
7
- self.response = None
8
- self.is_user = False
9
- self.res_dict = {}
10
- self.name = ""
11
- self.role = ""
12
- for key,value in kwargs.items():
13
- setattr(self,key,value)
14
-
15
-
16
- def process(self):
17
- """
18
- processing action
19
- Return : memory(Memory)
20
- """
21
- response = self.response
22
- send_name = self.name
23
- send_role = self.role
24
- all = ""
25
- for res in response:
26
- all += res
27
- parse = f"{send_name}:"
28
-
29
- # Strip everything up to and including the speaker's own "name:" prefix so
- # that only the final message content remains.
31
- while parse in all:
32
- index = all.index(parse) + len(parse)
33
- all = all[index:]
34
-
35
- if not self.is_user:
36
- print(f"{send_name}({send_role}):{all}")
37
- # for software
38
- if "<title>" in all:
39
- title = extract(all,"title")
40
- python = extract(all,"python")
41
- os.makedirs("output_code", exist_ok=True)
42
- file_name = "output_code/" + title
43
- with open(file_name, "w", encoding="utf-8") as f:
44
- f.write(python)
45
- memory = Memory(send_role, send_name, all)
46
- return memory
47
-
48
-
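- 
- if __name__ == "__main__":
-     # Minimal usage sketch with hypothetical values, assuming the project's
-     # Memory class is importable: wrap a streamed response and turn it into
-     # a Memory entry.
-     action = Action(response=["hello ", "world"], name="Alice", role="debater")
-     memory = action.process()  # prints "Alice(debater):hello world"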
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb16-120e_deepfashion2_long_sleeved_outwear_256x192.py DELETED
@@ -1,172 +0,0 @@
1
- _base_ = [
2
- '../../../_base_/default_runtime.py',
3
- '../../../_base_/datasets/deepfashion2.py'
4
- ]
5
-
6
- default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
7
-
8
- resume = False  # resume training from a checkpoint
9
- load_from = None  # path to pretrained weights to load
10
- train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10)  # number of training epochs and validation interval
11
- param_scheduler = [
12
- dict(  # warmup strategy
13
- type='LinearLR',
14
- begin=0,
15
- end=500,
16
- start_factor=0.001,
17
- by_epoch=False),
18
- dict( # scheduler
19
- type='MultiStepLR',
20
- begin=0,
21
- end=60,
22
- milestones=[20, 40],
23
- gamma=0.1,
24
- by_epoch=True)
25
- ]
26
- optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))  # optimizer and learning rate
27
- auto_scale_lr = dict(base_batch_size=512)  # automatically scale the learning rate with the batch size
28
- 
29
- backend_args = dict(backend='local')  # data loading backend; defaults to loading from local disk
30
- dataset_type = 'DeepFashion2Dataset'  # dataset class name
31
- data_mode = 'topdown'  # algorithm type; determines how annotation information is loaded
32
- data_root = 'data/deepfashion2/'  # root path of the data
33
- # Codec that generates training targets and decodes predictions; it also holds
- # the input image size and the output heatmap size.
34
- codec = dict(
35
- type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
36
-
37
- train_pipeline = [
38
- dict(type='LoadImage'),
39
- dict(type='GetBBoxCenterScale'),
40
- dict(type='RandomFlip', direction='horizontal'),
41
- dict(
42
- type='RandomBBoxTransform',
43
- shift_prob=0,
44
- rotate_factor=60,
45
- scale_factor=(0.75, 1.25)),
46
- dict(type='TopdownAffine', input_size=codec['input_size']),
47
- dict(type='GenerateTarget', encoder=codec),
48
- dict(type='PackPoseInputs')
49
- ]
50
- val_pipeline = [  # data transforms at test time
51
- dict(type='LoadImage', backend_args=backend_args),  # load the image
52
- dict(type='GetBBoxCenterScale'),  # derive center and scale from the bbox
53
- dict(type='TopdownAffine', input_size=codec['input_size']),  # update targets via the affine transform
54
- dict(type='PackPoseInputs')  # pack the targets for the model
55
- ]
56
- train_dataloader = dict(  # training data loader
57
- batch_size=16,  # batch size
58
- num_workers=6,  # number of data-loading worker processes
59
- persistent_workers=True,  # keep workers alive between epochs to avoid restart overhead
60
- sampler=dict(type='DefaultSampler', shuffle=True),  # sampling strategy: shuffle the data
61
- dataset=dict(
62
- type=dataset_type,  # dataset class name
63
- data_root=data_root,  # dataset path
64
- data_mode=data_mode,  # algorithm type
65
- ann_file='train/deepfashion2_long_sleeved_outwear.json',  # annotation file path
66
- data_prefix=dict(img='train/image/'),  # image path
67
- pipeline=train_pipeline  # data pipeline
68
- ))
69
- val_dataloader = dict(
70
- batch_size=16,
71
- num_workers=6,
72
- persistent_workers=True,  # keep workers alive between epochs to avoid restart overhead
73
- drop_last=False,
74
- sampler=dict(type='DefaultSampler', shuffle=False),  # sampling strategy: no shuffling
75
- dataset=dict(
76
- type=dataset_type,  # dataset class name
77
- data_root=data_root,  # dataset path
78
- data_mode=data_mode,  # algorithm type
79
- ann_file='validation/deepfashion2_long_sleeved_outwear.json',  # annotation file path
80
- data_prefix=dict(img='validation/image/'),  # image path
81
- test_mode=True,  # enable test mode
82
- pipeline=val_pipeline  # data pipeline
83
- ))
84
- test_dataloader = val_dataloader  # the validation and test sets are not distinguished by default; redefine as needed
85
-
86
- channel_cfg = dict(
87
- num_output_channels=294,
88
- dataset_joints=294,
89
- dataset_channel=[
90
- [
91
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
92
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
93
- 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
94
- 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
95
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
96
- 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
97
- 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
98
- 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
99
- 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
100
- 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
101
- 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
102
- 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
103
- 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
104
- 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
105
- 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
106
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
107
- 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
108
- 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
109
- 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
110
- 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
111
- 285, 286, 287, 288, 289, 290, 291, 292, 293
112
- ],
113
- ],
114
- inference_channel=[
115
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
116
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
117
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
118
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
119
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
120
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
121
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
122
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
123
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
124
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
125
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
126
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
127
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
128
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
129
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
130
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
131
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
132
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
133
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
134
- 290, 291, 292, 293
135
- ])
136
-
137
- model = dict(
138
- type='TopdownPoseEstimator',  # the model type determines the algorithm pipeline
140
- data_preprocessor=dict(  # data normalization and channel-order conversion, done as part of the model
140
- type='PoseDataPreprocessor',
141
- mean=[123.675, 116.28, 103.53],
142
- std=[58.395, 57.12, 57.375],
143
- bgr_to_rgb=True),
144
- backbone=dict(
145
- type='ResNet',
146
- depth=50,
147
- init_cfg=dict(
148
- type='Pretrained',  # load pretrained weights; only the backbone, for transfer learning
149
- checkpoint='torchvision://resnet50')),
150
- head=dict(  # model head
151
- type='HeatmapHead',
152
- in_channels=2048,
153
- out_channels=channel_cfg['num_output_channels'],
154
- # deconv_out_channels=None,
155
- loss=dict(type='KeypointMSELoss', use_target_weight=True),  # loss function
156
- decoder=codec),  # decoder that converts heatmaps back to coordinates
157
- test_cfg=dict(
158
- flip_test=True,  # enable horizontal-flip test-time ensembling
159
- flip_mode='heatmap',  # flip the heatmaps
160
- shift_heatmap=True,  # shift the flipped heatmaps to improve accuracy
161
- ))
162
-
163
- val_evaluator = [
164
- dict(type='PCKAccuracy', thr=0.2),
165
- dict(type='AUC'),
166
- dict(type='EPE'),
167
- ]
168
- test_evaluator = val_evaluator  # the validation and test sets are not distinguished by default; redefine as needed
169
-
170
- visualizer = dict(
171
- vis_backends=[dict(type='LocalVisBackend'),
172
- dict(type='WandbVisBackend')])
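- 
- # A minimal sketch of how a config like this is typically consumed in an
- # MMPose 1.x / mmengine environment (kept as comments so the config file
- # itself stays side-effect free; the config path below is hypothetical):
- #
- #   from mmengine.config import Config
- #   from mmengine.runner import Runner
- #
- #   cfg = Config.fromfile('path/to/this_config.py')
- #   cfg.work_dir = 'work_dirs/deepfashion2_long_sleeved_outwear'
- #   runner = Runner.from_cfg(cfg)  # builds the model, dataloaders and optimizer
- #   runner.train()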
 
spaces/Abhilashvj/planogram-compliance/data/scripts/get_imagenet.sh DELETED
@@ -1,51 +0,0 @@
1
- #!/bin/bash
2
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
3
- # Download ILSVRC2012 ImageNet dataset https://image-net.org
4
- # Example usage: bash data/scripts/get_imagenet.sh
5
- # parent
6
- # ├── yolov5
7
- # └── datasets
8
- # └── imagenet ← downloads here
9
-
10
- # Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val
11
- if [ "$#" -gt 0 ]; then
12
- for opt in "$@"; do
13
- case "${opt}" in
14
- --train) train=true ;;
15
- --val) val=true ;;
16
- esac
17
- done
18
- else
19
- train=true
20
- val=true
21
- fi
22
-
23
- # Make dir
24
- d='../datasets/imagenet' # unzip directory
25
- mkdir -p $d && cd $d
26
-
27
- # Download/unzip train
28
- if [ "$train" == "true" ]; then
29
- wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar # download 138G, 1281167 images
30
- mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
31
- tar -xf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
32
- find . -name "*.tar" | while read NAME; do
33
- mkdir -p "${NAME%.tar}"
34
- tar -xf "${NAME}" -C "${NAME%.tar}"
35
- rm -f "${NAME}"
36
- done
37
- cd ..
38
- fi
39
-
40
- # Download/unzip val
41
- if [ "$val" == "true" ]; then
42
- wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar # download 6.3G, 50000 images
43
- mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xf ILSVRC2012_img_val.tar
44
- wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash # move into subdirs
45
- fi
46
-
47
- # Delete corrupted image (optional: PNG under JPEG name that may cause dataloaders to fail)
48
- # rm train/n04266014/n04266014_10835.JPEG
49
-
50
- # TFRecords (optional)
51
- # wget https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_lsvrc_2015_synsets.txt
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetAllChildrenSizers.js DELETED
@@ -1,14 +0,0 @@
1
- var GetAllChildrenSizers = function (out) {
2
- if (out === undefined) {
3
- out = [];
4
- }
5
- var startIdx = out.length;
6
- var children = this.getChildrenSizers(out);
7
- var endIdx = out.length;
8
- for (var i = startIdx; i < endIdx; i++) {
9
- children[i].getAllChildrenSizers(out);
10
- }
11
-
12
- return out;
13
- }
14
- export default GetAllChildrenSizers;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RemoveChildMethods.js DELETED
@@ -1,39 +0,0 @@
1
- import RemoveChild from './utils/RemoveChild.js';
2
- import GetParentSizerMethods from './GetParentSizerMethods.js';
3
-
4
- const RemoveItem = Phaser.Utils.Array.Remove;
5
-
6
- export default {
7
- removeFromParentSizer() {
8
- var parent = GetParentSizerMethods.getParentSizer(this);  // fix: 'gameObject' was undefined here; the sizer itself is 'this'
9
- if (parent) {
10
- parent.remove(this);
11
- }
12
- return this;
13
- },
14
-
15
- removeBackground(gameObject, destroyChild) {
16
- if (this.backgroundChildren === undefined) {
17
- return this;
18
- }
19
-
20
- if (this.getParentSizer(gameObject) !== this) {
21
- return this;
22
- }
23
-
24
- RemoveItem(this.backgroundChildren, gameObject);
25
- RemoveChild.call(this, gameObject, destroyChild);
26
- return this;
27
- },
28
-
29
- removeAllBackgrounds(destroyChild) {
30
- if (this.backgroundChildren === undefined) {
31
- return this;
32
- }
33
-
34
- for (var i = this.backgroundChildren.length - 1; i >= 0; i--) {
35
- this.remove(this.backgroundChildren[i], destroyChild);
36
- }
37
- return this;
38
- },
39
- }
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/InTouching.js DELETED
@@ -1,2 +0,0 @@
1
- import InTouching from '../../../plugins/intouching.js'
2
- export default InTouching;
 
 
 
spaces/Aki004/herta-so-vits/vdecoder/__init__.py DELETED
File without changes
spaces/AlekseyKorshuk/rugpt3/README.md DELETED
@@ -1,37 +0,0 @@
1
- ---
2
- title: Rugpt3
3
- emoji: 📚
4
- colorFrom: green
5
- colorTo: purple
6
- sdk: gradio
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- # Configuration
12
-
13
- `title`: _string_
14
- Display title for the Space
15
-
16
- `emoji`: _string_
17
- Space emoji (emoji-only character allowed)
18
-
19
- `colorFrom`: _string_
20
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
-
22
- `colorTo`: _string_
23
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
-
25
- `sdk`: _string_
26
- Can be either `gradio`, `streamlit`, or `static`
27
-
28
- `sdk_version` : _string_
29
- Only applicable for `streamlit` SDK.
30
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
-
32
- `app_file`: _string_
33
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
34
- Path is relative to the root of the repository.
35
-
36
- `pinned`: _boolean_
37
- Whether the Space stays on top of your list.
 
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/model_irse.py DELETED
@@ -1,84 +0,0 @@
1
- from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
2
- from models.facial_recognition.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
3
-
4
- """
5
- Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
6
- """
7
-
8
-
9
- class Backbone(Module):
10
- def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
11
- super(Backbone, self).__init__()
12
- assert input_size in [112, 224], "input_size should be 112 or 224"
13
- assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
14
- assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
15
- blocks = get_blocks(num_layers)
16
- if mode == 'ir':
17
- unit_module = bottleneck_IR
18
- elif mode == 'ir_se':
19
- unit_module = bottleneck_IR_SE
20
- self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
21
- BatchNorm2d(64),
22
- PReLU(64))
23
- if input_size == 112:
24
- self.output_layer = Sequential(BatchNorm2d(512),
25
- Dropout(drop_ratio),
26
- Flatten(),
27
- Linear(512 * 7 * 7, 512),
28
- BatchNorm1d(512, affine=affine))
29
- else:
30
- self.output_layer = Sequential(BatchNorm2d(512),
31
- Dropout(drop_ratio),
32
- Flatten(),
33
- Linear(512 * 14 * 14, 512),
34
- BatchNorm1d(512, affine=affine))
35
-
36
- modules = []
37
- for block in blocks:
38
- for bottleneck in block:
39
- modules.append(unit_module(bottleneck.in_channel,
40
- bottleneck.depth,
41
- bottleneck.stride))
42
- self.body = Sequential(*modules)
43
-
44
- def forward(self, x):
45
- x = self.input_layer(x)
46
- x = self.body(x)
47
- x = self.output_layer(x)
48
- return l2_norm(x)
49
-
50
-
51
- def IR_50(input_size):
52
- """Constructs a ir-50 model."""
53
- model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
54
- return model
55
-
56
-
57
- def IR_101(input_size):
58
- """Constructs a ir-101 model."""
59
- model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
60
- return model
61
-
62
-
63
- def IR_152(input_size):
64
- """Constructs a ir-152 model."""
65
- model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
66
- return model
67
-
68
-
69
- def IR_SE_50(input_size):
70
- """Constructs a ir_se-50 model."""
71
- model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
72
- return model
73
-
74
-
75
- def IR_SE_101(input_size):
76
- """Constructs a ir_se-101 model."""
77
- model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
78
- return model
79
-
80
-
81
- def IR_SE_152(input_size):
82
- """Constructs a ir_se-152 model."""
83
- model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
84
- return model
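- 
- 
- if __name__ == "__main__":
-     # Minimal smoke test with dummy inputs: embed a batch of 112x112 face
-     # crops into L2-normalized 512-d vectors.
-     import torch
- 
-     model = IR_SE_50(input_size=112).eval()
-     with torch.no_grad():
-         faces = torch.randn(4, 3, 112, 112)  # placeholder batch
-         embeddings = model(faces)  # shape (4, 512), unit-norm rows
-     print(embeddings.shape)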
 
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/upfirdn2d.cpp DELETED
@@ -1,103 +0,0 @@
1
- // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- //
3
- // NVIDIA CORPORATION and its licensors retain all intellectual property
4
- // and proprietary rights in and to this software, related documentation
5
- // and any modifications thereto. Any use, reproduction, disclosure or
6
- // distribution of this software and related documentation without an express
7
- // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- #include <torch/extension.h>
10
- #include <ATen/cuda/CUDAContext.h>
11
- #include <c10/cuda/CUDAGuard.h>
12
- #include "upfirdn2d.h"
13
-
14
- //------------------------------------------------------------------------
15
-
16
- static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain)
17
- {
18
- // Validate arguments.
19
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
20
- TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x");
21
- TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32");
22
- TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
23
- TORCH_CHECK(f.numel() <= INT_MAX, "f is too large");
24
- TORCH_CHECK(x.dim() == 4, "x must be rank 4");
25
- TORCH_CHECK(f.dim() == 2, "f must be rank 2");
26
- TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1");
27
- TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1");
28
- TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1");
29
-
30
- // Create output tensor.
31
- const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
32
- int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx;
33
- int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy;
34
- TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1");
35
- torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format());
36
- TORCH_CHECK(y.numel() <= INT_MAX, "output is too large");
37
-
38
- // Initialize CUDA kernel parameters.
39
- upfirdn2d_kernel_params p;
40
- p.x = x.data_ptr();
41
- p.f = f.data_ptr<float>();
42
- p.y = y.data_ptr();
43
- p.up = make_int2(upx, upy);
44
- p.down = make_int2(downx, downy);
45
- p.pad0 = make_int2(padx0, pady0);
46
- p.flip = (flip) ? 1 : 0;
47
- p.gain = gain;
48
- p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
49
- p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0));
50
- p.filterSize = make_int2((int)f.size(1), (int)f.size(0));
51
- p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0));
52
- p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0));
53
- p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0));
54
- p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z;
55
- p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1;
56
-
57
- // Choose CUDA kernel.
58
- upfirdn2d_kernel_spec spec;
59
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
60
- {
61
- spec = choose_upfirdn2d_kernel<scalar_t>(p);
62
- });
63
-
64
- // Set looping options.
65
- p.loopMajor = (p.sizeMajor - 1) / 16384 + 1;
66
- p.loopMinor = spec.loopMinor;
67
- p.loopX = spec.loopX;
68
- p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1;
69
- p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1;
70
-
71
- // Compute grid size.
72
- dim3 blockSize, gridSize;
73
- if (spec.tileOutW < 0) // large
74
- {
75
- blockSize = dim3(4, 32, 1);
76
- gridSize = dim3(
77
- ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor,
78
- (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1,
79
- p.launchMajor);
80
- }
81
- else // small
82
- {
83
- blockSize = dim3(256, 1, 1);
84
- gridSize = dim3(
85
- ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor,
86
- (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1,
87
- p.launchMajor);
88
- }
89
-
90
- // Launch CUDA kernel.
91
- void* args[] = {&p};
92
- AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
93
- return y;
94
- }
95
-
96
- //------------------------------------------------------------------------
97
-
98
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
99
- {
100
- m.def("upfirdn2d", &upfirdn2d);
101
- }
102
-
103
- //------------------------------------------------------------------------
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/global_config.py DELETED
@@ -1,12 +0,0 @@
1
- # Device
2
- cuda_visible_devices = '0'
3
- device = 'cuda:0'
4
-
5
- # Logs
6
- training_step = 1
7
- image_rec_result_log_snapshot = 100
8
- pivotal_training_steps = 0
9
- model_snapshot_interval = 400
10
-
11
- # Run name to be updated during PTI
12
- run_name = 'exp'
 
spaces/Anar0140/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/style.css DELETED
@@ -1,28 +0,0 @@
1
- body {
2
- padding: 2rem;
3
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
4
- }
5
-
6
- h1 {
7
- font-size: 16px;
8
- margin-top: 0;
9
- }
10
-
11
- p {
12
- color: rgb(107, 114, 128);
13
- font-size: 15px;
14
- margin-bottom: 10px;
15
- margin-top: 5px;
16
- }
17
-
18
- .card {
19
- max-width: 620px;
20
- margin: 0 auto;
21
- padding: 16px;
22
- border: 1px solid lightgray;
23
- border-radius: 16px;
24
- }
25
-
26
- .card p:last-child {
27
- margin-bottom: 0;
28
- }
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/text2image.md DELETED
@@ -1,277 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
-
14
- # Text-to-image
15
-
16
- <Tip warning={true}>
17
-
18
- The text-to-image fine-tuning script is experimental. It's easy to overfit and run into issues like catastrophic forgetting. We recommend you explore different hyperparameters to get the best results on your dataset.
19
-
20
- </Tip>
21
-
22
- Text-to-image models like Stable Diffusion generate an image from a text prompt. This guide will show you how to finetune the [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) model on your own dataset with PyTorch and Flax. All the training scripts for text-to-image finetuning used in this guide can be found in this [repository](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) if you're interested in taking a closer look.
23
-
24
- Before running the scripts, make sure to install the library's training dependencies:
25
-
26
- ```bash
27
- pip install git+https://github.com/huggingface/diffusers.git
28
- pip install -U -r requirements.txt
29
- ```
30
-
31
- And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with:
32
-
33
- ```bash
34
- accelerate config
35
- ```
36
-
37
- If you have already cloned the repo, then you won't need to go through these steps. Instead, you can pass the path to your local checkout to the training script and it will be loaded from there.
38
-
39
- ## Hardware requirements
40
-
41
- Using `gradient_checkpointing` and `mixed_precision`, it should be possible to finetune the model on a single 24GB GPU. For higher `batch_size`'s and faster training, it's better to use GPUs with more than 30GB of GPU memory. You can also use JAX/Flax for fine-tuning on TPUs or GPUs, which will be covered [below](#flax-jax-finetuning).
42
-
43
- You can reduce your memory footprint even more by enabling memory efficient attention with xFormers. Make sure you have [xFormers installed](./optimization/xformers) and pass the `--enable_xformers_memory_efficient_attention` flag to the training script.
44
-
45
- xFormers is not available for Flax.
46
-
47
- ## Upload model to Hub
48
-
49
- Store your model on the Hub by adding the following argument to the training script:
50
-
51
- ```bash
52
- --push_to_hub
53
- ```
54
-
55
- ## Save and load checkpoints
56
-
57
- It is a good idea to regularly save checkpoints in case anything happens during training. To save a checkpoint, pass the following argument to the training script:
58
-
59
- ```bash
60
- --checkpointing_steps=500
61
- ```
62
-
63
- Every 500 steps, the full training state is saved in a subfolder in the `output_dir`. The checkpoint has the format `checkpoint-` followed by the number of steps trained so far. For example, `checkpoint-1500` is a checkpoint saved after 1500 training steps.
64
-
65
- To load a checkpoint to resume training, pass the argument `--resume_from_checkpoint` to the training script and specify the checkpoint you want to resume from. For example, the following argument resumes training from the checkpoint saved after 1500 training steps:
66
-
67
- ```bash
68
- --resume_from_checkpoint="checkpoint-1500"
69
- ```
70
-
71
- ## Fine-tuning
72
-
73
- <frameworkcontent>
74
- <pt>
75
- Launch the [PyTorch training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) for a fine-tuning run on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset like this.
76
-
77
- Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
78
-
79
- ```bash
80
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
81
- export dataset_name="lambdalabs/pokemon-blip-captions"
82
-
83
- accelerate launch --mixed_precision="fp16" train_text_to_image.py \
84
- --pretrained_model_name_or_path=$MODEL_NAME \
85
- --dataset_name=$dataset_name \
86
- --use_ema \
87
- --resolution=512 --center_crop --random_flip \
88
- --train_batch_size=1 \
89
- --gradient_accumulation_steps=4 \
90
- --gradient_checkpointing \
91
- --max_train_steps=15000 \
92
- --learning_rate=1e-05 \
93
- --max_grad_norm=1 \
94
- --lr_scheduler="constant" --lr_warmup_steps=0 \
95
- --output_dir="sd-pokemon-model" \
96
- --push_to_hub
97
- ```
98
-
99
- To finetune on your own dataset, prepare the dataset according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
100
-
101
- Modify the script if you want to use custom loading logic. We left pointers in the code in the appropriate places to help you. 🤗 The example script below shows how to finetune on a local dataset in `TRAIN_DIR` and where to save the model to in `OUTPUT_DIR`:
102
-
103
- ```bash
104
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
105
- export TRAIN_DIR="path_to_your_dataset"
106
- export OUTPUT_DIR="path_to_save_model"
107
-
108
- accelerate launch train_text_to_image.py \
109
- --pretrained_model_name_or_path=$MODEL_NAME \
110
- --train_data_dir=$TRAIN_DIR \
111
- --use_ema \
112
- --resolution=512 --center_crop --random_flip \
113
- --train_batch_size=1 \
114
- --gradient_accumulation_steps=4 \
115
- --gradient_checkpointing \
116
- --mixed_precision="fp16" \
117
- --max_train_steps=15000 \
118
- --learning_rate=1e-05 \
119
- --max_grad_norm=1 \
120
- --lr_scheduler="constant"
121
- --lr_warmup_steps=0 \
122
- --output_dir=${OUTPUT_DIR} \
123
- --push_to_hub
124
- ```
125
-
126
- #### Training with multiple GPUs
127
-
128
- `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch)
129
- for running distributed training with `accelerate`. Here is an example command:
130
-
131
- ```bash
132
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
133
- export dataset_name="lambdalabs/pokemon-blip-captions"
134
-
135
- accelerate launch --mixed_precision="fp16" --multi_gpu train_text_to_image.py \
136
- --pretrained_model_name_or_path=$MODEL_NAME \
137
- --dataset_name=$dataset_name \
138
- --use_ema \
139
- --resolution=512 --center_crop --random_flip \
140
- --train_batch_size=1 \
141
- --gradient_accumulation_steps=4 \
142
- --gradient_checkpointing \
143
- --max_train_steps=15000 \
144
- --learning_rate=1e-05 \
145
- --max_grad_norm=1 \
146
- --lr_scheduler="constant" \
147
- --lr_warmup_steps=0 \
148
- --output_dir="sd-pokemon-model" \
149
- --push_to_hub
150
- ```
151
-
152
- </pt>
153
- <jax>
154
- With Flax, it's possible to train a Stable Diffusion model faster on TPUs and GPUs thanks to [@duongna21](https://github.com/duongna21). This is very efficient on TPU hardware but works great on GPUs too. The Flax training script doesn't support features like gradient checkpointing or gradient accumulation yet, so you'll need a GPU with at least 30GB of memory or a TPU v3.
155
-
156
- Before running the script, make sure you have the requirements installed:
157
-
158
- ```bash
159
- pip install -U -r requirements_flax.txt
160
- ```
161
-
162
- Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
163
-
164
- Now you can launch the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py) like this:
165
-
166
- ```bash
167
- export MODEL_NAME="runwayml/stable-diffusion-v1-5"
168
- export dataset_name="lambdalabs/pokemon-blip-captions"
169
-
170
- python train_text_to_image_flax.py \
171
- --pretrained_model_name_or_path=$MODEL_NAME \
172
- --dataset_name=$dataset_name \
173
- --resolution=512 --center_crop --random_flip \
174
- --train_batch_size=1 \
175
- --max_train_steps=15000 \
176
- --learning_rate=1e-05 \
177
- --max_grad_norm=1 \
178
- --output_dir="sd-pokemon-model" \
179
- --push_to_hub
180
- ```
181
-
182
- To finetune on your own dataset, prepare the dataset according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
183
-
184
- Modify the script if you want to use custom loading logic. We left pointers in the code in the appropriate places to help you. 🤗 The example script below shows how to finetune on a local dataset in `TRAIN_DIR`:
185
-
186
- ```bash
187
- export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
188
- export TRAIN_DIR="path_to_your_dataset"
189
-
190
- python train_text_to_image_flax.py \
191
- --pretrained_model_name_or_path=$MODEL_NAME \
192
- --train_data_dir=$TRAIN_DIR \
193
- --resolution=512 --center_crop --random_flip \
194
- --train_batch_size=1 \
195
- --mixed_precision="fp16" \
196
- --max_train_steps=15000 \
197
- --learning_rate=1e-05 \
198
- --max_grad_norm=1 \
199
- --output_dir="sd-pokemon-model" \
200
- --push_to_hub
201
- ```
202
- </jax>
203
- </frameworkcontent>
204
-
205
- ## Training with Min-SNR weighting
206
-
207
- We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) which helps to achieve faster convergence
208
- by rebalancing the loss. In order to use it, one needs to set the `--snr_gamma` argument. The recommended
209
- value when using it is 5.0.
210
-
211
- You can find [this project on Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) that compares the loss surfaces of the following setups:
212
-
213
- * Training without the Min-SNR weighting strategy
214
- * Training with the Min-SNR weighting strategy (`snr_gamma` set to 5.0)
215
- * Training with the Min-SNR weighting strategy (`snr_gamma` set to 1.0)
216
-
217
- For our small Pokémon dataset, the effects of the Min-SNR weighting strategy may not look pronounced, but we believe they will be more visible on larger datasets.
218
-
219
- Also, note that in this example, we either predict `epsilon` (i.e., the noise) or the `v_prediction`. For both of these cases, the formulation of the Min-SNR weighting strategy that we have used holds.
220
-
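- As a rough sketch of the weighting itself (not the full training loop; the `snr` tensor is assumed to already hold the per-timestep signal-to-noise ratios), each sample's MSE is rescaled by `min(SNR, gamma) / SNR`:
- 
- ```python
- import torch
- 
- def min_snr_weights(snr: torch.Tensor, gamma: float = 5.0, v_prediction: bool = False) -> torch.Tensor:
-     # Clamp the SNR at gamma so easy (high-SNR) timesteps stop dominating,
-     # then normalize according to the prediction target.
-     clamped = torch.clamp(snr, max=gamma)
-     return clamped / (snr + 1.0) if v_prediction else clamped / snr
- 
- # per_sample_loss: per-sample MSE, shape (batch,)
- # loss = (per_sample_loss * min_snr_weights(snr)).mean()
- ```
- 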
221
- <Tip warning={true}>
222
-
223
- Training with Min-SNR weighting strategy is only supported in PyTorch.
224
-
225
- </Tip>
226
-
227
- ## LoRA
228
-
229
- You can also use Low-Rank Adaptation of Large Language Models (LoRA), a fine-tuning technique for accelerating training large models, for fine-tuning text-to-image models. For more details, take a look at the [LoRA training](lora#text-to-image) guide.
230
-
231
- ## Inference
232
-
233
- Now you can load the fine-tuned model for inference by passing the model path or model name on the Hub to the [`StableDiffusionPipeline`]:
234
-
235
- <frameworkcontent>
236
- <pt>
237
- ```python
238
- import torch
- from diffusers import StableDiffusionPipeline
239
-
240
- model_path = "path_to_saved_model"
241
- pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
242
- pipe.to("cuda")
243
-
244
- image = pipe(prompt="yoda").images[0]
245
- image.save("yoda-pokemon.png")
246
- ```
247
- </pt>
248
- <jax>
249
- ```python
250
- import jax
251
- import numpy as np
252
- from flax.jax_utils import replicate
253
- from flax.training.common_utils import shard
254
- from diffusers import FlaxStableDiffusionPipeline
255
-
256
- model_path = "path_to_saved_model"
257
- pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)
258
-
259
- prompt = "yoda pokemon"
260
- prng_seed = jax.random.PRNGKey(0)
261
- num_inference_steps = 50
262
-
263
- num_samples = jax.device_count()
264
- prompt = num_samples * [prompt]
265
- prompt_ids = pipeline.prepare_inputs(prompt)
266
-
267
- # shard inputs and rng
268
- params = replicate(params)
269
- prng_seed = jax.random.split(prng_seed, jax.device_count())
270
- prompt_ids = shard(prompt_ids)
271
-
272
- images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
273
- images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
274
- image.save("yoda-pokemon.png")
275
- ```
276
- </jax>
277
- </frameworkcontent>
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/hrfpn.py DELETED
@@ -1,102 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from mmcv.cnn import ConvModule, caffe2_xavier_init
- from torch.utils.checkpoint import checkpoint
-
- from ..builder import NECKS
-
-
- @NECKS.register_module()
- class HRFPN(nn.Module):
-     """HRFPN (High Resolution Feature Pyramids)
-
-     paper: `High-Resolution Representations for Labeling Pixels and Regions
-     <https://arxiv.org/abs/1904.04514>`_.
-
-     Args:
-         in_channels (list): number of channels for each branch.
-         out_channels (int): output channels of feature pyramids.
-         num_outs (int): number of output stages.
-         pooling_type (str): pooling for generating feature pyramids
-             from {MAX, AVG}.
-         conv_cfg (dict): dictionary to construct and config conv layer.
-         norm_cfg (dict): dictionary to construct and config norm layer.
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed.
-         stride (int): stride of 3x3 convolutional layers
-     """
-
-     def __init__(self,
-                  in_channels,
-                  out_channels,
-                  num_outs=5,
-                  pooling_type='AVG',
-                  conv_cfg=None,
-                  norm_cfg=None,
-                  with_cp=False,
-                  stride=1):
-         super(HRFPN, self).__init__()
-         assert isinstance(in_channels, list)
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.num_ins = len(in_channels)
-         self.num_outs = num_outs
-         self.with_cp = with_cp
-         self.conv_cfg = conv_cfg
-         self.norm_cfg = norm_cfg
-
-         self.reduction_conv = ConvModule(
-             sum(in_channels),
-             out_channels,
-             kernel_size=1,
-             conv_cfg=self.conv_cfg,
-             act_cfg=None)
-
-         self.fpn_convs = nn.ModuleList()
-         for i in range(self.num_outs):
-             self.fpn_convs.append(
-                 ConvModule(
-                     out_channels,
-                     out_channels,
-                     kernel_size=3,
-                     padding=1,
-                     stride=stride,
-                     conv_cfg=self.conv_cfg,
-                     act_cfg=None))
-
-         if pooling_type == 'MAX':
-             self.pooling = F.max_pool2d
-         else:
-             self.pooling = F.avg_pool2d
-
-     def init_weights(self):
-         """Initialize the weights of module."""
-         for m in self.modules():
-             if isinstance(m, nn.Conv2d):
-                 caffe2_xavier_init(m)
-
-     def forward(self, inputs):
-         """Forward function."""
-         assert len(inputs) == self.num_ins
-         outs = [inputs[0]]
-         for i in range(1, self.num_ins):
-             outs.append(
-                 F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
-         out = torch.cat(outs, dim=1)
-         if out.requires_grad and self.with_cp:
-             out = checkpoint(self.reduction_conv, out)
-         else:
-             out = self.reduction_conv(out)
-         outs = [out]
-         for i in range(1, self.num_outs):
-             outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
-         outputs = []
-
-         for i in range(self.num_outs):
-             if outs[i].requires_grad and self.with_cp:
-                 tmp_out = checkpoint(self.fpn_convs[i], outs[i])
-             else:
-                 tmp_out = self.fpn_convs[i](outs[i])
-             outputs.append(tmp_out)
-         return tuple(outputs)
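As a rough usage sketch of the neck above (the channel widths follow HRNet-W18, and the 64x64 base resolution is arbitrary, chosen only to make the pyramid shapes visible):

```python
import torch

# four HRNet branches -> five pyramid levels, all with 256 channels
neck = HRFPN(in_channels=[18, 36, 72, 144], out_channels=256, num_outs=5)
neck.init_weights()

feats = [torch.randn(1, c, 64 // 2**i, 64 // 2**i)
         for i, c in enumerate([18, 36, 72, 144])]
outs = neck(feats)
print([tuple(o.shape) for o in outs])  # 64x64 down to 4x4 feature maps
```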
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py DELETED
@@ -1,9 +0,0 @@
- _base_ = './ocrnet_hr18_512x512_80k_ade20k.py'
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w18_small',
-     backbone=dict(
-         extra=dict(
-             stage1=dict(num_blocks=(2, )),
-             stage2=dict(num_blocks=(2, 2)),
-             stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
-             stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
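This config only overrides the backbone for the smaller HRNetV2-W18-Small on top of its `_base_`; one way to inspect the merged result (assuming an MMSegmentation checkout where the relative path resolves):

```python
from mmcv import Config

cfg = Config.fromfile('configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py')
print(cfg.model.pretrained)                         # open-mmlab://msra/hrnetv2_w18_small
print(cfg.model.backbone.extra.stage4.num_modules)  # 2, overriding the base config
```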
spaces/AngoHF/ANGO-Leaderboard/assets/__init__.py DELETED
File without changes
spaces/AnishKumbhar/ChatBot/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: ChatBot
- emoji: 📚
- colorFrom: gray
- colorTo: blue
- sdk: gradio
- sdk_version: 3.47.1
- app_file: app.py
- pinned: false
- license: llama2
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ankush05/Newcode/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Newcode
- emoji: 📚
- colorFrom: gray
- colorTo: yellow
- sdk: streamlit
- sdk_version: 1.27.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/diffusionmodules/util.py DELETED
@@ -1,270 +0,0 @@
- # adopted from
- # https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
- # and
- # https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
- # and
- # https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
- #
- # thanks!
-
-
- import os
- import math
- import torch
- import torch.nn as nn
- import numpy as np
- from einops import repeat
-
- from ldm.util import instantiate_from_config
-
-
- def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-     if schedule == "linear":
-         betas = (
-                 torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
-         )
-
-     elif schedule == "cosine":
-         timesteps = (
-                 torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
-         )
-         alphas = timesteps / (1 + cosine_s) * np.pi / 2
-         alphas = torch.cos(alphas).pow(2)
-         alphas = alphas / alphas[0]
-         betas = 1 - alphas[1:] / alphas[:-1]
-         betas = np.clip(betas, a_min=0, a_max=0.999)
-
-     elif schedule == "sqrt_linear":
-         betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
-     elif schedule == "sqrt":
-         betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
-     else:
-         raise ValueError(f"schedule '{schedule}' unknown.")
-     return betas.numpy()
-
-
- def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
-     if ddim_discr_method == 'uniform':
-         c = num_ddpm_timesteps // num_ddim_timesteps
-         ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
-     elif ddim_discr_method == 'quad':
-         ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
-     else:
-         raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
-
-     # assert ddim_timesteps.shape[0] == num_ddim_timesteps
-     # add one to get the final alpha values right (the ones from first scale to data during sampling)
-     steps_out = ddim_timesteps + 1
-     if verbose:
-         print(f'Selected timesteps for ddim sampler: {steps_out}')
-     return steps_out
-
-
- def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
-     # select alphas for computing the variance schedule
-     alphas = alphacums[ddim_timesteps]
-     alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
-     # according to the formula provided in https://arxiv.org/abs/2010.02502
-     sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
-     if verbose:
-         print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
-         print(f'For the chosen value of eta, which is {eta}, '
-               f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
-     return sigmas, alphas, alphas_prev
-
-
- def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
-     """
-     Create a beta schedule that discretizes the given alpha_t_bar function,
-     which defines the cumulative product of (1-beta) over time from t = [0,1].
-     :param num_diffusion_timesteps: the number of betas to produce.
-     :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
-                       produces the cumulative product of (1-beta) up to that
-                       part of the diffusion process.
-     :param max_beta: the maximum beta to use; use values lower than 1 to
-                      prevent singularities.
-     """
-     betas = []
-     for i in range(num_diffusion_timesteps):
-         t1 = i / num_diffusion_timesteps
-         t2 = (i + 1) / num_diffusion_timesteps
-         betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
-     return np.array(betas)
-
-
- def extract_into_tensor(a, t, x_shape):
-     b, *_ = t.shape
-     out = a.gather(-1, t)
-     return out.reshape(b, *((1,) * (len(x_shape) - 1)))
-
-
- def checkpoint(func, inputs, params, flag):
-     """
-     Evaluate a function without caching intermediate activations, allowing for
-     reduced memory at the expense of extra compute in the backward pass.
-     :param func: the function to evaluate.
-     :param inputs: the argument sequence to pass to `func`.
-     :param params: a sequence of parameters `func` depends on but does not
-                    explicitly take as arguments.
-     :param flag: if False, disable gradient checkpointing.
-     """
-     if flag:
-         args = tuple(inputs) + tuple(params)
-         return CheckpointFunction.apply(func, len(inputs), *args)
-     else:
-         return func(*inputs)
-
-
- class CheckpointFunction(torch.autograd.Function):
-     @staticmethod
-     def forward(ctx, run_function, length, *args):
-         ctx.run_function = run_function
-         ctx.input_tensors = list(args[:length])
-         ctx.input_params = list(args[length:])
-         ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(),
-                                    "dtype": torch.get_autocast_gpu_dtype(),
-                                    "cache_enabled": torch.is_autocast_cache_enabled()}
-         with torch.no_grad():
-             output_tensors = ctx.run_function(*ctx.input_tensors)
-         return output_tensors
-
-     @staticmethod
-     def backward(ctx, *output_grads):
-         ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
-         with torch.enable_grad(), \
-                 torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
-             # Fixes a bug where the first op in run_function modifies the
-             # Tensor storage in place, which is not allowed for detach()'d
-             # Tensors.
-             shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
-             output_tensors = ctx.run_function(*shallow_copies)
-         input_grads = torch.autograd.grad(
-             output_tensors,
-             ctx.input_tensors + ctx.input_params,
-             output_grads,
-             allow_unused=True,
-         )
-         del ctx.input_tensors
-         del ctx.input_params
-         del output_tensors
-         return (None, None) + input_grads
-
-
- def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
-     """
-     Create sinusoidal timestep embeddings.
-     :param timesteps: a 1-D Tensor of N indices, one per batch element.
-                       These may be fractional.
-     :param dim: the dimension of the output.
-     :param max_period: controls the minimum frequency of the embeddings.
-     :return: an [N x dim] Tensor of positional embeddings.
-     """
-     if not repeat_only:
-         half = dim // 2
-         freqs = torch.exp(
-             -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
-         ).to(device=timesteps.device)
-         args = timesteps[:, None].float() * freqs[None]
-         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
-         if dim % 2:
-             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
-     else:
-         embedding = repeat(timesteps, 'b -> b d', d=dim)
-     return embedding
-
-
- def zero_module(module):
-     """
-     Zero out the parameters of a module and return it.
-     """
-     for p in module.parameters():
-         p.detach().zero_()
-     return module
-
-
- def scale_module(module, scale):
-     """
-     Scale the parameters of a module and return it.
-     """
-     for p in module.parameters():
-         p.detach().mul_(scale)
-     return module
-
-
- def mean_flat(tensor):
-     """
-     Take the mean over all non-batch dimensions.
-     """
-     return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
- def normalization(channels):
-     """
-     Make a standard normalization layer.
-     :param channels: number of input channels.
-     :return: an nn.Module for normalization.
-     """
-     return GroupNorm32(32, channels)
-
-
- # PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
- class SiLU(nn.Module):
-     def forward(self, x):
-         return x * torch.sigmoid(x)
-
-
- class GroupNorm32(nn.GroupNorm):
-     def forward(self, x):
-         return super().forward(x.float()).type(x.dtype)
-
- def conv_nd(dims, *args, **kwargs):
-     """
-     Create a 1D, 2D, or 3D convolution module.
-     """
-     if dims == 1:
-         return nn.Conv1d(*args, **kwargs)
-     elif dims == 2:
-         return nn.Conv2d(*args, **kwargs)
-     elif dims == 3:
-         return nn.Conv3d(*args, **kwargs)
-     raise ValueError(f"unsupported dimensions: {dims}")
-
-
- def linear(*args, **kwargs):
-     """
-     Create a linear module.
-     """
-     return nn.Linear(*args, **kwargs)
-
-
- def avg_pool_nd(dims, *args, **kwargs):
-     """
-     Create a 1D, 2D, or 3D average pooling module.
-     """
-     if dims == 1:
-         return nn.AvgPool1d(*args, **kwargs)
-     elif dims == 2:
-         return nn.AvgPool2d(*args, **kwargs)
-     elif dims == 3:
-         return nn.AvgPool3d(*args, **kwargs)
-     raise ValueError(f"unsupported dimensions: {dims}")
-
-
- class HybridConditioner(nn.Module):
-
-     def __init__(self, c_concat_config, c_crossattn_config):
-         super().__init__()
-         self.concat_conditioner = instantiate_from_config(c_concat_config)
-         self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
-
-     def forward(self, c_concat, c_crossattn):
-         c_concat = self.concat_conditioner(c_concat)
-         c_crossattn = self.crossattn_conditioner(c_crossattn)
-         return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
-
-
- def noise_like(shape, device, repeat=False):
-     repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
-     noise = lambda: torch.randn(shape, device=device)
-     return repeat_noise() if repeat else noise()
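A quick illustrative sanity check of the sinusoidal `timestep_embedding` above (batch size and dimension are arbitrary; assumes the module is importable at its usual path):

```python
import torch
from ldm.modules.diffusionmodules.util import timestep_embedding

t = torch.arange(4)                   # four example timesteps
emb = timestep_embedding(t, dim=128)  # concatenated cos/sin features
assert emb.shape == (4, 128)
```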
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__init__.py DELETED
@@ -1,18 +0,0 @@
- # SPDX-FileCopyrightText: 2015 Eric Larson
- #
- # SPDX-License-Identifier: Apache-2.0
-
- """CacheControl import Interface.
-
- Make it easy to import from cachecontrol without long namespaces.
- """
- __author__ = "Eric Larson"
- __email__ = "[email protected]"
- __version__ = "0.12.11"
-
- from .wrapper import CacheControl
- from .adapter import CacheControlAdapter
- from .controller import CacheController
-
- import logging
- logging.getLogger(__name__).addHandler(logging.NullHandler())
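For context, the wrapper exported here is typically used like this (sketched against the standalone `cachecontrol` package; pip's vendored copy under `pip._vendor` is internal and not meant to be imported directly):

```python
import requests
from cachecontrol import CacheControl

sess = CacheControl(requests.Session())  # caches responses per HTTP caching headers
resp = sess.get("https://example.com")
```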
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/url.py DELETED
@@ -1,435 +0,0 @@
- from __future__ import absolute_import
-
- import re
- from collections import namedtuple
-
- from ..exceptions import LocationParseError
- from ..packages import six
-
- url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
-
- # We only want to normalize urls with an HTTP(S) scheme.
- # urllib3 infers URLs without a scheme (None) to be http.
- NORMALIZABLE_SCHEMES = ("http", "https", None)
-
- # Almost all of these patterns were derived from the
- # 'rfc3986' module: https://github.com/python-hyper/rfc3986
- PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
- SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
- URI_RE = re.compile(
-     r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
-     r"(?://([^\\/?#]*))?"
-     r"([^?#]*)"
-     r"(?:\?([^#]*))?"
-     r"(?:#(.*))?$",
-     re.UNICODE | re.DOTALL,
- )
-
- IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
- HEX_PAT = "[0-9A-Fa-f]{1,4}"
- LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
- _subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
- _variations = [
-     # 6( h16 ":" ) ls32
-     "(?:%(hex)s:){6}%(ls32)s",
-     # "::" 5( h16 ":" ) ls32
-     "::(?:%(hex)s:){5}%(ls32)s",
-     # [ h16 ] "::" 4( h16 ":" ) ls32
-     "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
-     # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
-     "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
-     # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
-     "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
-     # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
-     "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
-     # [ *4( h16 ":" ) h16 ] "::" ls32
-     "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
-     # [ *5( h16 ":" ) h16 ] "::" h16
-     "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
-     # [ *6( h16 ":" ) h16 ] "::"
-     "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
- ]
-
- UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._\-~"
- IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
- ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
- IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
- REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
- TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
-
- IPV4_RE = re.compile("^" + IPV4_PAT + "$")
- IPV6_RE = re.compile("^" + IPV6_PAT + "$")
- IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
- BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
- ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
-
- _HOST_PORT_PAT = ("^(%s|%s|%s)(?::0*?(|0|[1-9][0-9]{0,4}))?$") % (
-     REG_NAME_PAT,
-     IPV4_PAT,
-     IPV6_ADDRZ_PAT,
- )
- _HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL)
-
- UNRESERVED_CHARS = set(
-     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
- )
- SUB_DELIM_CHARS = set("!$&'()*+,;=")
- USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
- PATH_CHARS = USERINFO_CHARS | {"@", "/"}
- QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
-
-
- class Url(namedtuple("Url", url_attrs)):
-     """
-     Data structure for representing an HTTP URL. Used as a return value for
-     :func:`parse_url`. Both the scheme and host are normalized as they are
-     both case-insensitive according to RFC 3986.
-     """
-
-     __slots__ = ()
-
-     def __new__(
-         cls,
-         scheme=None,
-         auth=None,
-         host=None,
-         port=None,
-         path=None,
-         query=None,
-         fragment=None,
-     ):
-         if path and not path.startswith("/"):
-             path = "/" + path
-         if scheme is not None:
-             scheme = scheme.lower()
-         return super(Url, cls).__new__(
-             cls, scheme, auth, host, port, path, query, fragment
-         )
-
-     @property
-     def hostname(self):
-         """For backwards-compatibility with urlparse. We're nice like that."""
-         return self.host
-
-     @property
-     def request_uri(self):
-         """Absolute path including the query string."""
-         uri = self.path or "/"
-
-         if self.query is not None:
-             uri += "?" + self.query
-
-         return uri
-
-     @property
-     def netloc(self):
-         """Network location including host and port"""
-         if self.port:
-             return "%s:%d" % (self.host, self.port)
-         return self.host
-
-     @property
-     def url(self):
-         """
-         Convert self into a url
-
-         This function should more or less round-trip with :func:`.parse_url`. The
-         returned url may not be exactly the same as the url inputted to
-         :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
-         with a blank port will have : removed).
-
-         Example: ::
-
-             >>> U = parse_url('http://google.com/mail/')
-             >>> U.url
-             'http://google.com/mail/'
-             >>> Url('http', 'username:password', 'host.com', 80,
-             ...     '/path', 'query', 'fragment').url
-             'http://username:password@host.com:80/path?query#fragment'
-         """
-         scheme, auth, host, port, path, query, fragment = self
-         url = u""
-
-         # We use "is not None" because we want things to happen with empty strings (or 0 port)
-         if scheme is not None:
-             url += scheme + u"://"
-         if auth is not None:
-             url += auth + u"@"
-         if host is not None:
-             url += host
-         if port is not None:
-             url += u":" + str(port)
-         if path is not None:
-             url += path
-         if query is not None:
-             url += u"?" + query
-         if fragment is not None:
-             url += u"#" + fragment
-
-         return url
-
-     def __str__(self):
-         return self.url
-
-
- def split_first(s, delims):
-     """
-     .. deprecated:: 1.25
-
-     Given a string and an iterable of delimiters, split on the first found
-     delimiter. Return two split parts and the matched delimiter.
-
-     If not found, then the first part is the full input string.
-
-     Example::
-
-         >>> split_first('foo/bar?baz', '?/=')
-         ('foo', 'bar?baz', '/')
-         >>> split_first('foo/bar?baz', '123')
-         ('foo/bar?baz', '', None)
-
-     Scales linearly with number of delims. Not ideal for large number of delims.
-     """
-     min_idx = None
-     min_delim = None
-     for d in delims:
-         idx = s.find(d)
-         if idx < 0:
-             continue
-
-         if min_idx is None or idx < min_idx:
-             min_idx = idx
-             min_delim = d
-
-     if min_idx is None or min_idx < 0:
-         return s, "", None
-
-     return s[:min_idx], s[min_idx + 1 :], min_delim
-
-
- def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
-     """Percent-encodes a URI component without reapplying
-     onto an already percent-encoded component.
-     """
-     if component is None:
-         return component
-
-     component = six.ensure_text(component)
-
-     # Normalize existing percent-encoded bytes.
-     # Try to see if the component we're encoding is already percent-encoded
-     # so we can skip all '%' characters but still encode all others.
-     component, percent_encodings = PERCENT_RE.subn(
-         lambda match: match.group(0).upper(), component
-     )
-
-     uri_bytes = component.encode("utf-8", "surrogatepass")
-     is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
-     encoded_component = bytearray()
-
-     for i in range(0, len(uri_bytes)):
-         # Will return a single character bytestring on both Python 2 & 3
-         byte = uri_bytes[i : i + 1]
-         byte_ord = ord(byte)
-         if (is_percent_encoded and byte == b"%") or (
-             byte_ord < 128 and byte.decode() in allowed_chars
-         ):
-             encoded_component += byte
-             continue
-         encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
-
-     return encoded_component.decode(encoding)
-
-
- def _remove_path_dot_segments(path):
-     # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
-     segments = path.split("/")  # Turn the path into a list of segments
-     output = []  # Initialize the variable to use to store output
-
-     for segment in segments:
-         # '.' is the current directory, so ignore it, it is superfluous
-         if segment == ".":
-             continue
-         # Anything other than '..', should be appended to the output
-         elif segment != "..":
-             output.append(segment)
-         # In this case segment == '..', if we can, we should pop the last
-         # element
-         elif output:
-             output.pop()
-
-     # If the path starts with '/' and the output is empty or the first string
-     # is non-empty
-     if path.startswith("/") and (not output or output[0]):
-         output.insert(0, "")
-
-     # If the path starts with '/.' or '/..' ensure we add one more empty
-     # string to add a trailing '/'
-     if path.endswith(("/.", "/..")):
-         output.append("")
-
-     return "/".join(output)
-
-
- def _normalize_host(host, scheme):
-     if host:
-         if isinstance(host, six.binary_type):
-             host = six.ensure_str(host)
-
-         if scheme in NORMALIZABLE_SCHEMES:
-             is_ipv6 = IPV6_ADDRZ_RE.match(host)
-             if is_ipv6:
-                 # IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
-                 # such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
-                 # separator as necessary to return a valid RFC 4007 scoped IP.
-                 match = ZONE_ID_RE.search(host)
-                 if match:
-                     start, end = match.span(1)
-                     zone_id = host[start:end]
-
-                     if zone_id.startswith("%25") and zone_id != "%25":
-                         zone_id = zone_id[3:]
-                     else:
-                         zone_id = zone_id[1:]
-                     zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
-                     return host[:start].lower() + zone_id + host[end:]
-                 else:
-                     return host.lower()
-             elif not IPV4_RE.match(host):
-                 return six.ensure_str(
-                     b".".join([_idna_encode(label) for label in host.split(".")])
-                 )
-     return host
-
-
- def _idna_encode(name):
-     if name and any(ord(x) >= 128 for x in name):
-         try:
-             from pip._vendor import idna
-         except ImportError:
-             six.raise_from(
-                 LocationParseError("Unable to parse URL without the 'idna' module"),
-                 None,
-             )
-         try:
-             return idna.encode(name.lower(), strict=True, std3_rules=True)
-         except idna.IDNAError:
-             six.raise_from(
-                 LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
-             )
-     return name.lower().encode("ascii")
-
-
- def _encode_target(target):
-     """Percent-encodes a request target so that there are no invalid characters"""
-     path, query = TARGET_RE.match(target).groups()
-     target = _encode_invalid_chars(path, PATH_CHARS)
-     query = _encode_invalid_chars(query, QUERY_CHARS)
-     if query is not None:
-         target += "?" + query
-     return target
-
-
- def parse_url(url):
-     """
-     Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
-     performed to parse incomplete urls. Fields not provided will be None.
-     This parser is RFC 3986 and RFC 6874 compliant.
-
-     The parser logic and helper functions are based heavily on
-     work done in the ``rfc3986`` module.
-
-     :param str url: URL to parse into a :class:`.Url` namedtuple.
-
-     Partly backwards-compatible with :mod:`urlparse`.
-
-     Example::
-
-         >>> parse_url('http://google.com/mail/')
-         Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
-         >>> parse_url('google.com:80')
-         Url(scheme=None, host='google.com', port=80, path=None, ...)
-         >>> parse_url('/foo?bar')
-         Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
-     """
-     if not url:
-         # Empty
-         return Url()
-
-     source_url = url
-     if not SCHEME_RE.search(url):
-         url = "//" + url
-
-     try:
-         scheme, authority, path, query, fragment = URI_RE.match(url).groups()
-         normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
-
-         if scheme:
-             scheme = scheme.lower()
-
-         if authority:
-             auth, _, host_port = authority.rpartition("@")
-             auth = auth or None
-             host, port = _HOST_PORT_RE.match(host_port).groups()
-             if auth and normalize_uri:
-                 auth = _encode_invalid_chars(auth, USERINFO_CHARS)
-             if port == "":
-                 port = None
-         else:
-             auth, host, port = None, None, None
-
-         if port is not None:
-             port = int(port)
-             if not (0 <= port <= 65535):
-                 raise LocationParseError(url)
-
-         host = _normalize_host(host, scheme)
-
-         if normalize_uri and path:
-             path = _remove_path_dot_segments(path)
-             path = _encode_invalid_chars(path, PATH_CHARS)
-         if normalize_uri and query:
-             query = _encode_invalid_chars(query, QUERY_CHARS)
-         if normalize_uri and fragment:
-             fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
-
-     except (ValueError, AttributeError):
-         return six.raise_from(LocationParseError(source_url), None)
-
-     # For the sake of backwards compatibility we put empty
-     # string values for path if there are any defined values
-     # beyond the path in the URL.
-     # TODO: Remove this when we break backwards compatibility.
-     if not path:
-         if query is not None or fragment is not None:
-             path = ""
-         else:
-             path = None
-
-     # Ensure that each part of the URL is a `str` for
-     # backwards compatibility.
-     if isinstance(url, six.text_type):
-         ensure_func = six.ensure_text
-     else:
-         ensure_func = six.ensure_str
-
-     def ensure_type(x):
-         return x if x is None else ensure_func(x)
-
-     return Url(
-         scheme=ensure_type(scheme),
-         auth=ensure_type(auth),
-         host=ensure_type(host),
-         port=port,
-         path=ensure_type(path),
-         query=ensure_type(query),
-         fragment=ensure_type(fragment),
-     )
-
-
- def get_host(url):
-     """
-     Deprecated. Use :func:`parse_url` instead.
-     """
-     p = parse_url(url)
-     return p.scheme or "http", p.hostname, p.port
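A short illustrative use of the parser defined above (expected values shown as comments):

```python
url = parse_url("https://user:pass@example.com:8443/a/../b?q=1#frag")
print(url.scheme, url.host, url.port)  # https example.com 8443
print(url.path)                        # /b  (dot segments removed)
print(url.request_uri)                 # /b?q=1
```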
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/cmd.py DELETED
@@ -1,436 +0,0 @@
- """distutils.cmd
-
- Provides the Command class, the base class for the command classes
- in the distutils.command package.
- """
-
- import sys
- import os
- import re
- from distutils.errors import DistutilsOptionError
- from distutils import util, dir_util, file_util, archive_util, dep_util
- from distutils import log
-
-
- class Command:
-     """Abstract base class for defining command classes, the "worker bees"
-     of the Distutils. A useful analogy for command classes is to think of
-     them as subroutines with local variables called "options". The options
-     are "declared" in 'initialize_options()' and "defined" (given their
-     final values, aka "finalized") in 'finalize_options()', both of which
-     must be defined by every command class. The distinction between the
-     two is necessary because option values might come from the outside
-     world (command line, config file, ...), and any options dependent on
-     other options must be computed *after* these outside influences have
-     been processed -- hence 'finalize_options()'. The "body" of the
-     subroutine, where it does all its work based on the values of its
-     options, is the 'run()' method, which must also be implemented by every
-     command class.
-     """
-
-     # 'sub_commands' formalizes the notion of a "family" of commands,
-     # eg. "install" as the parent with sub-commands "install_lib",
-     # "install_headers", etc. The parent of a family of commands
-     # defines 'sub_commands' as a class attribute; it's a list of
-     # (command_name : string, predicate : unbound_method | string | None)
-     # tuples, where 'predicate' is a method of the parent command that
-     # determines whether the corresponding command is applicable in the
-     # current situation. (E.g. "install_headers" is only applicable if
-     # we have any C header files to install.) If 'predicate' is None,
-     # that command is always applicable.
-     #
-     # 'sub_commands' is usually defined at the *end* of a class, because
-     # predicates can be unbound methods, so they must already have been
-     # defined. The canonical example is the "install" command.
-     sub_commands = []
-
-     # -- Creation/initialization methods -------------------------------
-
-     def __init__(self, dist):
-         """Create and initialize a new Command object. Most importantly,
-         invokes the 'initialize_options()' method, which is the real
-         initializer and depends on the actual command being
-         instantiated.
-         """
-         # late import because of mutual dependence between these classes
-         from distutils.dist import Distribution
-
-         if not isinstance(dist, Distribution):
-             raise TypeError("dist must be a Distribution instance")
-         if self.__class__ is Command:
-             raise RuntimeError("Command is an abstract class")
-
-         self.distribution = dist
-         self.initialize_options()
-
-         # Per-command versions of the global flags, so that the user can
-         # customize Distutils' behaviour command-by-command and let some
-         # commands fall back on the Distribution's behaviour. None means
-         # "not defined, check self.distribution's copy", while 0 or 1 mean
-         # false and true (duh). Note that this means figuring out the real
-         # value of each flag is a touch complicated -- hence "self._dry_run"
-         # will be handled by __getattr__, below.
-         # XXX This needs to be fixed.
-         self._dry_run = None
-
-         # verbose is largely ignored, but needs to be set for
-         # backwards compatibility (I think)?
-         self.verbose = dist.verbose
-
-         # Some commands define a 'self.force' option to ignore file
-         # timestamps, but methods defined *here* assume that
-         # 'self.force' exists for all commands. So define it here
-         # just to be safe.
-         self.force = None
-
-         # The 'help' flag is just used for command-line parsing, so
-         # none of that complicated bureaucracy is needed.
-         self.help = 0
-
-         # 'finalized' records whether or not 'finalize_options()' has been
-         # called. 'finalize_options()' itself should not pay attention to
-         # this flag: it is the business of 'ensure_finalized()', which
-         # always calls 'finalize_options()', to respect/update it.
-         self.finalized = 0
-
-     # XXX A more explicit way to customize dry_run would be better.
-     def __getattr__(self, attr):
-         if attr == 'dry_run':
-             myval = getattr(self, "_" + attr)
-             if myval is None:
-                 return getattr(self.distribution, attr)
-             else:
-                 return myval
-         else:
-             raise AttributeError(attr)
-
-     def ensure_finalized(self):
-         if not self.finalized:
-             self.finalize_options()
-         self.finalized = 1
-
-     # Subclasses must define:
-     #   initialize_options()
-     #     provide default values for all options; may be customized by
-     #     setup script, by options from config file(s), or by command-line
-     #     options
-     #   finalize_options()
-     #     decide on the final values for all options; this is called
-     #     after all possible intervention from the outside world
-     #     (command-line, option file, etc.) has been processed
-     #   run()
-     #     run the command: do whatever it is we're here to do,
-     #     controlled by the command's various option values
-
-     def initialize_options(self):
-         """Set default values for all the options that this command
-         supports. Note that these defaults may be overridden by other
-         commands, by the setup script, by config files, or by the
-         command-line. Thus, this is not the place to code dependencies
-         between options; generally, 'initialize_options()' implementations
-         are just a bunch of "self.foo = None" assignments.
-
-         This method must be implemented by all command classes.
-         """
-         raise RuntimeError(
-             "abstract method -- subclass %s must override" % self.__class__
-         )
-
-     def finalize_options(self):
-         """Set final values for all the options that this command supports.
-         This is always called as late as possible, ie. after any option
-         assignments from the command-line or from other commands have been
-         done. Thus, this is the place to code option dependencies: if
-         'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
-         long as 'foo' still has the same value it was assigned in
-         'initialize_options()'.
-
-         This method must be implemented by all command classes.
-         """
-         raise RuntimeError(
-             "abstract method -- subclass %s must override" % self.__class__
-         )
-
-     def dump_options(self, header=None, indent=""):
-         from distutils.fancy_getopt import longopt_xlate
-
-         if header is None:
-             header = "command options for '%s':" % self.get_command_name()
-         self.announce(indent + header, level=log.INFO)
-         indent = indent + "  "
-         for (option, _, _) in self.user_options:
-             option = option.translate(longopt_xlate)
-             if option[-1] == "=":
-                 option = option[:-1]
-             value = getattr(self, option)
-             self.announce(indent + "{} = {}".format(option, value), level=log.INFO)
-
-     def run(self):
-         """A command's raison d'etre: carry out the action it exists to
-         perform, controlled by the options initialized in
-         'initialize_options()', customized by other commands, the setup
-         script, the command-line, and config files, and finalized in
-         'finalize_options()'. All terminal output and filesystem
-         interaction should be done by 'run()'.
-
-         This method must be implemented by all command classes.
-         """
-         raise RuntimeError(
-             "abstract method -- subclass %s must override" % self.__class__
-         )
-
-     def announce(self, msg, level=1):
-         """If the current verbosity level is greater than or equal to
-         'level' print 'msg' to stdout.
-         """
-         log.log(level, msg)
-
-     def debug_print(self, msg):
-         """Print 'msg' to stdout if the global DEBUG (taken from the
-         DISTUTILS_DEBUG environment variable) flag is true.
-         """
-         from distutils.debug import DEBUG
-
-         if DEBUG:
-             print(msg)
-             sys.stdout.flush()
-
-     # -- Option validation methods -------------------------------------
-     # (these are very handy in writing the 'finalize_options()' method)
-     #
-     # NB. the general philosophy here is to ensure that a particular option
-     # value meets certain type and value constraints. If not, we try to
-     # force it into conformance (eg. if we expect a list but have a string,
-     # split the string on comma and/or whitespace). If we can't force the
-     # option into conformance, raise DistutilsOptionError. Thus, command
-     # classes need do nothing more than (eg.)
-     #   self.ensure_string_list('foo')
-     # and they can be guaranteed that thereafter, self.foo will be
-     # a list of strings.
-
-     def _ensure_stringlike(self, option, what, default=None):
-         val = getattr(self, option)
-         if val is None:
-             setattr(self, option, default)
-             return default
-         elif not isinstance(val, str):
-             raise DistutilsOptionError(
-                 "'{}' must be a {} (got `{}`)".format(option, what, val)
-             )
-         return val
-
-     def ensure_string(self, option, default=None):
-         """Ensure that 'option' is a string; if not defined, set it to
-         'default'.
-         """
-         self._ensure_stringlike(option, "string", default)
-
-     def ensure_string_list(self, option):
-         r"""Ensure that 'option' is a list of strings. If 'option' is
-         currently a string, we split it either on /,\s*/ or /\s+/, so
-         "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
-         ["foo", "bar", "baz"].
-         """
-         val = getattr(self, option)
-         if val is None:
-             return
-         elif isinstance(val, str):
-             setattr(self, option, re.split(r',\s*|\s+', val))
-         else:
-             if isinstance(val, list):
-                 ok = all(isinstance(v, str) for v in val)
-             else:
-                 ok = False
-             if not ok:
-                 raise DistutilsOptionError(
-                     "'{}' must be a list of strings (got {!r})".format(option, val)
-                 )
-
-     def _ensure_tested_string(self, option, tester, what, error_fmt, default=None):
-         val = self._ensure_stringlike(option, what, default)
-         if val is not None and not tester(val):
-             raise DistutilsOptionError(
-                 ("error in '%s' option: " + error_fmt) % (option, val)
-             )
-
-     def ensure_filename(self, option):
-         """Ensure that 'option' is the name of an existing file."""
-         self._ensure_tested_string(
-             option, os.path.isfile, "filename", "'%s' does not exist or is not a file"
-         )
-
-     def ensure_dirname(self, option):
-         self._ensure_tested_string(
-             option,
-             os.path.isdir,
-             "directory name",
-             "'%s' does not exist or is not a directory",
-         )
-
-     # -- Convenience methods for commands ------------------------------
-
-     def get_command_name(self):
-         if hasattr(self, 'command_name'):
-             return self.command_name
-         else:
-             return self.__class__.__name__
-
-     def set_undefined_options(self, src_cmd, *option_pairs):
-         """Set the values of any "undefined" options from corresponding
-         option values in some other command object. "Undefined" here means
-         "is None", which is the convention used to indicate that an option
-         has not been changed between 'initialize_options()' and
-         'finalize_options()'. Usually called from 'finalize_options()' for
-         options that depend on some other command rather than another
-         option of the same command. 'src_cmd' is the other command from
-         which option values will be taken (a command object will be created
-         for it if necessary); the remaining arguments are
-         '(src_option,dst_option)' tuples which mean "take the value of
-         'src_option' in the 'src_cmd' command object, and copy it to
-         'dst_option' in the current command object".
-         """
-         # Option_pairs: list of (src_option, dst_option) tuples
-         src_cmd_obj = self.distribution.get_command_obj(src_cmd)
-         src_cmd_obj.ensure_finalized()
-         for (src_option, dst_option) in option_pairs:
-             if getattr(self, dst_option) is None:
-                 setattr(self, dst_option, getattr(src_cmd_obj, src_option))
-
-     def get_finalized_command(self, command, create=1):
-         """Wrapper around Distribution's 'get_command_obj()' method: find
-         (create if necessary and 'create' is true) the command object for
-         'command', call its 'ensure_finalized()' method, and return the
-         finalized command object.
-         """
-         cmd_obj = self.distribution.get_command_obj(command, create)
-         cmd_obj.ensure_finalized()
-         return cmd_obj
-
-     # XXX rename to 'get_reinitialized_command()'? (should do the
-     # same in dist.py, if so)
-     def reinitialize_command(self, command, reinit_subcommands=0):
-         return self.distribution.reinitialize_command(command, reinit_subcommands)
-
-     def run_command(self, command):
-         """Run some other command: uses the 'run_command()' method of
-         Distribution, which creates and finalizes the command object if
-         necessary and then invokes its 'run()' method.
-         """
-         self.distribution.run_command(command)
-
-     def get_sub_commands(self):
-         """Determine the sub-commands that are relevant in the current
-         distribution (ie., that need to be run). This is based on the
-         'sub_commands' class attribute: each tuple in that list may include
-         a method that we call to determine if the subcommand needs to be
-         run for the current distribution. Return a list of command names.
-         """
-         commands = []
-         for (cmd_name, method) in self.sub_commands:
-             if method is None or method(self):
-                 commands.append(cmd_name)
-         return commands
-
-     # -- External world manipulation -----------------------------------
-
-     def warn(self, msg):
-         log.warn("warning: %s: %s\n", self.get_command_name(), msg)
-
-     def execute(self, func, args, msg=None, level=1):
-         util.execute(func, args, msg, dry_run=self.dry_run)
-
-     def mkpath(self, name, mode=0o777):
-         dir_util.mkpath(name, mode, dry_run=self.dry_run)
-
-     def copy_file(
-         self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1
-     ):
-         """Copy a file respecting verbose, dry-run and force flags. (The
-         former two default to whatever is in the Distribution object, and
-         the latter defaults to false for commands that don't define it.)"""
-         return file_util.copy_file(
-             infile,
-             outfile,
-             preserve_mode,
-             preserve_times,
-             not self.force,
-             link,
-             dry_run=self.dry_run,
-         )
-
-     def copy_tree(
-         self,
-         infile,
-         outfile,
-         preserve_mode=1,
-         preserve_times=1,
-         preserve_symlinks=0,
-         level=1,
-     ):
-         """Copy an entire directory tree respecting verbose, dry-run,
-         and force flags.
-         """
-         return dir_util.copy_tree(
-             infile,
-             outfile,
-             preserve_mode,
-             preserve_times,
-             preserve_symlinks,
-             not self.force,
-             dry_run=self.dry_run,
-         )
-
-     def move_file(self, src, dst, level=1):
-         """Move a file respecting dry-run flag."""
-         return file_util.move_file(src, dst, dry_run=self.dry_run)
-
-     def spawn(self, cmd, search_path=1, level=1):
-         """Spawn an external command respecting dry-run flag."""
-         from distutils.spawn import spawn
-
-         spawn(cmd, search_path, dry_run=self.dry_run)
-
-     def make_archive(
-         self, base_name, format, root_dir=None, base_dir=None, owner=None, group=None
-     ):
-         return archive_util.make_archive(
-             base_name,
-             format,
-             root_dir,
-             base_dir,
-             dry_run=self.dry_run,
-             owner=owner,
-             group=group,
-         )
-
-     def make_file(
-         self, infiles, outfile, func, args, exec_msg=None, skip_msg=None, level=1
-     ):
-         """Special case of 'execute()' for operations that process one or
-         more input files and generate one output file. Works just like
-         'execute()', except the operation is skipped and a different
-         message printed if 'outfile' already exists and is newer than all
-         files listed in 'infiles'. If the command defined 'self.force',
-         and it is true, then the command is unconditionally run -- does no
-         timestamp checks.
-         """
-         if skip_msg is None:
-             skip_msg = "skipping %s (inputs unchanged)" % outfile
-
-         # Allow 'infiles' to be a single string
-         if isinstance(infiles, str):
-             infiles = (infiles,)
-         elif not isinstance(infiles, (list, tuple)):
-             raise TypeError("'infiles' must be a string, or a list or tuple of strings")
-
-         if exec_msg is None:
-             exec_msg = "generating {} from {}".format(outfile, ', '.join(infiles))
-
-         # If 'outfile' must be regenerated (either because it doesn't
-         # exist, is out-of-date, or the 'force' flag is true) then
-         # perform the action that presumably regenerates it
-         if self.force or dep_util.newer_group(infiles, outfile):
-             self.execute(func, args, exec_msg, level)
-         # Otherwise, print the "skip" message
-         else:
-             log.debug(skip_msg)
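A minimal sketch of the subclass contract described in the docstrings above (the command name and option are illustrative):

```python
from distutils.cmd import Command

class greet(Command):
    """Toy command that prints a greeting."""

    description = "print a greeting"
    user_options = [("name=", None, "who to greet")]

    def initialize_options(self):
        self.name = None                  # declare every option with a default

    def finalize_options(self):
        self.name = self.name or "world"  # resolve defaults/dependencies here

    def run(self):
        print("hello, %s" % self.name)
```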
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py DELETED
@@ -1,191 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import copy
- import logging
- import numpy as np
- from typing import List, Optional, Union
- import torch
-
- from detectron2.config import configurable
-
- from . import detection_utils as utils
- from . import transforms as T
-
- """
- This file contains the default mapping that's applied to "dataset dicts".
- """
-
- __all__ = ["DatasetMapper"]
-
-
- class DatasetMapper:
-     """
-     A callable which takes a dataset dict in Detectron2 Dataset format,
-     and map it into a format used by the model.
-
-     This is the default callable to be used to map your dataset dict into training data.
-     You may need to follow it to implement your own one for customized logic,
-     such as a different way to read or transform images.
-     See :doc:`/tutorials/data_loading` for details.
-
-     The callable currently does the following:
-
-     1. Read the image from "file_name"
-     2. Applies cropping/geometric transforms to the image and annotations
-     3. Prepare data and annotations to Tensor and :class:`Instances`
-     """
-
-     @configurable
-     def __init__(
-         self,
-         is_train: bool,
-         *,
-         augmentations: List[Union[T.Augmentation, T.Transform]],
-         image_format: str,
-         use_instance_mask: bool = False,
-         use_keypoint: bool = False,
-         instance_mask_format: str = "polygon",
-         keypoint_hflip_indices: Optional[np.ndarray] = None,
-         precomputed_proposal_topk: Optional[int] = None,
-         recompute_boxes: bool = False,
-     ):
-         """
-         NOTE: this interface is experimental.
-
-         Args:
-             is_train: whether it's used in training or inference
-             augmentations: a list of augmentations or deterministic transforms to apply
-             image_format: an image format supported by :func:`detection_utils.read_image`.
-             use_instance_mask: whether to process instance segmentation annotations, if available
-             use_keypoint: whether to process keypoint annotations if available
-             instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
-                 masks into this format.
-             keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
-             precomputed_proposal_topk: if given, will load pre-computed
-                 proposals from dataset_dict and keep the top k proposals for each image.
-             recompute_boxes: whether to overwrite bounding box annotations
-                 by computing tight bounding boxes from instance mask annotations.
-         """
-         if recompute_boxes:
-             assert use_instance_mask, "recompute_boxes requires instance masks"
-         # fmt: off
-         self.is_train               = is_train
-         self.augmentations          = T.AugmentationList(augmentations)
-         self.image_format           = image_format
-         self.use_instance_mask      = use_instance_mask
-         self.instance_mask_format   = instance_mask_format
-         self.use_keypoint           = use_keypoint
-         self.keypoint_hflip_indices = keypoint_hflip_indices
-         self.proposal_topk          = precomputed_proposal_topk
-         self.recompute_boxes        = recompute_boxes
-         # fmt: on
-         logger = logging.getLogger(__name__)
-         mode = "training" if is_train else "inference"
-         logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
-
-     @classmethod
-     def from_config(cls, cfg, is_train: bool = True):
-         augs = utils.build_augmentation(cfg, is_train)
-         if cfg.INPUT.CROP.ENABLED and is_train:
-             augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
-             recompute_boxes = cfg.MODEL.MASK_ON
-         else:
-             recompute_boxes = False
-
-         ret = {
-             "is_train": is_train,
-             "augmentations": augs,
-             "image_format": cfg.INPUT.FORMAT,
-             "use_instance_mask": cfg.MODEL.MASK_ON,
-             "instance_mask_format": cfg.INPUT.MASK_FORMAT,
-             "use_keypoint": cfg.MODEL.KEYPOINT_ON,
-             "recompute_boxes": recompute_boxes,
-         }
-
-         if cfg.MODEL.KEYPOINT_ON:
-             ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
-
-         if cfg.MODEL.LOAD_PROPOSALS:
-             ret["precomputed_proposal_topk"] = (
-                 cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
-                 if is_train
-                 else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
-             )
-         return ret
-
-     def _transform_annotations(self, dataset_dict, transforms, image_shape):
-         # USER: Modify this if you want to keep them for some reason.
-         for anno in dataset_dict["annotations"]:
-             if not self.use_instance_mask:
-                 anno.pop("segmentation", None)
-             if not self.use_keypoint:
-                 anno.pop("keypoints", None)
-
-         # USER: Implement additional transformations if you have other types of data
-         annos = [
-             utils.transform_instance_annotations(
-                 obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
-             )
-             for obj in dataset_dict.pop("annotations")
-             if obj.get("iscrowd", 0) == 0
-         ]
-         instances = utils.annotations_to_instances(
-             annos, image_shape, mask_format=self.instance_mask_format
-         )
-
-         # After transforms such as cropping are applied, the bounding box may no longer
-         # tightly bound the object. As an example, imagine a triangle object
-         # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
-         # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
-         # the intersection of original bounding box and the cropping box.
-         if self.recompute_boxes:
-             instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
-         dataset_dict["instances"] = utils.filter_empty_instances(instances)
-
-     def __call__(self, dataset_dict):
-         """
-         Args:
-             dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
-
-         Returns:
-             dict: a format that builtin models in detectron2 accept
-         """
-         dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
-         # USER: Write your own image loading if it's not from a file
-         image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
-         utils.check_image_size(dataset_dict, image)
-
-         # USER: Remove if you don't do semantic/panoptic segmentation.
-         if "sem_seg_file_name" in dataset_dict:
-             sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
-         else:
-             sem_seg_gt = None
-
-         aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
-         transforms = self.augmentations(aug_input)
-         image, sem_seg_gt = aug_input.image, aug_input.sem_seg
-
-         image_shape = image.shape[:2]  # h, w
-         # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
-         # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
-         # Therefore it's important to use torch.Tensor.
-         dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
-         if sem_seg_gt is not None:
-             dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
-
-         # USER: Remove if you don't use pre-computed proposals.
-         # Most users would not need this feature.
-         if self.proposal_topk is not None:
-             utils.transform_proposals(
-                 dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
-             )
-
-         if not self.is_train:
-             # USER: Modify this if you want to keep them for some reason.
-             dataset_dict.pop("annotations", None)
-             dataset_dict.pop("sem_seg_file_name", None)
-             return dataset_dict
-
-         if "annotations" in dataset_dict:
-             self._transform_annotations(dataset_dict, transforms, image_shape)
-
-         return dataset_dict
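A sketch of how this mapper is typically wired into a training dataloader (config-driven; assumes a working detectron2 setup with the named dataset registered):

```python
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, build_detection_train_loader

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("coco_2017_train",)
mapper = DatasetMapper(cfg, is_train=True)  # @configurable routes this through from_config
loader = build_detection_train_loader(cfg, mapper=mapper)
```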
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/build.py DELETED
@@ -1,24 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- from detectron2.utils.registry import Registry
- 
- PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR")
- PROPOSAL_GENERATOR_REGISTRY.__doc__ = """
- Registry for proposal generator, which produces object proposals from feature maps.
- 
- The registered object will be called with `obj(cfg, input_shape)`.
- The call should return a `nn.Module` object.
- """
- 
- from . import rpn, rrpn  # noqa F401 isort:skip
- 
- 
- def build_proposal_generator(cfg, input_shape):
-     """
-     Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
-     The name can be "PrecomputedProposals" to use no proposal generator.
-     """
-     name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
-     if name == "PrecomputedProposals":
-         return None
- 
-     return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
 
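The registry above makes proposal generators pluggable by config name. A minimal sketch of registering a custom generator; the class name and its empty body are hypothetical, and the registry is imported from the module this file defines:

    import torch.nn as nn
    from detectron2.config import get_cfg
    from detectron2.modeling.proposal_generator.build import (
        PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator,
    )

    @PROPOSAL_GENERATOR_REGISTRY.register()
    class MyProposalGenerator(nn.Module):  # hypothetical example class
        def __init__(self, cfg, input_shape):
            super().__init__()

    cfg = get_cfg()
    cfg.MODEL.PROPOSAL_GENERATOR.NAME = "MyProposalGenerator"
    # build_proposal_generator(cfg, input_shape) would now return a
    # MyProposalGenerator instance instead of the default RPN.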
 
spaces/BetterAPI/BetterChat/src/lib/utils/sum.ts DELETED
@@ -1,3 +0,0 @@
- export function sum(nums: number[]): number {
- 	return nums.reduce((a, b) => a + b, 0);
- }
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/compat.py DELETED
@@ -1,350 +0,0 @@
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # http://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- 
- import copy
- import datetime
- import sys
- import inspect
- import warnings
- import hashlib
- from http.client import HTTPMessage
- import logging
- import shlex
- import re
- import os
- from collections import OrderedDict
- from collections.abc import MutableMapping
- from math import floor
- 
- from botocore.vendored import six
- from botocore.exceptions import MD5UnavailableError
- from dateutil.tz import tzlocal
- from urllib3 import exceptions
- 
- logger = logging.getLogger(__name__)
- 
- 
- class HTTPHeaders(HTTPMessage):
-     pass
- 
- from urllib.parse import (
-     quote,
-     urlencode,
-     unquote,
-     unquote_plus,
-     urlparse,
-     urlsplit,
-     urlunsplit,
-     urljoin,
-     parse_qsl,
-     parse_qs,
- )
- from http.client import HTTPResponse
- from io import IOBase as _IOBase
- from base64 import encodebytes
- from email.utils import formatdate
- from itertools import zip_longest
- file_type = _IOBase
- zip = zip
- 
- # In python3, unquote takes a str() object, url decodes it,
- # then takes the bytestring and decodes it to utf-8.
- unquote_str = unquote_plus
- 
- def set_socket_timeout(http_response, timeout):
-     """Set the timeout of the socket from an HTTPResponse.
- 
-     :param http_response: An instance of ``httplib.HTTPResponse``
- 
-     """
-     http_response._fp.fp.raw._sock.settimeout(timeout)
- 
- def accepts_kwargs(func):
-     # In python3.4.1, there's backwards incompatible
-     # changes when using getargspec with functools.partials.
-     return inspect.getfullargspec(func)[2]
- 
- def ensure_unicode(s, encoding=None, errors=None):
-     # NOOP in Python 3, because every string is already unicode
-     return s
- 
- def ensure_bytes(s, encoding='utf-8', errors='strict'):
-     if isinstance(s, str):
-         return s.encode(encoding, errors)
-     if isinstance(s, bytes):
-         return s
-     raise ValueError(f"Expected str or bytes, received {type(s)}.")
- 
- 
- try:
-     import xml.etree.cElementTree as ETree
- except ImportError:
-     # cElementTree does not exist from Python3.9+
-     import xml.etree.ElementTree as ETree
- XMLParseError = ETree.ParseError
- import json
- 
- 
- def filter_ssl_warnings():
-     # Ignore warnings related to SNI as it is not being used in validations.
-     warnings.filterwarnings(
-         'ignore',
-         message="A true SSLContext object is not available.*",
-         category=exceptions.InsecurePlatformWarning,
-         module=r".*urllib3\.util\.ssl_",
-     )
- 
- 
- @classmethod
- def from_dict(cls, d):
-     new_instance = cls()
-     for key, value in d.items():
-         new_instance[key] = value
-     return new_instance
- 
- 
- @classmethod
- def from_pairs(cls, pairs):
-     new_instance = cls()
-     for key, value in pairs:
-         new_instance[key] = value
-     return new_instance
- 
- 
- HTTPHeaders.from_dict = from_dict
- HTTPHeaders.from_pairs = from_pairs
- 
- 
- def copy_kwargs(kwargs):
-     """
-     This used to be a compat shim for 2.6 but is now just an alias.
-     """
-     copy_kwargs = copy.copy(kwargs)
-     return copy_kwargs
- 
- 
- def total_seconds(delta):
-     """
-     Returns the total seconds in a ``datetime.timedelta``.
- 
-     This used to be a compat shim for 2.6 but is now just an alias.
- 
-     :param delta: The timedelta object
-     :type delta: ``datetime.timedelta``
-     """
-     return delta.total_seconds()
- 
- 
- # Checks to see if md5 is available on this system. A given system might not
- # have access to it for various reasons, such as FIPS mode being enabled.
- try:
-     hashlib.md5()
-     MD5_AVAILABLE = True
- except ValueError:
-     MD5_AVAILABLE = False
- 
- 
- def get_md5(*args, **kwargs):
-     """
-     Attempts to get an md5 hashing object.
- 
-     :param raise_error_if_unavailable: raise an error if md5 is unavailable on
-         this system. If False, None will be returned if it is unavailable.
-     :type raise_error_if_unavailable: bool
-     :param args: Args to pass to the MD5 constructor
-     :param kwargs: Key word arguments to pass to the MD5 constructor
-     :return: An MD5 hashing object if available. If it is unavailable, None
-         is returned if raise_error_if_unavailable is set to False.
-     """
-     if MD5_AVAILABLE:
-         return hashlib.md5(*args, **kwargs)
-     else:
-         raise MD5UnavailableError()
- 
- 
- def compat_shell_split(s, platform=None):
-     if platform is None:
-         platform = sys.platform
- 
-     if platform == "win32":
-         return _windows_shell_split(s)
-     else:
-         return shlex.split(s)
- 
- 
- def _windows_shell_split(s):
-     """Splits up a windows command as the built-in command parser would.
- 
-     Windows has potentially bizarre rules depending on where you look. When
-     spawning a process via the Windows C runtime (which is what python does
-     when you call popen) the rules are as follows:
- 
-     https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
- 
-     To summarize:
- 
-     * Only space and tab are valid delimiters
-     * Double quotes are the only valid quotes
-     * Backslash is interpreted literally unless it is part of a chain that
-       leads up to a double quote. Then the backslashes escape the backslashes,
-       and if there is an odd number the final backslash escapes the quote.
- 
-     :param s: The command string to split up into parts.
-     :return: A list of command components.
-     """
-     if not s:
-         return []
- 
-     components = []
-     buff = []
-     is_quoted = False
-     num_backslashes = 0
-     for character in s:
-         if character == '\\':
-             # We can't simply append backslashes because we don't know if
-             # they are being used as escape characters or not. Instead we
-             # keep track of how many we've encountered and handle them when
-             # we encounter a different character.
-             num_backslashes += 1
-         elif character == '"':
-             if num_backslashes > 0:
-                 # The backslashes are in a chain leading up to a double
-                 # quote, so they are escaping each other.
-                 buff.append('\\' * int(floor(num_backslashes / 2)))
-                 remainder = num_backslashes % 2
-                 num_backslashes = 0
-                 if remainder == 1:
-                     # The number of backslashes is uneven, so they are also
-                     # escaping the double quote, so it needs to be added to
-                     # the current component buffer.
-                     buff.append('"')
-                     continue
- 
-             # We've encountered a double quote that is not escaped,
-             # so we toggle is_quoted.
-             is_quoted = not is_quoted
- 
-             # If there are quotes, then we may want an empty string. To be
-             # safe, we add an empty string to the buffer so that we make
-             # sure it sticks around if there's nothing else between quotes.
-             # If there is other stuff between quotes, the empty string will
-             # disappear during the joining process.
-             buff.append('')
-         elif character in [' ', '\t'] and not is_quoted:
-             # Since the backslashes aren't leading up to a quote, we put in
-             # the exact number of backslashes.
-             if num_backslashes > 0:
-                 buff.append('\\' * num_backslashes)
-                 num_backslashes = 0
- 
-             # Excess whitespace is ignored, so only add the components list
-             # if there is anything in the buffer.
-             if buff:
-                 components.append(''.join(buff))
-                 buff = []
-         else:
-             # Since the backslashes aren't leading up to a quote, we put in
-             # the exact number of backslashes.
-             if num_backslashes > 0:
-                 buff.append('\\' * num_backslashes)
-                 num_backslashes = 0
-             buff.append(character)
- 
-     # Quotes must be terminated.
-     if is_quoted:
-         raise ValueError(f"No closing quotation in string: {s}")
- 
-     # There may be some leftover backslashes, so we need to add them in.
-     # There's no quote so we add the exact number.
-     if num_backslashes > 0:
-         buff.append('\\' * num_backslashes)
- 
-     # Add the final component in if there is anything in the buffer.
-     if buff:
-         components.append(''.join(buff))
- 
-     return components
- 
- 
- def get_tzinfo_options():
-     # Due to dateutil/dateutil#197, Windows may fail to parse times in the past
-     # with the system clock. We can alternatively fallback to tzwininfo when
-     # this happens, which will get time info from the Windows registry.
-     if sys.platform == 'win32':
-         from dateutil.tz import tzwinlocal
- 
-         return (tzlocal, tzwinlocal)
-     else:
-         return (tzlocal,)
- 
- 
- # Detect if CRT is available for use
- try:
-     import awscrt.auth
- 
-     # Allow user opt-out if needed
-     disabled = os.environ.get('BOTO_DISABLE_CRT', "false")
-     HAS_CRT = not disabled.lower() == 'true'
- except ImportError:
-     HAS_CRT = False
- 
- 
- ########################################################
- #           urllib3 compat backports                   #
- ########################################################
- 
- # Vendoring IPv6 validation regex patterns from urllib3
- # https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py
- IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
- IPV4_RE = re.compile("^" + IPV4_PAT + "$")
- HEX_PAT = "[0-9A-Fa-f]{1,4}"
- LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
- _subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
- _variations = [
-     #                            6( h16 ":" ) ls32
-     "(?:%(hex)s:){6}%(ls32)s",
-     #                       "::" 5( h16 ":" ) ls32
-     "::(?:%(hex)s:){5}%(ls32)s",
-     # [               h16 ] "::" 4( h16 ":" ) ls32
-     "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
-     # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
-     "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
-     # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
-     "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
-     # [ *3( h16 ":" ) h16 ] "::" h16 ":"   ls32
-     "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
-     # [ *4( h16 ":" ) h16 ] "::"              ls32
-     "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
-     # [ *5( h16 ":" ) h16 ] "::"              h16
-     "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
-     # [ *6( h16 ":" ) h16 ] "::"
-     "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
- ]
- 
- UNRESERVED_PAT = (
-     r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
- )
- IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
- ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
- IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
- IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
- 
- # These are the characters that are stripped by post-bpo-43882 urlparse().
- UNSAFE_URL_CHARS = frozenset('\t\r\n')
- 
- # Detect if gzip is available for use
- try:
-     import gzip
-     HAS_GZIP = True
- except ImportError:
-     HAS_GZIP = False
 
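The splitting rules in `_windows_shell_split`'s docstring are easiest to see on a concrete string. A small sketch of the expected behavior (output reasoned from the rules above, not from a captured run):

    from botocore.compat import compat_shell_split

    # Backslashes not leading up to a quote are literal; the quoted
    # segment keeps its internal space as part of one component.
    parts = compat_shell_split(r'aws s3 cp "C:\Program Files\app" dst',
                               platform='win32')
    print(parts)
    # ['aws', 's3', 'cp', 'C:\\Program Files\\app', 'dst']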
 
spaces/BigSalmon/BackTranslation/app.py DELETED
@@ -1,117 +0,0 @@
- from deep_translator import GoogleTranslator
- import streamlit as st
- 
- st.set_page_config(page_title='Language Translator (Adaptation of https://github.com/Ompramod9921/Language_translator)')
- 
- hide_streamlit_style = """
- <style>
- #MainMenu {visibility: hidden;}
- footer {visibility: hidden;}
- footer:after {
- content: 'Adaptation of https://github.com/Ompramod9921/Language_translator (om pram)'
- visibility: visible;
- }
- </style>
- """
- st.markdown(hide_streamlit_style, unsafe_allow_html=True)
- 
- st.markdown("<h1 style='text-align: center; font-size: 24px; color: voilet;font-family: Droid Sans'>Language Translator (Adaptation of https://github.com/Ompramod9921/Language_translator)</h1>", unsafe_allow_html=True)
- st.write("****")
- 
- text = st.text_area("Enter text:",height=None,max_chars=None,key=None,help="Enter your text here -")
- st.write("****")
- 
- option1 = st.selectbox('Input language',('english','hindi','afrikaans', 'albanian', 'amharic', 'arabic', 'armenian', 'azerbaijani', 'basque', 'belarusian', 'bengali', 'bosnian', 'bulgarian', 'catalan', 'cebuano', 'chichewa', 'chinese', 'chinese (simplified)', 'chinese (traditional)', 'corsican', 'croatian', 'czech', 'danish', 'dutch', 'esperanto', 'estonian', 'filipino', 'finnish', 'french', 'frisian', 'galician', 'georgian', 'german', 'greek', 'gujarati', 'haitian creole', 'hausa', 'hawaiian', 'hebrew', 'hmong', 'hungarian', 'icelandic', 'igbo', 'indonesian', 'irish', 'italian', 'japanese', 'javanese', 'kannada', 'kazakh', 'khmer', 'korean', 'kurdish (kurmanji)', 'kyrgyz', 'lao', 'latin', 'latvian', 'lithuanian', 'luxembourgish', 'macedonian', 'malagasy', 'malay', 'malayalam', 'maltese', 'maori', 'marathi', 'mongolian', 'myanmar (burmese)', 'nepali', 'norwegian', 'pashto', 'persian', 'polish', 'portuguese', 'punjabi', 'romanian', 'russian', 'samoan', 'scots gaelic', 'serbian', 'sesotho', 'shona', 'sindhi', 'sinhala', 'slovak', 'slovenian', 'somali', 'spanish', 'sundanese', 'swahili', 'swedish', 'tajik', 'tamil', 'telugu', 'thai', 'turkish', 'ukrainian', 'urdu', 'uzbek', 'vietnamese', 'welsh', 'xhosa', 'yiddish', 'yoruba', 'zulu', 'Filipino'))
- option2 = st.selectbox('Output language',('english','hindi','afrikaans', 'albanian', 'amharic', 'arabic', 'armenian', 'azerbaijani', 'basque', 'belarusian', 'bengali', 'bosnian', 'bulgarian', 'catalan', 'cebuano', 'chichewa', 'chinese', 'chinese (simplified)', 'chinese (traditional)', 'corsican', 'croatian', 'czech', 'danish', 'dutch', 'esperanto', 'estonian', 'filipino', 'finnish', 'french', 'frisian', 'galician', 'georgian', 'german', 'greek', 'gujarati', 'haitian creole', 'hausa', 'hawaiian', 'hebrew', 'hmong', 'hungarian', 'icelandic', 'igbo', 'indonesian', 'irish', 'italian', 'japanese', 'javanese', 'kannada', 'kazakh', 'khmer', 'korean', 'kurdish (kurmanji)', 'kyrgyz', 'lao', 'latin', 'latvian', 'lithuanian', 'luxembourgish', 'macedonian', 'malagasy', 'malay', 'malayalam', 'maltese', 'maori', 'marathi', 'mongolian', 'myanmar (burmese)', 'nepali', 'norwegian', 'pashto', 'persian', 'polish', 'portuguese', 'punjabi', 'romanian', 'russian', 'samoan', 'scots gaelic', 'serbian', 'sesotho', 'shona', 'sindhi', 'sinhala', 'slovak', 'slovenian', 'somali', 'spanish', 'sundanese', 'swahili', 'swedish', 'tajik', 'tamil', 'telugu', 'thai', 'turkish', 'ukrainian', 'urdu', 'uzbek', 'vietnamese', 'welsh', 'xhosa', 'yiddish', 'yoruba', 'zulu', 'Filipino'))
- st.write("****")
- 
- if st.button('Translate Sentence'):
-     st.write(" ")
-     st.write(" ")
-     if text == "":
-         st.warning('Please **enter text** for translation')
- 
-     else:
-         if option1 == option2 :
-             st.error("source and target language can't be the same")
-         else :
-             translated = GoogleTranslator(source=option1,target=option2).translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source=option2,target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
- if st.button('Back Translate: Multiple Languages'):
-     st.write(" ")
-     st.write(" ")
-     if text == "":
-         st.warning('Please **enter text** for translation')
-     else:
-         if option1 == option2 :
-             st.error("source and target language can't be the same")
-         else:
-             translated = GoogleTranslator(source=option1,target=option2).translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source=option2,target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
-             translated = GoogleTranslator(source=option1,target="albanian").translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source="albanian",target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
-             translated = GoogleTranslator(source=option1,target="greek").translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source="greek",target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
-             translated = GoogleTranslator(source=option1,target="italian").translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source="italian",target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
-             translated = GoogleTranslator(source=option1,target="polish").translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source="polish",target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
-             translated = GoogleTranslator(source=option1,target="spanish").translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source="spanish",target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
-             translated = GoogleTranslator(source=option1,target="galician").translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source="galician",target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
- 
-             translated = GoogleTranslator(source=option1,target="dutch").translate(text=text)
-             st.write("Translated text -")
-             st.info(str(translated))
-             translated_text = str(translated)
-             back_translated = GoogleTranslator(source="dutch",target=option1).translate(text=translated_text)
-             st.write("Back Translated text -")
-             st.info(str(back_translated))
 
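The whole app is built on one round trip: translate into a pivot language, then translate back, which is a common way to generate paraphrases. A minimal sketch of that loop with the same `deep_translator` API used above (network access assumed; the pivot choice is arbitrary):

    from deep_translator import GoogleTranslator

    def back_translate(text, source='english', pivot='german'):
        # Translate into the pivot language, then back into the source language.
        forward = GoogleTranslator(source=source, target=pivot).translate(text=text)
        return GoogleTranslator(source=pivot, target=source).translate(text=forward)

    print(back_translate('The weather is lovely today.'))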
 
spaces/CVPR/LIVE/thrust/thrust/detail/static_assert.h DELETED
@@ -1,92 +0,0 @@
- /*
-  *  Copyright 2008-2018 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
- 
- /*
-  * (C) Copyright John Maddock 2000.
-  *
-  * Distributed under the Boost Software License, Version 1.0.
-  * (See accompanying NOTICE file for the complete license)
-  *
-  * For more information, see http://www.boost.org
-  */
- 
- #pragma once
- 
- #include <thrust/detail/config.h>
- #include <thrust/detail/type_traits.h>
- #include <thrust/detail/preprocessor.h>
- 
- namespace thrust
- {
- 
- namespace detail
- {
- 
- template <typename, bool x>
- struct depend_on_instantiation
- {
-   THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT bool value = x;
- };
- 
- #if THRUST_CPP_DIALECT >= 2011
- 
- #  if THRUST_CPP_DIALECT >= 2017
- #    define THRUST_STATIC_ASSERT(B) static_assert(B)
- #  else
- #    define THRUST_STATIC_ASSERT(B) static_assert(B, "static assertion failed")
- #  endif
- #  define THRUST_STATIC_ASSERT_MSG(B, msg) static_assert(B, msg)
- 
- #else // Older than C++11.
- 
- // HP aCC cannot deal with missing names for template value parameters.
- template <bool x> struct STATIC_ASSERTION_FAILURE;
- 
- template <> struct STATIC_ASSERTION_FAILURE<true> {};
- 
- // HP aCC cannot deal with missing names for template value parameters.
- template <int x> struct static_assert_test {};
- 
- #if (   (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC)               \
-      && (THRUST_GCC_VERSION >= 40800))                                   \
-  || (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG)
-   // Clang and GCC 4.8+ will complain about this typedef being unused unless we
-   // annotate it as such.
- #  define THRUST_STATIC_ASSERT(B)                                        \
-     typedef ::thrust::detail::static_assert_test<                        \
-       sizeof(::thrust::detail::STATIC_ASSERTION_FAILURE<(bool)(B)>)      \
-     >                                                                    \
-     THRUST_PP_CAT2(thrust_static_assert_typedef_, __LINE__)              \
-     __attribute__((unused))                                              \
-     /**/
- #else
- #  define THRUST_STATIC_ASSERT(B)                                        \
-     typedef ::thrust::detail::static_assert_test<                        \
-       sizeof(::thrust::detail::STATIC_ASSERTION_FAILURE<(bool)(B)>)      \
-     >                                                                    \
-     THRUST_PP_CAT2(thrust_static_assert_typedef_, __LINE__)              \
-     /**/
- #endif
- 
- #define THRUST_STATIC_ASSERT_MSG(B, msg) THRUST_STATIC_ASSERT(B)
- 
- #endif // THRUST_CPP_DIALECT >= 2011
- 
- } // namespace detail
- 
- } // end namespace thrust
- 
- 
 
spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/iterator/is_discard_iterator.h DELETED
@@ -1,40 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
- 
- #pragma once
- 
- #include <thrust/detail/config.h>
- #include <thrust/detail/type_traits.h>
- #include <thrust/iterator/discard_iterator.h>
- 
- namespace thrust
- {
- namespace detail
- {
- 
- template <typename Iterator>
- struct is_discard_iterator
-   : public thrust::detail::false_type
- {};
- 
- template <typename System>
- struct is_discard_iterator< thrust::discard_iterator<System> >
-   : public thrust::detail::true_type
- {};
- 
- } // end namespace detail
- } // end namespace thrust
- 
 
spaces/CVPR/Text2Human/app.py DELETED
@@ -1,158 +0,0 @@
- #!/usr/bin/env python
- 
- from __future__ import annotations
- 
- import argparse
- import os
- import pathlib
- import subprocess
- 
- import gradio as gr
- 
- if os.getenv('SYSTEM') == 'spaces':
-     import mim
- 
-     mim.uninstall('mmcv-full', confirm_yes=True)
-     mim.install('mmcv-full==1.5.2', is_yes=True)
- 
-     with open('patch') as f:
-         subprocess.run('patch -p1'.split(), cwd='Text2Human', stdin=f)
- 
- from model import Model
- 
- DESCRIPTION = '''# Text2Human
- 
- This is an unofficial demo for <a href="https://github.com/yumingj/Text2Human">https://github.com/yumingj/Text2Human</a> made by <a href="https://huggingface.co/spaces/hysts/Text2Human">@hysts</a>.
- You can modify sample steps and seeds. By varying seeds, you can sample different human images under the same pose, shape description, and texture description. The larger the sample steps, the better quality of the generated images. (The default value of sample steps is 256 in the original repo.)
- 
- Label image generation step can be skipped. However, in that case, the input label image must be 512x256 in size and must contain only the specified colors.
- '''
- FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=hysts.text2human" />'
- 
- 
- def parse_args() -> argparse.Namespace:
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--device', type=str, default='cpu')
-     parser.add_argument('--theme', type=str)
-     parser.add_argument('--share', action='store_true')
-     parser.add_argument('--port', type=int)
-     parser.add_argument('--disable-queue',
-                         dest='enable_queue',
-                         action='store_false')
-     return parser.parse_args()
- 
- 
- def set_example_image(example: list) -> dict:
-     return gr.Image.update(value=example[0])
- 
- 
- def set_example_text(example: list) -> dict:
-     return gr.Textbox.update(value=example[0])
- 
- 
- def main():
-     args = parse_args()
-     model = Model(args.device)
- 
-     with gr.Blocks(theme=args.theme, css='style.css') as demo:
-         gr.Markdown(DESCRIPTION)
- 
-         with gr.Row():
-             with gr.Column():
-                 with gr.Row():
-                     input_image = gr.Image(label='Input Pose Image',
-                                            type='pil',
-                                            elem_id='input-image')
-                     pose_data = gr.Variable()
-                 with gr.Row():
-                     paths = sorted(pathlib.Path('pose_images').glob('*.png'))
-                     example_images = gr.Dataset(components=[input_image],
-                                                 samples=[[path.as_posix()]
-                                                          for path in paths])
- 
-                 with gr.Row():
-                     shape_text = gr.Textbox(
-                         label='Shape Description',
-                         placeholder=
-                         '''<gender>, <sleeve length>, <length of lower clothing>, <outer clothing type>, <other accessories1>, ...
- Note: The outer clothing type and accessories can be omitted.''')
-                 with gr.Row():
-                     shape_example_texts = gr.Dataset(
-                         components=[shape_text],
-                         samples=[['man, sleeveless T-shirt, long pants'],
-                                  ['woman, short-sleeve T-shirt, short jeans']])
-                 with gr.Row():
-                     generate_label_button = gr.Button('Generate Label Image')
- 
-             with gr.Column():
-                 with gr.Row():
-                     label_image = gr.Image(label='Label Image',
-                                            type='numpy',
-                                            elem_id='label-image')
- 
-                 with gr.Row():
-                     texture_text = gr.Textbox(
-                         label='Texture Description',
-                         placeholder=
-                         '''<upper clothing texture>, <lower clothing texture>, <outer clothing texture>
- Note: Currently, only 5 types of textures are supported, i.e., pure color, stripe/spline, plaid/lattice, floral, denim.'''
-                     )
-                 with gr.Row():
-                     texture_example_texts = gr.Dataset(
-                         components=[texture_text],
-                         samples=[['pure color, denim'], ['floral, stripe']])
-                 with gr.Row():
-                     sample_steps = gr.Slider(10,
-                                              300,
-                                              value=10,
-                                              step=10,
-                                              label='Sample Steps')
-                 with gr.Row():
-                     seed = gr.Slider(0, 1000000, value=0, step=1, label='Seed')
-                 with gr.Row():
-                     generate_human_button = gr.Button('Generate Human')
- 
-             with gr.Column():
-                 with gr.Row():
-                     result = gr.Image(label='Result',
-                                       type='numpy',
-                                       elem_id='result-image')
- 
-         gr.Markdown(FOOTER)
- 
-         input_image.change(fn=model.process_pose_image,
-                            inputs=input_image,
-                            outputs=pose_data)
-         generate_label_button.click(fn=model.generate_label_image,
-                                     inputs=[
-                                         pose_data,
-                                         shape_text,
-                                     ],
-                                     outputs=label_image)
-         generate_human_button.click(fn=model.generate_human,
-                                     inputs=[
-                                         label_image,
-                                         texture_text,
-                                         sample_steps,
-                                         seed,
-                                     ],
-                                     outputs=result)
-         example_images.click(fn=set_example_image,
-                              inputs=example_images,
-                              outputs=example_images.components)
-         shape_example_texts.click(fn=set_example_text,
-                                   inputs=shape_example_texts,
-                                   outputs=shape_example_texts.components)
-         texture_example_texts.click(fn=set_example_text,
-                                     inputs=texture_example_texts,
-                                     outputs=texture_example_texts.components)
- 
-     demo.launch(
-         enable_queue=args.enable_queue,
-         server_port=args.port,
-         share=args.share,
-     )
- 
- 
- if __name__ == '__main__':
-     main()
 
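The app follows the usual Gradio Blocks pattern: components are declared inside layout contexts, then wired together afterwards with `.click`/`.change` callbacks. A stripped-down sketch of the same pattern:

    import gradio as gr

    def echo(text):
        return text.upper()

    with gr.Blocks() as demo:
        inp = gr.Textbox(label='Input')
        btn = gr.Button('Run')
        out = gr.Textbox(label='Output')
        # Events are attached after layout, exactly as in the app above.
        btn.click(fn=echo, inputs=inp, outputs=out)

    # demo.launch()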
 
spaces/CVPR/WALT/mmdet/core/bbox/match_costs/builder.py DELETED
@@ -1,8 +0,0 @@
- from mmcv.utils import Registry, build_from_cfg
- 
- MATCH_COST = Registry('Match Cost')
- 
- 
- def build_match_cost(cfg, default_args=None):
-     """Builder of IoU calculator."""
-     return build_from_cfg(cfg, MATCH_COST, default_args)
 
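Same config-driven registry idiom as the proposal-generator builder earlier in this commit: a cost class registers itself, and `build_from_cfg` instantiates it from a dict whose `type` key names the class. A minimal self-contained sketch with a hypothetical trivial cost class:

    from mmcv.utils import Registry, build_from_cfg

    MATCH_COST = Registry('Match Cost')

    @MATCH_COST.register_module()
    class ZeroCost:  # hypothetical trivial cost, for illustration only
        def __init__(self, weight=1.0):
            self.weight = weight

    # The config dict's 'type' key selects the registered class.
    cost = build_from_cfg(dict(type='ZeroCost', weight=2.0), MATCH_COST)
    print(cost.weight)  # 2.0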
 
spaces/CVPR/WALT/mmdet/models/dense_heads/dense_test_mixins.py DELETED
@@ -1,100 +0,0 @@
- from inspect import signature
- 
- import torch
- 
- from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms
- 
- 
- class BBoxTestMixin(object):
-     """Mixin class for test time augmentation of bboxes."""
- 
-     def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
-         """Merge augmented detection bboxes and scores.
- 
-         Args:
-             aug_bboxes (list[Tensor]): shape (n, 4*#class)
-             aug_scores (list[Tensor] or None): shape (n, #class)
-             img_shapes (list[Tensor]): shape (3, ).
- 
-         Returns:
-             tuple: (bboxes, scores)
-         """
-         recovered_bboxes = []
-         for bboxes, img_info in zip(aug_bboxes, img_metas):
-             img_shape = img_info[0]['img_shape']
-             scale_factor = img_info[0]['scale_factor']
-             flip = img_info[0]['flip']
-             flip_direction = img_info[0]['flip_direction']
-             bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
-                                        flip_direction)
-             recovered_bboxes.append(bboxes)
-         bboxes = torch.cat(recovered_bboxes, dim=0)
-         if aug_scores is None:
-             return bboxes
-         else:
-             scores = torch.cat(aug_scores, dim=0)
-             return bboxes, scores
- 
-     def aug_test_bboxes(self, feats, img_metas, rescale=False):
-         """Test det bboxes with test time augmentation.
- 
-         Args:
-             feats (list[Tensor]): the outer list indicates test-time
-                 augmentations and inner Tensor should have a shape NxCxHxW,
-                 which contains features for all images in the batch.
-             img_metas (list[list[dict]]): the outer list indicates test-time
-                 augs (multiscale, flip, etc.) and the inner list indicates
-                 images in a batch. each dict has image information.
-             rescale (bool, optional): Whether to rescale the results.
-                 Defaults to False.
- 
-         Returns:
-             list[ndarray]: bbox results of each class
-         """
-         # check with_nms argument
-         gb_sig = signature(self.get_bboxes)
-         gb_args = [p.name for p in gb_sig.parameters.values()]
-         if hasattr(self, '_get_bboxes'):
-             gbs_sig = signature(self._get_bboxes)
-         else:
-             gbs_sig = signature(self._get_bboxes_single)
-         gbs_args = [p.name for p in gbs_sig.parameters.values()]
-         assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
-             f'{self.__class__.__name__}' \
-             ' does not support test-time augmentation'
- 
-         aug_bboxes = []
-         aug_scores = []
-         aug_factors = []  # score_factors for NMS
-         for x, img_meta in zip(feats, img_metas):
-             # only one image in the batch
-             outs = self.forward(x)
-             bbox_inputs = outs + (img_meta, self.test_cfg, False, False)
-             bbox_outputs = self.get_bboxes(*bbox_inputs)[0]
-             aug_bboxes.append(bbox_outputs[0])
-             aug_scores.append(bbox_outputs[1])
-             # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3)
-             # contains additional element to adjust scores before NMS
-             if len(bbox_outputs) >= 3:
-                 aug_factors.append(bbox_outputs[2])
- 
-         # after merging, bboxes will be rescaled to the original image size
-         merged_bboxes, merged_scores = self.merge_aug_bboxes(
-             aug_bboxes, aug_scores, img_metas)
-         merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None
-         det_bboxes, det_labels = multiclass_nms(
-             merged_bboxes,
-             merged_scores,
-             self.test_cfg.score_thr,
-             self.test_cfg.nms,
-             self.test_cfg.max_per_img,
-             score_factors=merged_factors)
- 
-         if rescale:
-             _det_bboxes = det_bboxes
-         else:
-             _det_bboxes = det_bboxes.clone()
-             _det_bboxes[:, :4] *= det_bboxes.new_tensor(
-                 img_metas[0][0]['scale_factor'])
-         bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes)
-         return bbox_results
 
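`merge_aug_bboxes` above depends on mapping every augmented detection back into the original image frame before concatenation. For a horizontal flip that is just x -> width - x on the box edges; a minimal sketch of this one case (toy tensors, not mmdet's full `bbox_mapping_back`):

    import torch

    def hflip_boxes_back(boxes, img_width):
        # boxes: (N, 4) in (x1, y1, x2, y2); undo a horizontal flip.
        # Left and right edges swap roles under reflection.
        flipped = boxes.clone()
        flipped[:, 0] = img_width - boxes[:, 2]
        flipped[:, 2] = img_width - boxes[:, 0]
        return flipped

    boxes = torch.tensor([[10., 5., 30., 25.]])
    print(hflip_boxes_back(boxes, img_width=100))
    # tensor([[70.,  5., 90., 25.]])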
 
spaces/CVPR/regionclip-demo/detectron2/structures/rotated_boxes.py DELETED
@@ -1,505 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import math
- from typing import List, Tuple
- import torch
- 
- from detectron2.layers.rotated_boxes import pairwise_iou_rotated
- 
- from .boxes import Boxes, _maybe_jit_unused
- 
- 
- class RotatedBoxes(Boxes):
-     """
-     This structure stores a list of rotated boxes as a Nx5 torch.Tensor.
-     It supports some common methods about boxes
-     (`area`, `clip`, `nonempty`, etc),
-     and also behaves like a Tensor
-     (support indexing, `to(device)`, `.device`, and iteration over all boxes)
-     """
- 
-     def __init__(self, tensor: torch.Tensor):
-         """
-         Args:
-             tensor (Tensor[float]): a Nx5 matrix. Each row is
-                 (x_center, y_center, width, height, angle),
-                 in which angle is represented in degrees.
-                 While there's no strict range restriction for it,
-                 the recommended principal range is between [-180, 180) degrees.
- 
-         Assume we have a horizontal box B = (x_center, y_center, width, height),
-         where width is along the x-axis and height is along the y-axis.
-         The rotated box B_rot (x_center, y_center, width, height, angle)
-         can be seen as:
- 
-         1. When angle == 0:
-            B_rot == B
-         2. When angle > 0:
-            B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
-         3. When angle < 0:
-            B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.
- 
-         Mathematically, since the right-handed coordinate system for image space
-         is (y, x), where y is top->down and x is left->right, the 4 vertices of the
-         rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
-         the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
-         in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
-         :math:`(y_c, x_c)` is the center of the rectangle):
- 
-         .. math::
- 
-             yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,
- 
-             xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,
- 
-         which is the standard rigid-body rotation transformation.
- 
-         Intuitively, the angle is
-         (1) the rotation angle from y-axis in image space
-         to the height vector (top->down in the box's local coordinate system)
-         of the box in CCW, and
-         (2) the rotation angle from x-axis in image space
-         to the width vector (left->right in the box's local coordinate system)
-         of the box in CCW.
- 
-         More intuitively, consider the following horizontal box ABCD represented
-         in (x1, y1, x2, y2): (3, 2, 7, 4),
-         covering the [3, 7] x [2, 4] region of the continuous coordinate system
-         which looks like this:
- 
-         .. code:: none
- 
-             O--------> x
-             |
-             |  A---B
-             |  |   |
-             |  D---C
-             |
-             v y
- 
-         Note that each capital letter represents one 0-dimensional geometric point
-         instead of a 'square pixel' here.
- 
-         In the example above, using (x, y) to represent a point we have:
- 
-         .. math::
- 
-             O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)
- 
-         We name vector AB = vector DC as the width vector in box's local coordinate system, and
-         vector AD = vector BC as the height vector in box's local coordinate system. Initially,
-         when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
-         in the image space, respectively.
- 
-         For better illustration, we denote the center of the box as E,
- 
-         .. code:: none
- 
-             O--------> x
-             |
-             |  A---B
-             |  | E |
-             |  D---C
-             |
-             v y
- 
-         where the center E = ((3+7)/2, (2+4)/2) = (5, 3).
- 
-         Also,
- 
-         .. math::
- 
-             width = |AB| = |CD| = 7 - 3 = 4,
-             height = |AD| = |BC| = 4 - 2 = 2.
- 
-         Therefore, the corresponding representation for the same shape in rotated box in
-         (x_center, y_center, width, height, angle) format is:
- 
-         (5, 3, 4, 2, 0),
- 
-         Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
-         CCW (counter-clockwise) by definition. It looks like this:
- 
-         .. code:: none
- 
-             O--------> x
-             |   B-C
-             |   | |
-             |   |E|
-             |   | |
-             |   A-D
-             v y
- 
-         The center E is still located at the same point (5, 3), while the vertices
-         ABCD are rotated by 90 degrees CCW with regard to E:
-         A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)
- 
-         Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
-         vector AD or vector BC (the top->down height vector in box's local coordinate system),
-         or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
-         width vector in box's local coordinate system).
- 
-         .. math::
- 
-             width = |AB| = |CD| = 5 - 1 = 4,
-             height = |AD| = |BC| = 6 - 4 = 2.
- 
-         Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
-         by definition? It looks like this:
- 
-         .. code:: none
- 
-             O--------> x
-             |   D-A
-             |   | |
-             |   |E|
-             |   | |
-             |   C-B
-             v y
- 
-         The center E is still located at the same point (5, 3), while the vertices
-         ABCD are rotated by 90 degrees CW with regard to E:
-         A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)
- 
-         .. math::
- 
-             width = |AB| = |CD| = 5 - 1 = 4,
-             height = |AD| = |BC| = 6 - 4 = 2.
- 
-         This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
-         will be 1. However, these two will generate different RoI Pooling results and
-         should not be treated as an identical box.
- 
-         On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
-         (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
-         identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
-         equivalent to rotating the same shape 90 degrees CW.
- 
-         We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):
- 
-         .. code:: none
- 
-             O--------> x
-             |
-             |  C---D
-             |  | E |
-             |  B---A
-             |
-             v y
- 
-         .. math::
- 
-             A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),
- 
-             width = |AB| = |CD| = 7 - 3 = 4,
-             height = |AD| = |BC| = 4 - 2 = 2.
- 
-         Finally, this is a very inaccurate (heavily quantized) illustration of
-         how (5, 3, 4, 2, 60) looks like in case anyone wonders:
- 
-         .. code:: none
- 
-             O--------> x
-             |     B\
-             |    /  C
-             |   /E /
-             |  A  /
-             |   `D
-             v y
- 
-         It's still a rectangle with center of (5, 3), width of 4 and height of 2,
-         but its angle (and thus orientation) is somewhere between
-         (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
-         """
-         device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
-         tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
-         if tensor.numel() == 0:
-             # Use reshape, so we don't end up creating a new tensor that does not depend on
-             # the inputs (and consequently confuses jit)
-             tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
-         assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
- 
-         self.tensor = tensor
- 
-     def clone(self) -> "RotatedBoxes":
-         """
-         Clone the RotatedBoxes.
- 
-         Returns:
-             RotatedBoxes
-         """
-         return RotatedBoxes(self.tensor.clone())
- 
-     @_maybe_jit_unused
-     def to(self, device: torch.device):
-         # Boxes are assumed float32 and does not support to(dtype)
-         return RotatedBoxes(self.tensor.to(device=device))
- 
-     def area(self) -> torch.Tensor:
-         """
-         Computes the area of all the boxes.
- 
-         Returns:
-             torch.Tensor: a vector with areas of each box.
-         """
-         box = self.tensor
-         area = box[:, 2] * box[:, 3]
-         return area
- 
-     def normalize_angles(self) -> None:
-         """
-         Restrict angles to the range of [-180, 180) degrees
-         """
-         self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0
- 
-     def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:
-         """
-         Clip (in place) the boxes by limiting x coordinates to the range [0, width]
-         and y coordinates to the range [0, height].
- 
-         For RRPN:
-         Only clip boxes that are almost horizontal with a tolerance of
-         clip_angle_threshold to maintain backward compatibility.
- 
-         Rotated boxes beyond this threshold are not clipped for two reasons:
- 
-         1. There are potentially multiple ways to clip a rotated box to make it
-            fit within the image.
-         2. It's tricky to make the entire rectangular box fit within the image
-            and still be able to not leave out pixels of interest.
- 
-         Therefore we rely on ops like RoIAlignRotated to safely handle this.
- 
-         Args:
-             box_size (height, width): The clipping box's size.
-             clip_angle_threshold:
-                 Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees),
-                 we do the clipping as horizontal boxes.
-         """
-         h, w = box_size
- 
-         # normalize angles to be within (-180, 180] degrees
-         self.normalize_angles()
- 
-         idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]
- 
-         # convert to (x1, y1, x2, y2)
-         x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0
-         y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0
-         x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0
-         y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0
- 
-         # clip
-         x1.clamp_(min=0, max=w)
-         y1.clamp_(min=0, max=h)
-         x2.clamp_(min=0, max=w)
-         y2.clamp_(min=0, max=h)
- 
-         # convert back to (xc, yc, w, h)
-         self.tensor[idx, 0] = (x1 + x2) / 2.0
-         self.tensor[idx, 1] = (y1 + y2) / 2.0
-         # make sure widths and heights do not increase due to numerical errors
-         self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)
-         self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)
- 
-     def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
-         """
-         Find boxes that are non-empty.
-         A box is considered empty, if either of its side is no larger than threshold.
- 
-         Returns:
-             Tensor: a binary vector which represents
-             whether each box is empty (False) or non-empty (True).
-         """
-         box = self.tensor
-         widths = box[:, 2]
-         heights = box[:, 3]
-         keep = (widths > threshold) & (heights > threshold)
-         return keep
- 
-     def __getitem__(self, item) -> "RotatedBoxes":
-         """
-         Returns:
-             RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
- 
-         The following usage are allowed:
- 
-         1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
-         2. `new_boxes = boxes[2:10]`: return a slice of boxes.
-         3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
-            with `length = len(boxes)`. Nonzero elements in the vector will be selected.
- 
-         Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
-         subject to Pytorch's indexing semantics.
-         """
-         if isinstance(item, int):
-             return RotatedBoxes(self.tensor[item].view(1, -1))
-         b = self.tensor[item]
-         assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
-             item
-         )
-         return RotatedBoxes(b)
- 
-     def __len__(self) -> int:
-         return self.tensor.shape[0]
- 
-     def __repr__(self) -> str:
-         return "RotatedBoxes(" + str(self.tensor) + ")"
- 
-     def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
-         """
-         Args:
-             box_size (height, width): Size of the reference box covering
-                 [0, width] x [0, height]
-             boundary_threshold (int): Boxes that extend beyond the reference box
-                 boundary by more than boundary_threshold are considered "outside".
- 
-         For RRPN, it might not be necessary to call this function since it's common
-         for rotated box to extend to outside of the image boundaries
-         (the clip function only clips the near-horizontal boxes)
- 
-         Returns:
-             a binary vector, indicating whether each box is inside the reference box.
-         """
-         height, width = box_size
- 
-         cnt_x = self.tensor[..., 0]
-         cnt_y = self.tensor[..., 1]
-         half_w = self.tensor[..., 2] / 2.0
-         half_h = self.tensor[..., 3] / 2.0
-         a = self.tensor[..., 4]
-         c = torch.abs(torch.cos(a * math.pi / 180.0))
-         s = torch.abs(torch.sin(a * math.pi / 180.0))
-         # This basically computes the horizontal bounding rectangle of the rotated box
-         max_rect_dx = c * half_w + s * half_h
-         max_rect_dy = c * half_h + s * half_w
- 
-         inds_inside = (
-             (cnt_x - max_rect_dx >= -boundary_threshold)
-             & (cnt_y - max_rect_dy >= -boundary_threshold)
-             & (cnt_x + max_rect_dx < width + boundary_threshold)
-             & (cnt_y + max_rect_dy < height + boundary_threshold)
-         )
- 
-         return inds_inside
- 
-     def get_centers(self) -> torch.Tensor:
-         """
-         Returns:
-             The box centers in a Nx2 array of (x, y).
-         """
-         return self.tensor[:, :2]
- 
-     def scale(self, scale_x: float, scale_y: float) -> None:
-         """
-         Scale the rotated box with horizontal and vertical scaling factors
-         Note: when scale_factor_x != scale_factor_y,
-         the rotated box does not preserve the rectangular shape when the angle
-         is not a multiple of 90 degrees under resize transformation.
-         Instead, the shape is a parallelogram (that has skew)
-         Here we make an approximation by fitting a rotated rectangle to the parallelogram.
-         """
-         self.tensor[:, 0] *= scale_x
-         self.tensor[:, 1] *= scale_y
-         theta = self.tensor[:, 4] * math.pi / 180.0
-         c = torch.cos(theta)
-         s = torch.sin(theta)
- 
-         # In image space, y is top->down and x is left->right
-         # Consider the local coordintate system for the rotated box,
-         # where the box center is located at (0, 0), and the four vertices ABCD are
-         # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)
-         # the midpoint of the left edge AD of the rotated box E is:
-         # E = (A+D)/2 = (-w / 2, 0)
-         # the midpoint of the top edge AB of the rotated box F is:
-         # F(0, -h / 2)
-         # To get the old coordinates in the global system, apply the rotation transformation
-         # (Note: the right-handed coordinate system for image space is yOx):
-         # (old_x, old_y) = (s * y + c * x, c * y - s * x)
-         # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)
-         # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)
-         # After applying the scaling factor (sfx, sfy):
-         # E(new) = (-sfx * c * w / 2, sfy * s * w / 2)
-         # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)
-         # The new width after scaling tranformation becomes:
- 
-         # w(new) = |E(new) - O| * 2
-         #        = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2
-         #        = sqrt[(sfx * c)^2 + (sfy * s)^2] * w
-         # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]
-         #
-         # For example,
-         # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;
-         # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y
-         self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)
- 
-         # h(new) = |F(new) - O| * 2
-         #        = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2
-         #        = sqrt[(sfx * s)^2 + (sfy * c)^2] * h
-         # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]
-         #
-         # For example,
-         # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;
-         # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x
-         self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)
- 
-         # The angle is the rotation angle from y-axis in image space to the height
-         # vector (top->down in the box's local coordinate system) of the box in CCW.
-         #
-         # angle(new) = angle_yOx(O - F(new))
-         #            = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )
-         #            = atan2(sfx * s * h / 2, sfy * c * h / 2)
-         #            = atan2(sfx * s, sfy * c)
-         #
-         # For example,
-         # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)
-         self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi
- 
-     @classmethod
-     @_maybe_jit_unused
-     def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes":
-         """
-         Concatenates a list of RotatedBoxes into a single RotatedBoxes
- 
-         Arguments:
-             boxes_list (list[RotatedBoxes])
- 
-         Returns:
-             RotatedBoxes: the concatenated RotatedBoxes
-         """
-         assert isinstance(boxes_list, (list, tuple))
-         if len(boxes_list) == 0:
-             return cls(torch.empty(0))
-         assert all([isinstance(box, RotatedBoxes) for box in boxes_list])
- 
-         # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
-         cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
-         return cat_boxes
- 
-     @property
-     def device(self) -> torch.device:
-         return self.tensor.device
- 
-     @torch.jit.unused
-     def __iter__(self):
-         """
-         Yield a box as a Tensor of shape (5,) at a time.
-         """
-         yield from self.tensor
- 
- 
- def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None:
-     """
-     Given two lists of rotated boxes of size N and M,
-     compute the IoU (intersection over union)
-     between **all** N x M pairs of boxes.
-     The box order must be (x_center, y_center, width, height, angle).
- 
-     Args:
-         boxes1, boxes2 (RotatedBoxes):
-             two `RotatedBoxes`. Contains N & M rotated boxes, respectively.
- 
-     Returns:
-         Tensor: IoU, sized [N,M].
-     """
- 
-     return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
 
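The `(5, 3, 4, 2, 90)` example in the docstring above can be checked numerically against the stated rotation formula. A small sketch (plain PyTorch; the rotation matrix is transcribed from the docstring's `yr_i`/`xr_i` equations):

    import math
    import torch

    cx, cy, w, h, angle = 5.0, 3.0, 4.0, 2.0, 90.0
    theta = angle * math.pi / 180.0
    c, s = math.cos(theta), math.sin(theta)

    # Corners A, B, C, D of the axis-aligned box, relative to the center.
    corners = torch.tensor([[-w / 2, -h / 2], [w / 2, -h / 2],
                            [w / 2, h / 2], [-w / 2, h / 2]])
    # For row vectors (dx, dy): x' = c*dx + s*dy, y' = -s*dx + c*dy,
    # i.e. CCW rotation in image space (y pointing down), per the docstring.
    rot = torch.tensor([[c, -s], [s, c]])
    rotated = corners @ rot + torch.tensor([cx, cy])
    print(rotated)  # A=(4,5), B=(4,1), C=(6,1), D=(6,5), as in the docstring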
 
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py DELETED
@@ -1 +0,0 @@
- from .backbone import build_backbone
 
spaces/CjangCjengh/Shanghainese-TTS/monotonic_align/core.py DELETED
@@ -1,35 +0,0 @@
- import numba
-
-
- @numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True)
- def maximum_path_jit(paths, values, t_ys, t_xs):
-     b = paths.shape[0]
-     max_neg_val = -1e9
-     for i in range(int(b)):
-         path = paths[i]
-         value = values[i]
-         t_y = t_ys[i]
-         t_x = t_xs[i]
-
-         v_prev = v_cur = 0.0
-         index = t_x - 1
-
-         for y in range(t_y):
-             for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
-                 if x == y:
-                     v_cur = max_neg_val
-                 else:
-                     v_cur = value[y - 1, x]
-                 if x == 0:
-                     if y == 0:
-                         v_prev = 0.0
-                     else:
-                         v_prev = max_neg_val
-                 else:
-                     v_prev = value[y - 1, x - 1]
-                 value[y, x] += max(v_prev, v_cur)
-
-         for y in range(t_y - 1, -1, -1):
-             path[y, index] = 1
-             if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
-                 index = index - 1
 
 
spaces/CodingBillionaire/bark-voice-cloning/hubert/__init__.py DELETED
File without changes
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/blocks.py DELETED
@@ -1,342 +0,0 @@
- import torch
- import torch.nn as nn
-
- from .vit import (
-     _make_pretrained_vitb_rn50_384,
-     _make_pretrained_vitl16_384,
-     _make_pretrained_vitb16_384,
-     forward_vit,
- )
-
-
- def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore"):
-     if backbone == "vitl16_384":
-         pretrained = _make_pretrained_vitl16_384(
-             use_pretrained, hooks=hooks, use_readout=use_readout
-         )
-         scratch = _make_scratch(
-             [256, 512, 1024, 1024], features, groups=groups, expand=expand
-         )  # ViT-L/16 - 85.0% Top1 (backbone)
-     elif backbone == "vitb_rn50_384":
-         pretrained = _make_pretrained_vitb_rn50_384(
-             use_pretrained,
-             hooks=hooks,
-             use_vit_only=use_vit_only,
-             use_readout=use_readout,
-         )
-         scratch = _make_scratch(
-             [256, 512, 768, 768], features, groups=groups, expand=expand
-         )  # ViT-B/16 + ResNet-50 hybrid (backbone)
-     elif backbone == "vitb16_384":
-         pretrained = _make_pretrained_vitb16_384(
-             use_pretrained, hooks=hooks, use_readout=use_readout
-         )
-         scratch = _make_scratch(
-             [96, 192, 384, 768], features, groups=groups, expand=expand
-         )  # ViT-B/16 - 84.6% Top1 (backbone)
-     elif backbone == "resnext101_wsl":
-         pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
-         scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand)  # resnext101_wsl
-     elif backbone == "efficientnet_lite3":
-         pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
-         scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand)  # efficientnet_lite3
-     else:
-         raise NotImplementedError(f"Backbone '{backbone}' not implemented")
-
-     return pretrained, scratch
-
-
- def _make_scratch(in_shape, out_shape, groups=1, expand=False):
-     scratch = nn.Module()
-
-     out_shape1 = out_shape
-     out_shape2 = out_shape
-     out_shape3 = out_shape
-     out_shape4 = out_shape
-     if expand:
-         out_shape1 = out_shape
-         out_shape2 = out_shape * 2
-         out_shape3 = out_shape * 4
-         out_shape4 = out_shape * 8
-
-     scratch.layer1_rn = nn.Conv2d(
-         in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-     )
-     scratch.layer2_rn = nn.Conv2d(
-         in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-     )
-     scratch.layer3_rn = nn.Conv2d(
-         in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-     )
-     scratch.layer4_rn = nn.Conv2d(
-         in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-     )
-
-     return scratch
-
-
- def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
-     efficientnet = torch.hub.load(
-         "rwightman/gen-efficientnet-pytorch",
-         "tf_efficientnet_lite3",
-         pretrained=use_pretrained,
-         exportable=exportable,
-     )
-     return _make_efficientnet_backbone(efficientnet)
-
-
- def _make_efficientnet_backbone(effnet):
-     pretrained = nn.Module()
-
-     pretrained.layer1 = nn.Sequential(
-         effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
-     )
-     pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
-     pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
-     pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
-
-     return pretrained
-
-
- def _make_resnet_backbone(resnet):
-     pretrained = nn.Module()
-     pretrained.layer1 = nn.Sequential(
-         resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
-     )
-
-     pretrained.layer2 = resnet.layer2
-     pretrained.layer3 = resnet.layer3
-     pretrained.layer4 = resnet.layer4
-
-     return pretrained
-
-
- def _make_pretrained_resnext101_wsl(use_pretrained):
-     resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
-     return _make_resnet_backbone(resnet)
-
-
- class Interpolate(nn.Module):
-     """Interpolation module."""
-
-     def __init__(self, scale_factor, mode, align_corners=False):
-         """Init.
-
-         Args:
-             scale_factor (float): scaling
-             mode (str): interpolation mode
-         """
-         super(Interpolate, self).__init__()
-
-         self.interp = nn.functional.interpolate
-         self.scale_factor = scale_factor
-         self.mode = mode
-         self.align_corners = align_corners
-
-     def forward(self, x):
-         """Forward pass.
-
-         Args:
-             x (tensor): input
-
-         Returns:
-             tensor: interpolated data
-         """
-         x = self.interp(
-             x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
-         )
-
-         return x
-
-
- class ResidualConvUnit(nn.Module):
-     """Residual convolution module."""
-
-     def __init__(self, features):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super().__init__()
-
-         self.conv1 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True
-         )
-         self.conv2 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True
-         )
-         self.relu = nn.ReLU(inplace=True)
-
-     def forward(self, x):
-         """Forward pass.
-
-         Args:
-             x (tensor): input
-
-         Returns:
-             tensor: output
-         """
-         out = self.relu(x)
-         out = self.conv1(out)
-         out = self.relu(out)
-         out = self.conv2(out)
-
-         return out + x
-
-
- class FeatureFusionBlock(nn.Module):
-     """Feature fusion block."""
-
-     def __init__(self, features):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super(FeatureFusionBlock, self).__init__()
-
-         self.resConfUnit1 = ResidualConvUnit(features)
-         self.resConfUnit2 = ResidualConvUnit(features)
-
-     def forward(self, *xs):
-         """Forward pass.
-
-         Returns:
-             tensor: output
-         """
-         output = xs[0]
-
-         if len(xs) == 2:
-             output += self.resConfUnit1(xs[1])
-
-         output = self.resConfUnit2(output)
-
-         output = nn.functional.interpolate(
-             output, scale_factor=2, mode="bilinear", align_corners=True
-         )
-
-         return output
-
-
- class ResidualConvUnit_custom(nn.Module):
-     """Residual convolution module with optional batch norm and a quantization-friendly skip add."""
-
-     def __init__(self, features, activation, bn):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super().__init__()
-
-         self.bn = bn
-         self.groups = 1
-
-         self.conv1 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
-         )
-         self.conv2 = nn.Conv2d(
-             features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
-         )
-
-         if self.bn:
-             self.bn1 = nn.BatchNorm2d(features)
-             self.bn2 = nn.BatchNorm2d(features)
-
-         self.activation = activation
-         self.skip_add = nn.quantized.FloatFunctional()
-
-     def forward(self, x):
-         """Forward pass.
-
-         Args:
-             x (tensor): input
-
-         Returns:
-             tensor: output
-         """
-         out = self.activation(x)
-         out = self.conv1(out)
-         if self.bn:
-             out = self.bn1(out)
-
-         out = self.activation(out)
-         out = self.conv2(out)
-         if self.bn:
-             out = self.bn2(out)
-
-         if self.groups > 1:
-             # dead branch: self.groups is fixed to 1 above, and conv_merge is never defined
-             out = self.conv_merge(out)
-
-         return self.skip_add.add(out, x)
-
-
- class FeatureFusionBlock_custom(nn.Module):
-     """Feature fusion block with configurable upsampling, batch norm, and channel expansion."""
-
-     def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
-         """Init.
-
-         Args:
-             features (int): number of features
-         """
-         super(FeatureFusionBlock_custom, self).__init__()
-
-         self.deconv = deconv
-         self.align_corners = align_corners
-
-         self.groups = 1
-
-         self.expand = expand
-         out_features = features
-         if self.expand:
-             out_features = features // 2
-
-         self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
-
-         self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
-         self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
-
-         self.skip_add = nn.quantized.FloatFunctional()
-
-     def forward(self, *xs):
-         """Forward pass.
-
-         Returns:
-             tensor: output
-         """
-         output = xs[0]
-
-         if len(xs) == 2:
-             res = self.resConfUnit1(xs[1])
-             output = self.skip_add.add(output, res)
-
-         output = self.resConfUnit2(output)
-
-         output = nn.functional.interpolate(
-             output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
-         )
-
-         output = self.out_conv(output)
-
-         return output
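As a rough illustration of how these pieces compose (not from the deleted file; backbone weights download via torch.hub on first use, and the channel counts follow the in_shape lists above):

pretrained, scratch = _make_encoder(
    "efficientnet_lite3",  # one of the backbone names handled in _make_encoder
    features=64,           # output channels of every scratch.layerN_rn conv
    use_pretrained=True,
)
x = torch.randn(1, 3, 384, 384)
feat1 = pretrained.layer1(x)      # first backbone stage; 32 channels for lite3
feat1 = scratch.layer1_rn(feat1)  # remapped to 64 channels for the fusion decoder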
 
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/io_.py DELETED
@@ -1,216 +0,0 @@
- # coding=utf-8
- '''
- Created on Sep 27, 2016
-
- @author: dengdan
-
- Tool functions for file-system operations and I/O,
- in the style of linux shell commands.
- '''
- import os
- import pickle as pkl
- # import commands
- import logging
-
- # import util
-
-
- def mkdir(path):
-     """
-     If the target directory does not exist, it and its parent directories will be created.
-     """
-     path = get_absolute_path(path)
-     if not exists(path):
-         os.makedirs(path)
-     return path
-
-
- def make_parent_dir(path):
-     """Make the parent directories for a file."""
-     parent_dir = get_dir(path)
-     mkdir(parent_dir)
-
-
- def pwd():
-     return os.getcwd()
-
-
- def dump(path, obj):
-     path = get_absolute_path(path)
-     parent_path = get_dir(path)
-     mkdir(parent_path)
-     with open(path, 'wb') as f:  # pickle requires binary mode in Python 3
-         logging.info('dumping file: ' + path)
-         pkl.dump(obj, f)
-
-
- def load(path):
-     path = get_absolute_path(path)
-     with open(path, 'rb') as f:  # pickle requires binary mode in Python 3
-         data = pkl.load(f)
-     return data
-
-
- def join_path(a, *p):
-     return os.path.join(a, *p)
-
-
- def is_dir(path):
-     path = get_absolute_path(path)
-     return os.path.isdir(path)
-
-
- def is_path(path):
-     path = get_absolute_path(path)
-     return os.path.isfile(path)
-
-
- def get_dir(path):
-     '''
-     Return the directory a path belongs to.
-     If the path is a directory itself, it is returned unchanged.
-     '''
-     path = get_absolute_path(path)
-     if is_dir(path):
-         return path
-     return os.path.split(path)[0]
-
-
- def get_filename(path):
-     return os.path.split(path)[1]
-
-
- def get_absolute_path(p):
-     if p.startswith('~'):
-         p = os.path.expanduser(p)
-     return os.path.abspath(p)
-
-
- def cd(p):
-     p = get_absolute_path(p)
-     os.chdir(p)
-
-
- # def ls(path='.', suffix=None):
- #     """
- #     List files in a directory.
- #     Return file names in a list.
- #     """
- #     path = get_absolute_path(path)
- #     files = os.listdir(path)
- #
- #     if suffix is None:
- #         return files
- #
- #     filtered = []
- #     for f in files:
- #         if util.str.ends_with(f, suffix, ignore_case=True):
- #             filtered.append(f)
- #
- #     return filtered
-
-
- def find_files(pattern):
-     import glob
-     return glob.glob(pattern)
-
-
- def read_lines(p):
-     """Return the text in a file as a list of lines."""
-     p = get_absolute_path(p)
-     with open(p, 'r') as f:
-         return f.readlines()
-
-
- def write_lines(p, lines):
-     p = get_absolute_path(p)
-     make_parent_dir(p)
-     with open(p, 'w') as f:
-         for line in lines:
-             f.write(line)
-
-
- # def cat(p):
- #     """Return the text in a file as a whole."""
- #     cmd = 'cat ' + p
- #     return commands.getoutput(cmd)
-
-
- def exists(path):
-     path = get_absolute_path(path)
-     return os.path.exists(path)
-
-
- def load_mat(path):
-     import scipy.io as sio
-     path = get_absolute_path(path)
-     return sio.loadmat(path)
-
-
- def dump_mat(path, dict_obj, append=True):
-     import scipy.io as sio
-     path = get_absolute_path(path)
-     make_parent_dir(path)
-     sio.savemat(file_name=path, mdict=dict_obj, appendmat=append)
-
-
- def dir_mat(path):
-     '''
-     List the variables in a mat file.
-     Return a list: [(name, shape, dtype), ...]
-     '''
-     import scipy.io as sio
-     path = get_absolute_path(path)
-     return sio.whosmat(path)
-
-
- SIZE_UNIT_K = 1024
- SIZE_UNIT_M = SIZE_UNIT_K ** 2
- SIZE_UNIT_G = SIZE_UNIT_K ** 3
-
-
- def get_file_size(path, unit=SIZE_UNIT_K):
-     size = os.path.getsize(get_absolute_path(path))
-     return size * 1.0 / unit
-
-
- def create_h5(path):
-     import h5py
-     path = get_absolute_path(path)
-     make_parent_dir(path)
-     return h5py.File(path, 'w')
-
-
- def open_h5(path, mode='r'):
-     import h5py
-     path = get_absolute_path(path)
-     return h5py.File(path, mode)
-
-
- def read_h5(h5, key):
-     return h5[key][:]
-
-
- def read_h5_attrs(h5, key, attrs):
-     return h5[key].attrs[attrs]
-
-
- def copy(src, dest):
-     import shutil
-     shutil.copy(get_absolute_path(src), get_absolute_path(dest))
-
-
- cp = copy
-
-
- def remove(p):
-     os.remove(get_absolute_path(p))
-
-
- rm = remove
-
-
- # def search(pattern, path, file_only=True):
- #     """
- #     Search files whose name matches the given pattern. The search scope
- #     is the directory and sub-directories of 'path'.
- #     """
- #     path = get_absolute_path(path)
- #     pattern_here = util.io.join_path(path, pattern)
- #     targets = []
- #
- #     # find matches in the current directory
- #     candidates = find_files(pattern_here)
- #     for can in candidates:
- #         if util.io.is_dir(can) and file_only:
- #             continue
- #         else:
- #             targets.append(can)
- #
- #     # find matches in sub-dirs
- #     files = ls(path)
- #     for f in files:
- #         fpath = util.io.join_path(path, f)
- #         if is_dir(fpath):
- #             targets_in_sub_dir = search(pattern, fpath, file_only)
- #             targets.extend(targets_in_sub_dir)
- #     return targets
-
-
- def dump_json(path, data):
-     import json
-     path = get_absolute_path(path)
-     make_parent_dir(path)
-
-     with open(path, 'w') as f:
-         json.dump(data, f)
-     return path
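A quick round-trip sketch with the helpers above (not part of the deleted file; the paths are hypothetical):

dump('~/cache/state.pkl', {'epoch': 3})                   # parent directories are created on demand
state = load('~/cache/state.pkl')                         # -> {'epoch': 3}
write_lines('~/cache/log.txt', ['line 1\n', 'line 2\n'])
print(get_file_size('~/cache/state.pkl'))                 # size in KiB (SIZE_UNIT_K) by default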
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-4ccfb72c.css DELETED
@@ -1 +0,0 @@
- .wrap.svelte-1sc8eck{display:flex;flex-direction:column;flex-flow:column;margin:0;padding:0;height:100%}.codemirror-wrapper.svelte-1sc8eck{height:100%;overflow:auto}.cm-editor{height:100%}.cm-selectionBackground{background-color:#b9d2ff30!important}.cm-focused{outline:none!important}button.svelte-qi7jcw{position:relative;cursor:pointer;padding:5px;width:22px;height:22px}.check.svelte-qi7jcw{position:absolute;top:0;right:0;z-index:var(--layer-top);background:var(--background-fill-primary);padding:var(--size-1);width:100%;height:100%;color:var(--body-text-color)}a.svelte-14d303a{position:relative;cursor:pointer;padding:5px;width:22px;height:22px}.copied.svelte-14d303a{color:var(--color-green-500)}.check.svelte-14d303a{position:absolute;top:0;right:0;z-index:var(--layer-top);background:var(--background-fill-primary);padding:var(--size-1);width:100%;height:100%;color:var(--body-text-color)}div.svelte-1yin446{display:flex;position:absolute;top:var(--block-label-margin);right:var(--block-label-margin);align-items:center;z-index:var(--layer-2);transition:.15s;box-shadow:var(--shadow-drop);border:1px solid var(--border-color-primary);border-top:none;border-right:none;border-radius:var(--block-label-right-radius);background:var(--block-label-background-fill);overflow:hidden;color:var(--block-label-text-color);font:var(--font);font-size:var(--button-small-text-size)}
 
 
spaces/Dagfinn1962/stablediffusion-articlera/theme.css DELETED
@@ -1 +0,0 @@
- {"theme": {"background_accent": "*primary_500", "background_accent_soft": "#919cbf", "background_accent_soft_dark": "*neutral_700", "background_primary": "#586794", "background_primary_dark": "*neutral_950", "background_secondary": "#586794", "background_secondary_dark": "*neutral_900", "block_background": "#7280ad", "block_background_dark": "#31395294", "block_border_color": "*border_color_primary", "block_border_color_dark": "*border_color_primary", "block_border_width": "1px", "block_info_color": "#f8f8f2", "block_info_color_dark": "#f8f8f2", "block_info_text_size": "*text_sm", "block_info_text_weight": "400", "block_label_background": "*background_primary", "block_label_background_dark": "*background_secondary", "block_label_border_color": "*border_color_primary", "block_label_border_color_dark": "*border_color_primary", "block_label_border_width": "1px", "block_label_icon_color": "*block_label_text_color", "block_label_margin": "0", "block_label_padding": "*spacing_sm *spacing_lg", "block_label_radius": "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", "block_label_right_radius": "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", "block_label_text_color": "#f8f8f2", "block_label_text_color_dark": "#f8f8f2", "block_label_text_size": "*text_sm", "block_label_text_weight": "400", "block_padding": "*spacing_xl calc(*spacing_xl + 2px)", "block_radius": "*radius_lg", "block_shadow": "none", "block_title_background": "none", "block_title_border_color": "none", "block_title_border_width": "0px", "block_title_padding": "0", "block_title_radius": "none", "block_title_text_color": "#f8f8f2", "block_title_text_color_dark": "#f8f8f2", "block_title_text_size": "*text_md", "block_title_text_weight": "400", "body_background": "#586794", "body_background_dark": "*background_primary", "body_text_color": "#f8f8f2", "body_text_color_dark": "#f8f8f2", "body_text_color_subdued": "#f8f8f2", "body_text_color_subdued_dark": "*neutral_400", "body_text_size": "*text_md", "body_text_weight": "400", "border_color_accent": "#818eb6", "border_color_accent_dark": "*neutral_600", "border_color_primary": "*neutral_200", "border_color_primary_dark": "*neutral_700", "button_border_width": "*input_border_width", "button_cancel_background": "*button_secondary_background", "button_cancel_background_dark": "*button_secondary_background", "button_cancel_background_hover": "*button_cancel_background", "button_cancel_background_hover_dark": "*button_cancel_background", "button_cancel_border_color": "*button_secondary_border_color", "button_cancel_border_color_dark": "*button_secondary_border_color", "button_cancel_border_color_hover": "*button_cancel_border_color", "button_cancel_border_color_hover_dark": "*button_cancel_border_color", "button_cancel_text_color": "*button_secondary_text_color", "button_cancel_text_color_dark": "*button_secondary_text_color", "button_cancel_text_color_hover": "*button_cancel_text_color", "button_cancel_text_color_hover_dark": "*button_cancel_text_color", "button_large_padding": "*spacing_lg calc(2 * *spacing_lg)", "button_large_radius": "*radius_lg", "button_large_text_size": "*text_lg", "button_large_text_weight": "600", "button_primary_background": "#ffa1d7", "button_primary_background_dark": "#ff79c6", "button_primary_background_hover": "*button_primary_background", "button_primary_background_hover_dark": "*button_primary_background", "button_primary_border_color": "*primary_200", "button_primary_border_color_dark": "*primary_600", "button_primary_border_color_hover": "*button_primary_border_color", "button_primary_border_color_hover_dark": "*button_primary_border_color", "button_primary_text_color": "*primary_600", "button_primary_text_color_dark": "white", "button_primary_text_color_hover": "*button_primary_text_color", "button_primary_text_color_hover_dark": "*button_primary_text_color", "button_secondary_background": "*neutral_200", "button_secondary_background_dark": "*neutral_600", "button_secondary_background_hover": "*button_secondary_background", "button_secondary_background_hover_dark": "*button_secondary_background", "button_secondary_border_color": "*neutral_200", "button_secondary_border_color_dark": "*neutral_600", "button_secondary_border_color_hover": "*button_secondary_border_color", "button_secondary_border_color_hover_dark": "*button_secondary_border_color", "button_secondary_text_color": "#f8f8f2", "button_secondary_text_color_dark": "white", "button_secondary_text_color_hover": "*button_secondary_text_color", "button_secondary_text_color_hover_dark": "*button_secondary_text_color", "button_shadow": "none", "button_shadow_active": "none", "button_shadow_hover": "none", "button_small_padding": "*spacing_sm calc(2 * *spacing_sm)", "button_small_radius": "*radius_lg", "button_small_text_size": "*text_md", "button_small_text_weight": "400", "button_transition": "background-color 0.2s ease", "checkbox_background": "*background_primary", "checkbox_background_dark": "*neutral_800", "checkbox_background_focus": "*checkbox_background", "checkbox_background_focus_dark": "*checkbox_background", "checkbox_background_hover": "*checkbox_background", "checkbox_background_hover_dark": "*checkbox_background", "checkbox_background_selected": "#ff79c6", "checkbox_background_selected_dark": "#ff79c6", "checkbox_border_color": "*neutral_300", "checkbox_border_color_dark": "*neutral_700", "checkbox_border_color_focus": "*secondary_500", "checkbox_border_color_focus_dark": "*secondary_500", "checkbox_border_color_hover": "*neutral_300", "checkbox_border_color_hover_dark": "*neutral_600", "checkbox_border_color_selected": "*secondary_600", "checkbox_border_color_selected_dark": "*secondary_600", "checkbox_border_radius": "*radius_sm", "checkbox_border_width": "*input_border_width", "checkbox_label_background": "*button_secondary_background", "checkbox_label_background_dark": "*button_secondary_background", "checkbox_label_background_hover": "*button_secondary_background_hover", "checkbox_label_background_hover_dark": "*button_secondary_background_hover", "checkbox_label_background_selected": "*checkbox_label_background", "checkbox_label_background_selected_dark": "*checkbox_label_background", "checkbox_label_border_color": "*border_color_primary", "checkbox_label_border_color_dark": "*border_color_primary", "checkbox_label_border_color_hover": "*checkbox_label_border_color", "checkbox_label_border_color_hover_dark": "*checkbox_label_border_color", "checkbox_label_border_width": "*input_border_width", "checkbox_label_gap": "*spacing_lg", "checkbox_label_padding": "*spacing_md calc(2 * *spacing_md)", "checkbox_label_shadow": "none", "checkbox_label_text_size": "*text_md", "checkbox_label_text_weight": "400", "checkbox_shadow": "*input_shadow", "checkbox_text_color": "*body_text_color", "checkbox_text_color_dark": "*body_text_color", "checkbox_text_color_selected": "*checkbox_text_color", "checkbox_text_color_selected_dark": "*checkbox_text_color", "container_radius": "*radius_lg", "embed_radius": "*radius_lg", "error_background": "#fee2e2", "error_background_dark": "*background_primary", "error_border_color": "#fecaca", "error_border_color_dark": "*border_color_primary", "error_border_width": "1px", "error_color": "#ef4444", "error_color_dark": "#ef4444", "font": "'Poppins'", "font_mono": "'IBM Plex Mono', 'ui-monospace', 'Consolas', monospace", "form_gap_width": "0px", "header_text_weight": "600", "input_background": "*neutral_100", "input_background_dark": "*neutral_700", "input_background_focus": "*secondary_500", "input_background_focus_dark": "*secondary_600", "input_background_hover": "*input_background", "input_background_hover_dark": "*input_background", "input_border_color": "*border_color_primary", "input_border_color_dark": "*border_color_primary", "input_border_color_focus": "*secondary_300", "input_border_color_focus_dark": "*neutral_700", "input_border_color_hover": "*input_border_color", "input_border_color_hover_dark": "*input_border_color", "input_border_width": "0px", "input_padding": "*spacing_xl", "input_placeholder_color": "*neutral_400", "input_placeholder_color_dark": "*neutral_500", "input_radius": "*radius_lg", "input_shadow": "none", "input_shadow_focus": "*input_shadow", "input_text_size": "*text_md", "input_text_weight": "400", "layout_gap": "*spacing_xxl", "link_text_color": "*secondary_600", "link_text_color_active": "*secondary_600", "link_text_color_active_dark": "*secondary_500", "link_text_color_dark": "*secondary_500", "link_text_color_hover": "*secondary_700", "link_text_color_hover_dark": "*secondary_400", "link_text_color_visited": "*secondary_500", "link_text_color_visited_dark": "*secondary_600", "loader_color": "*background_accent", "neutral_100": "#919cbf", "neutral_200": "#818eb6", "neutral_300": "#7280ad", "neutral_400": "#6272a4", "neutral_50": "#a1aac8", "neutral_500": "#586794", "neutral_600": "#4e5b83", "neutral_700": "#455073", "neutral_800": "#3b4462", "neutral_900": "#313952", "neutral_950": "#272e42", "panel_background": "*background_secondary", "panel_background_dark": "#31395294", "panel_border_color": "*border_color_primary", "panel_border_color_dark": "*border_color_primary", "panel_border_width": "0", "primary_100": "#fce7f3", "primary_200": "#fbcfe8", "primary_300": "#f9a8d4", "primary_400": "#f472b6", "primary_50": "#fdf2f8", "primary_500": "#ec4899", "primary_600": "#db2777", "primary_700": "#be185d", "primary_800": "#9d174d", "primary_900": "#831843", "primary_950": "#6e1a3d", "prose_text_size": "*text_md", "prose_text_weight": "400", "radius_lg": "8px", "radius_md": "6px", "radius_sm": "4px", "radius_xl": "12px", "radius_xs": "2px", "radius_xxl": "22px", "radius_xxs": "1px", "secondary_100": "#dbeafe", "secondary_200": "#bfdbfe", "secondary_300": "#93c5fd", "secondary_400": "#60a5fa", "secondary_50": "#eff6ff", "secondary_500": "#3b82f6", "secondary_600": "#2563eb", "secondary_700": "#1d4ed8", "secondary_800": "#1e40af", "secondary_900": "#1e3a8a", "secondary_950": "#1d3660", "section_header_text_size": "*text_md", "section_header_text_weight": "400", "shadow_drop": "rgba(0,0,0,0.05) 0px 1px 2px 0px", "shadow_drop_lg": "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)", "shadow_inset": "rgba(0,0,0,0.05) 0px 2px 4px 0px inset", "shadow_spread": "3px", "shadow_spread_dark": "1px", "slider_color": "#ffa1d7", "slider_color_dark": "#ff79c6", "spacing_lg": "8px", "spacing_md": "6px", "spacing_sm": "4px", "spacing_xl": "10px", "spacing_xs": "2px", "spacing_xxl": "16px", "spacing_xxs": "1px", "stat_color_background": "*primary_300", "stat_color_background_dark": "*primary_500", "table_border_color": "*neutral_300", "table_border_color_dark": "*neutral_700", "table_even_background": "#7280ad", "table_even_background_dark": "*neutral_950", "table_odd_background": "*neutral_50", "table_odd_background_dark": "*neutral_900", "table_radius": "*radius_lg", "table_row_focus": "*background_accent_soft", "table_row_focus_dark": "*background_accent_soft", "text_lg": "16px", "text_md": "14px", "text_sm": "12px", "text_xl": "22px", "text_xs": "10px", "text_xxl": "26px", "text_xxs": "9px"}, "version": "0.3.2"}
 
 
spaces/Djacon/emotion_detection/files/js/summarizer.js DELETED
@@ -1,213 +0,0 @@
- // Form Divs
- const sumText = document.getElementById('sum-text-div');
- const sumFile = document.getElementById('sum-file-div');
- const sumVideo = document.getElementById('sum-video-div');
-
- // Form Data
- const selectOption = document.getElementById('sum-type');
- const sumTextInput = document.getElementById('sum-text-input');
- const sumFileInput = document.getElementById('sum-file-input');
- const sumVideoInput = document.getElementById('sum-video-input');
-
- // Error Output Section
- const sumError = document.getElementById('sum-err');
-
- // Result Section
- const extractText = document.getElementById('extracted-text');
- const summaryText = document.getElementById('summarized-text');
-
- // Word Counter
- const wordsCount = document.getElementById('word-counter');
-
- // Tabs
- const original = document.getElementById('sum-original');
- const summary = document.getElementById('sum-summary');
- const showOriginal = document.getElementById('show-original');
- const showSummary = document.getElementById('show-summary');
-
- const MAX_SIZE = 20000;
-
-
- function _summarize() {
-     const xhr = new XMLHttpRequest();
-     xhr.open('POST', '/predict_summarization', true);
-     xhr.setRequestHeader('Content-Type', 'application/json');
-
-     const data = JSON.stringify({ 'sum_type': selectOption.value, 'text': extractText.value });
-
-     xhr.onreadystatechange = function () {
-         if (xhr.readyState === 4 && xhr.status === 200) {
-             const result = xhr.responseText.split('\\n').join('\n');
-             summaryText.value = result.slice(1, -1);
-             _show_summary();
-         }
-     };
-
-     xhr.send(data);
- }
-
- function _extractFile() {
-     const file = sumFileInput.files[0];
-     if (file.type === 'text/plain') {
-         const reader = new FileReader();
-         reader.onload = function () {
-             sumTextInput.value = reader.result.slice(0, MAX_SIZE);
-         };
-         reader.readAsText(file, 'CP1251');
-     } else if (file.type === 'application/pdf') {
-         sumTextInput.value = '';
-         const reader = new FileReader();
-         reader.onload = function (e) {
-             const pdfData = e.target.result;
-             pdfjsLib.getDocument(pdfData).promise.then(function (pdfDocument) {
-                 for (let pageNum = 1; pageNum <= pdfDocument.numPages; pageNum++) {
-                     pdfDocument.getPage(pageNum).then(function (pdfPage) {
-                         pdfPage.getTextContent().then(function (textContent) {
-                             let size = sumTextInput.value.length;
-                             const pageText = [];
-                             for (const textItem of textContent.items) {
-                                 pageText.push(textItem.str);
-                                 size += textItem.str.length;
-                                 if (size > MAX_SIZE) break;
-                             }
-                             sumTextInput.value += pageText.join(' ');
-                         });
-                     });
-                 }
-             });
-         };
-         reader.readAsDataURL(file);
-     }
- }
-
-
- async function summarize(event) {
-     event.preventDefault();
-
-     switch (selectOption.value) {
-         case 'sum-text': {
-             const len = sumTextInput.value.trim().length;
-             if (len < 250) {
-                 sumError.innerText = `The text size should be at least 250 characters (${len} < 250)`;
-                 sumError.classList.remove('hidden');
-                 return;
-             }
-             break;
-         }
-         case 'sum-video': {
-             const regex = /^((((http)s?:\/\/)?((www\.)|(m\.))?youtube.com\/watch\?([^\?]*&)?v=.+)|(((http)s?:\/\/)?youtu.be\/([^\?=]+)(\?[^?]+)?))$/;
-             if (!sumVideoInput.value.match(regex)) {
-                 sumError.innerText = 'Invalid youtube link';
-                 sumError.classList.remove('hidden');
-                 return;
-             }
-             break;
-         }
-     }
-
-     sumError.classList.add('hidden');
-
-     _show_summary();
-
-     // Here we can finally summarize the data
-     summaryText.value = 'Please wait...';
-     switch (selectOption.value) {
-         case 'sum-text':
-             extractText.value = sumTextInput.value.trim().slice(0, MAX_SIZE);
-             break;
-         case 'sum-video':
-             extractText.value = sumVideoInput.value.slice(0, MAX_SIZE);
-             break;
-     }
-     _summarize();
- }
-
-
- function _update_option() {
-     switch (selectOption.value) {
-         case 'sum-text':
-             sumText.classList.remove('hidden');
-             sumVideo.classList.add('hidden');
-
-             sumTextInput.setAttribute('required', '');
-             sumVideoInput.removeAttribute('required');
-             break;
-         case 'sum-video':
-             sumText.classList.add('hidden');
-             sumVideo.classList.remove('hidden');
-
-             sumTextInput.removeAttribute('required');
-             sumVideoInput.setAttribute('required', '');
-             break;
-     }
-     sumError.classList.add('hidden');
- }
-
- function _update_counter() {
-     const text = sumTextInput.value.trim();
-     if (text === '') {
-         sumFile.classList.remove('hidden');
-         wordsCount.classList.add('hidden');
-         return;
-     }
-
-     sumFile.classList.add('hidden');
-     wordsCount.classList.remove('hidden');
-     wordsCount.innerHTML = `Words: ${text.split(/\s+/).length} | Chars: ${text.length}`;
- }
-
-
- function _show_summary() {
-     showOriginal.classList.remove('bg-gray-100');
-     showSummary.classList.add('bg-gray-100');
-
-     summary.classList.remove('hidden');
-     original.classList.add('hidden');
- }
-
- function _show_original() {
-     showOriginal.classList.add('bg-gray-100');
-     showSummary.classList.remove('bg-gray-100');
-
-     original.classList.remove('hidden');
-     summary.classList.add('hidden');
- }
-
-
- document.addEventListener('DOMContentLoaded', function () {
-     selectOption.addEventListener('change', _update_option);
-
-     const submitButton = document.getElementById('submit');
-     submitButton.addEventListener('click', summarize);
-
-     sumFileInput.addEventListener('change', async function () {
-         const allowedTypes = ['application/pdf', 'text/plain'];
-         const file = sumFileInput.files[0];
-
-         if (!file) {
-             sumError.classList.remove('hidden');
-             return;
-         }
-
-         if (!allowedTypes.includes(file.type)) {
-             sumError.innerText = 'Not supported type (Only `.pdf` or `.txt`)';
-             sumError.classList.remove('hidden');
-             return;
-         }
-
-         // Back to main option
-         selectOption.options[0].selected = true;
-         _update_option();
-         _extractFile();
-
-         await new Promise(resolve => setTimeout(resolve, 1000));
-         _update_counter();
-         sumError.classList.add('hidden');
-     });
-
-     sumTextInput.addEventListener('input', _update_counter);
-
-     showSummary.addEventListener('click', _show_summary);
-     showOriginal.addEventListener('click', _show_original);
- });
 
 
spaces/DrSong/ChatGLM-6B-ChatBot/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: ChatGLM 6B ChatBot
- emoji: 🐨
- colorFrom: pink
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.20.1
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference