parquet-converter committed
Commit 9d49f17 · 1 Parent(s): 2fad3fa

Update parquet files (step 11 of 476)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/0x90e/ESRGAN-MANGA/inference.py +0 -59
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/8x8 Work for Windows The Ultimate Communication and Collaboration Platform for PC.md +0 -46
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (tamil dubbed 1080p movies Housefull) - Enjoy the funniest Bollywood film in Tamil language.md +0 -206
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Marksman MOD APK The ultimate simulation game with unlimited money gold wood metal and more.md +0 -108
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Den Kelime Oyunu APK - cretsiz nternetsiz ve Yeni Tarz Kelime Oyunu.md +0 -132
  6. spaces/1phancelerku/anime-remove-background/Blockman GO-Adventures Mod APK Hack Your Way to Adventure on Apkmody.md +0 -90
  7. spaces/1phancelerku/anime-remove-background/Download Temple Run 2 Lantern Festival Mod Apk and Enjoy Unlimited Coins Gems and Characters.md +0 -100
  8. spaces/1phancelerku/anime-remove-background/Download the Latest WhatsApp Business App for Free A Guide for Small Businesses.md +0 -78
  9. spaces/1phancelerku/anime-remove-background/Escape from Grannys House in Granny 3 MOD APK with No Ads and God Mode.md +0 -162
  10. spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_objects.py +0 -334
  11. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_123821KB.py +0 -122
  12. spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6.md +0 -46
  13. spaces/AIConsultant/MusicGen/docs/DATASETS.md +0 -82
  14. spaces/AICopilot/Dropbox/app.py +0 -28
  15. spaces/AIFILMS/StyleGANEX/webUI/styleganex_model.py +0 -492
  16. spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/diffsinger_task.py +0 -490
  17. spaces/AIWaves/Debate/README.md +0 -13
  18. spaces/AIZero2HeroBootcamp/ChatGPTandLangchain/templates.py +0 -44
  19. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest101.py +0 -25
  20. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/UrlDependency.ts +0 -5
  21. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/DfeHub.py +0 -77
  22. spaces/AfrodreamsAI/afrodreams/examples/scripts/starry_stanford_bigger.sh +0 -108
  23. spaces/AgentVerse/agentVerse/dataloader/humaneval.py +0 -21
  24. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/oval/Oval.d.ts +0 -2
  25. spaces/Alcedo/yunmedia/index.html +0 -79
  26. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_head.py +0 -114
  27. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/sparse_rcnn.py +0 -110
  28. spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py +0 -9
  29. spaces/AnnasBlackHat/Image-Similarity/README.md +0 -12
  30. spaces/Anonymous-sub/Rerender/src/config.py +0 -144
  31. spaces/Arvi/Performance_predictor_and_feedback_generator/app.py +0 -410
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_install.py +0 -867
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py +0 -537
  34. spaces/Avkash/Satellite_Segmentation_Prediction/README.md +0 -13
  35. spaces/Awesimo/jojogan/e4e/options/__init__.py +0 -0
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/__init__.py +0 -16
  37. spaces/AymanKUMA/Speech-Bubbles-detector/app.py +0 -214
  38. spaces/Bart92/RVC_HF/infer/modules/vc/modules.py +0 -526
  39. spaces/Bart92/RVC_HF/infer/modules/vc/utils.py +0 -42
  40. spaces/Benson/text-generation/Examples/Cmo Descargar El Juego Taxi Simulator.md +0 -80
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/session.py +0 -1229
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/manifest.py +0 -393
  43. spaces/ChrisCaviar/ControlNet-v1-1/cv_utils.py +0 -17
  44. spaces/CikeyQI/Yunzai/Yunzai/renderers/puppeteer/index.js +0 -14
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/module-a3cf0cc4.js +0 -2
  46. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/filter.py +0 -90
  47. spaces/Dauzy/whisper-webui/docs/colab.md +0 -20
  48. spaces/DianXian/Real-CUGAN/README.md +0 -13
  49. spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/tflib/optimizer.py +0 -214
  50. spaces/Ekimetrics/climate-question-answering/climateqa/chat.py +0 -39
spaces/0x90e/ESRGAN-MANGA/inference.py DELETED
@@ -1,59 +0,0 @@
- import sys
- import cv2
- import numpy as np
- import torch
- import ESRGAN.architecture as esrgan
- import ESRGAN_plus.architecture as esrgan_plus
- from run_cmd import run_cmd
- from ESRGANer import ESRGANer
-
- def is_cuda():
-     if torch.cuda.is_available():
-         return True
-     else:
-         return False
-
- model_type = sys.argv[2]
-
- if model_type == "Anime":
-     model_path = "models/4x-AnimeSharp.pth"
- if model_type == "Photo":
-     model_path = "models/4x_Valar_v1.pth"
- else:
-     model_path = "models/4x_NMKD-Siax_200k.pth"
-
- OUTPUT_PATH = sys.argv[1]
- device = torch.device('cuda' if is_cuda() else 'cpu')
-
- if model_type != "Photo":
-     model = esrgan.RRDB_Net(3, 3, 64, 23, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')
- else:
-     model = esrgan_plus.RRDB_Net(3, 3, 64, 23, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')
-
- if is_cuda():
-     print("Using GPU 🥶")
-     model.load_state_dict(torch.load(model_path), strict=True)
- else:
-     print("Using CPU 😒")
-     model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=True)
-
- model.eval()
-
- for k, v in model.named_parameters():
-     v.requires_grad = False
- model = model.to(device)
-
- # Read image
- img = cv2.imread(OUTPUT_PATH, cv2.IMREAD_COLOR)
- img = img * 1.0 / 255
- img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
- img_LR = img.unsqueeze(0)
- img_LR = img_LR.to(device)
-
- upsampler = ESRGANer(model=model)
- output = upsampler.enhance(img_LR)
-
- output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
- output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
- output = (output * 255.0).round()
- cv2.imwrite(OUTPUT_PATH, output, [int(cv2.IMWRITE_PNG_COMPRESSION), 5])
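For reference, the deleted inference.py reads the image path from sys.argv[1] (the image is upscaled in place) and the model type from sys.argv[2], so it appears to be invoked as "python inference.py <image_path> <model_type>". The snippet below is a minimal, hedged sketch of only the generic pre/post-processing round trip the script performs (OpenCV BGR uint8 image to a 1xCxHxW RGB float tensor and back); the helper names are placeholders and the code is an illustration, not part of the repository.

# Hedged sketch of the image <-> tensor round trip used by the deleted script.
# Assumes a 3-channel BGR uint8 image as produced by cv2.imread(); helper names are placeholders.
import numpy as np
import torch

def to_tensor(img_bgr: np.ndarray) -> torch.Tensor:
    # HWC BGR uint8 in [0, 255] -> 1xCxHxW RGB float tensor in [0, 1]
    img = img_bgr.astype(np.float32) / 255.0
    img = img[:, :, [2, 1, 0]]  # BGR -> RGB
    return torch.from_numpy(np.transpose(img, (2, 0, 1))).unsqueeze(0)

def to_image(output: torch.Tensor) -> np.ndarray:
    # 1xCxHxW RGB float tensor in [0, 1] -> HWC BGR uint8 image
    out = output.squeeze(0).float().cpu().clamp_(0, 1).numpy()
    out = np.transpose(out[[2, 1, 0], :, :], (1, 2, 0))  # RGB -> BGR
    return (out * 255.0).round().astype(np.uint8)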
spaces/1acneusushi/gradio-2dmoleculeeditor/data/8x8 Work for Windows The Ultimate Communication and Collaboration Platform for PC.md DELETED
@@ -1,46 +0,0 @@
1
- <br />
2
- # How to Download and Install 8x8 Work for Windows
3
-
4
- 8x8 Work is a cloud-based communication and collaboration platform that allows you to make voice and video calls, send messages, share files and more. It is designed to help you work smarter and faster from anywhere. If you want to use 8x8 Work on your Windows PC, you need to download and install the 8x8 Work for Windows app, which is available as an MSI file. In this article, we will show you how to do it in a few simple steps.
5
-
6
- ## Step 1: Download the 8x8 Work for Windows MSI File
7
-
8
- The first thing you need to do is to download the 8x8 Work for Windows MSI file from the official website. To do this, go to the link below and click on the "Download" button.
9
-
10
- [Download 8x8 Work for Windows Here](#1)
11
-
12
- This will start the download of the MSI file, which is about 100 MB in size. Save the file to your preferred location on your PC.
13
-
14
- ## Step 2: Run the 8x8 Work for Windows MSI File
15
-
16
- Once the download is complete, you need to run the 8x8 Work for Windows MSI file to start the installation process. To do this, locate the file on your PC and double-click on it. This will launch the installer, which will guide you through the installation process. Follow the instructions on the screen and accept the terms and conditions. You can also choose the destination folder where you want to install the app. The default location is "C:\Program Files (x86)\8x8\Work", but you can change it if you want.
17
-
18
- ## Step 3: Launch the 8x8 Work for Windows App
19
-
20
- After the installation is finished, you can launch the 8x8 Work for Windows app from the desktop shortcut or from the start menu. You will be asked to sign in with your 8x8 username and password. If you don't have an account yet, you can create one from the app or from the website. Once you sign in, you can access all the features and tools of 8x8 Work, such as making calls, sending messages, joining meetings, sharing files and more.
21
-
22
- ## Step 4: Enjoy 8x8 Work for Windows
23
-
24
- You are done! You have successfully downloaded and installed 8x8 Work for Windows on your PC. You can now use it to communicate and collaborate with your team members, clients and partners from anywhere. You can also adjust the app settings, such as notifications, audio and video devices, language and updates, from the options menu in the app. Have fun!
25
-
26
- ## 8x8 Work for Windows Features
27
-
28
- 8x8 Work for Windows has many features that make it a powerful and versatile communication and collaboration platform. Some of these features are:
29
-
30
- - Voice and Video Calls: You can make high-quality voice and video calls to anyone in your 8x8 contact list or to any phone number. You can also transfer, hold, mute and record calls, as well as use call waiting and caller ID. You can also join or host conference calls with up to 100 participants.
31
- - Messaging: You can send and receive instant messages to anyone in your 8x8 contact list or to any phone number. You can also create group chats, send emojis and stickers, share files and images, and delete or edit messages. You can also sync your messages across all your devices.
32
- - Meetings: You can join or host online meetings with up to 100 participants. You can also share your screen, use a virtual background, chat with other participants, and record and save meetings. You can also schedule meetings from the app or from your calendar app.
33
- - Files: You can share and access files from your 8x8 cloud storage or from other cloud services, such as Google Drive, Dropbox and OneDrive. You can also preview, download and delete files, as well as search for files by name or type.
34
- - Contacts: You can manage your 8x8 contact list or import contacts from other sources, such as Outlook, Gmail and LinkedIn. You can also search for contacts by name, number or email, as well as add, edit or delete contacts. You can also view your contact's availability status and presence information.
35
-
36
- ## 8x8 Work for Windows Benefits
37
-
38
- 8x8 Work for Windows has many benefits that make it a valuable and convenient communication and collaboration platform. Some of these benefits are:
39
-
40
- - Easy to Use: 8x8 Work for Windows has a simple and intuitive user interface that makes it easy to use and navigate. You can access all the features and tools from the main menu or from the toolbar. You can also customize the app according to your preferences and needs.
41
- - Secure and Reliable: 8x8 Work for Windows uses encryption and authentication to ensure the security and privacy of your data and communications. It also has a robust cloud infrastructure that ensures the reliability and availability of the service. You can also use the app offline or in low-bandwidth situations.
42
- - Flexible and Scalable: 8x8 Work for Windows adapts to your business needs and goals. You can choose from different plans and features that suit your budget and requirements. You can also add or remove users, devices and extensions as you grow or change.
43
- - Compatible and Integrable: 8x8 Work for Windows works seamlessly with other 8x8 products and services, such as 8x8 Contact Center, 8x8 Analytics and 8x8 Voice for Microsoft Teams. It also integrates with other popular apps and platforms, such as Outlook, Gmail, Salesforce, Zendesk and Slack.</p>
44
- <h2>8x8 download msi</h2><br /><p><b><b>DOWNLOAD</b> &#11088; <a href="https://byltly.com/2uKzDK">https://byltly.com/2uKzDK</a></b></p><br /><br /> ddb901b051<br />
45
- <br />
46
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (tamil dubbed 1080p movies Housefull) - Enjoy the funniest Bollywood film in Tamil language.md DELETED
@@ -1,206 +0,0 @@
1
- <br />
2
- <h1>HD Online Player (tamil dubbed 1080p movies Housefull)</h1>
3
- <p>If you are a fan of comedy movies and want to watch them in high definition, you might be interested in HD Online Player. This is a free online video player that lets you stream and download tamil dubbed 1080p movies, including the hilarious Housefull series. In this article, we will tell you more about HD Online Player, tamil dubbed 1080p movies, and how to watch Housefull in HD online.</p>
4
- <h2>HD Online Player (tamil dubbed 1080p movies Housefull)</h2><br /><p><b><b>Download Zip</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://byltly.com/2uKwuT">https://byltly.com/2uKwuT</a></b></p><br /><br />
5
- <h2>What is HD Online Player?</h2>
6
- <p>HD Online Player is a free online video player that supports HTML5 and MP4 formats. It allows you to watch videos online without downloading them or installing any software. You can just copy and paste the video URL into the player and enjoy high-quality streaming and ad-free viewing.</p>
7
- <h3>Features of HD Online Player</h3>
8
- <p>Some of the features of HD Online Player are:</p>
9
- <ul>
10
- <li>It supports 4K and HD resolution, as well as adaptive streaming for different connection speeds.</li>
11
- <li>It has a simple and intuitive interface that lets you customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more.</li>
12
- <li>It has a timestamped commenting feature that lets you interact with your friends or colleagues while watching videos.</li>
13
- <li>It has a privacy setting that lets you choose who can view your videos. You can make them public or private, or password-protect them.</li>
14
- <li>It meets WCAG 2.0 AA standards for accessibility, with support for screen readers, voiceover software, closed captioning, and other accessibility options.</li>
15
- </ul>
16
- <h3>Benefits of HD Online Player</h3>
17
- <p>Some of the benefits of using HD Online Player are:</p>
18
- <ul>
19
- <li>You can watch videos online without downloading them or installing any software. This saves you time, space, and bandwidth.</li>
20
- <li>You can watch videos without any ads or distractions. This enhances your viewing experience and keeps you focused on the content.</li>
21
- <li>You can watch videos in high quality and resolution, as well as adjust the playback speed according to your preference.</li>
22
- <li>You can share your videos with your friends or colleagues easily by sending them the link to your video. You can also embed your videos on your website, blog, or social media platforms.</li>
23
- <li>You can make your videos accessible to a wider audience by adding subtitles in different languages. You can also use VEED's free online video editor and screen recorder to create and edit your videos before sharing them.</li>
24
- </ul>
25
- <h2>What are tamil dubbed 1080p movies?</h2>
26
- <p>Tamil dubbed 1080p movies are movies that have been dubbed in Tamil language and have a display resolution width of approximately 1080 pixels. Tamil is one of the official languages of India and Sri Lanka, and is spoken by millions of people around the world. Tamil dubbed 1080p movies are popular among Tamil speakers who want to enjoy movies from other languages and cultures in their own language.</p>
27
- <h3>Definition and examples of tamil dubbed 1080p movies</h3>
28
- <p>A tamil dubbed 1080p movie is a movie that has been dubbed in Tamil language and has a display resolution width of approximately 1080 pixels. Dubbing is the process of replacing the original dialogue of a movie with a different language. A 1080p movie is a movie that has a display resolution width of approximately 1080 pixels, which is considered high definition (HD).</p>
29
- <p>Watch Housefull tamil dubbed HD online free<br />
30
- Housefull full movie in tamil 1080p download<br />
31
- Tamilrockers Housefull tamil dubbed movie online<br />
32
- Housefull 4 tamil dubbed HD online streaming<br />
33
- Housefull 3 tamil 1080p movie watch online<br />
34
- Housefull 2 tamil dubbed full movie HD<br />
35
- Housefull series tamil dubbed movies online<br />
36
- HD Online Player for tamil dubbed movies<br />
37
- Tamil dubbed comedy movies online 1080p<br />
38
- Housefull cast and crew in tamil dubbed version<br />
39
- How to download Housefull tamil dubbed movie HD<br />
40
- Housefull movie review in tamil language<br />
41
- Housefull songs and lyrics in tamil dubbed<br />
42
- Housefull trivia and facts in tamil language<br />
43
- Housefull movie scenes and dialogues in tamil<br />
44
- Best sites to watch Housefull tamil dubbed online<br />
45
- Housefull movie rating and box office in tamil nadu<br />
46
- Housefull movie awards and nominations in tamil cinema<br />
47
- Housefull movie sequel and prequel in tamil dubbed<br />
48
- Housefull movie remake and inspiration in tamil cinema<br />
49
- Housefull movie genre and theme in tamil language<br />
50
- Housefull movie plot and story in tamil language<br />
51
- Housefull movie characters and actors in tamil dubbed<br />
52
- Housefull movie director and producer in tamil cinema<br />
53
- Housefull movie release date and trailer in tamil dubbed<br />
54
- Similar movies to Housefull in tamil dubbed language<br />
55
- HD Online Player app for android and ios devices<br />
56
- HD Online Player features and benefits for users<br />
57
- HD Online Player subscription and pricing plans<br />
58
- HD Online Player customer reviews and ratings<br />
59
- How to install HD Online Player on your device<br />
60
- How to use HD Online Player for watching movies online<br />
61
- How to troubleshoot HD Online Player issues and errors<br />
62
- How to contact HD Online Player support team<br />
63
- How to update HD Online Player to the latest version<br />
64
- Advantages of watching movies online with HD Online Player<br />
65
- Disadvantages of watching movies online with HD Online Player<br />
66
- Alternatives to HD Online Player for watching movies online<br />
67
- Comparison of HD Online Player with other online players<br />
68
- Tips and tricks for using HD Online Player effectively<br />
69
- FAQs about HD Online Player and its services<br />
70
- Privacy policy and terms of service of HD Online Player<br />
71
- How to cancel HD Online Player subscription and account<br />
72
- How to get a refund from HD Online Player if not satisfied<br />
73
- How to share feedback and suggestions with HD Online Player team<br />
74
- How to join HD Online Player community and forum <br />
75
- How to access HD Online Player premium content and offers <br />
76
- How to earn rewards and points with HD Online Player <br />
77
- How to redeem coupons and vouchers with HD Online Player</p>
78
- <p>Some examples of tamil dubbed 1080p movies are:</p>
79
- <table>
80
- <tr>
81
- <th>Title</th>
82
- <th>Original Language</th>
83
- <th>Genre</th>
84
- <th>Synopsis</th>
85
- </tr>
86
- <tr>
87
- <td>Housefull</td>
88
- <td>Hindi</td>
89
- <td>Comedy</td>
90
- <td>A man who believes he is cursed with bad luck tries to find true love with the help of his best friend.</td>
91
- </tr>
92
- <tr>
93
- <td>The Avengers</td>
94
- <td>English</td>
95
- <td>Action/Sci-Fi</td>
96
- <td>A team of superheroes must stop an alien invasion led by a rogue god.</td>
97
- </tr>
98
- <tr>
99
- <td>Baahubali</td>
100
- <td>Telugu</td>
101
- <td>Epic/Fantasy</td>
102
- <td>A young man learns about his royal heritage and sets out to reclaim his throne from an evil tyrant.</td>
103
- </tr>
104
- <tr>
105
- <td>The Lion King</td>
106
- <td>English</td>
107
- <td>Animation/Musical</td>
108
- <td>A lion cub runs away from his kingdom after his father's death and returns as an adult to challenge his uncle.</td>
109
- </tr>
110
- <tr>
111
- <td>Pirates of the Caribbean</td>
112
- <td>English</td>
113
- <td>Adventure/Fantasy</td>
114
- <td>A pirate captain and a blacksmith join forces to rescue a governor's daughter from a cursed crew of undead pirates.</td>
115
- </tr>
116
- </table>
117
- <h3>Popular genres and titles of tamil dubbed 1080p movies</h3>
118
- <p>Tamil dubbed 1080p movies cover a wide range of genres and themes, from comedy to action, from romance to horror, from drama to fantasy, and more. Some of the popular genres and titles of tamil dubbed 1080p movies are:</p>
119
- <ul>
120
- <li>Comedy: Housefull series, Golmaal series, Hangover series, Dhamaal series, etc.</li>
121
- <li>Action: The Avengers series, Fast and Furious series, Mission Impossible series, John Wick series, etc.</li>
122
- <li>Romance: Titanic, The Notebook, Aashiqui 2, The Fault in Our Stars, etc.</li>
123
- <li>Horror: The Conjuring series, Annabelle series, The Exorcist, The Ring, etc.</li>
124
- <li>Drama: The Godfather series, The Shawshank Redemption, Slumdog Millionaire, Dangal, etc.</li>
125
- <li>Fantasy: Harry Potter series, Lord of the Rings series, Baahubali series, Avatar, etc.</li>
126
- </ul>
127
- <h3>Advantages and disadvantages of tamil dubbed 1080p movies</h3>
128
- <p>Tamil dubbed 1080p movies have their own advantages and disadvantages. Some of them are:</p>
129
- <table style="width:100%">
130
- <tr style="text-align:left">
131
- <th style="width:50%">Advantages</th><th style="width:50%">Disadvantages</th></tr><tr style="text-align:left"><td><ul><li>You can enjoy movies from other languages and cultures in your own language.</li><li>You can watch movies in high quality and resolution without compromising on speed or performance.</li><li>You can access a large collection of movies from various genres and themes online for free or at low cost.</li></ul></td><td><ul><li>You may miss out on the original voice acting and expressions of the actors.</li><li>You may encounter poor dubbing quality or synchronization issues in some cases.</li><li>You may face legal or ethical issues if you watch pirated or unauthorized copies of movies online.</li></ul></td></tr></table>
132
- <h2>How to watch Housefull in HD online?</h2>
133
- <p>If you want to watch Housefull in HD online using HD Online Player, you need to follow these steps:</p>
134
- <h3>Introduction and synopsis of Housefull</h3>
135
- <p>Housefull is a 2010 Indian Hindi-language comedy film directed by Sajid Khan and starring Akshay Kumar, Riteish Deshmukh, Arjun Rampal, Deepika Padukone, Lara Dutta, and Jiah Khan. It is the first installment in the Housefull film series. ), a man who believes he is cursed with bad luck and tries to find true love with the help of his best friend Bob (Deshmukh). However, his attempts lead to hilarious complications and misunderstandings involving three women: Sandy (Padukone), Devika (Khan), and Hetal (Dutta). Meanwhile, Bob's brother-in-law Major Krishna Rao (Rampal) suspects that Aarush and Bob are having affairs with his wife Pooja (Malaika Arora Khan) and sister Hetal.</p>
136
- <p>Housefull is a fun-filled comedy that will make you laugh out loud with its witty dialogues, hilarious situations, and amazing performances. It is a perfect movie to watch with your friends or family.</p>
137
- <h3>Steps to watch Housefull in HD online using HD Online Player</h3>
138
- <p>To watch Housefull in HD online using HD Online Player, you need to follow these steps:</p>
139
- <ol>
140
- <li>Go to the website of HD Online Player and click on the "Video Player" option.</li>
141
- <li>Copy and paste the URL of the video source of Housefull in the player. You can find the URL from various online platforms that offer tamil dubbed 1080p movies, such as TamilRockers, Moviesda, Isaimini, etc. However, be careful of the legal and ethical issues involved in watching pirated or unauthorized copies of movies online.</li>
142
- <li>Click on the "Play" button and enjoy watching Housefull in HD online. You can also customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more.</li>
143
- <li>You can also share your video link with your friends or colleagues by clicking on the "Share" button. You can also embed your video on your website, blog, or social media platforms.</li>
144
- </ol>
145
- <h3>Tips and tricks to enhance your viewing experience</h3>
146
- <p>Here are some tips and tricks to enhance your viewing experience while watching Housefull in HD online using HD Online Player:</p>
147
- <ul>
148
- <li>Use a stable and fast internet connection to avoid buffering or lagging issues.</li>
149
- <li>Use headphones or speakers to enjoy the sound effects and music of the movie.</li>
150
- <li>Use subtitles if you are not familiar with Tamil language or if you want to improve your Tamil skills.</li>
151
- <li>Use the timestamped commenting feature to interact with your friends or colleagues while watching the movie. You can also use emojis and GIFs to express your reactions.</li>
152
- <li>Use the accessibility options if you need them, such as screen readers, voiceover software, closed captioning, etc.</li>
153
- </ul>
154
- <h2>Conclusion</h2>
155
- <p>In conclusion, HD Online Player is a free online video player that lets you watch videos online without downloading them or installing any software. It supports HTML5 and MP4 formats and offers high-quality streaming and ad-free viewing. You can also customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more. You can also share your videos with your friends or colleagues easily by sending them the link to your video. You can also embed your videos on your website, blog, or social media platforms.</p>
156
- <p>Tamil dubbed 1080p movies are movies that have been dubbed in Tamil language and have a display resolution width of approximately 1080 pixels. They are popular among Tamil speakers who want to enjoy movies from other languages and cultures in their own language. They cover a wide range of genres and themes, from comedy to action, from romance to horror, from drama to fantasy, and more. Some of the popular genres and titles of tamil dubbed 1080p movies are comedy (Housefull series), action (The Avengers series), romance (Titanic), horror (The Conjuring series), drama (The Godfather series), fantasy (Harry Potter series), etc.</p>
157
- <p>Housefull is a 2010 Indian Hindi-language comedy film directed by Sajid Khan and starring Akshay Kumar, Riteish Deshmukh, Arjun Rampal, Deepika Padukone, Lara Dutta, and Jiah Khan. It is the first installment in the Housefull film series. ), a man who believes he is cursed with bad luck and tries to find true love with the help of his best friend Bob (Deshmukh). However, his attempts lead to hilarious complications and misunderstandings involving three women: Sandy (Padukone), Devika (Khan), and Hetal (Dutta). Meanwhile, Bob's brother-in-law Major Krishna Rao (Rampal) suspects that Aarush and Bob are having affairs with his wife Pooja (Malaika Arora Khan) and sister Hetal.</p>
158
- <p>Housefull is a fun-filled comedy that will make you laugh out loud with its witty dialogues, hilarious situations, and amazing performances. It is a perfect movie to watch with your friends or family. You can watch Housefull in HD online using HD Online Player by following the steps mentioned above. You can also use the tips and tricks to enhance your viewing experience.</p>
159
- <p>We hope you enjoyed this article and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy watching!</p>
160
- <h3>FAQs</h3>
161
- <p>Here are some frequently asked questions about HD Online Player, tamil dubbed 1080p movies, and Housefull:</p>
162
- <ol>
163
- <li>What are the advantages of using HD Online Player over other online video players?</li>
164
- <p>Some of the advantages of using HD Online Player over other online video players are:</p>
165
- <ul>
166
- <li>It supports 4K and HD resolution, as well as adaptive streaming for different connection speeds.</li>
167
- <li>It has a simple and intuitive interface that lets you customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more.</li>
168
- <li>It has a timestamped commenting feature that lets you interact with your friends or colleagues while watching videos.</li>
169
- <li>It has a privacy setting that lets you choose who can view your videos. You can make them public or private, or password-protect them.</li>
170
- <li>It meets WCAG 2.0 AA standards for accessibility, with support for screen readers, voiceover software, closed captioning, and other accessibility options.</li>
171
- <li>It allows you to watch videos online without downloading them or installing any software. This saves you time, space, and bandwidth.</li>
172
- <li>It allows you to watch videos without any ads or distractions. This enhances your viewing experience and keeps you focused on the content.</li>
173
- <li>It allows you to share your videos with your friends or colleagues easily by sending them the link to your video. You can also embed your videos on your website, blog, or social media platforms.</li>
174
- </ul>
175
- <li>What are the disadvantages of watching tamil dubbed 1080p movies online?</li>
176
- <p>Some of the disadvantages of watching tamil dubbed 1080p movies online are:</p>
177
- <ul>
178
- <li>You may miss out on the original voice acting and expressions of the actors.</li>
179
- <li>You may encounter poor dubbing quality or synchronization issues in some cases.</li>
180
- <li>You may face legal or ethical issues if you watch pirated or unauthorized copies of movies online.</li>
181
- </ul>
182
- <li>What are some of the popular genres and titles of tamil dubbed 1080p movies?</li>
183
- <p>Some of the popular genres and titles of tamil dubbed 1080p movies are:</p>
184
- <ul>
185
- <li>Comedy: Housefull series, Golmaal series, Hangover series, Dhamaal series, etc.</li>
186
- <li>Action: The Avengers series, Fast and Furious series, Mission Impossible series, John Wick series, etc.</li>
187
- <li>Romance: Titanic, The Notebook, Aashiqui 2, The Fault in Our Stars, etc.</li>
188
- <li>Horror: The Conjuring series, Annabelle series, The Exorcist, The Ring, etc.</li>
189
- <li>Drama: The Godfather series, The Shawshank Redemption, Slumdog Millionaire, Dangal, etc.</li>
190
- <li>Fantasy: Harry Potter series, Lord of the Rings series, Baahubali series, Avatar, etc.</li>
191
- </ul>
192
- <li>What is the plot of Housefull?</li>
193
- <p>The plot of Housefull is:</p>
194
- ), his attempts lead to hilarious complications and misunderstandings involving three women: Sandy (Padukone), Devika (Khan), and Hetal (Dutta). Meanwhile, Bob's brother-in-law Major Krishna Rao (Rampal) suspects that Aarush and Bob are having affairs with his wife Pooja (Malaika Arora Khan) and sister Hetal.</p>
195
- <li>How can I watch Housefull in HD online using HD Online Player?</li>
196
- <p>To watch Housefull in HD online using HD Online Player, you need to follow these steps:</p>
197
- <ol>
198
- <li>Go to the website of HD Online Player and click on the "Video Player" option.</li>
199
- <li>Copy and paste the URL of the video source of Housefull in the player. You can find the URL from various online platforms that offer tamil dubbed 1080p movies, such as TamilRockers, Moviesda, Isaimini, etc. However, be careful of the legal and ethical issues involved in watching pirated or unauthorized copies of movies online.</li>
200
- <li>Click on the "Play" button and enjoy watching Housefull in HD online. You can also customize your video player with your colors, logo, thumbnail, playbar, speed controls, chaptering, and more.</li>
201
- <li>You can also share your video link with your friends or colleagues by clicking on the "Share" button. You can also embed your video on your website, blog, or social media platforms.</li>
202
- </ol>
203
- </ol>
204
- </p> 0a6ba089eb<br />
205
- <br />
206
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/American Marksman MOD APK The ultimate simulation game with unlimited money gold wood metal and more.md DELETED
@@ -1,108 +0,0 @@
1
- <br />
2
- <h1>American Marksman MOD APK: A Shooting Game with Unlimited Money</h1>
3
- <h2>Introduction</h2>
4
- <p>Do you love shooting games? Do you want to test your skills as a marksman and complete challenging missions? If yes, then you should try American Marksman, a simulation game that lets you experience the thrill of being a sniper. But wait, there's more! You can also enjoy the game with unlimited money, gold, wood, and metal by downloading the American Marksman MOD APK. In this article, we will tell you everything you need to know about this amazing mod, including its features, how to download and install it, and some FAQs.</p>
5
- <h2>american marksman unlimited money apk download</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://urlin.us/2uST67">https://urlin.us/2uST67</a></b></p><br /><br />
6
- <h3>What is American Marksman?</h3>
7
- <p>American Marksman is a simulation game developed by Game Pickle. It is available for Android devices and has more than 1 million downloads on Google Play Store. The game puts you in the role of a sniper who has to complete various missions, such as assassinating targets, protecting allies, or destroying enemy bases. You can choose from a wide range of weapons, such as rifles, pistols, shotguns, or grenades. You can also upgrade your weapons and equipment to improve your performance and accuracy. The game has realistic graphics and sound effects that make you feel like you are in a real battlefield.</p>
8
- <h3>Why download American Marksman MOD APK?</h3>
9
- <p>While American Marksman is a fun and addictive game, it also has some drawbacks. For example, you need to spend real money to buy more gold, wood, or metal, which are essential resources for upgrading your weapons and equipment. You also have to watch ads to get extra rewards or bonuses. These can be annoying and frustrating for some players who just want to enjoy the game without any interruptions or limitations. That's why downloading the American Marksman MOD APK is a great idea. This mod gives you unlimited money, gold, wood, and metal, so you can buy anything you want without spending a dime. It also removes all the ads from the game, so you can play without any distractions or delays.</p>
10
- <h2>Features of American Marksman MOD APK</h2>
11
- <h3>Unlimited money, gold, wood, and metal</h3>
12
- <p>The most obvious feature of the American Marksman MOD APK is that it gives you unlimited money, gold, wood, and metal. These are the main currencies in the game that you need to upgrade your weapons and equipment. With unlimited resources, you can buy any weapon or item you want without worrying about running out of money. You can also upgrade your weapons and equipment to the maximum level and enjoy their full potential. This will make your missions easier and more fun.</p>
13
- <h3>No ads</h3>
14
- <p>Another feature of the American Marksman MOD APK is that it removes all the ads from the game. Ads are usually displayed after completing a mission or when you want to get extra rewards or bonuses. They can be annoying and distracting for some players who just want to play the game without any interruptions or delays. By downloading the modded version of the game, you can get rid of all the ads and enjoy a smooth and uninterrupted gaming experience.</p>
15
- <h3>Realistic graphics and sound effects</h3>
16
- <p>The American Marksman MOD APK also preserves the original quality of the game's graphics and sound effects. The game has realistic 3D graphics that create a immersive atmosphere for the players. The game also has realistic sound effects that enhance the gameplay and make you feel like you are in a real battlefield. You can hear the sound of gunshots, explosions, wind, or birds as you play the game. The modded version of the game does not compromise on these aspects and delivers a high-quality gaming experience.</p> <h3>Various weapons and missions</h3>
17
- <p>The American Marksman MOD APK also offers a variety of weapons and missions for the players. You can choose from different types of weapons, such as rifles, pistols, shotguns, or grenades. Each weapon has its own advantages and disadvantages, so you need to choose wisely depending on the mission and the target. You can also customize your weapons with different scopes, silencers, or magazines. The game has more than 100 missions that test your skills as a marksman. You have to complete different objectives, such as assassinating targets, protecting allies, or destroying enemy bases. The missions are challenging and diverse, so you will never get bored of playing the game.</p>
18
- <p>american marksman mod apk free download<br />
19
- american marksman hack apk unlimited resources<br />
20
- american marksman simulation game mod apk<br />
21
- american marksman latest version mod apk<br />
22
- american marksman cheats apk unlimited money<br />
23
- american marksman premium apk mod unlocked<br />
24
- american marksman shooting game mod apk<br />
25
- american marksman full apk mod unlimited gold<br />
26
- american marksman cracked apk mod unlimited wood<br />
27
- american marksman pro apk mod unlimited metal<br />
28
- american marksman android game mod apk<br />
29
- american marksman offline mod apk unlimited money<br />
30
- american marksman 2023 mod apk free download<br />
31
- american marksman v1.0.7 mod apk unlimited resources<br />
32
- american marksman no ads mod apk unlimited money<br />
33
- american marksman 3d game mod apk unlimited gold<br />
34
- american marksman sniper game mod apk unlimited wood<br />
35
- american marksman realistic game mod apk unlimited metal<br />
36
- american marksman best game mod apk unlimited money<br />
37
- american marksman new game mod apk free download<br />
38
- american marksman online mod apk unlimited resources<br />
39
- american marksman multiplayer mod apk unlimited money<br />
40
- american marksman pvp game mod apk unlimited gold<br />
41
- american marksman survival game mod apk unlimited wood<br />
42
- american marksman adventure game mod apk unlimited metal<br />
43
- american marksman action game mod apk unlimited money<br />
44
- american marksman strategy game mod apk free download<br />
45
- american marksman fun game mod apk unlimited resources<br />
46
- american marksman addictive game mod apk unlimited money<br />
47
- american marksman challenging game mod apk unlimited gold<br />
48
- american marksman easy game mod apk unlimited wood<br />
49
- american marksman hard game mod apk unlimited metal<br />
50
- american marksman amazing game mod apk unlimited money<br />
51
- american marksman awesome game mod apk free download<br />
52
- american marksman cool game mod apk unlimited resources<br />
53
- american marksman epic game mod apk unlimited money<br />
54
- american marksman fantastic game mod apk unlimited gold<br />
55
- american marksman incredible game mod apk unlimited wood<br />
56
- american marksman superb game mod apk unlimited metal<br />
57
- american marksman wonderful game mod apk unlimited money<br />
58
- download american marksman mod apk for free<br />
59
- download american marksman hack apk with unlimited resources<br />
60
- download american marksman simulation game with mod apk <br />
61
- download american marksman latest version with mod apk <br />
62
- download american marksman cheats apk with unlimited money <br />
63
- download american marksman premium apk with mod unlocked <br />
64
- download american marksman shooting game with mod apk <br />
65
- download american marksman full apk with mod unlimited gold <br />
66
- download american marksman cracked apk with mod unlimited wood</p>
67
- <h3>Easy controls and gameplay</h3>
68
- <p>The American Marksman MOD APK also has easy controls and gameplay that make the game suitable for anyone. The game has a simple user interface that shows you all the information you need, such as your health, ammo, or mission details. The game also has a tutorial that guides you through the basics of the game. The game has easy controls that let you aim, shoot, zoom, or reload with just a few taps on the screen. The game also has an auto-fire option that lets you shoot automatically when you aim at a target. The game has a smooth and fast gameplay that lets you enjoy the game without any lags or glitches.</p>
69
- <h2>How to download and install American Marksman MOD APK?</h2>
70
- <p>If you are interested in downloading and installing the American Marksman MOD APK, you can follow these simple steps:</p>
71
- <h3>Step 1: Download the APK file from a trusted source</h3>
72
- <p>The first step is to download the APK file of the American Marksman MOD APK from a trusted source. You can use the link below to download the file directly to your device. The file size is about 100 MB, so make sure you have enough space on your device before downloading it.</p>
73
- <p><a href="">Download American Marksman MOD APK</a></p>
74
- <h3>Step 2: Enable unknown sources on your device</h3>
75
- <p>The next step is to enable unknown sources on your device. This is necessary because the APK file is not from the official Google Play Store, so you need to allow your device to install apps from other sources. To do this, go to your device settings and look for the security or privacy option. Then, find the unknown sources option and enable it. This will allow you to install the APK file without any problems.</p>
76
- <h3>Step 3: Install the APK file and launch the game</h3>
77
- <p>The final step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device and tap on it. Then, follow the instructions on the screen to install the app. Once the installation is done, you can launch the game and enjoy it with unlimited money, gold, wood, and metal.</p>
78
- <h2>Conclusion</h2>
79
- <p>American Marksman is a simulation game that lets you experience the thrill of being a sniper. You can complete various missions, such as assassinating targets, protecting allies, or destroying enemy bases. You can also choose from a wide range of weapons, such as rifles, pistols, shotguns, or grenades. You can also upgrade your weapons and equipment to improve your performance and accuracy. The game has realistic graphics and sound effects that make you feel like you are in a real battlefield.</p>
80
- <p>If you want to enjoy the game with unlimited money, gold, wood, and metal, you should download the American Marksman MOD APK. This mod gives you unlimited resources that let you buy anything you want without spending a dime. It also removes all the ads from the game, so you can play without any distractions or delays.</p>
81
- <p>To download and install the American Marksman MOD APK, you just need to follow these simple steps:</p>
82
- <ul>
83
- <li>Download the APK file from a trusted source</li>
84
- <li>Enable unknown sources on your device</li>
85
- <li>Install the APK file and launch the game</li>
86
- </ul>
87
- <p>That's it! You can now enjoy the game with unlimited money, gold, wood, and metal.</p>
88
- <h4>FAQs</h4>
89
- <p>Here are some frequently asked questions about the American Marksman MOD APK:</p>
90
- <ol>
91
- <li>Is American Marksman MOD APK safe to download and install?</li>
92
- <p>Yes, American Marksman MOD APK is safe to download and install. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from a trusted source and scan it with an antivirus before installing it.</p>
93
- <li>Do I need to root my device to use American Marksman MOD APK?</li>
94
- <p>No, you do not need to <p>root your device to use American Marksman MOD APK. It works on both rooted and non-rooted devices. However, some features may require root access to work properly.</p>
95
- <li>What are the minimum requirements to play American Marksman MOD APK?</li>
96
- <p>The minimum requirements to play American Marksman MOD APK are:</p>
97
- <ul>
98
- <li>Android 4.4 or higher</li>
99
- <li>At least 1 GB of RAM</li>
100
- <li>At least 200 MB of free storage space</li>
101
- </ul>
102
- <li>Can I play American Marksman MOD APK online with other players?</li>
103
- <p>No, American Marksman MOD APK is not an online game. It is a single-player game that does not require an internet connection to play. You can play it offline anytime and anywhere you want.</p>
104
- <li>Can I update American Marksman MOD APK to the latest version?</li>
105
- <p>Yes, you can update American Marksman MOD APK to the latest version. However, you need to download and install the new version manually from the same source you downloaded the previous version. You cannot update it from the Google Play Store or any other app store.</p>
106
- </ol></p> 197e85843d<br />
107
- <br />
108
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Den Kelime Oyunu APK - cretsiz nternetsiz ve Yeni Tarz Kelime Oyunu.md DELETED
@@ -1,132 +0,0 @@
1
- <br />
2
- <br>- Earn coins and unlock new levels<br>- Learn new words and improve your vocabulary<br>- Enjoy the sound and visual effects | Explain how the game works, what are the objectives, and what are the challenges. | | H3: Download and Install | - Go to APKCombo or Google Play Store<br>- Choose the latest version of the game<br>- Allow unknown sources if needed<br>- Follow the instructions and launch the game | Provide a step-by-step guide on how to get the game on your device, with links and screenshots. | | H3: Hints and Bonuses | - Use coins to reveal letters or words<br>- Watch ads to get more coins or hints<br>- Complete daily tasks and achievements<br>- Check out the word treasury and titles | Give some tips on how to use the in-game resources wisely, and how to earn more rewards. | | H2: Why You Should Play Düşen Kelime Oyunu APK | | Body: Highlight the advantages of playing this game, such as brain exercise, relaxation, education, and entertainment. | | H3: Brain Exercise | - Stimulate your cognitive skills<br>- Enhance your memory and concentration<br>- Prevent Alzheimer's disease<br>- Challenge yourself with different levels of difficulty | Explain how playing word puzzles can benefit your mental health and performance, with scientific evidence. | | H3: Relaxation | - Reduce stress and anxiety<br>- Improve your mood and well-being<br>- Have fun and enjoy yourself<br>- Play offline and at your own pace | Explain how playing word puzzles can help you relax and unwind, with personal examples. | | H3: Education | - Learn new Turkish words and meanings<br>- Expand your vocabulary and knowledge<br>- Improve your spelling and grammar<br>- Discover new facts and trivia | Explain how playing word puzzles can enrich your language skills and general culture, with examples from the game. | | H3: Entertainment | - Experience a new style of word puzzle game<br>- Explore different themes and categories<br>- Compete with other players online<br>- Share your progress and achievements with friends | Explain how playing word puzzles can keep you entertained and engaged, with features from the game. | | H2: Conclusion | | Conclusion: Summarize the main points of the article, restate the benefits of playing düşen kelime oyunu apk, and end with a call to action. | Table 2: Article with HTML formatting <h1>Düşen Kelime Oyunu APK: A Fun and Relaxing Word Puzzle Game</h1>
3
- <p>Do you love word games? Do you want to improve your Turkish vocabulary while having fun? Do you need a break from your busy life? If you answered yes to any of these questions, then you should try düşen kelime oyunu apk.</p>
4
- <p>Düşen kelime oyunu apk is a new style of Turkish word puzzle game that has gained over 3 million downloads in a short time. It is a free and offline game that lets you find hidden words and clear the letter boxes. As you play, you will earn coins, unlock new levels, learn new words, and enjoy the sound and visual effects.</p>
5
- <h2>düşen kelime oyunu apk</h2><br /><p><b><b>DOWNLOAD</b> &#187; <a href="https://urlin.us/2uSX7p">https://urlin.us/2uSX7p</a></b></p><br /><br />
6
- <p>In this article, we will show you how to play düşen kelime oyunu apk, why you should play it, and what are the benefits of playing it. By the end of this article, you will be ready to download this amazing game and start your word adventure.</p>
7
- <h2>How to Play Düşen Kelime Oyunu APK</h2>
8
- <p>Playing düşen kelime oyunu apk is easy and fun. All you need is a smartphone or tablet with Android operating system. Here are the steps to follow:</p>
9
- <p>Düşen Kelime Oyunu indir<br />
10
- Düşen Kelime Oyunu ücretsiz<br />
11
- Düşen Kelime Oyunu internetsiz<br />
12
- Düşen Kelime Oyunu hileli apk<br />
13
- Düşen Kelime Oyunu mod apk<br />
14
- Düşen Kelime Oyunu son sürüm apk<br />
15
- Düşen Kelime Oyunu güncel apk<br />
16
- Düşen Kelime Oyunu android oyun club<br />
17
- Düşen Kelime Oyunu apk dayı<br />
18
- Düşen Kelime Oyunu apk indir cepde<br />
19
- Düşen Kelime Oyunu apk indir android oyun club<br />
20
- Düşen Kelime Oyunu apk indir hileli<br />
21
- Düşen Kelime Oyunu apk indir mod<br />
22
- Düşen Kelime Oyunu apk indir son sürüm<br />
23
- Düşen Kelime Oyunu apk indir güncel<br />
24
- Düşen Kelime Oyunu apk indir ücretsiz<br />
25
- Düşen Kelime Oyunu apk indir internetsiz<br />
26
- Düşen Kelime Oyunu nasıl oynanır<br />
27
- Düşen Kelime Oyunu ipucu nasıl alınır<br />
28
- Düşen Kelime Oyunu kelime hazinesi nedir<br />
29
- Düşen Kelime Oyunu unvanlar nelerdir<br />
30
- Düşen Kelime Oyunu yorumlar<br />
31
- Düşen Kelime Oyunu puanlama sistemi<br />
32
- Düşen Kelime Oyunu bölüm sayısı<br />
33
- Düşen Kelime Oyunu yeni bölüm ne zaman gelecek<br />
34
- Düşen Kelime Oyunu en zor bölüm hangisi<br />
35
- Düşen Kelime Oyunu en kolay bölüm hangisi<br />
36
- Düşen Kelime Oyunu en çok sevilen bölüm hangisi<br />
37
- Düşen Kelime Oyunu en çok bilinen kelimeler nelerdir<br />
38
- Düşen Kelime Oyunu en çok öğrenilen kelimeler nelerdir<br />
39
- Düşen Kelime Oyunu beyin geliştirir mi<br />
40
- Düşen Kelime Oyunu zeka geliştirir mi<br />
41
- Dü��en Kelime Oyunu hafıza güçlendirir mi<br />
42
- Düşen Kelime Oyunu alzheimer önler mi<br />
43
- Düşen Kelime Oyunu rahatlatır mı<br />
44
- Düşen Kelime Oyunu eğlenceli mi<br />
45
- Düşen Kelime Oyunu bağımlılık yapar mı<br />
46
- Düşen! kelime oyununun farkı nedir<br />
47
- düsen kelimenin farkı nedir (without Turkish characters)<br />
48
- dus kelimenin farkı nedir (without Turkish characters and punctuation)<br />
49
- düsen kelimenin avantajları nelerdir (without Turkish characters)<br />
50
- dus kelimenin avantajları nelerdir (without Turkish characters and punctuation)<br />
51
- düsen kelimenin dezavantajları nelerdir (without Turkish characters)<br />
52
- dus kelimenin dezavantajları nelerdir (without Turkish characters and punctuation)<br />
53
- düsen kelimenin alternatifleri nelerdir (without Turkish characters)<br />
54
- dus kelimenin alternatifleri nelerdir (without Turkish characters and punctuation)<br />
55
- düsen kelimenin rakipleri kimlerdir (without Turkish characters)<br />
56
- dus kelimenin rakipleri kimlerdir (without Turkish characters and punctuation)<br />
57
- düsen kelimenin geliştiricisi kimdir (without Turkish characters)</p>
58
- <h3>Rules and Features</h3>
59
- <ul>
60
- <li>Find hidden words by swiping your finger over the letters on the screen.</li>
61
- <li>When you find a word, it will disappear from the board and the letter boxes will fall down to create new words <li>The game has hundreds of levels with different themes and categories, such as animals, fruits, sports, countries, etc.</li>
62
- <li>You can earn coins by finding words, completing levels, and watching ads. You can use coins to reveal letters or words when you are stuck.</li>
63
- <li>You can also learn new words and their meanings by tapping on them in the word treasury. You can also earn titles by finding special words.</li>
64
- <li>The game has sound and visual effects that make it more enjoyable and relaxing. You can also turn them off if you prefer.</li>
65
- </ul>
66
- <h3>Download and Install</h3>
67
- <ul>
68
- <li>Go to <a href="">APKCombo</a> or <a href="">Google Play Store</a> and search for düşen kelime oyunu apk.</li>
69
- <li>Choose the latest version of the game and tap on the download button.</li>
70
- <li>If you are downloading from APKCombo, you may need to allow unknown sources in your device settings.</li>
71
- <li>Follow the instructions on the screen and wait for the installation to finish.</li>
72
- <li>Launch the game and start playing.</li>
73
- </ul>
74
- <p>Here are some screenshots of the game:</p>
75
- <img src="" alt="Screenshot 1" width="300" height="500">
76
- <img src="" alt="Screenshot 2" width="300" height="500">
77
- <img src="" alt="Screenshot 3" width="300" height="500">
78
- <h3>Hints and Bonuses</h3>
79
- <ul>
80
- <li>If you are having trouble finding a word, you can use coins to reveal a letter or a word. You can also watch an ad to get a free hint.</li>
81
- <li>You can earn more coins by watching ads, completing daily tasks and achievements, and finding bonus words.</li>
82
- <li>You can also check out the word treasury and see all the words you have found so far. You can tap on any word to see its meaning and pronunciation.</li>
83
- <li>You can also earn titles by finding special words, such as names of cities, countries, animals, etc. You can see your titles in the profile section.</li>
84
- </ul>
85
- <h2>Why You Should Play Düşen Kelime Oyunu APK</h2>
86
- <p>Düşen kelime oyunu apk is not only a fun and relaxing game, but also a beneficial one. Playing this game can help you improve your brain health, mood, language skills, and general knowledge. Here are some of the reasons why you should play this game:</p>
87
- <h3>Brain Exercise</h3>
88
- <ul>
89
- <li>Playing word puzzles can stimulate your cognitive skills, such as attention, memory, logic, and problem-solving.</li>
90
- <li>Playing word puzzles can also enhance your concentration and focus, as you have to scan the board and find the words quickly.</li>
91
- <li>Playing word puzzles can also prevent Alzheimer's disease and dementia, as it keeps your brain active and reduces the risk of cognitive decline.</li>
92
- <li>Playing word puzzles can also challenge yourself with different levels of difficulty, from easy to hard. You can also compare your scores with other players online and see how you rank.</li>
93
- </ul>
94
- <h3>Relaxation</h3>
95
- <ul>
96
- <li>Playing word puzzles can reduce stress and anxiety, as it distracts you from your worries and calms your mind.</li>
97
- <li>Playing word puzzles can also improve your mood and well-being, as it gives you a sense of achievement and satisfaction when you find a word or complete a level.</li>
98
- <li>Playing word puzzles can also have fun and enjoy yourself, as it entertains you with its sound and visual effects. You can also play offline and at your own pace.</li>
99
- <li>Playing word puzzles can also be a great way to spend some quality time with yourself or with your friends. You can play alone or with others online or offline.</li>
100
- </ul>
101
- <h3>Education</h3>
102
- <ul>
103
- <li>Playing word puzzles can help you learn new Turkish words and meanings, as it exposes you to a variety of words from different themes and categories.</li>
104
- <li>Playing word puzzles can also expand your vocabulary and knowledge, as it teaches you new synonyms, antonyms, idioms, proverbs, etc.</li>
105
- <li>Playing word puzzles can also improve your spelling and grammar, as it makes you pay attention to the correct order and form of the letters.</li>
106
- <li>Playing word puzzles can also help you discover new facts and trivia, as it introduces you to interesting information about various topics.</li>
107
- </ul>
108
- <h3>Entertainment</h3>
109
- <ul>
- <li>Playing word puzzles can help you experience a new style of word puzzle game, as it combines the elements of crossword, word search, and word connect games.</li>
110
- <li>Playing word puzzles can also help you explore different themes and categories, as it offers you a variety of topics to choose from, such as animals, fruits, sports, countries, etc.</li>
111
- <li>Playing word puzzles can also help you compete with other players online, as it allows you to join the global leaderboard and see how you rank among other players.</li>
112
- <li>Playing word puzzles can also help you share your progress and achievements with friends, as it enables you to connect with Facebook and invite your friends to play with you.</li>
113
- </ul>
114
- <h2>Conclusion</h2>
115
- <p>Düşen kelime oyunu apk is a fun and relaxing word puzzle game that can benefit your brain health, mood, language skills, and general knowledge. It is easy and fun to play, and it offers you hundreds of levels with different themes and categories. You can also earn coins, hints, bonuses, titles, and achievements as you play. You can also compete with other players online and share your progress with friends. You can download this game for free from APKCombo or Google Play Store and start your word adventure today.</p>
116
- <p>If you are looking for a new and exciting way to improve your Turkish vocabulary while having fun, then düşen kelime oyunu apk is the game for you. Download it now and see for yourself why millions of people love this game.</p>
117
- <p>Are you ready to play düşen kelime oyunu apk? Here are some FAQs that might help you:</p>
118
- <h4>FAQs</h4>
119
- <ol>
120
- <li>What is the meaning of düşen kelime oyunu?</li>
121
- <p>Düşen kelime oyunu means falling word game in Turkish. It is a word puzzle game that involves finding hidden words and clearing the letter boxes.</p>
122
- <li>How many levels are there in düşen kelime oyunu apk?</li>
123
- <p>There are over 500 levels in düşen kelime oyunu apk, each with a different theme and category. You can unlock new levels by earning coins or watching ads.</p>
124
- <li>How can I get more coins in düşen kelime oyunu apk?</li>
125
- <p>You can get more coins by finding words, completing levels, watching ads, completing daily tasks and achievements, and finding bonus words. You can use coins to reveal letters or words when you are stuck.</p>
126
- <li>How can I learn new words in düşen kelime oyunu apk?</li>
127
- <p>You can learn new words by tapping on them in the word treasury. You will see their meaning and pronunciation. You can also earn titles by finding special words.</p>
128
- <li>How can I play with other players in düşen kelime oyunu apk?</li>
129
- <p>You can play with other players online by joining the global leaderboard. You will see your rank and score among other players. You can also connect with Facebook and invite your friends to play with you.</p>
130
- </ol>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Blockman GO-Adventures Mod APK Hack Your Way to Adventure on Apkmody.md DELETED
@@ -1,90 +0,0 @@
1
-
2
- <h1>How to Hack Blockman Go Adventures with APKMODY</h1>
3
- <p>Blockman Go Adventures is a popular sandbox game that offers a variety of gameplay options for players to enjoy. However, some players may find it hard to unlock all the mini-games, modes, accessories and resources in the game. That's why some players resort to hacking Blockman Go Adventures with APKMODY, a website that provides modded APK files for Android games and apps. In this article, we will show you how to hack Blockman Go Adventures with APKMODY and what are the benefits of doing so.</p>
4
- <h2>What is Blockman Go Adventures?</h2>
5
- <p>Blockman Go Adventures is a free app that includes minigames, chatting and making friends. You can play various block style minigames here, such as Bed Wars, Sky Block, Egg War, Murder Mystery, Sky Wars and more. Each minigame has its own rules, objectives and rewards. You can also create your own minigames and share them with other players.</p>
6
- <h2>hack blockman go adventures apkmody</h2><br /><p><b><b>DOWNLOAD</b> &#9913; <a href="https://jinyurl.com/2uNLRo">https://jinyurl.com/2uNLRo</a></b></p><br /><br />
7
- <h3>A sandbox game with various mini-games and modes</h3>
8
- <p>Blockman Go Adventures is a sandbox game that lets you play, craft and share your fun experiences with your friends. You can explore different worlds, build structures, fight enemies, collect resources and complete quests. You can also join the adventures and venture into the countless minigames from all the different genres. There is always something new and exciting for you to discover every day.</p>
9
- <h3>A social platform with chat and friends features</h3>
10
- <p>Blockman Go Adventures is also a social platform that allows you to chat and make friends with other players. You can join or create parties, clans and guilds. You can also send messages, voice chats, gifts and emojis. You can customize your avatar with creative selections of fashionable accessories. With a growing inventory of items, the sky's the only limit.</p>
11
- <h2>What is APKMODY?</h2>
12
- <p>APKMODY is a website that provides modded APK files for Android games and apps. At APKMODY, you can easily search and download thousands of MOD APK, Premium APK and Original APK games and apps for free. You can use the search button to find what you're looking for, or browse the pre-designed categories.</p>
13
- <h3>A website that provides modded APK files for Android games and apps</h3>
14
- <p>A modded APK file is an altered version of an original APK file that has been modified by someone to add or remove some features or functions. For example, a modded APK file may have unlimited resources, unlocked levels, removed ads or added cheats. A modded APK file may also have a different name or icon than the original one.</p>
15
- <h3>A source of unlimited resources, features and fun for Blockman Go Adventures</h3>
16
- <p>APKMODY provides a modded APK file for Blockman Go Adventures that has many advantages over the original one. The modded APK file has a mod menu that lets you enable or disable various hacks in the game. The hacks include a fly hack, unlimited Gcubes and money. With these hacks, you can enjoy Blockman Go Adventures without any limitations or restrictions. You can also have more fun and creativity with the modded APK file.</p>
17
- <p>Blockman Go Adventures Mod Menu Fly Hack<br />
18
- Blockman Go Mod Apk Unlimited Gcubes and Money<br />
19
- Blockman Go Adventures Hack Download Media Fire<br />
20
- How to Hack Blockman Go Adventures v2.46.1<br />
21
- Blockman Go Mod Apk v2.45.2 Link in Comment<br />
22
- Blockman Go Adventures Fly Hack No Root<br />
23
- Blockman Go Mod Menu Apk Latest Version<br />
24
- Blockman Go Adventures Unlimited Gcubes Mod<br />
25
- Blockman Go Hack Apkmody Free Download<br />
26
- Blockman Go Adventures Mod Apk v2.46.1 YouTube<br />
27
- Blockman Go Mod Apk v2.45.2 YouTube Video<br />
28
- Blockman Go Adventures Hack Tutorial 2023<br />
29
- Blockman Go Mod Menu Apk Media Fire Link<br />
30
- Blockman Go Adventures Unlimited Money Hack<br />
31
- Blockman Go Hack Apkmody No Survey No Password<br />
32
- Blockman Go Adventures Mod Apk v2.46.1 Features<br />
33
- Blockman Go Mod Apk v2.45.2 Download Now<br />
34
- Blockman Go Adventures Fly Hack Tutorial<br />
35
- Blockman Go Mod Menu Apk How to Install<br />
36
- Blockman Go Adventures Unlimited Gcubes Generator<br />
37
- Blockman Go Hack Apkmody Online Tool<br />
38
- Blockman Go Adventures Mod Apk v2.46.1 Review<br />
39
- Blockman Go Mod Apk v2.45.2 Gameplay Video<br />
40
- Blockman Go Adventures Fly Hack APK Download<br />
41
- Blockman Go Mod Menu Apk No Ban No Root<br />
42
- Blockman Go Adventures Unlimited Money Mod APK<br />
43
- Blockman Go Hack Apkmody 100% Working 2023<br />
44
- Blockman Go Adventures Mod Apk v2.46.1 Update<br />
45
- Blockman Go Mod Apk v2.45.2 New Features<br />
46
- Blockman Go Adventures Fly Hack iOS Android<br />
47
- Blockman Go Mod Menu Apk All Skins Unlocked<br />
48
- Blockman Go Adventures Unlimited Gcubes Cheat<br />
49
- Blockman Go Hack Apkmody Safe and Secure<br />
50
- Blockman Go Adventures Mod Apk v2.46.1 Download Link<br />
51
- Blockman Go Mod Apk v2.45.2 Free Download Link<br />
52
- Blockman Go Adventures Fly Hack No Verification<br />
53
- Blockman Go Mod Menu Apk Easy to Use<br />
54
- Blockman Go Adventures Unlimited Money Cheat Code<br />
55
- Blockman Go Hack Apkmody Latest Version 2023<br />
56
- Blockman Go Adventures Mod Apk v2.46.1 Media Fire Link</p>
57
- <h2>How to hack Blockman Go Adventures with APKMODY?</h2>
58
- <p>Hacking Blockman Go Adventures with APKMODY is very easy and simple. You just need to follow these steps:</p>
59
- <h3>Download the modded APK file from APKMODY website</h3>
60
- <p>First, you need to visit the APKMODY website and search for Blockman Go Adventures. You will see the modded APK file for the game with a download button. Click on the download button and wait for the file to be downloaded to your device. The file size is about 140 MB, so make sure you have enough storage space and a stable internet connection.</p>
61
- <h3>Install the modded APK file on your Android device</h3>
62
- <p>Next, you need to install the modded APK file on your Android device. Before you do that, you need to enable the installation of apps from unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store. To do that, go to Settings > Security > Unknown Sources and toggle it on. Then, locate the modded APK file in your device storage and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.</p>
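- <p>Before you tap the downloaded file, it is worth checking that it was not corrupted or swapped along the way. The sketch below is a minimal Python example, assuming the source you downloaded from publishes a SHA-256 checksum for the file (APKMODY may not do this); the file name and checksum are placeholders.</p>
- ```python
- # Minimal sketch: compare a downloaded APK's SHA-256 hash with the checksum
- # published by the source you trust. File name and checksum are placeholders.
- import hashlib
-
- def sha256_of(path, chunk_size=1 << 20):
-     digest = hashlib.sha256()
-     with open(path, "rb") as f:
-         while chunk := f.read(chunk_size):
-             digest.update(chunk)
-     return digest.hexdigest()
-
- apk_path = "blockman-go-adventures-mod.apk"          # placeholder file name
- expected = "put-the-published-sha256-checksum-here"  # placeholder value
-
- if sha256_of(apk_path) == expected:
-     print("Checksum matches the published value.")
- else:
-     print("Checksum mismatch - do not install this file.")
- ```
- <p>If the two values differ, delete the file and download it again instead of installing it.</p>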
63
- <h3>Enjoy the hacked Blockman Go Adventures with mod menu, fly hack, unlimited Gcubes and money</h3>
64
- <p>Finally, you can enjoy the hacked Blockman Go Adventures with all the features and hacks that you want. To access the mod menu, you need to tap on the floating icon on the screen. The mod menu will show you all the hacks that you can enable or disable in the game. You can use the fly hack to fly around the map, the unlimited Gcubes and money hack to buy anything you want in the game, and other hacks that will make your gameplay more fun and easy.</p>
65
- <h2>What are the benefits of hacking Blockman Go Adventures with APKMODY?</h2>
66
- <p>Hacking Blockman Go Adventures with APKMODY has many benefits that will enhance your gaming experience. Here are some of them:</p>
67
- <h3>You can access all the mini-games and modes without restrictions</h3>
68
- <p>Some of the mini-games and modes in Blockman Go Adventures require you to have a certain amount of Gcubes or money to play them. For example, you need 100 Gcubes to play Bed Wars, 50 Gcubes to play Sky Wars, and 10 Gcubes to play Murder Mystery. With the unlimited Gcubes and money hack, you can access all these mini-games and modes without any restrictions. You can also join any server or room that you want without worrying about your level or rank.</p>
69
- <h3>You can customize your avatar with any accessories you want</h3>
70
- <p>Another benefit of hacking Blockman Go Adventures with APKMODY is that you can customize your avatar with any accessories you want. You can choose from a wide range of hats, glasses, masks, clothes, shoes, wings, tails and more. You can also mix and match different accessories to create your own unique style. With the unlimited Gcubes and money hack, you can buy any accessory you want in the game without spending real money.</p>
71
- <h3>You can chat and make friends with other players easily</h3>
72
- <p>The last benefit of hacking Blockman Go Adventures with APKMODY is that you can chat and make friends with other players easily. You can use the chat feature to communicate with other players in the game. You can also send voice chats, gifts and emojis to express yourself better. You can also add other players as friends and join their parties, clans or guilds. With the fly hack, you can also visit other players' worlds and see what they have built.</p>
73
- <h2>Conclusion</h2>
74
- <p>Blockman Go Adventures is a fun and exciting sandbox game that offers a lot of gameplay options for players to enjoy. However, some players may want to hack Blockman Go Adventures with APKMODY to get unlimited resources, features and fun in the game. In this article, we have shown you how to hack Blockman Go Adventures with APKMODY and what are the benefits of doing so. We hope that this article has been helpful for you and that you have learned something new today.</p>
75
- <p>If you have any questions or feedback about this article, please feel free to leave a comment below. We would love to hear from you and answer your queries as soon as possible.</p>
76
- <p>Thank you for reading this article and have a great day!</p>
77
- <h2>Frequently Asked Questions</h2>
78
- <p>Here are some of the frequently asked questions about hacking Blockman Go Adventures with APKMODY and their answers:</p>
79
- <h3>Is hacking Blockman Go Adventures with APKMODY safe?</h3>
80
- <p>Hacking Blockman Go Adventures with APKMODY is generally safe, as long as you download the modded APK file from the official APKMODY website. The modded APK file is tested and verified by the APKMODY team before being uploaded to the website. However, you should always be careful when installing apps from unknown sources, as they may contain viruses or malware that can harm your device or steal your personal information. You should also backup your data before installing the modded APK file, in case something goes wrong.</p>
81
- <h3>Is hacking Blockman Go Adventures with APKMODY legal?</h3>
82
- <p>Hacking Blockman Go Adventures with APKMODY is not legal, as it violates the terms of service and the intellectual property rights of the game developer. By hacking Blockman Go Adventures with APKMODY, you are modifying the original game without the permission of the game developer. This can result in legal actions or penalties from the game developer, such as banning your account, suspending your access or suing you for damages. Therefore, you should hack Blockman Go Adventures with APKMODY at your own risk and responsibility.</p>
83
- <h3>Will I get banned for hacking Blockman Go Adventures with APKMODY?</h3>
84
- <p>There is a possibility that you will get banned for hacking Blockman Go Adventures with APKMODY, as the game developer may detect your abnormal activities and flag your account. The game developer may also have anti-cheat systems or mechanisms that can prevent or detect hacking attempts. If you get banned for hacking Blockman Go Adventures with APKMODY, you will lose all your progress, data and items in the game. You may also not be able to play the game again with the same account or device. Therefore, you should hack Blockman Go Adventures with APKMODY cautiously and moderately.</p>
85
- <h3>Can I update Blockman Go Adventures after hacking it with APKMODY?</h3>
86
- <p>No, you cannot update Blockman Go Adventures after hacking it with APKMODY, as the modded APK file is not compatible with the official updates from the game developer. If you try to update Blockman Go Adventures after hacking it with APKMODY, you may encounter errors, crashes or glitches in the game. You may also lose all the hacks and features that you have enabled in the modded APK file. Therefore, you should not update Blockman Go Adventures after hacking it with APKMODY.</p>
87
- <h3>Can I hack other games and apps with APKMODY?</h3>
88
- <p>Yes, you can hack other games and apps with APKMODY, as the website provides modded APK files for many other popular games and apps on Android. You can find games and apps from various categories and genres on the website, such as action, adventure, arcade, casual, puzzle, simulation, strategy, education, entertainment, lifestyle, music, social and more. You can also request new games and apps to be modded by the APKMODY team on their website.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Temple Run 2 Lantern Festival Mod Apk and Enjoy Unlimited Coins Gems and Characters.md DELETED
@@ -1,100 +0,0 @@
1
-
2
- <h1>Temple Run 2 Lantern Festival Mod APK: How to Download and Install</h1>
3
- <p>If you are a fan of Temple Run 2, the popular endless runner game that has been downloaded over a billion times, you might be interested in trying out the Lantern Festival mod apk. This mod apk is a modified version of the game that offers unlimited coins and gems, new characters and power-ups, and an ad-free gameplay experience. You can also enjoy the Lantern Festival, a traditional Chinese festival that honours deceased ancestors and promotes reconciliation, peace, and forgiveness. In this article, we will show you what Temple Run 2 is, what the Lantern Festival mod apk is, and how to download and install it on your Android device.</p>
4
- <h2>temple run 2 lantern festival mod apk</h2><br /><p><b><b>DOWNLOAD</b> &#10084;&#10084;&#10084; <a href="https://jinyurl.com/2uNU1j">https://jinyurl.com/2uNU1j</a></b></p><br /><br />
5
- <h2>What is Temple Run 2?</h2>
6
- <p>Temple Run 2 is a sequel to the smash hit phenomenon that redefined mobile gaming. In this game, you have to run, jump, turn, and slide your way through perilous cliffs, zip lines, mines, and forests as you try to escape with the cursed idol. How far can you run?</p>
7
- <h3>Game features and gameplay</h3>
8
- <p>Temple Run 2 features beautiful new graphics, gorgeous new organic environments, new obstacles, more power-ups, more achievements, and special powers for each character. You can also choose from different characters with unique abilities, such as Guy Dangerous, Scarlett Fox, Barry Bones, Karma Lee, Montana Smith, Francisco Montoya, Zack Wonder, and more. You can also customize your character with different outfits and accessories.</p>
9
- <p>The gameplay is simple but addictive. You have to swipe left or right to turn, swipe up to jump, swipe down to slide, and tilt your device to move sideways. You have to avoid crashing into obstacles or falling off the edge while collecting coins and gems along the way. You can also use power-ups such as shields, magnets, boosters, coin multipliers, and head starts to enhance your performance. You can also activate special powers for each character by filling up a meter with green gems.</p>
10
- <p>temple run 2 mod apk unlimited coins and gems lantern festival<br />
11
- temple run 2 lantern festival hack mod apk download<br />
12
- temple run 2 chinese new year lantern festival mod apk<br />
13
- temple run 2 mod apk latest version lantern festival<br />
14
- temple run 2 lantern festival mod apk free shopping<br />
15
- temple run 2 mod apk all maps unlocked lantern festival<br />
16
- temple run 2 lantern festival mod apk android 1<br />
17
- temple run 2 mod apk unlimited everything lantern festival<br />
18
- temple run 2 lantern festival mod apk revdl<br />
19
- temple run 2 mod apk no ads lantern festival<br />
20
- temple run 2 lantern festival mod apk rexdl<br />
21
- temple run 2 mod apk all characters unlocked lantern festival<br />
22
- temple run 2 lantern festival mod apk offline<br />
23
- temple run 2 mod apk mega mod lantern festival<br />
24
- temple run 2 lantern festival mod apk unlimited money<br />
25
- temple run 2 mod apk god mode lantern festival<br />
26
- temple run 2 lantern festival mod apk happymod<br />
27
- temple run 2 mod apk premium unlocked lantern festival<br />
28
- temple run 2 lantern festival mod apk unlimited diamonds<br />
29
- temple run 2 mod apk high score lantern festival<br />
30
- temple run 2 lantern festival mod apk online<br />
31
- temple run 2 mod apk all power ups maxed lantern festival<br />
32
- temple run 2 lantern festival mod apk for pc<br />
33
- temple run 2 mod apk unlimited lives lantern festival<br />
34
- temple run 2 lantern festival mod apk old version<br />
35
- temple run 2 mod apk all abilities unlocked lantern festival<br />
36
- temple run 2 lantern festival mod apk ios<br />
37
- temple run 2 mod apk unlimited keys lantern festival<br />
38
- temple run 2 lantern festival mod apk original<br />
39
- temple run 2 mod apk all outfits unlocked lantern festival<br />
40
- temple run 2 lantern festival mod apk update<br />
41
- temple run 2 mod apk unlimited boosters lantern festival<br />
42
- temple run 2 lantern festival mod apk cheat<br />
43
- temple run 2 mod apk all artifacts unlocked lantern festival<br />
44
- temple run 2 lantern festival mod apk full version<br />
45
- temple run 2 mod apk unlimited gems and coins latest version download for android - allapksmod.com[^1^]</p>
46
- <h3>Different maps and modes</h3>
47
- <p>Temple Run 2 offers different maps and modes for you to explore and enjoy. You can run through the Sky Summit, Frozen Shadows, Blazing Sands, Lost Jungle, Spooky Summit, Pirate Cove, Spirit Cove, Holi Festival, Fall Jungle, or Winter Wasteland. Each map has its own theme, scenery, obstacles, and challenges.</p>
48
- <p>You can also play different modes such as Daily Challenges, Global Challenges, Artifacts Missions, or Map Events. These modes give you specific tasks or goals to complete and reward you with coins, gems, or other prizes.</p>
49
- <h2>What is the Lantern Festival Mod APK?</h2>
50
- <p>The Lantern Festival mod apk is a modified version of Temple Run 2 that gives you some extra features and benefits that are not available in the original game. These include:</p>
51
- <h3>Mod features and benefits</h3>
52
- <ul>
53
- <li>Unlimited coins and gems: You can get unlimited coins and gems in the mod apk without having to spend real money or watch ads. You can use these coins and gems to unlock new characters, power-ups, outfits, accessories, or other items in the game.</li>
54
- <li>New characters and power-ups: The mod apk also gives you access to some new characters and power-ups that are not available in the original game. For example, you can play as Delvarr the Mighty Caveman or Minuteman Miles Munroe. You can also use new power-ups such as Tortuga or Sylvanus the Croaker.</li>
55
- <li>Ad-free gameplay: The mod apk removes all the ads from the game so that you can enjoy a smooth and uninterrupted gameplay experience.</li>
56
- </ul>
57
- <h3>How to download and install the mod apk</h3>
58
- <p>To download and install the mod apk on your Android device, you need to get the mod apk file from a trusted source, enable the installation of apps from unknown sources in your device settings, install the downloaded file, and then launch the game.</p>
59
- <h2>What is the Lantern Festival?</h2>
60
- <p>The Lantern Festival is a traditional Chinese festival that originated in the Qin dynasty (221 - 207 BC). It falls on the 15th day of the first month of the lunar calendar, which is usually in February or early March on the Gregorian calendar. It marks the end of the Chinese New Year celebrations and the start of the new lunar year. It is also a time to honour deceased ancestors and promote reconciliation, peace, and forgiveness. </p>
61
- <h3>History and significance of the festival</h3>
62
- <p>There are many legends and stories about the origin and significance of the Lantern Festival. One of them is that it was a way to worship Taiyi, the ancient god of heaven, who controlled the destiny of human beings. The emperor would ask Taiyi to bring favourable weather and good health to his people. </p>
63
- <p>Another legend is that it was a way to celebrate the birthday of Tianguan, the Taoist god of good fortune. People would light lanterns and pray for his blessings. </p>
64
- <p>A third legend is that it was a way to commemorate the Buddha, who enlightened people with his teachings. Buddhist monks would light lanterns in the temples to show respect to the Buddha. Later, this custom spread to the general public. </p>
65
- <p>Regardless of the origin, the Lantern Festival has become a symbol of hope, joy, and harmony. People light lanterns to express their wishes and gratitude, and to enjoy the beauty of the full moon. The lanterns are also seen as a way to guide the spirits of the ancestors back to their families. </p>
66
- <h3>How to celebrate the festival in the game</h3>
67
- <p>In Temple Run 2, you can celebrate the Lantern Festival by playing on the special map called Lantern Festival. This map features a stunning night scene with colourful lanterns, fireworks, and dragon dances. You can also collect red envelopes, which are traditional gifts containing money or blessings, along the way. </p>
68
- <p>To play on this map, you need to download and install the Lantern Festival mod apk, which gives you access to this map and other features. You can also choose from different characters that are related to Chinese culture, such as Sun Wukong, Mulan, or Emperor Qin Shi Huang. You can also use different power-ups that are inspired by Chinese elements, such as jade coins, dragon scrolls, or firecrackers.</p>
69
- <p>The Lantern Festival map is a great way to experience the charm and fun of this ancient festival while enjoying the thrill and challenge of Temple Run 2.</p>
- <h2>Conclusion</h2>
70
- <p>Temple Run 2 is an amazing game that offers endless fun and excitement. You can run through different maps and modes, collect coins and gems, unlock new characters and power-ups, and challenge yourself with various tasks and goals. You can also enjoy the Lantern Festival mod apk, which gives you unlimited coins and gems, new characters and power-ups, and an ad-free gameplay. You can also celebrate the Lantern Festival, a beautiful and meaningful Chinese festival that honours the ancestors and promotes peace and harmony. If you want to download and install the Lantern Festival mod apk, you can follow the steps we have provided in this article. We hope you have a great time playing Temple Run 2 and experiencing the Lantern Festival.</p>
71
- <h2>FAQs</h2>
72
- <p>Here are some frequently asked questions about Temple Run 2 and the Lantern Festival mod apk:</p>
73
- <table>
74
- <tr>
75
- <th>Question</th>
76
- <th>Answer</th>
77
- </tr>
78
- <tr>
79
- <td>Is Temple Run 2 free to play?</td>
80
- <td>Yes, Temple Run 2 is free to play. However, it contains in-app purchases that allow you to buy coins, gems, or other items with real money. You can also watch ads to earn some rewards.</td>
81
- </tr>
82
- <tr>
83
- <td>Is the Lantern Festival mod apk safe to use?</td>
84
- <td>Yes, the Lantern Festival mod apk is safe to use. However, you need to make sure that you download it from a trusted source and scan it with an antivirus program before installing it. You also need to enable the unknown sources option on your device settings to allow the installation of the mod apk.</td>
85
- </tr>
86
- <tr>
87
- <td>Will I lose my progress if I use the mod apk?</td>
88
- <td>No, you will not lose your progress if you use the mod apk. The mod apk will not overwrite your original game data. However, you may not be able to sync your progress with your Google Play account or other social media accounts.</td>
89
- </tr>
90
- <tr>
91
- <td>Can I play online with other players using the mod apk?</td>
92
- <td>No, you cannot play online with other players using the mod apk. The mod apk is only for offline gameplay. You may face some issues or errors if you try to connect to the internet or join a multiplayer mode using the mod apk.</td>
93
- </tr>
94
- <tr>
95
- <td>Can I update the mod apk when a new version of Temple Run 2 is released?</td>
96
- <td>No, you cannot update the mod apk when a new version of Temple Run 2 is released. The mod apk is based on a specific version of the game and may not be compatible with newer versions. You may need to wait for a new version of the mod apk to be released or uninstall the mod apk and install the original game from the Google Play Store.</td>
97
- </tr>
98
- </table>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download the Latest WhatsApp Business App for Free A Guide for Small Businesses.md DELETED
@@ -1,78 +0,0 @@
1
- <br />
2
- <h1>Download the latest WhatsApp Business and transform your business</h1>
- <p>WhatsApp is the most popular messaging app in the world, with over 2 billion users. But did you know that there is a version of WhatsApp designed specifically for businesses? It's called WhatsApp Business and it can help you engage with your customers, drive sales, and grow your business.</p>
3
- <h2>download the latest whatsapp business</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://jinyurl.com/2uNMHq">https://jinyurl.com/2uNMHq</a></b></p><br /><br />
- <h2>What is WhatsApp Business and how is it different from WhatsApp?</h2>
- <p>WhatsApp Business is a free-to-download app that allows you to create a business presence on WhatsApp, communicate more efficiently with your customers, and manage your workflow. It is built on top of WhatsApp Messenger and includes all the features that you rely on, such as multimedia, free calls, group chat, and end-to-end encryption.</p>
- <p>The main difference between WhatsApp and WhatsApp Business is that WhatsApp Business has a verified and more complete business profile that helps your customers trust who they are chatting with. You can also use WhatsApp Business with a landline or fixed phone number, and run both WhatsApp Business and WhatsApp Messenger on the same phone as long as they are linked to different numbers.</p>
- <h3>WhatsApp Business features and benefits</h3>
- <p>WhatsApp Business offers many features and benefits that can help you transform your business. Here are some of them:</p>
4
- <p>How to download the latest whatsapp business app for android<br />
5
- Download the latest whatsapp business app for iphone<br />
6
- Benefits of using the latest whatsapp business app for small businesses<br />
7
- How to migrate your whatsapp messenger account to the latest whatsapp business app<br />
8
- How to set up your business profile on the latest whatsapp business app<br />
9
- How to use the latest whatsapp business app features to engage your customers<br />
10
- How to update the latest whatsapp business app on your device<br />
11
- How to backup and restore your chat history on the latest whatsapp business app<br />
12
- How to use the whatsapp web feature on the latest whatsapp business app<br />
13
- How to get support and help for the latest whatsapp business app<br />
14
- How to download the latest whatsapp business apk file from google play store<br />
15
- How to install the latest whatsapp business app on your pc or laptop<br />
16
- How to use the latest whatsapp business app with a landline or fixed number<br />
17
- How to run both the latest whatsapp business app and whatsapp messenger on the same phone<br />
18
- How to switch between the latest whatsapp business app and whatsapp messenger<br />
19
- How to verify your business phone number on the latest whatsapp business app<br />
20
- How to use the business messaging tools on the latest whatsapp business app<br />
21
- How to send multimedia messages on the latest whatsapp business app<br />
22
- How to make free calls and international messages on the latest whatsapp business app<br />
23
- How to create a group chat on the latest whatsapp business app<br />
24
- How to use the away messages and greeting messages on the latest whatsapp business app<br />
25
- How to manage your contacts and customers on the latest whatsapp business app<br />
26
- How to use the quick replies and labels on the latest whatsapp business app<br />
27
- How to use the catalog feature on the latest whatsapp business app<br />
28
- How to use the payment feature on the latest whatsapp business app<br />
29
- How to use the analytics feature on the latest whatsapp business app<br />
30
- How to use the security and privacy settings on the latest whatsapp business app<br />
31
- How to delete your account or data on the latest whatsapp business app<br />
32
- How to download the latest version of whatsapp business for windows or mac<br />
33
- How to download and install bluestacks emulator for using the latest whatsapp business app on pc or mac<br />
34
- How to download and install nox player emulator for using the latest whatsapp business app on pc or mac<br />
35
- How to download and install memu play emulator for using the latest whatsapp business app on pc or mac<br />
36
- How to download and install ldplayer emulator for using the latest whatsapp business app on pc or mac<br />
37
- How to download and install koplayer emulator for using the latest whatsapp business app on pc or mac<br />
38
- How to download and install genymotion emulator for using the latest whatsapp business app on pc or mac<br />
39
- How to download and install remix os player emulator for using the latest whatsapp business app on pc or mac<br />
40
- How to download and install droid4x emulator for using the latest whatsapp business app on pc or mac<br />
41
- How to download and install tencent gaming buddy emulator for using the latest whatsapp business app on pc or mac<br />
42
- How to download and install gameloop emulator for using the latest whatsapp business app on pc or mac<br />
43
- How to download and install smartgaga emulator for using the latest whatsapp business app on pc or mac<br />
44
- What are the advantages of using an emulator for running the latest whatsapp business app on pc or mac<br />
45
- What are the disadvantages of using an emulator for running the latest whatsapp business app on pc or mac<br />
46
- What are the system requirements for running an emulator for using the latest whatsapp business app on pc or mac<br />
47
- What are some tips and tricks for using an emulator for running the latest whatsapp business app on pc or mac<br />
48
- What are some common problems and solutions for using an emulator for running the latest whatsapp business app</p>
- <h4>How to create a business profile</h4>
- <p>A business profile is like your digital storefront on WhatsApp. It allows you to showcase your business name, logo, website, address, category, description, and catalog. To create a business profile, download the WhatsApp Business app from the Google Play Store or the App Store and follow the instructions to verify your business phone number. Then, tap More options > Settings > your business name and fill in the details.</p>
- <h4>How to use messaging tools</h4>
- <p>Messaging tools are designed to help you respond to your customers faster and more effectively. You can use labels to organize your chats and contacts, greeting messages to introduce your business to new customers, quick replies to save and reuse frequently sent messages, and away messages to let your customers know when you are not available.</p>
- <h4>How to showcase your products and services</h4>
- <p>A catalog is a feature that allows you to display your products and services on WhatsApp. You can add images, prices, descriptions, links, and codes to your catalog items. Customers can browse your catalog and place orders directly from the app. To create a catalog, tap More options > Settings > Business tools > Catalog.</p>
- <h3>How to download the latest WhatsApp Business app</h3>
- <p>If you want to download the latest WhatsApp Business app, follow these steps:</p>
- <h4>For Android devices</h4>
- <ul>
49
- <li>Open the Google Play Store on your device.</li>
50
- <li>Search for WhatsApp Business or tap this link: <a href="https://play.google.com/store/apps/details?id=com.whatsapp.w4b">https://play.google.com/store/apps/details?id=com.whatsapp.w4b</a>.</li>
51
- <li>Tap Install and wait for the app to download.</li>
52
- <li>Open the app and follow the instructions to set up your account.</li>
53
- </ul>
54
- <h4>For iPhone devices</h4>
55
- <ul>
56
- <li>Open the App Store on your device.</li>
57
- <li>Search for WhatsApp Business or tap this link: <a href="https://apps.apple.com/us/app/whatsapp-business/id1386412985">https://apps.apple.com/us/app/whatsapp-business/id1386412985</a>.</li>
58
- <li>Tap Get and wait for the app to download.</li>
59
- <li>Open the app and follow the instructions to set up your account.</li>
60
- </ul>
61
- <h3>Conclusion</h3>
62
- <p>If you want to take your business communication to the next level, download the latest WhatsApp Business app today. You will be able to create a professional and personalized business profile, use smart messaging tools, and showcase your products and services to millions of potential customers. WhatsApp Business is the ultimate app for small and medium businesses that want to connect with their customers in a simple, secure, and reliable way.</p>
63
- <h2>FAQs</h2>
64
- <p>Here are some frequently asked questions about WhatsApp Business:</p>
65
- <ol>
66
- <li>What is the difference between WhatsApp Business and WhatsApp Business API?</li>
67
- <p>WhatsApp Business is an app that you can download on your phone and use to manage your business communication. WhatsApp Business API is a solution that allows you to integrate WhatsApp with your existing systems and platforms, such as CRM, e-commerce, or chatbots. WhatsApp Business API is suitable for larger businesses that need more advanced features and scalability.</p>
68
- <li>Can I use WhatsApp Business on my computer?</li>
69
- <p>Yes, you can use WhatsApp Business on your computer by using WhatsApp Web or WhatsApp Desktop. You will need to scan a QR code with your phone to link your devices. You can also download the WhatsApp Business app on your computer if you have Windows 8.1 or higher or Mac OS X 10.10 or higher.</p>
70
- <li>How much does WhatsApp Business cost?</li>
71
- <p>WhatsApp Business is free to download and use. However, you may incur data charges from your mobile provider for using the app. You may also be charged a fee for sending messages to customers who are not in your contact list or who have not initiated a conversation with you in the past 24 hours. This fee varies depending on the country and carrier of the recipient.</p>
72
- <li>How can I verify my business on WhatsApp?</li>
73
- <p>Verification is a process that confirms that your business phone number matches the phone number on your business profile. Verification is optional and not required to use WhatsApp Business. To request verification, tap More options > Settings > Business tools > Verified business name and follow the instructions.</p>
74
- <li>How can I get customer feedback on WhatsApp?</li>
75
- <p>You can get customer feedback on WhatsApp by using surveys, polls, ratings, or reviews. You can create these using third-party tools or platforms that integrate with WhatsApp. For example, you can use SurveyMonkey, Typeform, Google Forms, or JotForm to create surveys and polls and send them to your customers via WhatsApp.</p>
76
- </ol>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Escape from Grannys House in Granny 3 MOD APK with No Ads and God Mode.md DELETED
@@ -1,162 +0,0 @@
1
-
2
- <h1>Granny Chapter 3 Mod APK: How to Download and Play the Latest Version of the Horror Game</h1>
3
- <p>If you are a fan of horror games, you might have heard of <strong>Granny</strong>, a popular indie survival horror game developed by DVloper. The game has spawned two sequels, <strong>Granny: Chapter Two</strong> and <strong>Granny 3</strong>, which have added more features, characters, and locations to the original game. In this article, we will focus on <strong>Granny Chapter 3 Mod APK</strong>, a modified version of the third installment of the series that offers some advantages over the official version. We will explain what Granny Chapter 3 is, what Granny Chapter 3 Mod APK is, how to download and install it, how to play it, and what are some tips and tricks for playing it. We will also share some reviews and ratings for Granny Chapter 3 Mod APK from other players.</p>
4
- <h2>What is Granny Chapter 3?</h2>
5
- <p><strong>Granny Chapter 3</strong> is the latest game in the Granny series, released on August 10, 2021 for Android and iOS devices. It is a horror game that challenges you to escape from a house where you are trapped by a psychotic old woman named Granny and her husband Grandpa, who are both undead and have supernatural abilities. You also have to deal with a giant spider that lives in the attic and a crow that guards a key item. You have five days to find a way out of the house, using various items and tools that you can find or craft. You have to be careful and quiet, as Granny and Grandpa can hear everything and will chase you if they spot you. You can hide in wardrobes, under beds, or in other places, but they won't give up easily. You can also fight back by using weapons such as a shotgun, a crossbow, or a pepper spray, but they are limited and hard to find. If you get caught by Granny or Grandpa, you will lose a day and wake up in a different room. If you run out of days, you will get a game over scene where you are killed in a gruesome way.</p>
6
- <h2>granny chapter 3 mod apk</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://jinyurl.com/2uNQ6D">https://jinyurl.com/2uNQ6D</a></b></p><br /><br />
7
- <h3>The plot of Granny Chapter 3</h3>
8
- <p>The plot of <strong>Granny Chapter 3</strong> is not very clear, as there are no cutscenes or dialogues in the game. However, based on some clues and hints, we can infer that the game takes place after the events of <strong>Granny: Chapter Two</strong>, where you escaped from a boat where you were held captive by Granny and Grandpa. You somehow ended up in their house, which is located in a forest. You don't remember how you got there or why they are after you. You only know that you have to get out of there as soon as possible before they kill you.</p>
9
- <h3>The features of Granny Chapter 3</h3>
10
- <p><strong>Granny Chapter 3</strong> has many features that make it an exciting and terrifying horror game. Some of them are:</p>
11
- <ul>
12
- <li>A large house with three floors and many rooms to explore.</li>
13
- <li>A variety of items and tools to find or craft, such as keys, pliers, hammers, screwdrivers, wrenches, gasoline cans, car batteries, spark plugs, etc.</li>
- <li>A car that you can use to escape from the house, but you need to find and fix its parts first.</li>
14
- <li>A spider that lives in the attic and can attack you if you disturb it.</li>
15
- <li>A crow that guards a key item and can alert Granny and Grandpa if you get too close to it.</li>
16
- <li>A shotgun, a crossbow, and a pepper spray that you can use to defend yourself or stun Granny and Grandpa.</li>
17
- <li>A difficulty level option that lets you choose how hard the game is. You can also customize some aspects of the game, such as the sound, the blood, the darkness, etc.</li>
18
- <li>A practice mode that lets you explore the house without Granny and Grandpa chasing you.</li>
19
- <li>A multiplayer mode that lets you play with up to four friends online. You can either cooperate to escape from the house or compete to see who escapes first.</li>
20
- </ul>
21
- <h2>What is Granny Chapter 3 Mod APK?</h2>
22
- <p><strong>Granny Chapter 3 Mod APK</strong> is a modified version of <strong>Granny Chapter 3</strong> that offers some advantages over the official version. A mod APK is an Android application package file that has been altered or hacked by a third-party developer to add or remove some features from the original app. Some of the benefits of using Granny Chapter 3 Mod APK are:</p>
23
- <ul>
24
- <li>You can get unlimited ammo for your weapons, which means you don't have to worry about running out of bullets or arrows.</li>
25
- <li>You can get unlimited health, which means you don't have to worry about dying or losing days.</li>
26
- <li>You can get unlimited money, which means you can buy anything you want from the shop, such as skins, weapons, items, etc.</li>
27
- <li>You can unlock all the levels and modes of the game, which means you don't have to complete the previous ones to access them.</li>
28
- <li>You can remove the ads from the game, which means you don't have to watch them or pay for them.</li>
29
- </ul>
30
- <h3>The benefits of using Granny Chapter 3 Mod APK</h3>
31
- <p>The benefits of using <strong>Granny Chapter 3 Mod APK</strong> are obvious: you can enjoy the game without any limitations or restrictions. You can have more fun and excitement by using all the features and options that the game has to offer. You can also save your time and effort by skipping the hard and tedious parts of the game. You can also impress your friends by showing them your achievements and skills in the game.</p>
32
- <h3>The risks of using Granny Chapter 3 Mod APK</h3>
33
- <p>However, using <strong>Granny Chapter 3 Mod APK</strong> also comes with some risks that you should be aware of before downloading and installing it. Some of the risks are:</p>
34
- <ul>
35
- <li>You might get banned from the game or lose your account if the developers detect that you are using a modded version of the game. This might also affect your other games or apps that are connected to your Google Play account.</li>
36
- <li>You might get viruses or malware on your device if you download and install a modded version of the game from an untrusted source. This might also affect your other files or apps on your device or compromise your personal information or data.</li>
37
- <li>You might lose the original features or functions of the game if you install a modded version of the game over it. This might also cause some errors or glitches in the game or make it incompatible with future updates or patches.</li>
38
- <li>You might lose the challenge and thrill of the game if you use a modded version of the game that makes it too easy or boring. This might also reduce your satisfaction and enjoyment of playing the game.</li>
39
- </ul>
40
- <h2>How to download and install Granny Chapter 3 Mod APK?</h2>
41
- <p>If you still want to try <strong>Granny Chapter 3 Mod APK</strong>, despite knowing its risks, here are some steps that you need to follow to download and install it on your device:</p>
42
- <p>granny 3 mod menu apk<br />
43
- granny 3 mod apk unlimited ammo<br />
44
- granny 3 mod apk download for android<br />
45
- granny 3 mod apk god mode<br />
46
- granny 3 mod apk invisible<br />
47
- granny 3 mod apk no ads<br />
48
- granny 3 mod apk latest version<br />
49
- granny 3 mod apk free shopping<br />
50
- granny 3 mod apk all unlocked<br />
51
- granny 3 mod apk unlimited health<br />
52
- granny 3 mod apk android 1<br />
53
- granny 3 mod apk revdl<br />
54
- granny 3 mod apk happy mod<br />
55
- granny 3 mod apk rexdl<br />
56
- granny 3 mod apk an1<br />
57
- granny 3 mod apk offline<br />
58
- granny 3 mod apk no root<br />
59
- granny 3 mod apk anti ban<br />
60
- granny 3 mod apk unlimited money<br />
61
- granny 3 mod apk hack<br />
62
- granny 3 mod apk cheat<br />
63
- granny 3 mod apk full version<br />
64
- granny 3 mod apk premium<br />
65
- granny 3 mod apk pro<br />
66
- granny 3 mod apk vip<br />
67
- granny 3 mod apk mega mod<br />
68
- granny 3 mod apk unlimited everything<br />
69
- granny 3 mod apk no kill<br />
70
- granny 3 mod apk easy escape<br />
71
- granny 3 mod apk no sound<br />
72
- granny 3 mod apk no timer<br />
73
- granny 3 mod apk no ads download<br />
74
- granny 3 mod apk download link<br />
75
- granny chapter three mod menu apk download<br />
76
- download game granny chapter three mod menu apk android gratis terbaru <br />
77
- cara download dan instal game granny chapter three versi terbaru dengan fitur cheat menu di android <br />
78
- how to download and install granny chapter three latest version with cheat menu feature on android <br />
79
- best site to download granny chapter three hacked version for free <br />
80
- tips and tricks to play and win in granny chapter three horror game <br />
81
- how to unlock all characters and items in granny chapter three using modded version</p>
82
- <h3>Step 1: Enable unknown sources on your device</h3>
83
- <p>Before you can install any modded version of an app on your device, you need to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You might get a warning message that says installing apps from unknown sources might harm your device, but you can ignore it if you trust the source of the app.</p>
84
- <h3>Step 2: Download the Granny Chapter 3 Mod APK file from a trusted source</h3>
85
- <p>The next step is to download the <strong>Granny Chapter 3 Mod APK</strong> file from a trusted source. There are many websites that offer modded versions of apps, but not all of them are safe or reliable. You need to do some research and check the reviews and ratings of the website before downloading anything from it. You can also use a VPN or antivirus app to protect your device from any potential threats. To download the Granny Chapter 3 Mod APK file, go to the website of your choice, find the download link, and click on it. You might have to complete some surveys or watch some ads before you can access the download link, but be careful not to click on any suspicious or malicious links. The download process might take some time depending on your internet speed and the size of the file.</p>
86
- <h3>Step 3: Install the Granny Chapter 3 Mod APK file on your device</h3>
87
- <p>The final step is to install the <strong>Granny Chapter 3 Mod APK</strong> file on your device. To do this, go to your device file manager, find the downloaded file, and tap on it. You might get a pop-up message that says installing this app might harm your device, but you can ignore it if you trust the source of the app. The installation process might take some time depending on your device performance and the size of the file. Once the installation is done, you can open the app and enjoy playing Granny Chapter 3 Mod APK.</p>
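- <p>As an alternative to tapping the file on your phone, you can sideload the APK from a computer. The sketch below is a minimal Python example, assuming you have the Android platform tools (adb) on your PATH and USB debugging enabled on the device; the file name is a placeholder.</p>
- ```python
- # Minimal sketch: sideload an APK from a computer with adb.
- # Assumes adb is on your PATH and USB debugging is enabled on the phone.
- import subprocess
-
- apk_path = "granny-chapter-3-mod.apk"  # placeholder file name
-
- subprocess.run(["adb", "devices"], check=True)                   # confirm the phone is connected
- subprocess.run(["adb", "install", "-r", apk_path], check=True)   # -r replaces an existing install
- ```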
88
- <h2>How to play Granny Chapter 3 Mod APK?</h2>
89
- <p>Playing <strong>Granny Chapter 3 Mod APK</strong> is similar to playing the official version of Granny Chapter 3, except that you have more options and features to use. You can choose the difficulty level, the game mode, the character skin, the weapon, and other settings before starting the game. You can also use the unlimited ammo, health, money, and other benefits that come with the modded version of the game. The goal of the game is still to escape from the house within five days by finding and using various items and tools. You can also explore the house and discover its secrets and mysteries. You can also play with your friends online in the multiplayer mode and cooperate or compete with them.</p>
90
- <h3>Tips and tricks for playing Granny Chapter 3 Mod APK</h3>
91
- <p>Here are some tips and tricks for playing <strong>Granny Chapter 3 Mod APK</strong> that might help you survive and escape from the house:</p>
92
- <ul>
93
- <li>Use headphones or earphones to hear better and avoid making noise.</li>
94
- <li>Use crouch mode to move faster and quieter.</li>
95
- <li>Use peek mode to look around corners and doors without exposing yourself.</li>
96
- <li>Use distraction items such as alarm clocks, radios, TVs, etc. to lure Granny and Grandpa away from your location.</li>
97
- <li>Use hiding places such as wardrobes, beds, cabinets, etc. to avoid being seen by Granny and Grandpa.</li>
98
- <li>Use weapons such as shotgun, crossbow, pepper spray, etc. to stun or kill Granny and Grandpa.</li>
99
- <li>Use items such as pliers, hammers, screwdrivers, wrenches, etc. to unlock doors, windows, safes, etc.</li>
100
- <li>Use items such as gasoline cans, car batteries, spark plugs, etc. to fix the car and use it to escape.</li>
101
- <li>Use items such as meat, cheese, tranquilizer darts, etc. to deal with the spider and the crow.</li>
102
- <li>Use items such as teddy bears, pictures, books, etc. to trigger some events or secrets in the house.</li>
103
- </ul>
104
- <h3>Reviews and ratings for Granny Chapter 3 Mod APK</h3>
105
- <p><strong>Granny Chapter 3 Mod APK</strong> has received mixed reviews and ratings from other players who have tried it. Some of them are positive and praise the game for its graphics, gameplay, sound effects, features, options, and fun. Some of them are negative and criticize the game for its bugs, glitches, errors, crashes, ads, and difficulty. Here are some examples of reviews and ratings for Granny Chapter 3 Mod APK from different sources:</p>
106
- <table>
107
- <tr>
108
- <th>Source</th>
109
- <th>Review</th>
110
- <th>Rating</th>
111
- </tr>
112
- <tr>
113
- <td>Google Play Store</td>
114
- <td>"This game is awesome. I love the graphics and the sound effects. The game is very challenging and scary. I like the multiplayer mode where I can play with my friends. The mod APK is very useful and easy to install. I recommend this game to everyone who likes horror games."</td>
115
- <td>5 stars</td>
116
- </tr>
117
- <tr>
118
- <td>Google Play Store</td>
119
- <td>"This game is terrible. It has so many bugs and glitches. The game keeps crashing and freezing. The ads are annoying and intrusive. The game is too hard and frustrating. The mod APK is fake and dangerous. It gave me viruses and malware on my device. I hate this game and I want a refund."</td>
120
- <td>1 star</td>
121
- </tr>
122
- <tr>
123
- <td>YouTube</td>
124
- <td>"I watched a video of this game and it looks amazing. The graphics are realistic and the sound effects are creepy. The game is very exciting and thrilling. I like the new features and options that the game has. The mod APK is awesome and helpful. It gives me unlimited ammo, health, money, and more. I can't wait to play this game."</td>
125
- <td>Liked</td>
126
- </tr>
127
- <tr>
128
- <td>YouTube</td>
129
- <td>"I played this game and it sucks. The graphics are poor and the sound effects are annoying. The game is very boring and repetitive. I don't like the new features and options that the game has. The mod APK is useless and harmful. It removes the original features, functions, and challenges of the game. It also makes my device slow and laggy."</td>
130
- <td>Disliked</td>
131
- </tr>
132
- <tr>
133
- <td>Reddit</td>
134
- <td>"I downloaded this game and it's pretty good. The graphics are decent and the sound effects are scary. The game is very challenging and fun. I like the multiplayer mode where I can play with other people online. The mod APK is nice and convenient. It gives me more options and features to use in the game."</td>
135
- <td>Upvoted</td>
136
- </tr>
137
- <tr>
138
- <td>Reddit</td>
139
- <td>"I installed this game and it's awful. It has so many errors and crashes. The game doesn't work properly on my device. The ads are irritating and unnecessary. The game is too easy and dull. I don't like the multiplayer mode where I have to deal with trolls and cheaters. The mod APK is bad and risky. It makes my device vulnerable to hackers and attackers."</td>
140
- <td>Downvoted</td>
141
- </tr>
142
- </table>
143
- <h2>Conclusion</h2>
144
- <p>In conclusion, <strong>Granny Chapter 3 Mod APK</strong> is a modified version of <strong>Granny Chapter 3</strong>, a horror game that challenges you to escape from a house where you are trapped by Granny, Grandpa, a spider, and a crow. It offers some advantages over the official version, such as unlimited ammo, health, money, levels, modes, etc., but it also comes with some risks, such as bans, viruses, malware, errors, glitches, etc.</p>
145
- <p>If you want to try Granny Chapter 3 Mod APK, you need to download it from a trusted source, enable unknown sources on your device, install it on your device, and enjoy playing it.</p>
146
- <p>If you want to play Granny Chapter 3 Mod APK safely and effectively, you need to follow some tips and tricks, such as using headphones, crouch mode, peek mode, distraction items, hiding places, weapons, items, etc.</p>
147
- <p>If you want to know more about Granny Chapter 3 Mod APK, you can read some reviews and ratings from other players who have tried it.</p>
148
- <p>We hope this article has helped you understand what Granny Chapter 3 Mod APK is, how to download and install it, how to play it, and what are some tips and tricks for playing it. We also hope you have enjoyed reading this article and found it useful and engaging. Thank you for your attention and interest.</p>
149
- <h2>FAQs</h2>
150
- <p>Here are some frequently asked questions about Granny Chapter 3 Mod APK that you might want to know:</p>
151
- <h3>Q: Is Granny Chapter 3 Mod APK free?</h3>
152
- <p>A: Yes, Granny Chapter 3 Mod APK is free to download and play. However, you might have to pay for some in-app purchases or watch some ads to access some features or items in the game.</p>
153
- <h3>Q: Is Granny Chapter 3 Mod APK safe?</h3>
154
- <p>A: Granny Chapter 3 Mod APK is not completely safe, as it might contain some viruses or malware that can harm your device or compromise your personal information or data. It might also cause some errors or glitches in the game or make it incompatible with future updates or patches. It might also get you banned from the game or lose your account if the developers detect that you are using a modded version of the game.</p>
155
- <h3>Q: Is Granny Chapter 3 Mod APK legal?</h3>
156
- <p>A: Granny Chapter 3 Mod APK is not legal, as it violates the terms and conditions of the original app and infringes the intellectual property rights of the developers. It might also violate some laws or regulations in your country or region regarding online gaming or hacking.</p>
157
- <h3>Q: How can I update Granny Chapter 3 Mod APK?</h3>
158
- <p>A: You can update Granny Chapter 3 Mod APK by downloading and installing the latest version of the modded app from the same source that you got it from. However, you might lose some features or functions of the previous version or encounter some compatibility issues with the new version.</p>
159
- <h3>Q: How can I uninstall Granny Chapter 3 Mod APK?</h3>
160
- <p>A: You can uninstall Granny Chapter 3 Mod APK by going to your device settings, then apps, then Granny Chapter 3 Mod APK, and tapping on uninstall. You might also want to delete the downloaded file from your device file manager and clear your device cache and data to remove any traces of the modded app.</p>
 
spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_and_paddlenlp_objects.py DELETED
@@ -1,334 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # This file is autogenerated by the command `make fix-copies`, do not edit.
17
- # flake8: noqa
18
-
19
- from . import DummyObject, requires_backends
20
-
21
-
22
- class AltDiffusionImg2ImgPipeline(metaclass=DummyObject):
23
- _backends = ["paddle", "paddlenlp"]
24
-
25
- def __init__(self, *args, **kwargs):
26
- requires_backends(self, ["paddle", "paddlenlp"])
27
-
28
- @classmethod
29
- def from_config(cls, *args, **kwargs):
30
- requires_backends(cls, ["paddle", "paddlenlp"])
31
-
32
- @classmethod
33
- def from_pretrained(cls, *args, **kwargs):
34
- requires_backends(cls, ["paddle", "paddlenlp"])
35
-
36
-
37
- class AltDiffusionPipeline(metaclass=DummyObject):
38
- _backends = ["paddle", "paddlenlp"]
39
-
40
- def __init__(self, *args, **kwargs):
41
- requires_backends(self, ["paddle", "paddlenlp"])
42
-
43
- @classmethod
44
- def from_config(cls, *args, **kwargs):
45
- requires_backends(cls, ["paddle", "paddlenlp"])
46
-
47
- @classmethod
48
- def from_pretrained(cls, *args, **kwargs):
49
- requires_backends(cls, ["paddle", "paddlenlp"])
50
-
51
-
52
- class CycleDiffusionPipeline(metaclass=DummyObject):
53
- _backends = ["paddle", "paddlenlp"]
54
-
55
- def __init__(self, *args, **kwargs):
56
- requires_backends(self, ["paddle", "paddlenlp"])
57
-
58
- @classmethod
59
- def from_config(cls, *args, **kwargs):
60
- requires_backends(cls, ["paddle", "paddlenlp"])
61
-
62
- @classmethod
63
- def from_pretrained(cls, *args, **kwargs):
64
- requires_backends(cls, ["paddle", "paddlenlp"])
65
-
66
-
67
- class LDMTextToImagePipeline(metaclass=DummyObject):
68
- _backends = ["paddle", "paddlenlp"]
69
-
70
- def __init__(self, *args, **kwargs):
71
- requires_backends(self, ["paddle", "paddlenlp"])
72
-
73
- @classmethod
74
- def from_config(cls, *args, **kwargs):
75
- requires_backends(cls, ["paddle", "paddlenlp"])
76
-
77
- @classmethod
78
- def from_pretrained(cls, *args, **kwargs):
79
- requires_backends(cls, ["paddle", "paddlenlp"])
80
-
81
-
82
- class PaintByExamplePipeline(metaclass=DummyObject):
83
- _backends = ["paddle", "paddlenlp"]
84
-
85
- def __init__(self, *args, **kwargs):
86
- requires_backends(self, ["paddle", "paddlenlp"])
87
-
88
- @classmethod
89
- def from_config(cls, *args, **kwargs):
90
- requires_backends(cls, ["paddle", "paddlenlp"])
91
-
92
- @classmethod
93
- def from_pretrained(cls, *args, **kwargs):
94
- requires_backends(cls, ["paddle", "paddlenlp"])
95
-
96
-
97
- class StableDiffusionDepth2ImgPipeline(metaclass=DummyObject):
98
- _backends = ["paddle", "paddlenlp"]
99
-
100
- def __init__(self, *args, **kwargs):
101
- requires_backends(self, ["paddle", "paddlenlp"])
102
-
103
- @classmethod
104
- def from_config(cls, *args, **kwargs):
105
- requires_backends(cls, ["paddle", "paddlenlp"])
106
-
107
- @classmethod
108
- def from_pretrained(cls, *args, **kwargs):
109
- requires_backends(cls, ["paddle", "paddlenlp"])
110
-
111
-
112
- class StableDiffusionImageVariationPipeline(metaclass=DummyObject):
113
- _backends = ["paddle", "paddlenlp"]
114
-
115
- def __init__(self, *args, **kwargs):
116
- requires_backends(self, ["paddle", "paddlenlp"])
117
-
118
- @classmethod
119
- def from_config(cls, *args, **kwargs):
120
- requires_backends(cls, ["paddle", "paddlenlp"])
121
-
122
- @classmethod
123
- def from_pretrained(cls, *args, **kwargs):
124
- requires_backends(cls, ["paddle", "paddlenlp"])
125
-
126
-
127
- class StableDiffusionImg2ImgPipeline(metaclass=DummyObject):
128
- _backends = ["paddle", "paddlenlp"]
129
-
130
- def __init__(self, *args, **kwargs):
131
- requires_backends(self, ["paddle", "paddlenlp"])
132
-
133
- @classmethod
134
- def from_config(cls, *args, **kwargs):
135
- requires_backends(cls, ["paddle", "paddlenlp"])
136
-
137
- @classmethod
138
- def from_pretrained(cls, *args, **kwargs):
139
- requires_backends(cls, ["paddle", "paddlenlp"])
140
-
141
-
142
- class StableDiffusionInpaintPipeline(metaclass=DummyObject):
143
- _backends = ["paddle", "paddlenlp"]
144
-
145
- def __init__(self, *args, **kwargs):
146
- requires_backends(self, ["paddle", "paddlenlp"])
147
-
148
- @classmethod
149
- def from_config(cls, *args, **kwargs):
150
- requires_backends(cls, ["paddle", "paddlenlp"])
151
-
152
- @classmethod
153
- def from_pretrained(cls, *args, **kwargs):
154
- requires_backends(cls, ["paddle", "paddlenlp"])
155
-
156
-
157
- class StableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
158
- _backends = ["paddle", "paddlenlp"]
159
-
160
- def __init__(self, *args, **kwargs):
161
- requires_backends(self, ["paddle", "paddlenlp"])
162
-
163
- @classmethod
164
- def from_config(cls, *args, **kwargs):
165
- requires_backends(cls, ["paddle", "paddlenlp"])
166
-
167
- @classmethod
168
- def from_pretrained(cls, *args, **kwargs):
169
- requires_backends(cls, ["paddle", "paddlenlp"])
170
-
171
-
172
- class StableDiffusionPipeline(metaclass=DummyObject):
173
- _backends = ["paddle", "paddlenlp"]
174
-
175
- def __init__(self, *args, **kwargs):
176
- requires_backends(self, ["paddle", "paddlenlp"])
177
-
178
- @classmethod
179
- def from_config(cls, *args, **kwargs):
180
- requires_backends(cls, ["paddle", "paddlenlp"])
181
-
182
- @classmethod
183
- def from_pretrained(cls, *args, **kwargs):
184
- requires_backends(cls, ["paddle", "paddlenlp"])
185
-
186
-
187
- class StableDiffusionMegaPipeline(metaclass=DummyObject):
188
- _backends = ["paddle", "paddlenlp"]
189
-
190
- def __init__(self, *args, **kwargs):
191
- requires_backends(self, ["paddle", "paddlenlp"])
192
-
193
- @classmethod
194
- def from_config(cls, *args, **kwargs):
195
- requires_backends(cls, ["paddle", "paddlenlp"])
196
-
197
- @classmethod
198
- def from_pretrained(cls, *args, **kwargs):
199
- requires_backends(cls, ["paddle", "paddlenlp"])
200
-
201
-
202
- class StableDiffusionPipelineAllInOne(metaclass=DummyObject):
203
- _backends = ["paddle", "paddlenlp"]
204
-
205
- def __init__(self, *args, **kwargs):
206
- requires_backends(self, ["paddle", "paddlenlp"])
207
-
208
- @classmethod
209
- def from_config(cls, *args, **kwargs):
210
- requires_backends(cls, ["paddle", "paddlenlp"])
211
-
212
- @classmethod
213
- def from_pretrained(cls, *args, **kwargs):
214
- requires_backends(cls, ["paddle", "paddlenlp"])
215
-
216
-
217
- class StableDiffusionPipelineSafe(metaclass=DummyObject):
218
- _backends = ["paddle", "paddlenlp"]
219
-
220
- def __init__(self, *args, **kwargs):
221
- requires_backends(self, ["paddle", "paddlenlp"])
222
-
223
- @classmethod
224
- def from_config(cls, *args, **kwargs):
225
- requires_backends(cls, ["paddle", "paddlenlp"])
226
-
227
- @classmethod
228
- def from_pretrained(cls, *args, **kwargs):
229
- requires_backends(cls, ["paddle", "paddlenlp"])
230
-
231
-
232
- class StableDiffusionUpscalePipeline(metaclass=DummyObject):
233
- _backends = ["paddle", "paddlenlp"]
234
-
235
- def __init__(self, *args, **kwargs):
236
- requires_backends(self, ["paddle", "paddlenlp"])
237
-
238
- @classmethod
239
- def from_config(cls, *args, **kwargs):
240
- requires_backends(cls, ["paddle", "paddlenlp"])
241
-
242
- @classmethod
243
- def from_pretrained(cls, *args, **kwargs):
244
- requires_backends(cls, ["paddle", "paddlenlp"])
245
-
246
-
247
- class UnCLIPPipeline(metaclass=DummyObject):
248
- _backends = ["paddle", "paddlenlp"]
249
-
250
- def __init__(self, *args, **kwargs):
251
- requires_backends(self, ["paddle", "paddlenlp"])
252
-
253
- @classmethod
254
- def from_config(cls, *args, **kwargs):
255
- requires_backends(cls, ["paddle", "paddlenlp"])
256
-
257
- @classmethod
258
- def from_pretrained(cls, *args, **kwargs):
259
- requires_backends(cls, ["paddle", "paddlenlp"])
260
-
261
-
262
- class VersatileDiffusionDualGuidedPipeline(metaclass=DummyObject):
263
- _backends = ["paddle", "paddlenlp"]
264
-
265
- def __init__(self, *args, **kwargs):
266
- requires_backends(self, ["paddle", "paddlenlp"])
267
-
268
- @classmethod
269
- def from_config(cls, *args, **kwargs):
270
- requires_backends(cls, ["paddle", "paddlenlp"])
271
-
272
- @classmethod
273
- def from_pretrained(cls, *args, **kwargs):
274
- requires_backends(cls, ["paddle", "paddlenlp"])
275
-
276
-
277
- class VersatileDiffusionImageVariationPipeline(metaclass=DummyObject):
278
- _backends = ["paddle", "paddlenlp"]
279
-
280
- def __init__(self, *args, **kwargs):
281
- requires_backends(self, ["paddle", "paddlenlp"])
282
-
283
- @classmethod
284
- def from_config(cls, *args, **kwargs):
285
- requires_backends(cls, ["paddle", "paddlenlp"])
286
-
287
- @classmethod
288
- def from_pretrained(cls, *args, **kwargs):
289
- requires_backends(cls, ["paddle", "paddlenlp"])
290
-
291
-
292
- class VersatileDiffusionPipeline(metaclass=DummyObject):
293
- _backends = ["paddle", "paddlenlp"]
294
-
295
- def __init__(self, *args, **kwargs):
296
- requires_backends(self, ["paddle", "paddlenlp"])
297
-
298
- @classmethod
299
- def from_config(cls, *args, **kwargs):
300
- requires_backends(cls, ["paddle", "paddlenlp"])
301
-
302
- @classmethod
303
- def from_pretrained(cls, *args, **kwargs):
304
- requires_backends(cls, ["paddle", "paddlenlp"])
305
-
306
-
307
- class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject):
308
- _backends = ["paddle", "paddlenlp"]
309
-
310
- def __init__(self, *args, **kwargs):
311
- requires_backends(self, ["paddle", "paddlenlp"])
312
-
313
- @classmethod
314
- def from_config(cls, *args, **kwargs):
315
- requires_backends(cls, ["paddle", "paddlenlp"])
316
-
317
- @classmethod
318
- def from_pretrained(cls, *args, **kwargs):
319
- requires_backends(cls, ["paddle", "paddlenlp"])
320
-
321
-
322
- class VQDiffusionPipeline(metaclass=DummyObject):
323
- _backends = ["paddle", "paddlenlp"]
324
-
325
- def __init__(self, *args, **kwargs):
326
- requires_backends(self, ["paddle", "paddlenlp"])
327
-
328
- @classmethod
329
- def from_config(cls, *args, **kwargs):
330
- requires_backends(cls, ["paddle", "paddlenlp"])
331
-
332
- @classmethod
333
- def from_pretrained(cls, *args, **kwargs):
334
- requires_backends(cls, ["paddle", "paddlenlp"])
 
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_123821KB.py DELETED
@@ -1,122 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
-
5
- from . import layers_123821KB as layers
6
-
7
-
8
- class BaseASPPNet(nn.Module):
9
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
10
- super(BaseASPPNet, self).__init__()
11
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
12
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
13
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
14
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
15
-
16
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
17
-
18
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
19
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
20
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
21
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
22
-
23
- def __call__(self, x):
24
- h, e1 = self.enc1(x)
25
- h, e2 = self.enc2(h)
26
- h, e3 = self.enc3(h)
27
- h, e4 = self.enc4(h)
28
-
29
- h = self.aspp(h)
30
-
31
- h = self.dec4(h, e4)
32
- h = self.dec3(h, e3)
33
- h = self.dec2(h, e2)
34
- h = self.dec1(h, e1)
35
-
36
- return h
37
-
38
-
39
- class CascadedASPPNet(nn.Module):
40
- def __init__(self, n_fft):
41
- super(CascadedASPPNet, self).__init__()
42
- self.stg1_low_band_net = BaseASPPNet(2, 32)
43
- self.stg1_high_band_net = BaseASPPNet(2, 32)
44
-
45
- self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
46
- self.stg2_full_band_net = BaseASPPNet(16, 32)
47
-
48
- self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
49
- self.stg3_full_band_net = BaseASPPNet(32, 64)
50
-
51
- self.out = nn.Conv2d(64, 2, 1, bias=False)
52
- self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
53
- self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
54
-
55
- self.max_bin = n_fft // 2
56
- self.output_bin = n_fft // 2 + 1
57
-
58
- self.offset = 128
59
-
60
- def forward(self, x, aggressiveness=None):
61
- mix = x.detach()
62
- x = x.clone()
63
-
64
- x = x[:, :, : self.max_bin]
65
-
66
- bandw = x.size()[2] // 2
67
- aux1 = torch.cat(
68
- [
69
- self.stg1_low_band_net(x[:, :, :bandw]),
70
- self.stg1_high_band_net(x[:, :, bandw:]),
71
- ],
72
- dim=2,
73
- )
74
-
75
- h = torch.cat([x, aux1], dim=1)
76
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
77
-
78
- h = torch.cat([x, aux1, aux2], dim=1)
79
- h = self.stg3_full_band_net(self.stg3_bridge(h))
80
-
81
- mask = torch.sigmoid(self.out(h))
82
- mask = F.pad(
83
- input=mask,
84
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
85
- mode="replicate",
86
- )
87
-
88
- if self.training:
89
- aux1 = torch.sigmoid(self.aux1_out(aux1))
90
- aux1 = F.pad(
91
- input=aux1,
92
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
93
- mode="replicate",
94
- )
95
- aux2 = torch.sigmoid(self.aux2_out(aux2))
96
- aux2 = F.pad(
97
- input=aux2,
98
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
99
- mode="replicate",
100
- )
101
- return mask * mix, aux1 * mix, aux2 * mix
102
- else:
103
- if aggressiveness:
104
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
105
- mask[:, :, : aggressiveness["split_bin"]],
106
- 1 + aggressiveness["value"] / 3,
107
- )
108
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
109
- mask[:, :, aggressiveness["split_bin"] :],
110
- 1 + aggressiveness["value"],
111
- )
112
-
113
- return mask * mix
114
-
115
- def predict(self, x_mag, aggressiveness=None):
116
- h = self.forward(x_mag, aggressiveness)
117
-
118
- if self.offset > 0:
119
- h = h[:, :, :, self.offset : -self.offset]
120
- assert h.size()[3] > 0
121
-
122
- return h
 
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6.md DELETED
@@ -1,46 +0,0 @@
1
- # Engineering Wiki
2
-
3
- <aside>
4
- 💡 **Notion Tip:** Use this template to organize important information for your team. Add owners, verification, and tags to pages to keep them up to date. Just replace this sample content with your own.
5
-
6
- </aside>
7
-
8
- ## Codebase
9
-
10
- ---
11
-
12
- [Code Reviews](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Code%20Reviews%202b60c26d2a2e4a348f8f14c77023c385.md)
13
-
14
- [ABstract(插件化AB Testing平台)](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/ABstract%EF%BC%88%E6%8F%92%E4%BB%B6%E5%8C%96AB%20Testing%E5%B9%B3%E5%8F%B0%EF%BC%89%20746b87acd94643ca871ec661b63f196c.md)
15
-
16
- [VUE](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/VUE%209501304a2b03470cad0eea93992d65ae.md)
17
-
18
- [Backend](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Backend%20137c41fa386f43249b249e956eb06bb0.md)
19
-
20
- [AWS](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/AWS%20b022fe0cb7084cc0b64624f7bc8cde2c.md)
21
-
22
- [Redis](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Redis%209e063b60eca24a1783c225cfdc21dd8c.md)
23
-
24
- [CircleCI](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/CircleCI%20719905fcb593423cad302d3fdc1c5dff.md)
25
-
26
- [Smart Domain](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Smart%20Domain%203b0daf8bb0d740439426cfab214f1fa6.md)
27
-
28
- ## Guides & Processes
29
-
30
- ---
31
-
32
- [Getting Started](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Getting%20Started%206bc871dcdd4a4554b5b22c0c40740841.md)
33
-
34
- [Engineering Guidelines](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Engineering%20Guidelines%204208cbd4733d4f6f94982f3fb24f6379.md)
35
-
36
- [Development Lifecycle ](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Development%20Lifecycle%20e20a5470e52f49e9bbc4f255cf81db4b.md)
37
-
38
- [How to Deploy](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/How%20to%20Deploy%20b7c4f3fd308944af8ba4637ec40fa4f9.md)
39
-
40
- [Useful Commands](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Useful%20Commands%208a05b1de77ec44b6a55e388c2cc7fe47.md)
41
-
42
- [Engineering Interviews](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Engineering%20Interviews%204be8039581d04456b0151f2cc4b22130.md)
43
-
44
- [How to QA ](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/How%20to%20QA%202f036148193a4fccac2c9e8ae9e6d197.md)
45
-
46
- [Engineering Wiki](Engineering%20Wiki%202402f5396a3244fdb3f1d135bdb0f3d6/Engineering%20Wiki%208da06b3dcf1b4eaaa3e90aa70feefe56.md)
 
spaces/AIConsultant/MusicGen/docs/DATASETS.md DELETED
@@ -1,82 +0,0 @@
1
- # AudioCraft datasets
2
-
3
- Our dataset manifest files consist in 1-json-per-line files, potentially gzipped,
4
- as `data.jsons` or `data.jsons.gz` files. This JSON contains the path to the audio
5
- file and associated metadata. The manifest files are then provided in the configuration,
6
- as `datasource` sub-configuration. A datasource contains the pointers to the paths of
7
- the manifest files for each AudioCraft stage (or split) along with additional information
8
- (eg. maximum sample rate to use against this dataset). All the datasources are under the
9
- `dset` group config, with a dedicated configuration file for each dataset.
10
-
11
- ## Getting started
12
-
13
- ### Example
14
-
15
- See the provided example in the directory that provides a manifest to use the example dataset
16
- provided under the [dataset folder](../dataset/example).
17
-
18
- The manifest files are stored in the [egs folder](../egs/example).
19
-
20
- ```shell
21
- egs/
22
- example/data.json.gz
23
- ```
24
-
25
- A datasource is defined in the configuration folder, in the dset group config for this dataset
26
- at [config/dset/audio/example](../config/dset/audio/example.yaml):
27
-
28
- ```shell
29
- # @package __global__
30
-
31
- datasource:
32
- max_sample_rate: 44100
33
- max_channels: 2
34
-
35
- train: egs/example
36
- valid: egs/example
37
- evaluate: egs/example
38
- generate: egs/example
39
- ```
40
-
41
- For proper dataset, one should create manifest for each of the splits and specify the correct path
42
- to the given manifest in the datasource for each split.
43
-
44
- Then, using a dataset through the configuration can be done pointing to the
45
- corresponding dataset configuration:
46
- ```shell
47
- dset=<dataset_name> # <dataset_name> should match the yaml file name
48
-
49
- # for example
50
- dset=audio/example
51
- ```
52
-
53
- ### Creating manifest files
54
-
55
- Assuming you want to create manifest files to load with AudioCraft's AudioDataset, you can use
56
- the following command to create new manifest files from a given folder containing audio files:
57
-
58
- ```shell
59
- python -m audiocraft.data.audio_dataset <path_to_dataset_folder> egs/my_dataset/my_dataset_split/data.jsonl.gz
60
-
61
- # For example to generate the manifest for dset=audio/example
62
- # note: we don't use any split and we don't compress the jsonl file for this dummy example
63
- python -m audiocraft.data.audio_dataset dataset/example egs/example/data.jsonl
64
-
65
- # More info with: python -m audiocraft.data.audio_dataset --help
66
- ```
67
-
68
- ## Additional information
69
-
70
- ### MusicDataset and metadata
71
-
72
- The MusicDataset is an AudioDataset with additional metadata. The MusicDataset expects
73
- the additional metadata to be stored in a JSON file that has the same path as the corresponding
74
- audio file, but with a `.json` extension.
75
-
76
- ### SoundDataset and metadata
77
-
78
- The SoundDataset is an AudioDataset with descriptions metadata. Similarly to the MusicDataset,
79
- the SoundDataset expects the additional metadata to be stored in a JSON file that has the same
80
- path as the corresponding audio file, but with a `.json` extension. Additionally, the SoundDataset
81
- supports an additional parameter pointing to an extra folder `external_metadata_source` containing
82
- all the JSON metadata files given they have the same filename as the audio file.
 
spaces/AICopilot/Dropbox/app.py DELETED
@@ -1,28 +0,0 @@
1
- import streamlit as st
2
-
3
- # query params exist
4
- try:
5
- options = ['cat', 'dog', 'mouse', 'bat', 'duck']
6
-
7
- query_params = st.experimental_get_query_params()
8
- query_option = query_params['option'][0] #throws an exception when visiting http://host:port
9
-
10
- option_selected = st.sidebar.selectbox('Pick option',
11
- options,
12
- index=options.index(query_option))
13
- if option_selected:
14
- st.experimental_set_query_params(option=option_selected)
15
-
16
- # run when query params don't exist. e.g on first launch
17
- except: # catch exception and set query param to predefined value
18
- options = ['cat', 'dog', 'mouse', 'bat', 'duck']
19
- st.experimental_set_query_params(option=options[1]) # defaults to dog
20
-
21
- query_params = st.experimental_get_query_params()
22
- query_option = query_params['option'][0]
23
-
24
- option_selected = st.sidebar.selectbox('Pick option',
25
- options,
26
- index=options.index(query_option))
27
- if option_selected:
28
- st.experimental_set_query_params(option=option_selected)
 
spaces/AIFILMS/StyleGANEX/webUI/styleganex_model.py DELETED
@@ -1,492 +0,0 @@
1
- from __future__ import annotations
2
- import numpy as np
3
- import gradio as gr
4
-
5
- import os
6
- import pathlib
7
- import gc
8
- import torch
9
- import dlib
10
- import cv2
11
- import PIL
12
- from tqdm import tqdm
13
- import numpy as np
14
- import torch.nn.functional as F
15
- import torchvision
16
- from torchvision import transforms, utils
17
- from argparse import Namespace
18
- from datasets import augmentations
19
- from huggingface_hub import hf_hub_download
20
- from scripts.align_all_parallel import align_face
21
- from latent_optimization import latent_optimization
22
- from utils.inference_utils import save_image, load_image, visualize, get_video_crop_parameter, tensor2cv2, tensor2label, labelcolormap
23
- from models.psp import pSp
24
- from models.bisenet.model import BiSeNet
25
- from models.stylegan2.model import Generator
26
-
27
- class Model():
28
- def __init__(self, device):
29
- super().__init__()
30
-
31
- self.device = device
32
- self.task_name = None
33
- self.editing_w = None
34
- self.pspex = None
35
- self.landmarkpredictor = dlib.shape_predictor(hf_hub_download('PKUWilliamYang/VToonify', 'models/shape_predictor_68_face_landmarks.dat'))
36
- self.transform = transforms.Compose([
37
- transforms.ToTensor(),
38
- transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]),
39
- ])
40
- self.to_tensor = transforms.Compose([
41
- transforms.ToTensor(),
42
- transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
43
- ])
44
- self.maskpredictor = BiSeNet(n_classes=19)
45
- self.maskpredictor.load_state_dict(torch.load(hf_hub_download('PKUWilliamYang/VToonify', 'models/faceparsing.pth'), map_location='cpu'))
46
- self.maskpredictor.to(self.device).eval()
47
- self.parameters = {}
48
- self.parameters['inversion'] = {'path':'pretrained_models/styleganex_inversion.pt', 'image_path':'./data/ILip77SbmOE.png'}
49
- self.parameters['sr-32'] = {'path':'pretrained_models/styleganex_sr32.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'}
50
- self.parameters['sr'] = {'path':'pretrained_models/styleganex_sr.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'}
51
- self.parameters['sketch2face'] = {'path':'pretrained_models/styleganex_sketch2face.pt', 'image_path':'./data/234_sketch.jpg'}
52
- self.parameters['mask2face'] = {'path':'pretrained_models/styleganex_mask2face.pt', 'image_path':'./data/540.jpg'}
53
- self.parameters['edit_age'] = {'path':'pretrained_models/styleganex_edit_age.pt', 'image_path':'./data/390.mp4'}
54
- self.parameters['edit_hair'] = {'path':'pretrained_models/styleganex_edit_hair.pt', 'image_path':'./data/390.mp4'}
55
- self.parameters['toonify_pixar'] = {'path':'pretrained_models/styleganex_toonify_pixar.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'}
56
- self.parameters['toonify_cartoon'] = {'path':'pretrained_models/styleganex_toonify_cartoon.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'}
57
- self.parameters['toonify_arcane'] = {'path':'pretrained_models/styleganex_toonify_arcane.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'}
58
- self.print_log = True
59
- self.editing_dicts = torch.load(hf_hub_download('PKUWilliamYang/StyleGANEX', 'direction_dics.pt'))
60
- self.generator = Generator(1024, 512, 8)
61
- self.model_type = None
62
- self.error_info = 'Error: no face detected! \
63
- StyleGANEX uses dlib.get_frontal_face_detector but sometimes it fails to detect a face. \
64
- You can try several times or use other images until a face is detected, \
65
- then switch back to the original image.'
66
-
67
- def load_model(self, task_name: str) -> None:
68
- if task_name == self.task_name:
69
- return
70
- if self.pspex is not None:
71
- del self.pspex
72
- torch.cuda.empty_cache()
73
- gc.collect()
74
- path = self.parameters[task_name]['path']
75
- local_path = hf_hub_download('PKUWilliamYang/StyleGANEX', path)
76
- ckpt = torch.load(local_path, map_location='cpu')
77
- opts = ckpt['opts']
78
- opts['checkpoint_path'] = local_path
79
- opts['device'] = self.device
80
- opts = Namespace(**opts)
81
- self.pspex = pSp(opts, ckpt).to(self.device).eval()
82
- self.pspex.latent_avg = self.pspex.latent_avg.to(self.device)
83
- if 'editing_w' in ckpt.keys():
84
- self.editing_w = ckpt['editing_w'].clone().to(self.device)
85
- self.task_name = task_name
86
- torch.cuda.empty_cache()
87
- gc.collect()
88
-
89
- def load_G_model(self, model_type: str) -> None:
90
- if model_type == self.model_type:
91
- return
92
- torch.cuda.empty_cache()
93
- gc.collect()
94
- local_path = hf_hub_download('rinong/stylegan-nada-models', model_type+'.pt')
95
- self.generator.load_state_dict(torch.load(local_path, map_location='cpu')['g_ema'], strict=False)
96
- self.generator.to(self.device).eval()
97
- self.model_type = model_type
98
- torch.cuda.empty_cache()
99
- gc.collect()
100
-
101
- def tensor2np(self, img):
102
- tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
103
- return tmp
104
-
105
- def process_sr(self, input_image: str, resize_scale: int, model: str) -> list[np.ndarray]:
106
- #false_image = np.zeros((256,256,3), np.uint8)
107
- #info = 'Error: no face detected! Please retry or change the photo.'
108
-
109
- if input_image is None:
110
- #return [false_image, false_image], 'Error: fail to load empty file.'
111
- raise gr.Error("Error: fail to load empty file.")
112
- frame = cv2.imread(input_image)
113
- if frame is None:
114
- #return [false_image, false_image], 'Error: fail to load the image.'
115
- raise gr.Error("Error: fail to load the image.")
116
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
117
-
118
- if model is None or model == 'SR for 32x':
119
- task_name = 'sr-32'
120
- resize_scale = 32
121
- else:
122
- task_name = 'sr'
123
-
124
- with torch.no_grad():
125
- paras = get_video_crop_parameter(frame, self.landmarkpredictor)
126
- if paras is None:
127
- #return [false_image, false_image], info
128
- raise gr.Error(self.error_info)
129
- h,w,top,bottom,left,right,scale = paras
130
- H, W = int(bottom-top), int(right-left)
131
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
132
- x1 = PIL.Image.fromarray(np.uint8(frame))
133
- x1 = augmentations.BilinearResize(factors=[resize_scale//4])(x1)
134
- x1_up = x1.resize((W, H))
135
- x2_up = align_face(np.array(x1_up), self.landmarkpredictor)
136
- if x2_up is None:
137
- #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.'
138
- raise gr.Error(self.error_info)
139
- x1_up = transforms.ToTensor()(x1_up).unsqueeze(dim=0).to(self.device) * 2 - 1
140
- x2_up = self.transform(x2_up).unsqueeze(dim=0).to(self.device)
141
- if self.print_log: print('image loaded')
142
- self.load_model(task_name)
143
- if self.print_log: print('model %s loaded'%(task_name))
144
- y_hat = torch.clamp(self.pspex(x1=x1_up, x2=x2_up, use_skip=self.pspex.opts.use_skip, resize=False), -1, 1)
145
-
146
- return [self.tensor2np(x1_up[0]), self.tensor2np(y_hat[0])]
147
-
148
-
149
- def process_s2f(self, input_image: str, seed: int) -> np.ndarray:
150
- task_name = 'sketch2face'
151
- with torch.no_grad():
152
- x1 = transforms.ToTensor()(PIL.Image.open(input_image)).unsqueeze(0).to(self.device)
153
- if x1.shape[2] > 513:
154
- x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8]
155
- if x1.shape[3] > 513:
156
- x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8]
157
- x1 = x1[:,0:1] # uploaded files will be transformed to 3-channel RGB image!
158
- if self.print_log: print('image loaded')
159
- self.load_model(task_name)
160
- if self.print_log: print('model %s loaded'%(task_name))
161
- self.pspex.train()
162
- torch.manual_seed(seed)
163
- y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip,
164
- inject_latent= self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7)
165
- y_hat = torch.clamp(y_hat, -1, 1)
166
- self.pspex.eval()
167
- return self.tensor2np(y_hat[0])
168
-
169
- def process_m2f(self, input_image: str, input_type: str, seed: int) -> list[np.ndarray]:
170
- #false_image = np.zeros((256,256,3), np.uint8)
171
- if input_image is None:
172
- raise gr.Error('Error: fail to load empty file.' )
173
- #return [false_image, false_image], 'Error: fail to load empty file.'
174
- task_name = 'mask2face'
175
- with torch.no_grad():
176
- if input_type == 'parsing mask':
177
- x1 = PIL.Image.open(input_image).getchannel(0) # uploaded files will be transformed to 3-channel RGB image!
178
- x1 = augmentations.ToOneHot(19)(x1)
179
- x1 = transforms.ToTensor()(x1).unsqueeze(dim=0).float().to(self.device)
180
- #print(x1.shape)
181
- else:
182
- frame = cv2.imread(input_image)
183
- if frame is None:
184
- #return [false_image, false_image], 'Error: fail to load the image.'
185
- raise gr.Error('Error: fail to load the image.' )
186
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
187
- paras = get_video_crop_parameter(frame, self.landmarkpredictor)
188
- if paras is None:
189
- #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.'
190
- raise gr.Error(self.error_info)
191
- h,w,top,bottom,left,right,scale = paras
192
- H, W = int(bottom-top), int(right-left)
193
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
194
- # convert face image to segmentation mask
195
- x1 = self.to_tensor(frame).unsqueeze(0).to(self.device)
196
- # upsample image for precise segmentation
197
- x1 = F.interpolate(x1, scale_factor=2, mode='bilinear')
198
- x1 = self.maskpredictor(x1)[0]
199
- x1 = F.interpolate(x1, scale_factor=0.5).argmax(dim=1)
200
- x1 = F.one_hot(x1, num_classes=19).permute(0, 3, 1, 2).float().to(self.device)
201
-
202
- if x1.shape[2] > 513:
203
- x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8]
204
- if x1.shape[3] > 513:
205
- x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8]
206
-
207
- x1_viz = (tensor2label(x1[0], 19) / 192 * 256).astype(np.uint8)
208
-
209
- if self.print_log: print('image loaded')
210
- self.load_model(task_name)
211
- if self.print_log: print('model %s loaded'%(task_name))
212
- self.pspex.train()
213
- torch.manual_seed(seed)
214
- y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip,
215
- inject_latent= self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7)
216
- y_hat = torch.clamp(y_hat, -1, 1)
217
- self.pspex.eval()
218
- return [x1_viz, self.tensor2np(y_hat[0])]
219
-
220
-
221
- def process_editing(self, input_image: str, scale_factor: float, model_type: str) -> np.ndarray:
222
- #false_image = np.zeros((256,256,3), np.uint8)
223
- #info = 'Error: no face detected! Please retry or change the photo.'
224
-
225
- if input_image is None:
226
- #return false_image, false_image, 'Error: fail to load empty file.'
227
- raise gr.Error('Error: fail to load empty file.')
228
- frame = cv2.imread(input_image)
229
- if frame is None:
230
- #return false_image, false_image, 'Error: fail to load the image.'
231
- raise gr.Error('Error: fail to load the image.')
232
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
233
-
234
- if model_type is None or model_type == 'reduce age':
235
- task_name = 'edit_age'
236
- else:
237
- task_name = 'edit_hair'
238
-
239
- with torch.no_grad():
240
- paras = get_video_crop_parameter(frame, self.landmarkpredictor)
241
- if paras is None:
242
- #return false_image, false_image, info
243
- raise gr.Error(self.error_info)
244
- h,w,top,bottom,left,right,scale = paras
245
- H, W = int(bottom-top), int(right-left)
246
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
247
- x1 = self.transform(frame).unsqueeze(0).to(self.device)
248
- x2 = align_face(frame, self.landmarkpredictor)
249
- if x2 is None:
250
- #return false_image, 'Error: no face detected! Please retry or change the photo.'
251
- raise gr.Error(self.error_info)
252
- x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
253
- if self.print_log: print('image loaded')
254
- self.load_model(task_name)
255
- if self.print_log: print('model %s loaded'%(task_name))
256
- y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True,
257
- resize=False, editing_w= - scale_factor* self.editing_w[0:1])
258
- y_hat = torch.clamp(y_hat, -1, 1)
259
-
260
- return self.tensor2np(y_hat[0])
261
-
262
- def process_vediting(self, input_video: str, scale_factor: float, model_type: str, frame_num: int) -> tuple[list[np.ndarray], str]:
263
- #false_image = np.zeros((256,256,3), np.uint8)
264
- #info = 'Error: no face detected! Please retry or change the video.'
265
-
266
- if input_video is None:
267
- #return [false_image], 'default.mp4', 'Error: fail to load empty file.'
268
- raise gr.Error('Error: fail to load empty file.')
269
- video_cap = cv2.VideoCapture(input_video)
270
- success, frame = video_cap.read()
271
- if success is False:
272
- #return [false_image], 'default.mp4', 'Error: fail to load the video.'
273
- raise gr.Error('Error: fail to load the video.')
274
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
275
-
276
- if model_type is None or model_type == 'reduce age':
277
- task_name = 'edit_age'
278
- else:
279
- task_name = 'edit_hair'
280
-
281
- with torch.no_grad():
282
- paras = get_video_crop_parameter(frame, self.landmarkpredictor)
283
- if paras is None:
284
- #return [false_image], 'default.mp4', info
285
- raise gr.Error(self.error_info)
286
- h,w,top,bottom,left,right,scale = paras
287
- H, W = int(bottom-top), int(right-left)
288
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
289
- x1 = self.transform(frame).unsqueeze(0).to(self.device)
290
- x2 = align_face(frame, self.landmarkpredictor)
291
- if x2 is None:
292
- #return [false_image], 'default.mp4', info
293
- raise gr.Error(self.error_info)
294
- x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
295
- if self.print_log: print('first frame loaded')
296
- self.load_model(task_name)
297
- if self.print_log: print('model %s loaded'%(task_name))
298
-
299
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
300
- videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H))
301
-
302
- viz_frames = []
303
- for i in range(frame_num):
304
- if i > 0:
305
- success, frame = video_cap.read()
306
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
307
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
308
- x1 = self.transform(frame).unsqueeze(0).to(self.device)
309
- y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True,
310
- resize=False, editing_w= - scale_factor * self.editing_w[0:1])
311
- y_hat = torch.clamp(y_hat, -1, 1)
312
- videoWriter.write(tensor2cv2(y_hat[0].cpu()))
313
- if i < min(frame_num, 4):
314
- viz_frames += [self.tensor2np(y_hat[0])]
315
-
316
- videoWriter.release()
317
-
318
- return viz_frames, 'output.mp4'
319
-
320
-
321
- def process_toonify(self, input_image: str, style_type: str) -> np.ndarray:
322
- #false_image = np.zeros((256,256,3), np.uint8)
323
- #info = 'Error: no face detected! Please retry or change the photo.'
324
-
325
- if input_image is None:
326
- raise gr.Error('Error: fail to load empty file.')
327
- #return false_image, false_image, 'Error: fail to load empty file.'
328
- frame = cv2.imread(input_image)
329
- if frame is None:
330
- raise gr.Error('Error: fail to load the image.')
331
- #return false_image, false_image, 'Error: fail to load the image.'
332
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
333
-
334
- if style_type is None or style_type == 'Pixar':
335
- task_name = 'toonify_pixar'
336
- elif style_type == 'Cartoon':
337
- task_name = 'toonify_cartoon'
338
- else:
339
- task_name = 'toonify_arcane'
340
-
341
- with torch.no_grad():
342
- paras = get_video_crop_parameter(frame, self.landmarkpredictor)
343
- if paras is None:
344
- raise gr.Error(self.error_info)
345
- #return false_image, false_image, info
346
- h,w,top,bottom,left,right,scale = paras
347
- H, W = int(bottom-top), int(right-left)
348
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
349
- x1 = self.transform(frame).unsqueeze(0).to(self.device)
350
- x2 = align_face(frame, self.landmarkpredictor)
351
- if x2 is None:
352
- raise gr.Error(self.error_info)
353
- #return false_image, 'Error: no face detected! Please retry or change the photo.'
354
- x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
355
- if self.print_log: print('image loaded')
356
- self.load_model(task_name)
357
- if self.print_log: print('model %s loaded'%(task_name))
358
- y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False)
359
- y_hat = torch.clamp(y_hat, -1, 1)
360
-
361
- return self.tensor2np(y_hat[0])
362
-
363
-
364
- def process_vtoonify(self, input_video: str, style_type: str, frame_num: int) -> tuple[list[np.ndarray], str]:
365
- #false_image = np.zeros((256,256,3), np.uint8)
366
- #info = 'Error: no face detected! Please retry or change the video.'
367
-
368
- if input_video is None:
369
- raise gr.Error('Error: fail to load empty file.')
370
- #return [false_image], 'default.mp4', 'Error: fail to load empty file.'
371
- video_cap = cv2.VideoCapture(input_video)
372
- success, frame = video_cap.read()
373
- if success is False:
374
- raise gr.Error('Error: fail to load the video.')
375
- #return [false_image], 'default.mp4', 'Error: fail to load the video.'
376
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
377
-
378
- if style_type is None or style_type == 'Pixar':
379
- task_name = 'toonify_pixar'
380
- elif style_type == 'Cartoon':
381
- task_name = 'toonify_cartoon'
382
- else:
383
- task_name = 'toonify_arcane'
384
-
385
- with torch.no_grad():
386
- paras = get_video_crop_parameter(frame, self.landmarkpredictor)
387
- if paras is None:
388
- raise gr.Error(self.error_info)
389
- #return [false_image], 'default.mp4', info
390
- h,w,top,bottom,left,right,scale = paras
391
- H, W = int(bottom-top), int(right-left)
392
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
393
- x1 = self.transform(frame).unsqueeze(0).to(self.device)
394
- x2 = align_face(frame, self.landmarkpredictor)
395
- if x2 is None:
396
- raise gr.Error(self.error_info)
397
- #return [false_image], 'default.mp4', info
398
- x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
399
- if self.print_log: print('first frame loaded')
400
- self.load_model(task_name)
401
- if self.print_log: print('model %s loaded'%(task_name))
402
-
403
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
404
- videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H))
405
-
406
- viz_frames = []
407
- for i in range(frame_num):
408
- if i > 0:
409
- success, frame = video_cap.read()
410
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
411
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
412
- x1 = self.transform(frame).unsqueeze(0).to(self.device)
413
- y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False)
414
- y_hat = torch.clamp(y_hat, -1, 1)
415
- videoWriter.write(tensor2cv2(y_hat[0].cpu()))
416
- if i < min(frame_num, 4):
417
- viz_frames += [self.tensor2np(y_hat[0])]
418
-
419
- videoWriter.release()
420
-
421
- return viz_frames, 'output.mp4'
422
-
423
-
424
- def process_inversion(self, input_image: str, optimize: str, input_latent: file-object, editing_options: str,
425
- scale_factor: float, seed: int) -> tuple[np.ndarray, np.ndarray]:
426
- #false_image = np.zeros((256,256,3), np.uint8)
427
- #info = 'Error: no face detected! Please retry or change the photo.'
428
-
429
- if input_image is None:
430
- raise gr.Error('Error: fail to load empty file.')
431
- #return false_image, false_image, 'Error: fail to load empty file.'
432
- frame = cv2.imread(input_image)
433
- if frame is None:
434
- raise gr.Error('Error: fail to load the image.')
435
- #return false_image, false_image, 'Error: fail to load the image.'
436
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
437
-
438
- task_name = 'inversion'
439
- self.load_model(task_name)
440
- if self.print_log: print('model %s loaded'%(task_name))
441
- if input_latent is not None:
442
- if '.pt' not in input_latent.name:
443
- raise gr.Error('Error: the latent format is wrong')
444
- #return false_image, false_image, 'Error: the latent format is wrong'
445
- latents = torch.load(input_latent.name)
446
- if 'wplus' not in latents.keys() or 'f' not in latents.keys():
447
- raise gr.Error('Error: the latent format is wrong')
448
- #return false_image, false_image, 'Error: the latent format is wrong'
449
- wplus = latents['wplus'].to(self.device) # w+
450
- f = [latents['f'][0].to(self.device)] # f
451
- elif optimize == 'Latent optimization':
452
- wplus, f, _, _, _ = latent_optimization(frame, self.pspex, self.landmarkpredictor,
453
- step=500, device=self.device)
454
- else:
455
- with torch.no_grad():
456
- paras = get_video_crop_parameter(frame, self.landmarkpredictor)
457
- if paras is None:
458
- raise gr.Error(self.error_info)
459
- #return false_image, false_image, info
460
- h,w,top,bottom,left,right,scale = paras
461
- H, W = int(bottom-top), int(right-left)
462
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
463
- x1 = self.transform(frame).unsqueeze(0).to(self.device)
464
- x2 = align_face(frame, self.landmarkpredictor)
465
- if x2 is None:
466
- raise gr.Error(self.error_info)
467
- #return false_image, false_image, 'Error: no face detected! Please retry or change the photo.'
468
- x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
469
- if self.print_log: print('image loaded')
470
- wplus = self.pspex.encoder(x2) + self.pspex.latent_avg.unsqueeze(0)
471
- _, f = self.pspex.encoder(x1, return_feat=True)
472
-
473
- with torch.no_grad():
474
- y_hat, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f)
475
- y_hat = torch.clamp(y_hat, -1, 1)
476
-
477
- if 'Style Mixing' in editing_options:
478
- torch.manual_seed(seed)
479
- wplus[:, 8:] = self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,10,1) * 0.7
480
- y_hat_edit, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f)
481
- elif 'Attribute Editing' in editing_options:
482
- editing_w = self.editing_dicts[editing_options[19:]].to(self.device)
483
- y_hat_edit, _ = self.pspex.decoder([wplus+scale_factor*editing_w], input_is_latent=True, first_layer_feature=f)
484
- elif 'Domain Transfer' in editing_options:
485
- self.load_G_model(editing_options[17:])
486
- if self.print_log: print('model %s loaded'%(editing_options[17:]))
487
- y_hat_edit, _ = self.generator([wplus], input_is_latent=True, first_layer_feature=f)
488
- else:
489
- y_hat_edit = y_hat
490
- y_hat_edit = torch.clamp(y_hat_edit, -1, 1)
491
-
492
- return self.tensor2np(y_hat[0]), self.tensor2np(y_hat_edit[0])
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/diffsinger_task.py DELETED
@@ -1,490 +0,0 @@
1
- import torch
2
-
3
- import utils
4
- from utils.hparams import hparams
5
- from modules.diff.net import DiffNet
6
- from modules.diff.shallow_diffusion_tts import GaussianDiffusion, OfflineGaussianDiffusion
7
- from tasks.svs.diffspeech_task import DiffSpeechTask
8
- from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
9
- from modules.fastspeech.pe import PitchExtractor
10
- from modules.fastspeech.fs2 import FastSpeech2
11
- from modules.diffsinger_midi.fs2 import FastSpeech2MIDI
12
- from modules.fastspeech.tts_modules import mel2ph_to_dur
13
-
14
- from modules.diff.candidate_decoder import FFT
15
- from utils.pitch_utils import denorm_f0
16
- from tasks.tts.fs2_utils import FastSpeechDataset
17
- from tasks.tts.fs2 import FastSpeech2Task
18
-
19
- import numpy as np
20
- import os
21
- import torch.nn.functional as F
22
-
23
- DIFF_DECODERS = {
24
- 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),
25
- 'fft': lambda hp: FFT(
26
- hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
27
- }
28
-
29
-
30
- class DiffSingerTask(DiffSpeechTask):
31
- def __init__(self):
32
- super(DiffSingerTask, self).__init__()
33
- self.dataset_cls = FastSpeechDataset
34
- self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
35
- if hparams.get('pe_enable') is not None and hparams['pe_enable']:
36
- self.pe = PitchExtractor().cuda()
37
- utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
38
- self.pe.eval()
39
-
40
- def build_tts_model(self):
41
- # import torch
42
- # from tqdm import tqdm
43
- # v_min = torch.ones([80]) * 100
44
- # v_max = torch.ones([80]) * -100
45
- # for i, ds in enumerate(tqdm(self.dataset_cls('train'))):
46
- # v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max)
47
- # v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min)
48
- # if i % 100 == 0:
49
- # print(i, v_min, v_max)
50
- # print('final', v_min, v_max)
51
- mel_bins = hparams['audio_num_mel_bins']
52
- self.model = GaussianDiffusion(
53
- phone_encoder=self.phone_encoder,
54
- out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
55
- timesteps=hparams['timesteps'],
56
- K_step=hparams['K_step'],
57
- loss_type=hparams['diff_loss_type'],
58
- spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
59
- )
60
- if hparams['fs2_ckpt'] != '':
61
- utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
62
- # self.model.fs2.decoder = None
63
- for k, v in self.model.fs2.named_parameters():
64
- v.requires_grad = False
65
-
66
- def validation_step(self, sample, batch_idx):
67
- outputs = {}
68
- txt_tokens = sample['txt_tokens'] # [B, T_t]
69
-
70
- target = sample['mels'] # [B, T_s, 80]
71
- energy = sample['energy']
72
- # fs2_mel = sample['fs2_mels']
73
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
74
- mel2ph = sample['mel2ph']
75
- f0 = sample['f0']
76
- uv = sample['uv']
77
-
78
- outputs['losses'] = {}
79
-
80
- outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
81
-
82
-
83
- outputs['total_loss'] = sum(outputs['losses'].values())
84
- outputs['nsamples'] = sample['nsamples']
85
- outputs = utils.tensors_to_scalars(outputs)
86
- if batch_idx < hparams['num_valid_plots']:
87
- model_out = self.model(
88
- txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True)
89
-
90
- if hparams.get('pe_enable') is not None and hparams['pe_enable']:
91
- gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
92
- pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel
93
- else:
94
- gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
95
- pred_f0 = model_out.get('f0_denorm')
96
- self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
97
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
98
- self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')
99
- return outputs
100
-
101
-
102
- class ShallowDiffusionOfflineDataset(FastSpeechDataset):
103
- def __getitem__(self, index):
104
- sample = super(ShallowDiffusionOfflineDataset, self).__getitem__(index)
105
- item = self._get_item(index)
106
-
107
- if self.prefix != 'train' and hparams['fs2_ckpt'] != '':
108
- fs2_ckpt = os.path.dirname(hparams['fs2_ckpt'])
109
- item_name = item['item_name']
110
- fs2_mel = torch.Tensor(np.load(f'{fs2_ckpt}/P_mels_npy/{item_name}.npy')) # ~M generated by FFT-singer.
111
- sample['fs2_mel'] = fs2_mel
112
- return sample
113
-
114
- def collater(self, samples):
115
- batch = super(ShallowDiffusionOfflineDataset, self).collater(samples)
116
- if self.prefix != 'train' and hparams['fs2_ckpt'] != '':
117
- batch['fs2_mels'] = utils.collate_2d([s['fs2_mel'] for s in samples], 0.0)
118
- return batch
119
-
120
-
121
- class DiffSingerOfflineTask(DiffSingerTask):
122
- def __init__(self):
123
- super(DiffSingerOfflineTask, self).__init__()
124
- self.dataset_cls = ShallowDiffusionOfflineDataset
125
-
126
- def build_tts_model(self):
127
- mel_bins = hparams['audio_num_mel_bins']
128
- self.model = OfflineGaussianDiffusion(
129
- phone_encoder=self.phone_encoder,
130
- out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
131
- timesteps=hparams['timesteps'],
132
- K_step=hparams['K_step'],
133
- loss_type=hparams['diff_loss_type'],
134
- spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
135
- )
136
- # if hparams['fs2_ckpt'] != '':
137
- # utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
138
- # self.model.fs2.decoder = None
139
-
140
- def run_model(self, model, sample, return_output=False, infer=False):
141
- txt_tokens = sample['txt_tokens'] # [B, T_t]
142
- target = sample['mels'] # [B, T_s, 80]
143
- mel2ph = sample['mel2ph'] # [B, T_s]
144
- f0 = sample['f0']
145
- uv = sample['uv']
146
- energy = sample['energy']
147
- fs2_mel = None #sample['fs2_mels']
148
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
149
- if hparams['pitch_type'] == 'cwt':
150
- cwt_spec = sample[f'cwt_spec']
151
- f0_mean = sample['f0_mean']
152
- f0_std = sample['f0_std']
153
- sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
154
-
155
- output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
156
- ref_mels=[target, fs2_mel], f0=f0, uv=uv, energy=energy, infer=infer)
157
-
158
- losses = {}
159
- if 'diff_loss' in output:
160
- losses['mel'] = output['diff_loss']
161
- # self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
162
- # if hparams['use_pitch_embed']:
163
- # self.add_pitch_loss(output, sample, losses)
164
- if hparams['use_energy_embed']:
165
- self.add_energy_loss(output['energy_pred'], energy, losses)
166
-
167
- if not return_output:
168
- return losses
169
- else:
170
- return losses, output
171
-
172
- def validation_step(self, sample, batch_idx):
173
- outputs = {}
174
- txt_tokens = sample['txt_tokens'] # [B, T_t]
175
-
176
- target = sample['mels'] # [B, T_s, 80]
177
- energy = sample['energy']
178
- # fs2_mel = sample['fs2_mels']
179
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
180
- mel2ph = sample['mel2ph']
181
- f0 = sample['f0']
182
- uv = sample['uv']
183
-
184
- outputs['losses'] = {}
185
-
186
- outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
187
-
188
-
189
- outputs['total_loss'] = sum(outputs['losses'].values())
190
- outputs['nsamples'] = sample['nsamples']
191
- outputs = utils.tensors_to_scalars(outputs)
192
- if batch_idx < hparams['num_valid_plots']:
193
- fs2_mel = sample['fs2_mels']
194
- model_out = self.model(
195
- txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy,
196
- ref_mels=[None, fs2_mel], infer=True)
197
- if hparams.get('pe_enable') is not None and hparams['pe_enable']:
198
- gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
199
- pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel
200
- else:
201
- gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
202
- pred_f0 = model_out.get('f0_denorm')
203
- self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
204
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
205
- self.plot_mel(batch_idx, sample['mels'], fs2_mel, name=f'fs2mel_{batch_idx}')
206
- return outputs
207
-
208
- def test_step(self, sample, batch_idx):
209
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
210
- txt_tokens = sample['txt_tokens']
211
- energy = sample['energy']
212
- if hparams['profile_infer']:
213
- pass
214
- else:
215
- mel2ph, uv, f0 = None, None, None
216
- if hparams['use_gt_dur']:
217
- mel2ph = sample['mel2ph']
218
- if hparams['use_gt_f0']:
219
- f0 = sample['f0']
220
- uv = sample['uv']
221
- fs2_mel = sample['fs2_mels']
222
- outputs = self.model(
223
- txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=[None, fs2_mel], energy=energy,
224
- infer=True)
225
- sample['outputs'] = self.model.out2mel(outputs['mel_out'])
226
- sample['mel2ph_pred'] = outputs['mel2ph']
227
-
228
- if hparams.get('pe_enable') is not None and hparams['pe_enable']:
229
- sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
230
- sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel
231
- else:
232
- sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams)
233
- sample['f0_pred'] = outputs.get('f0_denorm')
234
- return self.after_infer(sample)
235
-
236
-
237
- class MIDIDataset(FastSpeechDataset):
238
- def __getitem__(self, index):
239
- sample = super(MIDIDataset, self).__getitem__(index)
240
- item = self._get_item(index)
241
- sample['f0_midi'] = torch.FloatTensor(item['f0_midi'])
242
- sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']]
243
-
244
- return sample
245
-
246
- def collater(self, samples):
247
- batch = super(MIDIDataset, self).collater(samples)
248
- batch['f0_midi'] = utils.collate_1d([s['f0_midi'] for s in samples], 0.0)
249
- batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0)
250
- # print((batch['pitch_midi'] == f0_to_coarse(batch['f0_midi'])).all())
251
- return batch
252
-
253
-
254
- class OpencpopDataset(FastSpeechDataset):
255
- def __getitem__(self, index):
256
- sample = super(OpencpopDataset, self).__getitem__(index)
257
- item = self._get_item(index)
258
- sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']]
259
- sample['midi_dur'] = torch.FloatTensor(item['midi_dur'])[:hparams['max_frames']]
260
- sample['is_slur'] = torch.LongTensor(item['is_slur'])[:hparams['max_frames']]
261
- sample['word_boundary'] = torch.LongTensor(item['word_boundary'])[:hparams['max_frames']]
262
- return sample
263
-
264
- def collater(self, samples):
265
- batch = super(OpencpopDataset, self).collater(samples)
266
- batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0)
267
- batch['midi_dur'] = utils.collate_1d([s['midi_dur'] for s in samples], 0)
268
- batch['is_slur'] = utils.collate_1d([s['is_slur'] for s in samples], 0)
269
- batch['word_boundary'] = utils.collate_1d([s['word_boundary'] for s in samples], 0)
270
- return batch
271
-
272
-
273
- class DiffSingerMIDITask(DiffSingerTask):
274
- def __init__(self):
275
- super(DiffSingerMIDITask, self).__init__()
276
- # self.dataset_cls = MIDIDataset
277
- self.dataset_cls = OpencpopDataset
278
-
279
- def run_model(self, model, sample, return_output=False, infer=False):
280
- txt_tokens = sample['txt_tokens'] # [B, T_t]
281
- target = sample['mels'] # [B, T_s, 80]
282
- # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s]
283
- mel2ph = sample['mel2ph']
284
- if hparams.get('switch_midi2f0_step') is not None and self.global_step > hparams['switch_midi2f0_step']:
285
- f0 = None
286
- uv = None
287
- else:
288
- f0 = sample['f0']
289
- uv = sample['uv']
290
- energy = sample['energy']
291
-
292
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
293
- if hparams['pitch_type'] == 'cwt':
294
- cwt_spec = sample[f'cwt_spec']
295
- f0_mean = sample['f0_mean']
296
- f0_std = sample['f0_std']
297
- sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
298
-
299
- output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
300
- ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=sample['pitch_midi'],
301
- midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
302
-
303
- losses = {}
304
- if 'diff_loss' in output:
305
- losses['mel'] = output['diff_loss']
306
- self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses)
307
- if hparams['use_pitch_embed']:
308
- self.add_pitch_loss(output, sample, losses)
309
- if hparams['use_energy_embed']:
310
- self.add_energy_loss(output['energy_pred'], energy, losses)
311
- if not return_output:
312
- return losses
313
- else:
314
- return losses, output
315
-
316
- def validation_step(self, sample, batch_idx):
317
- outputs = {}
318
- txt_tokens = sample['txt_tokens'] # [B, T_t]
319
-
320
- target = sample['mels'] # [B, T_s, 80]
321
- energy = sample['energy']
322
- # fs2_mel = sample['fs2_mels']
323
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
324
- mel2ph = sample['mel2ph']
325
-
326
- outputs['losses'] = {}
327
-
328
- outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
329
-
330
- outputs['total_loss'] = sum(outputs['losses'].values())
331
- outputs['nsamples'] = sample['nsamples']
332
- outputs = utils.tensors_to_scalars(outputs)
333
- if batch_idx < hparams['num_valid_plots']:
334
- model_out = self.model(
335
- txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=None, uv=None, energy=energy, ref_mels=None, infer=True,
336
- pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
337
-
338
- if hparams.get('pe_enable') is not None and hparams['pe_enable']:
339
- gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
340
- pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel
341
- else:
342
- gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
343
- pred_f0 = model_out.get('f0_denorm')
344
- self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
345
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
346
- self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')
347
- if hparams['use_pitch_embed']:
348
- self.plot_pitch(batch_idx, sample, model_out)
349
- return outputs
350
-
351
- def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None):
352
- """
353
- :param dur_pred: [B, T], float, log scale
354
- :param mel2ph: [B, T]
355
- :param txt_tokens: [B, T]
356
- :param losses:
357
- :return:
358
- """
359
- B, T = txt_tokens.shape
360
- nonpadding = (txt_tokens != 0).float()
361
- dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding
362
- is_sil = torch.zeros_like(txt_tokens).bool()
363
- for p in self.sil_ph:
364
- is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0])
365
- is_sil = is_sil.float() # [B, T_txt]
366
-
367
- # phone duration loss
368
- if hparams['dur_loss'] == 'mse':
369
- losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none')
370
- losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
371
- dur_pred = (dur_pred.exp() - 1).clamp(min=0)
372
- else:
373
- raise NotImplementedError
374
-
375
- # use linear scale for sent and word duration
376
- if hparams['lambda_word_dur'] > 0:
377
- idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1]
378
- # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur
379
- word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred)
380
- word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt)
381
- wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
382
- word_nonpadding = (word_dur_g > 0).float()
383
- wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
384
- losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
385
- if hparams['lambda_sent_dur'] > 0:
386
- sent_dur_p = dur_pred.sum(-1)
387
- sent_dur_g = dur_gt.sum(-1)
388
- sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
389
- losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
390
-
391
-
392
- class AuxDecoderMIDITask(FastSpeech2Task):
393
- def __init__(self):
394
- super().__init__()
395
- # self.dataset_cls = MIDIDataset
396
- self.dataset_cls = OpencpopDataset
397
-
398
- def build_tts_model(self):
399
- if hparams.get('use_midi') is not None and hparams['use_midi']:
400
- self.model = FastSpeech2MIDI(self.phone_encoder)
401
- else:
402
- self.model = FastSpeech2(self.phone_encoder)
403
-
404
- def run_model(self, model, sample, return_output=False):
405
- txt_tokens = sample['txt_tokens'] # [B, T_t]
406
- target = sample['mels'] # [B, T_s, 80]
407
- mel2ph = sample['mel2ph'] # [B, T_s]
408
- f0 = sample['f0']
409
- uv = sample['uv']
410
- energy = sample['energy']
411
-
412
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
413
- if hparams['pitch_type'] == 'cwt':
414
- cwt_spec = sample[f'cwt_spec']
415
- f0_mean = sample['f0_mean']
416
- f0_std = sample['f0_std']
417
- sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
418
-
419
- output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
420
- ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False, pitch_midi=sample['pitch_midi'],
421
- midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
422
-
423
- losses = {}
424
- self.add_mel_loss(output['mel_out'], target, losses)
425
- self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses)
426
- if hparams['use_pitch_embed']:
427
- self.add_pitch_loss(output, sample, losses)
428
- if hparams['use_energy_embed']:
429
- self.add_energy_loss(output['energy_pred'], energy, losses)
430
- if not return_output:
431
- return losses
432
- else:
433
- return losses, output
434
-
435
- def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None):
436
- """
437
- :param dur_pred: [B, T], float, log scale
438
- :param mel2ph: [B, T]
439
- :param txt_tokens: [B, T]
440
- :param losses:
441
- :return:
442
- """
443
- B, T = txt_tokens.shape
444
- nonpadding = (txt_tokens != 0).float()
445
- dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding
446
- is_sil = torch.zeros_like(txt_tokens).bool()
447
- for p in self.sil_ph:
448
- is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0])
449
- is_sil = is_sil.float() # [B, T_txt]
450
-
451
- # phone duration loss
452
- if hparams['dur_loss'] == 'mse':
453
- losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none')
454
- losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
455
- dur_pred = (dur_pred.exp() - 1).clamp(min=0)
456
- else:
457
- raise NotImplementedError
458
-
459
- # use linear scale for sent and word duration
460
- if hparams['lambda_word_dur'] > 0:
461
- idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1]
462
- # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur
463
- word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred)
464
- word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt)
465
- wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
466
- word_nonpadding = (word_dur_g > 0).float()
467
- wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
468
- losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
469
- if hparams['lambda_sent_dur'] > 0:
470
- sent_dur_p = dur_pred.sum(-1)
471
- sent_dur_g = dur_gt.sum(-1)
472
- sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
473
- losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
474
-
475
- def validation_step(self, sample, batch_idx):
476
- outputs = {}
477
- outputs['losses'] = {}
478
- outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True)
479
- outputs['total_loss'] = sum(outputs['losses'].values())
480
- outputs['nsamples'] = sample['nsamples']
481
- mel_out = self.model.out2mel(model_out['mel_out'])
482
- outputs = utils.tensors_to_scalars(outputs)
483
- # if sample['mels'].shape[0] == 1:
484
- # self.add_laplace_var(mel_out, sample['mels'], outputs)
485
- if batch_idx < hparams['num_valid_plots']:
486
- self.plot_mel(batch_idx, sample['mels'], mel_out)
487
- self.plot_dur(batch_idx, sample, model_out)
488
- if hparams['use_pitch_embed']:
489
- self.plot_pitch(batch_idx, sample, model_out)
490
- return outputs
 
spaces/AIWaves/Debate/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Debate
3
- emoji: 🐠
4
- colorFrom: purple
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.44.4
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIZero2HeroBootcamp/ChatGPTandLangchain/templates.py DELETED
@@ -1,44 +0,0 @@
1
- css = '''
2
- <style>
3
- .chat-message {
4
- padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
5
- }
6
- .chat-message.user {
7
- background-color: #2b313e
8
- }
9
- .chat-message.bot {
10
- background-color: #475063
11
- }
12
- .chat-message .avatar {
13
- width: 20%;
14
- }
15
- .chat-message .avatar img {
16
- max-width: 78px;
17
- max-height: 78px;
18
- border-radius: 50%;
19
- object-fit: cover;
20
- }
21
- .chat-message .message {
22
- width: 80%;
23
- padding: 0 1.5rem;
24
- color: #fff;
25
- }
26
- '''
27
-
28
- bot_template = '''
29
- <div class="chat-message bot">
30
- <div class="avatar">
31
- <img src="https://cdna.artstation.com/p/assets/images/images/054/910/878/large/aaron-wacker-cyberpunk-computer-devices-iot.jpg?1665656564" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
32
- </div>
33
- <div class="message">{{MSG}}</div>
34
- </div>
35
- '''
36
-
37
- user_template = '''
38
- <div class="chat-message user">
39
- <div class="avatar">
40
- <img src="https://cdnb.artstation.com/p/assets/images/images/054/910/875/large/aaron-wacker-cyberpunk-computer-brain-design.jpg?1665656558">
41
- </div>
42
- <div class="message">{{MSG}}</div>
43
- </div>
44
- '''
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest101.py DELETED
@@ -1,25 +0,0 @@
1
- # model settings
2
- model = dict(
3
- type='ImageClassifier',
4
- backbone=dict(
5
- type='ResNeSt',
6
- depth=101,
7
- num_stages=4,
8
- stem_channels=128,
9
- out_indices=(3, ),
10
- style='pytorch'),
11
- neck=dict(type='GlobalAveragePooling'),
12
- head=dict(
13
- type='LinearClsHead',
14
- num_classes=1000,
15
- in_channels=2048,
16
- loss=dict(
17
- type='LabelSmoothLoss',
18
- label_smooth_val=0.1,
19
- num_classes=1000,
20
- reduction='mean',
21
- loss_weight=1.0),
22
- topk=(1, 5),
23
- cal_acc=False),
24
- train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)),
25
- )
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/UrlDependency.ts DELETED
@@ -1,5 +0,0 @@
1
- /* eslint-disable no-shadow */
2
- export enum UrlDependency {
3
- ConversationList = "conversation:list",
4
- Conversation = "conversation",
5
- }
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/DfeHub.py DELETED
@@ -1,77 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
- import re
5
- import time
6
-
7
- import requests
8
-
9
- from ...typing import Any, CreateResult
10
- from ..base_provider import BaseProvider
11
-
12
-
13
- class DfeHub(BaseProvider):
14
- url = "https://chat.dfehub.com/"
15
- supports_stream = True
16
- supports_gpt_35_turbo = True
17
-
18
- @staticmethod
19
- def create_completion(
20
- model: str,
21
- messages: list[dict[str, str]],
22
- stream: bool, **kwargs: Any) -> CreateResult:
23
-
24
- headers = {
25
- "authority" : "chat.dfehub.com",
26
- "accept" : "*/*",
27
- "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
28
- "content-type" : "application/json",
29
- "origin" : "https://chat.dfehub.com",
30
- "referer" : "https://chat.dfehub.com/",
31
- "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
32
- "sec-ch-ua-mobile" : "?0",
33
- "sec-ch-ua-platform": '"macOS"',
34
- "sec-fetch-dest" : "empty",
35
- "sec-fetch-mode" : "cors",
36
- "sec-fetch-site" : "same-origin",
37
- "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
38
- "x-requested-with" : "XMLHttpRequest",
39
- }
40
-
41
- json_data = {
42
- "messages" : messages,
43
- "model" : "gpt-3.5-turbo",
44
- "temperature" : kwargs.get("temperature", 0.5),
45
- "presence_penalty" : kwargs.get("presence_penalty", 0),
46
- "frequency_penalty" : kwargs.get("frequency_penalty", 0),
47
- "top_p" : kwargs.get("top_p", 1),
48
- "stream" : True
49
- }
50
-
51
- response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
52
- headers=headers, json=json_data, timeout=3)
53
-
54
- for chunk in response.iter_lines():
55
- if b"detail" in chunk:
56
- delay = re.findall(r"\d+\.\d+", chunk.decode())
57
- delay = float(delay[-1])
58
- time.sleep(delay)
59
- yield from DfeHub.create_completion(model, messages, stream, **kwargs)
60
- if b"content" in chunk:
61
- data = json.loads(chunk.decode().split("data: ")[1])
62
- yield (data["choices"][0]["delta"]["content"])
63
-
64
- @classmethod
65
- @property
66
- def params(cls):
67
- params = [
68
- ("model", "str"),
69
- ("messages", "list[dict[str, str]]"),
70
- ("stream", "bool"),
71
- ("temperature", "float"),
72
- ("presence_penalty", "int"),
73
- ("frequency_penalty", "int"),
74
- ("top_p", "int"),
75
- ]
76
- param = ", ".join([": ".join(p) for p in params])
77
- return f"g4f.provider.{cls.__name__} supports: ({param})"
 
spaces/AfrodreamsAI/afrodreams/examples/scripts/starry_stanford_bigger.sh DELETED
@@ -1,108 +0,0 @@
1
- # To run this script you'll need to download the ultra-high res
2
- # scan of Starry Night from the Google Art Project, using this command:
3
- # wget -c https://upload.wikimedia.org/wikipedia/commons/e/ea/Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg -O starry_night_gigapixel.jpg
4
- # Or you can manually download the image from here: https://commons.wikimedia.org/wiki/File:Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg
5
-
6
- STYLE_IMAGE=starry_night_gigapixel.jpg
7
- CONTENT_IMAGE=examples/inputs/hoovertowernight.jpg
8
-
9
- STYLE_WEIGHT=5e2
10
- STYLE_SCALE=1.0
11
-
12
- STYLE_WEIGHT2=2500 # Style weight for image size 2048 and above
13
-
14
- PYTHON=python3 # Change to Python if using Python 2
15
- SCRIPT=neural_style.py
16
- GPU=0
17
-
18
- NEURAL_STYLE=$PYTHON
19
- NEURAL_STYLE+=" "
20
- NEURAL_STYLE+=$SCRIPT
21
-
22
- # Uncomment if using pip package
23
- #NEURAL_STYLE=neural-style
24
-
25
-
26
- $NEURAL_STYLE \
27
- -content_image $CONTENT_IMAGE \
28
- -style_image $STYLE_IMAGE \
29
- -style_scale $STYLE_SCALE \
30
- -print_iter 1 \
31
- -style_weight $STYLE_WEIGHT \
32
- -image_size 256 \
33
- -output_image out1.png \
34
- -tv_weight 0 \
35
- -gpu $GPU \
36
- -backend cudnn -cudnn_autotune
37
-
38
- $NEURAL_STYLE \
39
- -content_image $CONTENT_IMAGE \
40
- -style_image $STYLE_IMAGE \
41
- -init image -init_image out1.png \
42
- -style_scale $STYLE_SCALE \
43
- -print_iter 1 \
44
- -style_weight $STYLE_WEIGHT \
45
- -image_size 512 \
46
- -num_iterations 500 \
47
- -output_image out2.png \
48
- -tv_weight 0 \
49
- -gpu $GPU \
50
- -backend cudnn -cudnn_autotune
51
-
52
- $NEURAL_STYLE \
53
- -content_image $CONTENT_IMAGE \
54
- -style_image $STYLE_IMAGE \
55
- -init image -init_image out2.png \
56
- -style_scale $STYLE_SCALE \
57
- -print_iter 1 \
58
- -style_weight $STYLE_WEIGHT \
59
- -image_size 1024 \
60
- -num_iterations 200 \
61
- -output_image out3.png \
62
- -tv_weight 0 \
63
- -gpu $GPU \
64
- -backend cudnn -cudnn_autotune
65
-
66
- $NEURAL_STYLE \
67
- -content_image $CONTENT_IMAGE \
68
- -style_image $STYLE_IMAGE \
69
- -init image -init_image out3.png \
70
- -style_scale $STYLE_SCALE \
71
- -print_iter 1 \
72
- -style_weight $STYLE_WEIGHT2 \
73
- -image_size 2048 \
74
- -num_iterations 200 \
75
- -output_image out4.png \
76
- -tv_weight 0 \
77
- -gpu $GPU \
78
- -backend cudnn
79
-
80
- $NEURAL_STYLE \
81
- -content_image $CONTENT_IMAGE \
82
- -style_image $STYLE_IMAGE \
83
- -init image -init_image out4.png \
84
- -style_scale $STYLE_SCALE \
85
- -print_iter 1 \
86
- -style_weight $STYLE_WEIGHT2 \
87
- -image_size 3620 \
88
- -num_iterations 200 \
89
- -output_image out5.png \
90
- -tv_weight 0 \
91
- -gpu 0,1,2,3 \
92
- -multidevice_strategy 3,6,12 \
93
- -backend cudnn
94
-
95
- $NEURAL_STYLE \
96
- -content_image $CONTENT_IMAGE \
97
- -style_image $STYLE_IMAGE \
98
- -init image -init_image out5.png \
99
- -style_scale $STYLE_SCALE \
100
- -print_iter 1 \
101
- -style_weight $STYLE_WEIGHT2 \
102
- -image_size 4016 \
103
- -num_iterations 200 \
104
- -output_image out6.png \
105
- -tv_weight 0 \
106
- -gpu 0,1,2,3,4,5,6,7 \
107
- -multidevice_strategy 2,4,6,9,15,18,22 \
108
- -backend cudnn
 
spaces/AgentVerse/agentVerse/dataloader/humaneval.py DELETED
@@ -1,21 +0,0 @@
1
- from .dataloader import DataLoader
2
- from . import dataloader_registry
3
- import json
4
-
5
-
6
- @dataloader_registry.register("tasksolving/humaneval/gpt-4")
7
- @dataloader_registry.register("tasksolving/humaneval/gpt-3.5")
8
- class HumanevalLoader(DataLoader):
9
- def __init__(self, path: str):
10
- super().__init__(path)
11
-
12
- def load(self):
13
- with open(self.path) as f:
14
- for line in f:
15
- line = json.loads(line)
16
- self.examples.append(
17
- {
18
- "input": line["prompt"],
19
- "answer": line["test"],
20
- }
21
- )
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/oval/Oval.d.ts DELETED
@@ -1,2 +0,0 @@
1
- import Base from '../base/Base';
2
- export default class Oval extends Base { }
 
spaces/Alcedo/yunmedia/index.html DELETED
@@ -1,79 +0,0 @@
1
- <!DOCTYPE html>
2
- <html>
3
- <head>
4
- <title># Yunzai 云媒体服务器</title>
5
- <meta charset="utf-8" />
6
- <style type="text/css">
7
- html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{margin:0;padding:0;border:0}body{font-family:Helvetica,arial,freesans,clean,sans-serif;font-size:14px;line-height:1.6;color:#333;background-color:#fff;padding:20px;max-width:960px;margin:0 auto}body>*:first-child{margin-top:0 !important}body>*:last-child{margin-bottom:0 !important}p,blockquote,ul,ol,dl,table,pre{margin:15px 0}h1,h2,h3,h4,h5,h6{margin:20px 0 10px;padding:0;font-weight:bold;-webkit-font-smoothing:antialiased}h1 tt,h1 code,h2 tt,h2 code,h3 tt,h3 code,h4 tt,h4 code,h5 tt,h5 code,h6 tt,h6 code{font-size:inherit}h1{font-size:28px;color:#000}h2{font-size:24px;border-bottom:1px solid #ccc;color:#000}h3{font-size:18px}h4{font-size:16px}h5{font-size:14px}h6{color:#777;font-size:14px}body>h2:first-child,body>h1:first-child,body>h1:first-child+h2,body>h3:first-child,body>h4:first-child,body>h5:first-child,body>h6:first-child{margin-top:0;padding-top:0}a:first-child h1,a:first-child h2,a:first-child h3,a:first-child h4,a:first-child h5,a:first-child h6{margin-top:0;padding-top:0}h1+p,h2+p,h3+p,h4+p,h5+p,h6+p{margin-top:10px}a{color:#4183c4;text-decoration:none}a:hover{text-decoration:underline}ul,ol{padding-left:30px}ul li>:first-child,ol li>:first-child,ul li ul:first-of-type,ol li ol:first-of-type,ul li ol:first-of-type,ol li ul:first-of-type{margin-top:0}ul ul,ul ol,ol ol,ol ul{margin-bottom:0}dl{padding:0}dl dt{font-size:14px;font-weight:bold;font-style:italic;padding:0;margin:15px 0 5px}dl dt:first-child{padding:0}dl dt>:first-child{margin-top:0}dl dt>:last-child{margin-bottom:0}dl dd{margin:0 0 15px;padding:0 15px}dl dd>:first-child{margin-top:0}dl dd>:last-child{margin-bottom:0}pre,code,tt{font-size:12px;font-family:Consolas,"Liberation Mono",Courier,monospace}code,tt{margin:0;padding:0;white-space:nowrap;border:1px solid #eaeaea;background-color:#f8f8f8;border-radius:3px}pre>code{margin:0;padding:0;white-space:pre;border:0;background:transparent}pre{background-color:#f8f8f8;border:1px solid #ccc;font-size:13px;line-height:19px;overflow:auto;padding:6px 10px;border-radius:3px}pre code,pre tt{background-color:transparent;border:0}blockquote{border-left:4px solid #DDD;padding:0 15px;color:#777}blockquote>:first-child{margin-top:0}blockquote>:last-child{margin-bottom:0}hr{clear:both;margin:15px 0;height:0;overflow:hidden;border:0;background:transparent;border-bottom:4px solid #ddd;padding:0}table th{font-weight:bold}table th,table td{border:1px solid #ccc;padding:6px 13px}table tr{border-top:1px solid #ccc;background-color:#fff}table tr:nth-child(2n){background-color:#f8f8f8}img{max-width:100%}
8
- </style>
9
- </head>
10
- <body>
11
- <h1>Yunzai 云媒体服务器</h1>
12
- <p>当前服务支持音频转码、网页截图、网址访问检查</p>
13
- <h3>音频转码</h3>
14
- <h4>描述</h4>
15
- <p>将音频链接、数据、文件转换成 SILK 格式数据,可直接发送音频文件到接口</p>
16
- <h4>请求说明</h4>
17
- <blockquote><p>请求方式:POST<br>
18
- 请求 URL :<a href="#">/audio</a></p>
19
- </blockquote>
20
- <h4>请求参数</h4>
21
- <table>
22
- <thead>
23
- <tr><th>字段</th><th>字段类型</th><th>字段说明</th></tr>
24
- </thead>
25
- <tbody>
26
- <tr><td>recordUrl</td><td>string</td><td>原始音频链接</td></tr>
27
- <tr><td>recordBuffer</td><td>object</td><td>原始音频数据</td></tr>
28
- <tr><td>recordBuffer.type</td><td>string</td><td>数据类型</td></tr>
29
- <tr><td>recordBuffer.data</td><td>array</td><td>数据</td></tr>
30
- </tbody>
31
- </table>
32
- <hr />
33
- <h3>网页截图</h3>
34
- <h4>描述</h4>
35
- <p>将音频链接、数据、文件转换成 SILK 格式数据,可直接发送音频文件到接口</p>
36
- <h4>请求说明</h4>
37
- <blockquote><p>请求方式:POST<br>
38
- 请求 URL :<a href="#">/screenshot</a></p>
39
- </blockquote>
40
- <h4>请求参数</h4>
41
- <table>
42
- <thead>
43
- <tr><th>字段</th><th>字段类型</th><th>字段说明</th></tr>
44
- </thead>
45
- <tbody>
46
- <tr><td>url</td><td>string</td><td>请求的网址</td></tr>
47
- <tr><td>option</td><td>object</td><td>参数</td></tr>
48
- <tr><td>option.width</td><td>int</td><td>渲染窗口宽度</td></tr>
49
- <tr><td>option.height</td><td>int</td><td>渲染窗口高度</td></tr>
50
- <tr><td>option.dpr</td><td>int</td><td>渲染DPR</td></tr>
51
- <tr><td>option.timeout</td><td>int</td><td>访问超时时间</td></tr>
52
- <tr><td>option.wait</td><td>int</td><td>页面等待时间</td></tr>
53
- <tr><td>option.waitUtil</td><td>string('load'、'domcontentloaded'、'networkidle0'、'networkidle2')</td><td>waitUtil 参数</td></tr>
54
- <tr><td>option.func</td><td>int</td><td>waitFunction参数</td></tr>
55
- <tr><td>option.selector</td><td>string</td><td>页面加载完成选择器</td></tr>
56
- <tr><td>type</td><td>string</td><td>返回类型,可选 base64 和 image</td></tr>
57
- </tbody>
58
- </table>
59
- <hr />
60
- <h3>网址访问检查</h3>
61
- <h4>描述</h4>
62
- <p>检查网址是否能够正常访问</p>
63
- <h4>请求说明</h4>
64
- <blockquote><p>请求方式:POST<br>
65
- 请求 URL :<a href="#">/check</a></p>
66
- </blockquote>
67
- <h4>请求参数</h4>
68
- <table>
69
- <thead>
70
- <tr><th>字段</th><th>字段类型</th><th>字段说明</th></tr>
71
- </thead>
72
- <tbody>
73
- <tr><td>url</td><td>string</td><td>需检查的网址</td></tr>
74
- </tbody>
75
- </table>
76
-
77
- <!-- Use style in style1-->
78
- </body>
79
- </html>
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/retina_head.py DELETED
@@ -1,114 +0,0 @@
1
- import torch.nn as nn
2
- from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
3
-
4
- from ..builder import HEADS
5
- from .anchor_head import AnchorHead
6
-
7
-
8
- @HEADS.register_module()
9
- class RetinaHead(AnchorHead):
10
- r"""An anchor-based head used in `RetinaNet
11
- <https://arxiv.org/pdf/1708.02002.pdf>`_.
12
-
13
- The head contains two subnetworks. The first classifies anchor boxes and
14
- the second regresses deltas for the anchors.
15
-
16
- Example:
17
- >>> import torch
18
- >>> self = RetinaHead(11, 7)
19
- >>> x = torch.rand(1, 7, 32, 32)
20
- >>> cls_score, bbox_pred = self.forward_single(x)
21
- >>> # Each anchor predicts a score for each class except background
22
- >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
23
- >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
24
- >>> assert cls_per_anchor == (self.num_classes)
25
- >>> assert box_per_anchor == 4
26
- """
27
-
28
- def __init__(self,
29
- num_classes,
30
- in_channels,
31
- stacked_convs=4,
32
- conv_cfg=None,
33
- norm_cfg=None,
34
- anchor_generator=dict(
35
- type='AnchorGenerator',
36
- octave_base_scale=4,
37
- scales_per_octave=3,
38
- ratios=[0.5, 1.0, 2.0],
39
- strides=[8, 16, 32, 64, 128]),
40
- **kwargs):
41
- self.stacked_convs = stacked_convs
42
- self.conv_cfg = conv_cfg
43
- self.norm_cfg = norm_cfg
44
- super(RetinaHead, self).__init__(
45
- num_classes,
46
- in_channels,
47
- anchor_generator=anchor_generator,
48
- **kwargs)
49
-
50
- def _init_layers(self):
51
- """Initialize layers of the head."""
52
- self.relu = nn.ReLU(inplace=True)
53
- self.cls_convs = nn.ModuleList()
54
- self.reg_convs = nn.ModuleList()
55
- for i in range(self.stacked_convs):
56
- chn = self.in_channels if i == 0 else self.feat_channels
57
- self.cls_convs.append(
58
- ConvModule(
59
- chn,
60
- self.feat_channels,
61
- 3,
62
- stride=1,
63
- padding=1,
64
- conv_cfg=self.conv_cfg,
65
- norm_cfg=self.norm_cfg))
66
- self.reg_convs.append(
67
- ConvModule(
68
- chn,
69
- self.feat_channels,
70
- 3,
71
- stride=1,
72
- padding=1,
73
- conv_cfg=self.conv_cfg,
74
- norm_cfg=self.norm_cfg))
75
- self.retina_cls = nn.Conv2d(
76
- self.feat_channels,
77
- self.num_anchors * self.cls_out_channels,
78
- 3,
79
- padding=1)
80
- self.retina_reg = nn.Conv2d(
81
- self.feat_channels, self.num_anchors * 4, 3, padding=1)
82
-
83
- def init_weights(self):
84
- """Initialize weights of the head."""
85
- for m in self.cls_convs:
86
- normal_init(m.conv, std=0.01)
87
- for m in self.reg_convs:
88
- normal_init(m.conv, std=0.01)
89
- bias_cls = bias_init_with_prob(0.01)
90
- normal_init(self.retina_cls, std=0.01, bias=bias_cls)
91
- normal_init(self.retina_reg, std=0.01)
92
-
93
- def forward_single(self, x):
94
- """Forward feature of a single scale level.
95
-
96
- Args:
97
- x (Tensor): Features of a single scale level.
98
-
99
- Returns:
100
- tuple:
101
- cls_score (Tensor): Cls scores for a single scale level
102
- the channels number is num_anchors * num_classes.
103
- bbox_pred (Tensor): Box energies / deltas for a single scale
104
- level, the channels number is num_anchors * 4.
105
- """
106
- cls_feat = x
107
- reg_feat = x
108
- for cls_conv in self.cls_convs:
109
- cls_feat = cls_conv(cls_feat)
110
- for reg_conv in self.reg_convs:
111
- reg_feat = reg_conv(reg_feat)
112
- cls_score = self.retina_cls(cls_feat)
113
- bbox_pred = self.retina_reg(reg_feat)
114
- return cls_score, bbox_pred
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/sparse_rcnn.py DELETED
@@ -1,110 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .two_stage import TwoStageDetector
3
-
4
-
5
- @DETECTORS.register_module()
6
- class SparseRCNN(TwoStageDetector):
7
- r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
8
- Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
9
-
10
- def __init__(self, *args, **kwargs):
11
- super(SparseRCNN, self).__init__(*args, **kwargs)
12
- assert self.with_rpn, 'Sparse R-CNN do not support external proposals'
13
-
14
- def forward_train(self,
15
- img,
16
- img_metas,
17
- gt_bboxes,
18
- gt_labels,
19
- gt_bboxes_ignore=None,
20
- gt_masks=None,
21
- proposals=None,
22
- **kwargs):
23
- """Forward function of SparseR-CNN in train stage.
24
-
25
- Args:
26
- img (Tensor): of shape (N, C, H, W) encoding input images.
27
- Typically these should be mean centered and std scaled.
28
- img_metas (list[dict]): list of image info dict where each dict
29
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
30
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
31
- For details on the values of these keys see
32
- :class:`mmdet.datasets.pipelines.Collect`.
33
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
34
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
35
- gt_labels (list[Tensor]): class indices corresponding to each box
36
- gt_bboxes_ignore (None | list[Tensor): specify which bounding
37
- boxes can be ignored when computing the loss.
38
- gt_masks (List[Tensor], optional) : Segmentation masks for
39
- each box. But we don't support it in this architecture.
40
- proposals (List[Tensor], optional): override rpn proposals with
41
- custom proposals. Use when `with_rpn` is False.
42
-
43
- Returns:
44
- dict[str, Tensor]: a dictionary of loss components
45
- """
46
-
47
- assert proposals is None, 'Sparse R-CNN does not support' \
48
- ' external proposals'
49
- assert gt_masks is None, 'Sparse R-CNN does not instance segmentation'
50
-
51
- x = self.extract_feat(img)
52
- proposal_boxes, proposal_features, imgs_whwh = \
53
- self.rpn_head.forward_train(x, img_metas)
54
- roi_losses = self.roi_head.forward_train(
55
- x,
56
- proposal_boxes,
57
- proposal_features,
58
- img_metas,
59
- gt_bboxes,
60
- gt_labels,
61
- gt_bboxes_ignore=gt_bboxes_ignore,
62
- gt_masks=gt_masks,
63
- imgs_whwh=imgs_whwh)
64
- return roi_losses
65
-
66
- def simple_test(self, img, img_metas, rescale=False):
67
- """Test function without test time augmentation.
68
-
69
- Args:
70
- imgs (list[torch.Tensor]): List of multiple images
71
- img_metas (list[dict]): List of image information.
72
- rescale (bool): Whether to rescale the results.
73
- Defaults to False.
74
-
75
- Returns:
76
- list[list[np.ndarray]]: BBox results of each image and classes.
77
- The outer list corresponds to each image. The inner list
78
- corresponds to each class.
79
- """
80
- x = self.extract_feat(img)
81
- proposal_boxes, proposal_features, imgs_whwh = \
82
- self.rpn_head.simple_test_rpn(x, img_metas)
83
- bbox_results = self.roi_head.simple_test(
84
- x,
85
- proposal_boxes,
86
- proposal_features,
87
- img_metas,
88
- imgs_whwh=imgs_whwh,
89
- rescale=rescale)
90
- return bbox_results
91
-
92
- def forward_dummy(self, img):
93
- """Used for computing network flops.
94
-
95
- See `mmdetection/tools/analysis_tools/get_flops.py`
96
- """
97
- # backbone
98
- x = self.extract_feat(img)
99
- # rpn
100
- num_imgs = len(img)
101
- dummy_img_metas = [
102
- dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
103
- ]
104
- proposal_boxes, proposal_features, imgs_whwh = \
105
- self.rpn_head.simple_test_rpn(x, dummy_img_metas)
106
- # roi_head
107
- roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
108
- proposal_features,
109
- dummy_img_metas)
110
- return roi_outs
 
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/encnet_r50-d8.py',
3
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_40k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(align_corners=True),
8
- auxiliary_head=dict(align_corners=True),
9
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
 
spaces/AnnasBlackHat/Image-Similarity/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Image Similarity
3
- emoji: 🐨
4
- colorFrom: red
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.16.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Anonymous-sub/Rerender/src/config.py DELETED
@@ -1,144 +0,0 @@
1
- import json
2
- import os
3
- from typing import Optional, Sequence, Tuple
4
-
5
- from src.video_util import get_frame_count
6
-
7
-
8
- class RerenderConfig:
9
-
10
- def __init__(self):
11
- ...
12
-
13
- def create_from_parameters(self,
14
- input_path: str,
15
- output_path: str,
16
- prompt: str,
17
- work_dir: Optional[str] = None,
18
- key_subdir: str = 'keys',
19
- frame_count: Optional[int] = None,
20
- interval: int = 10,
21
- crop: Sequence[int] = (0, 0, 0, 0),
22
- sd_model: Optional[str] = None,
23
- a_prompt: str = '',
24
- n_prompt: str = '',
25
- ddim_steps=20,
26
- scale=7.5,
27
- control_type: str = 'HED',
28
- control_strength=1,
29
- seed: int = -1,
30
- image_resolution: int = 512,
31
- x0_strength: float = -1,
32
- style_update_freq: int = 10,
33
- cross_period: Tuple[float, float] = (0, 1),
34
- warp_period: Tuple[float, float] = (0, 0.1),
35
- mask_period: Tuple[float, float] = (0.5, 0.8),
36
- ada_period: Tuple[float, float] = (1.0, 1.0),
37
- mask_strength: float = 0.5,
38
- inner_strength: float = 0.9,
39
- smooth_boundary: bool = True,
40
- color_preserve: bool = True,
41
- **kwargs):
42
- self.input_path = input_path
43
- self.output_path = output_path
44
- self.prompt = prompt
45
- self.work_dir = work_dir
46
- if work_dir is None:
47
- self.work_dir = os.path.dirname(output_path)
48
- self.key_dir = os.path.join(self.work_dir, key_subdir)
49
- self.first_dir = os.path.join(self.work_dir, 'first')
50
-
51
- # Split video into frames
52
- if not os.path.isfile(input_path):
53
- raise FileNotFoundError(f'Cannot find video file {input_path}')
54
- self.input_dir = os.path.join(self.work_dir, 'video')
55
-
56
- self.frame_count = frame_count
57
- if frame_count is None:
58
- self.frame_count = get_frame_count(self.input_path)
59
- self.interval = interval
60
- self.crop = crop
61
- self.sd_model = sd_model
62
- self.a_prompt = a_prompt
63
- self.n_prompt = n_prompt
64
- self.ddim_steps = ddim_steps
65
- self.scale = scale
66
- self.control_type = control_type
67
- if self.control_type == 'canny':
68
- self.canny_low = kwargs.get('canny_low', 100)
69
- self.canny_high = kwargs.get('canny_high', 200)
70
- else:
71
- self.canny_low = None
72
- self.canny_high = None
73
- self.control_strength = control_strength
74
- self.seed = seed
75
- self.image_resolution = image_resolution
76
- self.x0_strength = x0_strength
77
- self.style_update_freq = style_update_freq
78
- self.cross_period = cross_period
79
- self.mask_period = mask_period
80
- self.warp_period = warp_period
81
- self.ada_period = ada_period
82
- self.mask_strength = mask_strength
83
- self.inner_strength = inner_strength
84
- self.smooth_boundary = smooth_boundary
85
- self.color_preserve = color_preserve
86
-
87
- os.makedirs(self.input_dir, exist_ok=True)
88
- os.makedirs(self.work_dir, exist_ok=True)
89
- os.makedirs(self.key_dir, exist_ok=True)
90
- os.makedirs(self.first_dir, exist_ok=True)
91
-
92
- def create_from_path(self, cfg_path: str):
93
- with open(cfg_path, 'r') as fp:
94
- cfg = json.load(fp)
95
- kwargs = dict()
96
-
97
- def append_if_not_none(key):
98
- value = cfg.get(key, None)
99
- if value is not None:
100
- kwargs[key] = value
101
-
102
- kwargs['input_path'] = cfg['input']
103
- kwargs['output_path'] = cfg['output']
104
- kwargs['prompt'] = cfg['prompt']
105
- append_if_not_none('work_dir')
106
- append_if_not_none('key_subdir')
107
- append_if_not_none('frame_count')
108
- append_if_not_none('interval')
109
- append_if_not_none('crop')
110
- append_if_not_none('sd_model')
111
- append_if_not_none('a_prompt')
112
- append_if_not_none('n_prompt')
113
- append_if_not_none('ddim_steps')
114
- append_if_not_none('scale')
115
- append_if_not_none('control_type')
116
- if kwargs.get('control_type', '') == 'canny':
117
- append_if_not_none('canny_low')
118
- append_if_not_none('canny_high')
119
- append_if_not_none('control_strength')
120
- append_if_not_none('seed')
121
- append_if_not_none('image_resolution')
122
- append_if_not_none('x0_strength')
123
- append_if_not_none('style_update_freq')
124
- append_if_not_none('cross_period')
125
- append_if_not_none('warp_period')
126
- append_if_not_none('mask_period')
127
- append_if_not_none('ada_period')
128
- append_if_not_none('mask_strength')
129
- append_if_not_none('inner_strength')
130
- append_if_not_none('smooth_boundary')
131
- append_if_not_none('color_perserve')
132
- self.create_from_parameters(**kwargs)
133
-
134
- @property
135
- def use_warp(self):
136
- return self.warp_period[0] <= self.warp_period[1]
137
-
138
- @property
139
- def use_mask(self):
140
- return self.mask_period[0] <= self.mask_period[1]
141
-
142
- @property
143
- def use_ada(self):
144
- return self.ada_period[0] <= self.ada_period[1]
 
spaces/Arvi/Performance_predictor_and_feedback_generator/app.py DELETED
@@ -1,410 +0,0 @@
1
- def assign_weights(Name,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13,col14,col15):
2
- import gradio as gr
3
- import pandas as pd
4
- import numpy as np
5
- df=pd.read_csv('/content/final_processed.csv')
6
- df.drop(['Unnamed: 0'], axis=1,inplace=True)
7
- from sklearn import preprocessing
8
- label_encoder = preprocessing.LabelEncoder()
9
-
10
-
11
- y={'academic time':col2,'task dedication':col3,'physical activity':col4,'favourite sport':col5,'family time':col6,'poor sleep':col7,'fitness':col8,
12
- 'loss of concentration':col9,'eating habits':col10,'free time':col11,'motivation':col12,'social media':col13,'social media on academics':col14,'performance':col15}
13
- df=df.append(y,ignore_index=True)
14
-
15
-
16
- df['academic time']= label_encoder.fit_transform(df['academic time'])
17
- df['task dedication']= label_encoder.fit_transform(df['task dedication'])
18
- df['physical activity']= label_encoder.fit_transform(df['physical activity'])
19
- df['favorite sport']= label_encoder.fit_transform(df['favorite sport'])
20
- df['family time']= label_encoder.fit_transform(df['family time'])
21
- df['poor sleep']= label_encoder.fit_transform(df['poor sleep'])
22
- df['fitness']= label_encoder.fit_transform(df['fitness'])
23
- df['loss of concentration']= label_encoder.fit_transform(df['loss of concentration'])
24
- df['eating habits']= label_encoder.fit_transform(df['eating habits'])
25
- df['free time']= label_encoder.fit_transform(df['free time'])
26
- df['motivation']= label_encoder.fit_transform(df['motivation'])
27
- df['social media']= label_encoder.fit_transform(df['social media'])
28
- df['socail media on academics']= label_encoder.fit_transform(df['socail media on academics'])
29
- df['performance']= label_encoder.fit_transform(df['performance'])
30
-
31
- df.loc[df['academic time'] == 4, 'weight_academic'] =0.45
32
- df.loc[df['academic time'] == 1, 'weight_academic'] =0.15
33
- df.loc[df['academic time'] == 0, 'weight_academic'] =0.05
34
- df.loc[df['academic time'] == 2, 'weight_academic'] =0.35
35
- df.loc[df['academic time'] == 3, 'weight_academic'] =0.00
36
-
37
- df.loc[df['task dedication'] == 0, 'weight_task'] =0.00
38
- df.loc[df['task dedication'] == 1, 'weight_task'] =0.05
39
- df.loc[df['task dedication'] == 2, 'weight_task'] =0.20
40
- df.loc[df['task dedication'] == 3, 'weight_task'] =0.25
41
- df.loc[df['task dedication'] == 4, 'weight_task'] =0.50
42
-
43
- df.loc[df['physical activity'] == 0, 'weight_physic'] =0.00
44
- df.loc[df['physical activity'] == 1, 'weight_physic'] =1.00
45
-
46
- df.loc[df['favorite sport'] == 0, 'weight_play'] =0.20
47
- df.loc[df['favorite sport'] == 1, 'weight_play'] =0.20
48
- df.loc[df['favorite sport'] == 2, 'weight_play'] =0.20
49
- df.loc[df['favorite sport'] == 3, 'weight_play'] =0.20
50
- df.loc[df['favorite sport'] == 4, 'weight_play'] =0.00
51
- df.loc[df['favorite sport'] == 5, 'weight_play'] =0.20
52
-
53
- df.loc[df['family time'] == 3, 'weight_familytime'] =0.40
54
- df.loc[df['family time'] == 2, 'weight_familytime'] =0.10
55
- df.loc[df['family time'] == 1, 'weight_familytime'] =0.00
56
- df.loc[df['family time'] == 0, 'weight_familytime'] =0.40
57
- df.loc[df['family time'] == 4, 'weight_familytime'] =0.10
58
-
59
- df.loc[df['poor sleep'] == 4, 'weight_sleep'] =0.00
60
- df.loc[df['poor sleep'] == 3, 'weight_sleep'] =0.05
61
- df.loc[df['poor sleep'] == 0, 'weight_sleep'] =0.00
62
- df.loc[df['poor sleep'] == 2, 'weight_sleep'] =0.40
63
- df.loc[df['poor sleep'] == 1, 'weight_sleep'] =0.55
64
-
65
- df.loc[df['loss of concentration'] == 4, 'weight_conc'] =0.20
66
- df.loc[df['loss of concentration'] == 0, 'weight_conc'] =0.05
67
- df.loc[df['loss of concentration'] == 1, 'weight_conc'] =0.00
68
- df.loc[df['loss of concentration'] == 3, 'weight_conc'] =0.75
69
- df.loc[df['loss of concentration'] == 2, 'weight_conc'] =0.05
70
-
71
- df.loc[df['eating habits'] == 4, 'weight_eating'] =0.20
72
- df.loc[df['eating habits'] == 0, 'weight_eating'] =0.05
73
- df.loc[df['eating habits'] == 1, 'weight_eating'] =0.00
74
- df.loc[df['eating habits'] == 3, 'weight_eating'] =0.75
75
- df.loc[df['eating habits'] == 2, 'weight_eating'] =0.05
76
-
77
- df.loc[df['fitness'] == 2, 'weight_fit'] =0.60
78
- df.loc[df['fitness'] == 0, 'weight_fit'] =0.10
79
- df.loc[df['fitness'] == 1, 'weight_fit'] =0.30
80
- df.loc[df['fitness'] == 3, 'weight_fit'] =0.00
81
-
82
- df.loc[df['free time'] == 3, 'weight_time'] =0.50
83
- df.loc[df['free time'] == 2, 'weight_time'] =0.10
84
- df.loc[df['free time'] == 1, 'weight_time'] =0.20
85
- df.loc[df['free time'] == 0, 'weight_time'] =0.20
86
-
87
- df.loc[df['motivation'] == 3, 'weight_motivation'] =0.30
88
- df.loc[df['motivation'] == 2, 'weight_motivation'] =0.25
89
- df.loc[df['motivation'] == 1, 'weight_motivation'] =0.25
90
- df.loc[df['motivation'] == 0, 'weight_motivation'] =0.20
91
-
92
- df.loc[df['social media'] == 3, 'weight_media'] =0.00
93
- df.loc[df['social media'] == 2, 'weight_media'] =0.65
94
- df.loc[df['social media'] == 1, 'weight_media'] =0.10
95
- df.loc[df['social media'] == 0, 'weight_media'] =0.25
96
-
97
-
98
- df.loc[df['socail media on academics'] == 0, 'weight_media_academics'] =0.00
99
- df.loc[df['socail media on academics'] == 1, 'weight_media_academics'] =1.00
100
-
101
- df.loc[df['performance'] == 4, 'weight_performance']=0.55
102
- df.loc[df['performance'] == 3, 'weight_performance']=0.00
103
- df.loc[df['performance'] == 2, 'weight_performance']=0.30
104
- df.loc[df['performance'] == 1, 'weight_performance']=0.10
105
- df.loc[df['performance'] == 0, 'weight_performance']=0.05
106
-
107
- df['total']=df.iloc[:,14:].sum(axis=1)
108
-
109
-
110
- df.loc[(df['weight_academic']<0.35) | (df['weight_task']<0.25),'academic value']=0
111
- df.loc[(df['weight_academic']>=0.35) & (df['weight_task']>=0.25),'academic value']=1
112
- df.inplace=1
113
-
114
- df.loc[(df['weight_academic']<0.35) | (df['weight_time']<0.20),'time value']=0
115
- df.loc[(df['weight_academic']>=0.35) & (df['weight_time']>=0.20),'time value']=1
116
- df.inplace=1
117
-
118
- df.loc[((df['weight_academic']<=0.35) & (df['weight_conc']>=0.20)) | ((df['weight_academic']>=0.35) & (df['weight_conc']>=0.20)),'productive value']=1
119
- df.loc[((df['weight_academic']>=0.35) & (df['weight_conc']<0.20)) | ((df['weight_academic']<0.35) & (df['weight_conc']<0.20)),'productive value']=0
120
- df.inplace=1
121
-
122
- df.loc[(df['weight_physic']==1) & (df['weight_play']==0.2) & (df['weight_fit']>=0.3) & (df['weight_eating']>=0.20),'fitness_value']=1
123
- df.loc[(df['weight_physic']!=1) | (df['weight_play']!=0.2) | (df['weight_fit']<0.3) | (df['weight_eating']<0.20),'fitness_value']=0
124
- df.inplace=1
125
-
126
-
127
- df.loc[(df['weight_sleep']>=0.40) & (df['weight_conc']>=0.20) ,'sleep value']=1
128
- df.loc[(df['weight_sleep']<0.40) | (df['weight_conc']<0.20),'sleep value']=0
129
- df.inplace=1
130
-
131
- df.loc[(df['weight_familytime']==0.40) & (df['weight_motivation']==0.25) ,'motivation value']=1
132
- df.loc[(df['weight_familytime']!=0.40) | (df['weight_motivation']!=0.25),'motivation value']=0
133
- df.inplace=1
134
-
135
- df.loc[(df['weight_performance']>=0.30) ,'performance_value']=1
136
- df.loc[(df['weight_performance']<0.30),'performance_value']=0
137
- df.inplace=1
138
-
139
- df.loc[(df['weight_media']>=0.25) & (df['weight_media_academics']==0.00) ,'media_value']=1
140
- df.loc[(df['weight_media']<0.25) | (df['weight_media_academics']!=0.00),'media_value']=0
141
- df.inplace=1
142
-
143
- df.loc[df['total']>=4.0,'overall']=1
144
- df.loc[df['total']<4.0,'overall']=0
145
- df.inplace=1
146
-
147
-
148
- X = df[['academic time',
149
- 'task dedication',
150
- 'physical activity',
151
- 'favorite sport',
152
- 'family time',
153
- 'poor sleep',
154
- 'fitness',
155
- 'loss of concentration',
156
- 'eating habits',
157
- 'free time',
158
- 'motivation',
159
- 'social media',
160
- 'socail media on academics',
161
- 'performance',
162
- 'weight_academic',
163
- 'weight_task',
164
- 'weight_physic',
165
- 'weight_play',
166
- 'weight_familytime',
167
- 'weight_sleep',
168
- 'weight_conc',
169
- 'weight_eating',
170
- 'weight_fit',
171
- 'weight_time',
172
- 'weight_motivation',
173
- 'weight_media',
174
- 'weight_media_academics',
175
- 'weight_performance',
176
- 'total'
177
- ]]
178
- y1 = df['academic value']
179
- y2=df['time value']
180
- y3=df['productive value']
181
- y4=df['fitness_value']
182
- y5=df['sleep value']
183
- y6=df['motivation value']
184
- y7=df['performance_value']
185
- y8=df['media_value']
186
- y9=df['overall']
187
- from sklearn.model_selection import train_test_split
188
- X_train,X_test,y1_train,y1_test = train_test_split(X,y1,test_size=0.3,random_state = 0,shuffle = True)
189
- X_train,X_test,y2_train,y2_test = train_test_split(X,y2,test_size=0.3,random_state = 0,shuffle = True)
190
- X_train,X_test,y3_train,y3_test = train_test_split(X,y3,test_size=0.3,random_state = 0,shuffle = True)
191
- X_train,X_test,y4_train,y4_test = train_test_split(X,y4,test_size=0.3,random_state = 0,shuffle = True)
192
- X_train,X_test,y5_train,y5_test = train_test_split(X,y5,test_size=0.3,random_state = 0,shuffle = True)
193
- X_train,X_test,y6_train,y6_test = train_test_split(X,y6,test_size=0.3,random_state = 0,shuffle = True)
194
- X_train,X_test,y7_train,y7_test = train_test_split(X,y7,test_size=0.3,random_state = 0,shuffle = True)
195
- X_train,X_test,y8_train,y8_test = train_test_split(X,y8,test_size=0.3,random_state = 0,shuffle = True)
196
- X_train,X_test,y9_train,y9_test = train_test_split(X,y9,test_size=0.3,random_state = 0,shuffle = True)
- import xgboost as xgb
-
- # nine independent binary classifiers, one per derived 0/1 target label
- xgb_params = dict(objective='binary:logistic', colsample_bytree=0.3, learning_rate=0.1,
-                   max_depth=5, reg_alpha=10, n_estimators=10)
- rfc1, rfc2, rfc3, rfc4, rfc5, rfc6, rfc7, rfc8, rfc9 = (
-     xgb.XGBClassifier(**xgb_params) for _ in range(9))
217
- rfc1.fit(X_train,y1_train)
218
- rfc2.fit(X_train,y2_train)
219
- rfc3.fit(X_train,y3_train)
220
- rfc4.fit(X_train,y4_train)
221
- rfc5.fit(X_train,y5_train)
222
- rfc6.fit(X_train,y6_train)
223
- rfc7.fit(X_train,y7_train)
224
- rfc8.fit(X_train,y8_train)
225
- rfc9.fit(X_train,y9_train)
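-
- # sketch (assumes the y*_test splits created above): quick accuracy check for
- # each classifier on the shared 30% hold-out set
- from sklearn.metrics import accuracy_score
- for i, (clf, y_true) in enumerate(zip(
-         (rfc1, rfc2, rfc3, rfc4, rfc5, rfc6, rfc7, rfc8, rfc9),
-         (y1_test, y2_test, y3_test, y4_test, y5_test, y6_test, y7_test, y8_test, y9_test)), start=1):
-     print(f"target {i}: accuracy {accuracy_score(y_true, clf.predict(X_test)):.3f}")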
226
- import random
227
-
228
- z=df.tail(1)
229
-
230
-
231
-
232
-
233
- if z['academic value'].eq(1).all():
234
- a=['You are in the right track just try to stick on to your schedule','HARRRRRDDDD WORK always payys off you seem to be going in the right track',
235
- 'The way is classiscal!! a tip for you is to listen to some classical music before studying ','You are driven by your own intrest keep riding',
236
- 'Your study time is great ,now its to take a short break',
237
- 'WOWWW you are just a just synonym of hard work and dedication ' ]
238
- res1="feedback on youe study schedule --> " +random.choice(a)
239
- if z['academic value'].eq(0).all():
240
- b=['If you know your “WHY”, finding your “HOW" will not be difficult.you just need to start working','Focusing is about saying no.just learn to say no to things which distracts you .u just need to put a little more focus on your studytime',
241
- 'Be the early bird that gets the first worm.set your body clock and start working','listen to directions,follow through assignments,learn for yourself.you just need to enjoy the process',
242
- 'measure for progress not the time you are working ,try to put in more studytime','postponment will postpone you,finish your daily tasks when you have the time',
243
- 'you are just off track,there is still time and sure that you will reach great heights ','you surely have the talent its now in your hands to make wonders!!!! talent without hardwork?? what do you think ','enroll yourself to a personalized learning environament which gives you a controll and education experience ']
244
- res1="feedback on youe study schedule --> "+random.choice(b)
245
-
246
-
247
- if z['time value'].eq(1).all():
248
- c=['There is a saying: give me six hours to chop down a tree and I will spend the first hour sharpening the axe; the point here is that you have sharpened your axe','Your timing is great, you are managing time well',
249
- 'It seems you have been studying for a long stretch, take a quick break and come back','You are enjoying your time, keep putting in the same effort','Keep managing time the way you are doing now, this habit will take care of the rest'
250
- ,'You seem to stay organized and on track with your proactive planning and systematic scheduling']
251
- res2="Feedback on how you manage time --> "+random.choice(c)
252
- if z['time value'].eq(0).all():
253
- d=['You have to start spending time on academics and show some interest in succeeding; you are the pilot who should stop time from flying and bring it under your control','Start working, stick to a timetable and set your body clock','Try to be more organized and start spending quality time on your studies',
254
- 'Start learning to manage time and prioritize your academics','Spend more time on your weak areas and try to stretch out for longer hours','The biggest obstacle stopping you from winning is time management, prepare a timetable and stick to it',
255
- 'Play while you play and work while you work, do not mix the two up','Do not procrastinate, finish your day-to-day jobs when and where you get the time']
256
- res2="Feedback on how you manage time --> "+random.choice(d)
257
-
258
- if z['productive value'].eq(1).all():
259
- e=['you are smart,productive and have a good way of preparation in your studies','Be more proactive and try to participate in class,you are effiecient and can reach heights with your effectiveness','you have the ability to study things smartly and quickly,pick areas which are more brain-storming',
260
- 'you have the ability to intepret things and your mind is sharp and you are a good listener','you are the master-mind,you are the person who shouldnt miss out in enrolling to IIts,NITs or whatever','you are productive person if u feel you are not delivering your 100% its not because because you arent studying,its something else']
261
- res3="Feedback on your productivity --> "+random.choice(e)
262
- if z['productive value'].eq(0).all():
263
- f=['Try to stick to an approach that is convenient for you, and have a clear mind before you start working','Start solving more puzzles, a daily sudoku is a good start; you just need to stay on your toes and tune your mind to a variety of activities','Think! Think! Think! Analyse where you fall short and start building strategies to improve yourself',
264
- 'Class participation: it is high time you start taking decisions and choose to be proactive','Connect everything with what you are learning so that it sticks in your mind and helps you recall it when and where you need it','Enjoy the process of learning, do not be a monotonous bookworm, train your mind to face your challenges','Actively consult your instructor to pick up more ways to improve your productivity',
265
- 'Rather than a brute-force approach, try to think of an optimal solution to a problem','Gather plenty of resources and settle in at your desk, take short mobile breaks, and an online chess game might be an eye opener before your next session']
266
- res3="Feedback on your productivity --> "+random.choice(f)
267
-
268
- if z['fitness_value'].eq(1).all():
269
- g=['fitness is your key ,if your body is strong your mind is stronger. Maintaining a good fitness is really important for your health as well as it empowers your learining ',' I can see you have spent time in maintaing your body. Keep winning more golds ','you have choosen to step out of your comfort zone and by trying to put some gains,this will surely be a stepping stone in other important sectors','your fitness level is reasonably good indicating that you are sticking to a schedule kind of person which is really good',
270
- 'you are in a good shape which is a key for self_confidence and gives you a lot of motivation','you are a sportive person ,this will really help you to socialize and gives you a lot of energy to start new things ','you are an open-minded person ,this is really the best character one could ask for,half the problems are over if one is listening and able to make good decisions ']
271
- res4="Feedback on your fitness --> "+random.choice(g)
272
- if z['fitness_value'].eq(0).all():
273
- h=['A weak body is a liability, you guys being the future generation should definetly be fit and healthy to lead the society at its best','your body should always get the first priority and should be taken care properly',
274
- 'Any physical activity will make you disipline and gives you self confidence. Join your school team today ','out of all a hungry stomach isnt fit for a brisk study session ,being physically fit lets you do more activity even improve your academics ',
275
- 'engage yourself in any physical activity for 20 mins as it can improve your concentration and helps your focus in learning ','out of your busy schedule try devoting just 15 mins get down do some pushups or squats or a brisk jog will do good ']
276
- res4="Feedback on your fitness --> "+random.choice(h)
277
-
278
- if z['sleep value'].eq(1).all():
279
- i=['Good that you have a proper sleep, just stick to it and try finishing all your work in the day time and get enough rest','Its pretty impressive that you are giving enough importance to your sleep, shows that you have good time management skills and a sweet dream','getting a good sleep even during your stressed timetables shows that you stay at the moment',
280
- 'a good fitness routine followed by a good-sleep is a good sunday schedule and a good starter for a hectic next week which i hope you would have experienced many times','its good that you have a good sleep everynight this is big boost for a bright tomorrow']
281
- res5="Feedback on your sleep time --> "+random.choice(i)
282
- if z['sleep value'].eq(0).all():
283
-
284
- j=['The time we sleep is only when we rest our mind, eyes and the whole body which is really crucial for a stduent',' Try not using any devices an hour before you sleep, have a good sleep cycle for atleast 6 to 7 hrs a day','Get enough rest, dont stress your body too much.',
285
- 'Prioritize your sleep, dont have caffinated drinks late in the evening and getting good sleep will make you feel fresh and enegrytic all day long ',
286
- 'a 7 - hour refresh will set your body clock for the rest of your day so please ensure that you get adequate rest','if you are sleep deprieved make sure you exhaust all your energy during the day and make sure you get a pleasant and peaceful sleep',
287
- 'tests prove that sleep deprivation is a result for low academic performance make sure you dont fall under that','Please ensure that the extra miles which you are putting doesnt affect your sleep']
288
-
289
- res5="Feedback on your sleep time --> "+random.choice(j)
290
-
291
- if z['motivation value'].eq(1).all():
292
- k=['you are fairly motivated ,Motivation drives everyone to work better to achive something,it lits a light inside you ','you should be really proud that you have good motivation at a really young age,use it in areas where you feel a bit off',
293
- 'None of the greatest achievers couldnt have done it without motivation and self motivation is really powerfull tool to success ,you are one among them Keep going!',
294
- 'a good level of motivation gives you high spirits and a good attitude,your attitude builds YOU']
295
-
296
- res6="motivation factor --> "+random.choice(k)
297
- if z['motivation value'].eq(0).all():
298
-
299
- l=['Nobody in the world is born with motivation,in this modern era you cant expect external motivation,you better be your own motivation','messi took eighteen years to be the G.O.A.T ignoring all demotivation and insults its finally your time',
300
- 'change your scenery sitting in a desk all-day makes you dull ,to renew interest,a new setting can be just what some students need to stay motivated to learn',
301
- 'lay-out clear objectives before you start learning so that there is no confussion','Make your goals high but attainable dont be afraid to push yourself to get more out of them ',
302
- 'Spend some quality time with your family listen to their experiences and try to dollow their footsteps']
303
-
304
-
305
- res6="motivation factor --> "+random.choice(l)
306
-
307
- if z['performance_value'].eq(1).all():
308
- m=['Good job you!! Your hardwork and efforts paid off, you have nothing to worry about ,you are academically strong','To be honest that grades made me a little jealous. I can see the work you are putting towards academics',
309
- 'Give a big hit on boards make your parents and teachers proud, trust me that is super satisfying','academic performance gives you a lot of boost to you take that put in all other aspects which will give you overall developement',
310
- 'the most satisfying thing is scoring high its great that you are easily doing it','you are almost sorted out you now just have to take care of the bits and pieces']
311
-
312
- res7="Feedback on your performance --> "+random.choice(m)
313
-
314
- if z['performance_value'].eq(0).all():
315
- n=['Its never late to begin. Divide your work, note important things mentioned in class spend more time in studies','Dont be ashamed to ask doubts we dont mind others judging. So we start from physics today? jk',
316
- 'Start studying with your friends, seek help from teachers,Remember the hardwork you put never fails you','analyse where you are making errors if you find that you are making mistakes while writing try practicing the sample papers it will help you to an extent'
317
- ,'you are almost there!!take short notes of the theoritical concepts so that it will be easy for reference','dont worry about where you are standing at the moment ,back yourself ,start it from scratch']
318
-
319
- res7="Feedback on your performance --> "+random.choice(n)
320
-
321
- if z['media_value'].eq(1).all():
322
- o=[' In the world of people being addicted to social media today, its happy to see someone like you','Its good that you are not scrolling too much','Having a good social profile is important and you having a limit is really impressive'
323
- ,'Having the self control on yourself is really great but ensure that dont overdo on anything else','you are self-conscious which is really a great character to acquire']
324
-
325
- res8="Feedback on your social media time --> "+random.choice(o)
326
-
327
- if z['media_value'].eq(0).all():
328
- p=['Its really common for this generation people to get addicted to social media. All you have to do is keep track of the time, dont over do stuffs and you dont have to post a story everyday.',
329
- 'Nothing wrong becoming a social idle, but right now concentrate in your studies','socially active is essential but over - scrolling will trap you in the matrix which you are unaware of',
330
- 'stay in your limits socially active for more than a hour during high school is ill advised','knowing that its impacting you and using social media again !! what is that??']
331
-
332
- res8="Feedback on your social media time --> "+random.choice(p)
333
-
334
-
335
- if z['overall'].eq(1).all():
336
- q=['OMG!! I am thinking of getting a piece of advice from you, you are almost there and it is good that you participate equally in everything','You are an explorer and can learn new things easily, you are about to win the race',
337
- 'Your work is impressing everyone, right from your teachers and friends to your parents; you are active, brisk and have good potential to improve your performance further',
338
- 'You are doing great, you are ready for new challenges and failures do not bother you','You are a multitasker, just make sure you do not sink into over-confidence','Do not put yourself under any kind of pressure; even though you feel stressed, time will answer it and you will pass with flying colours',
339
- 'You are growing in confidence, use it to learn new things, choose your core and find your destiny']
340
-
341
- res9=random.choice(q)
342
-
343
- if z['overall'].eq(0).all():
344
-
345
- r=['Its all good everyone goes out of form,the comeback is always on start putting consistent efforts','Put in the time, hardwork and you can already see it coming,you are just a few steps dowm','When we hit out lowest point we are open to the greatest change you are going to bring the best out of it. And yes that was said by Avatar Roku'
346
- ,'Choose the right person whom you feel will take you through all the obstracles you need make things more clear','The best view comes after the hardest climb you can climb the moutain ahead of you','You just need to reboot and have a good set-up ,stay optimistic and everything will take care of itself if you take one step at a time',
347
- 'You are nearing the pinacle of your true potential,just few changes hear and there you will be on your prime']
348
-
349
- res9=random.choice(r)
350
-
351
-
352
-
353
-
354
-
355
-
356
-
357
-
358
- return "hi " + str (Name) + " this is a predictive model there may be some incorrect guesses so just take the points which you feel may work in your case \nalso if u feel the feeadbacks are harsh please flag your opinion \ntake your time to read this and hope u like it 😊\n\n\n"+ res1+" ,\n " + res2 +" ,\n " + res3 +" ,\n " + res4 +" ,\n " + res5 +" ,\n " + res6 +" ,\n " + res7 +" ,\n " + res8 +" ,\n\n\n " + res9
359
-
360
-
361
-
362
- demo = gr.Interface(
363
- assign_weights,
364
- [
365
- "text",
366
- gr.Dropdown(['Science','Commerce'], label="Choose your stream"),
367
- gr.Radio(["<5", "5 - 12", "13 - 20", "20 - 30",">30"],label='On an average, how many hours a week do you spend on academics?'),
368
- gr.Radio(["0 - 20%", "20 - 40%", "40 - 60%", "60 - 80%","80 -100%"],label='How willing are you to work on a particular task ?'),
369
- gr.Radio(["Yes", "No", ],label='Do you take up any physical activity at regular intervals(at least 3 hours a week) ?'),
370
- gr.Radio(["Football", "Cricket", "Basketball", "Tennis" , "Chess" ,"Other","Not interested in sports"],label='Choose your favourite sport you follow or play'),
371
- gr.Radio(["Never", "Occasionally", "Sometimes", "Often" , "Always"],label='How often do you spend time with your friends and family?'),
372
- gr.Radio(["Always", "Very often", "Sometimes", "Rarely" ,"Never"],label='Has poor sleep troubled you in the last month?'),
373
- gr.Radio(["Perfect", "Good", "Average", "Poor"],label='What is your current level of fitness?'),
374
- gr.Radio(["Never", "Once in a while", "About half the time", "Most of the time","Always"],label='Do you feel kinda losing concentration during classes and other activities'),
375
- gr.Radio(["Never", "Once in a while", "About half the time", "Most of the time","Always"],label='is there a change in your eating habits(either under eating or overeating'),
376
- gr.Radio(["< 2", "2 - 5", "5 - 8", "> 8"],label='How many hours of free time do you have after school?'),
377
- gr.Radio(["Asking a lot of questions to the teacher", "Completing various assignments", "Sports and other extracurricular activities", "Other"],label='What motivates you to learn more?'),
378
- gr.Radio(["<30 mins", "30 - 60", "60 - 120", ">120 mins"],label='How long you spend your time on social media on a daily basis? '),
379
- gr.Radio(["Yes", "No"],label='Do you feel that spending time on social media has been a reason for the deterioration in your academic performance?'),
380
- gr.Radio(["<30%", "30% - 50%", "50% - 70%", "70% - 90%",">90%"],label='How much you score in your academics'),
381
- ],
382
- "text",
383
-
384
- title="Performance predictor and feedback generator",
385
- description="Here's a sample performance calculator. Enjoy!",
386
-
387
- )
388
- demo.launch(share=True, debug=True)
389
-
390
-
391
-
392
-
393
-
394
-
395
-
396
-
397
-
398
-
399
-
400
-
401
-
402
-
403
-
404
-
405
-
406
-
407
-
408
-
409
-
410
-
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_install.py DELETED
@@ -1,867 +0,0 @@
1
- # The following comment should be removed at some point in the future.
2
- # mypy: strict-optional=False
3
-
4
- import functools
5
- import logging
6
- import os
7
- import shutil
8
- import sys
9
- import uuid
10
- import zipfile
11
- from optparse import Values
12
- from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union
13
-
14
- from pip._vendor.packaging.markers import Marker
15
- from pip._vendor.packaging.requirements import Requirement
16
- from pip._vendor.packaging.specifiers import SpecifierSet
17
- from pip._vendor.packaging.utils import canonicalize_name
18
- from pip._vendor.packaging.version import Version
19
- from pip._vendor.packaging.version import parse as parse_version
20
- from pip._vendor.pyproject_hooks import BuildBackendHookCaller
21
-
22
- from pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment
23
- from pip._internal.exceptions import InstallationError
24
- from pip._internal.locations import get_scheme
25
- from pip._internal.metadata import (
26
- BaseDistribution,
27
- get_default_environment,
28
- get_directory_distribution,
29
- get_wheel_distribution,
30
- )
31
- from pip._internal.metadata.base import FilesystemWheel
32
- from pip._internal.models.direct_url import DirectUrl
33
- from pip._internal.models.link import Link
34
- from pip._internal.operations.build.metadata import generate_metadata
35
- from pip._internal.operations.build.metadata_editable import generate_editable_metadata
36
- from pip._internal.operations.build.metadata_legacy import (
37
- generate_metadata as generate_metadata_legacy,
38
- )
39
- from pip._internal.operations.install.editable_legacy import (
40
- install_editable as install_editable_legacy,
41
- )
42
- from pip._internal.operations.install.wheel import install_wheel
43
- from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
44
- from pip._internal.req.req_uninstall import UninstallPathSet
45
- from pip._internal.utils.deprecation import deprecated
46
- from pip._internal.utils.hashes import Hashes
47
- from pip._internal.utils.misc import (
48
- ConfiguredBuildBackendHookCaller,
49
- ask_path_exists,
50
- backup_dir,
51
- display_path,
52
- hide_url,
53
- redact_auth_from_url,
54
- )
55
- from pip._internal.utils.packaging import safe_extra
56
- from pip._internal.utils.subprocess import runner_with_spinner_message
57
- from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
58
- from pip._internal.utils.virtualenv import running_under_virtualenv
59
- from pip._internal.vcs import vcs
60
-
61
- logger = logging.getLogger(__name__)
62
-
63
-
64
- class InstallRequirement:
65
- """
66
- Represents something that may be installed later on, may have information
67
- about where to fetch the relevant requirement and also contains logic for
68
- installing the said requirement.
69
- """
70
-
71
- def __init__(
72
- self,
73
- req: Optional[Requirement],
74
- comes_from: Optional[Union[str, "InstallRequirement"]],
75
- editable: bool = False,
76
- link: Optional[Link] = None,
77
- markers: Optional[Marker] = None,
78
- use_pep517: Optional[bool] = None,
79
- isolated: bool = False,
80
- *,
81
- global_options: Optional[List[str]] = None,
82
- hash_options: Optional[Dict[str, List[str]]] = None,
83
- config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
84
- constraint: bool = False,
85
- extras: Collection[str] = (),
86
- user_supplied: bool = False,
87
- permit_editable_wheels: bool = False,
88
- ) -> None:
89
- assert req is None or isinstance(req, Requirement), req
90
- self.req = req
91
- self.comes_from = comes_from
92
- self.constraint = constraint
93
- self.editable = editable
94
- self.permit_editable_wheels = permit_editable_wheels
95
-
96
- # source_dir is the local directory where the linked requirement is
97
- # located, or unpacked. In case unpacking is needed, creating and
98
- # populating source_dir is done by the RequirementPreparer. Note this
99
- # is not necessarily the directory where pyproject.toml or setup.py is
100
- # located - that one is obtained via unpacked_source_directory.
101
- self.source_dir: Optional[str] = None
102
- if self.editable:
103
- assert link
104
- if link.is_file:
105
- self.source_dir = os.path.normpath(os.path.abspath(link.file_path))
106
-
107
- if link is None and req and req.url:
108
- # PEP 508 URL requirement
109
- link = Link(req.url)
110
- self.link = self.original_link = link
111
-
112
- # When this InstallRequirement is a wheel obtained from the cache of locally
113
- # built wheels, this is the source link corresponding to the cache entry, which
114
- # was used to download and build the cached wheel.
115
- self.cached_wheel_source_link: Optional[Link] = None
116
-
117
- # Information about the location of the artifact that was downloaded. This
118
- # property is guaranteed to be set in resolver results.
119
- self.download_info: Optional[DirectUrl] = None
120
-
121
- # Path to any downloaded or already-existing package.
122
- self.local_file_path: Optional[str] = None
123
- if self.link and self.link.is_file:
124
- self.local_file_path = self.link.file_path
125
-
126
- if extras:
127
- self.extras = extras
128
- elif req:
129
- self.extras = {safe_extra(extra) for extra in req.extras}
130
- else:
131
- self.extras = set()
132
- if markers is None and req:
133
- markers = req.marker
134
- self.markers = markers
135
-
136
- # This holds the Distribution object if this requirement is already installed.
137
- self.satisfied_by: Optional[BaseDistribution] = None
138
- # Whether the installation process should try to uninstall an existing
139
- # distribution before installing this requirement.
140
- self.should_reinstall = False
141
- # Temporary build location
142
- self._temp_build_dir: Optional[TempDirectory] = None
143
- # Set to True after successful installation
144
- self.install_succeeded: Optional[bool] = None
145
- # Supplied options
146
- self.global_options = global_options if global_options else []
147
- self.hash_options = hash_options if hash_options else {}
148
- self.config_settings = config_settings
149
- # Set to True after successful preparation of this requirement
150
- self.prepared = False
151
- # User supplied requirement are explicitly requested for installation
152
- # by the user via CLI arguments or requirements files, as opposed to,
153
- # e.g. dependencies, extras or constraints.
154
- self.user_supplied = user_supplied
155
-
156
- self.isolated = isolated
157
- self.build_env: BuildEnvironment = NoOpBuildEnvironment()
158
-
159
- # For PEP 517, the directory where we request the project metadata
160
- # gets stored. We need this to pass to build_wheel, so the backend
161
- # can ensure that the wheel matches the metadata (see the PEP for
162
- # details).
163
- self.metadata_directory: Optional[str] = None
164
-
165
- # The static build requirements (from pyproject.toml)
166
- self.pyproject_requires: Optional[List[str]] = None
167
-
168
- # Build requirements that we will check are available
169
- self.requirements_to_check: List[str] = []
170
-
171
- # The PEP 517 backend we should use to build the project
172
- self.pep517_backend: Optional[BuildBackendHookCaller] = None
173
-
174
- # Are we using PEP 517 for this requirement?
175
- # After pyproject.toml has been loaded, the only valid values are True
176
- # and False. Before loading, None is valid (meaning "use the default").
177
- # Setting an explicit value before loading pyproject.toml is supported,
178
- # but after loading this flag should be treated as read only.
179
- self.use_pep517 = use_pep517
180
-
181
- # This requirement needs more preparation before it can be built
182
- self.needs_more_preparation = False
183
-
184
- def __str__(self) -> str:
185
- if self.req:
186
- s = str(self.req)
187
- if self.link:
188
- s += " from {}".format(redact_auth_from_url(self.link.url))
189
- elif self.link:
190
- s = redact_auth_from_url(self.link.url)
191
- else:
192
- s = "<InstallRequirement>"
193
- if self.satisfied_by is not None:
194
- if self.satisfied_by.location is not None:
195
- location = display_path(self.satisfied_by.location)
196
- else:
197
- location = "<memory>"
198
- s += f" in {location}"
199
- if self.comes_from:
200
- if isinstance(self.comes_from, str):
201
- comes_from: Optional[str] = self.comes_from
202
- else:
203
- comes_from = self.comes_from.from_path()
204
- if comes_from:
205
- s += f" (from {comes_from})"
206
- return s
207
-
208
- def __repr__(self) -> str:
209
- return "<{} object: {} editable={!r}>".format(
210
- self.__class__.__name__, str(self), self.editable
211
- )
212
-
213
- def format_debug(self) -> str:
214
- """An un-tested helper for getting state, for debugging."""
215
- attributes = vars(self)
216
- names = sorted(attributes)
217
-
218
- state = ("{}={!r}".format(attr, attributes[attr]) for attr in sorted(names))
219
- return "<{name} object: {{{state}}}>".format(
220
- name=self.__class__.__name__,
221
- state=", ".join(state),
222
- )
223
-
224
- # Things that are valid for all kinds of requirements?
225
- @property
226
- def name(self) -> Optional[str]:
227
- if self.req is None:
228
- return None
229
- return self.req.name
230
-
231
- @functools.lru_cache() # use cached_property in python 3.8+
232
- def supports_pyproject_editable(self) -> bool:
233
- if not self.use_pep517:
234
- return False
235
- assert self.pep517_backend
236
- with self.build_env:
237
- runner = runner_with_spinner_message(
238
- "Checking if build backend supports build_editable"
239
- )
240
- with self.pep517_backend.subprocess_runner(runner):
241
- return "build_editable" in self.pep517_backend._supported_features()
242
-
243
- @property
244
- def specifier(self) -> SpecifierSet:
245
- return self.req.specifier
246
-
247
- @property
248
- def is_pinned(self) -> bool:
249
- """Return whether I am pinned to an exact version.
250
-
251
- For example, some-package==1.2 is pinned; some-package>1.2 is not.
252
- """
253
- specifiers = self.specifier
254
- return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}
255
-
256
- def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool:
257
- if not extras_requested:
258
- # Provide an extra to safely evaluate the markers
259
- # without matching any extra
260
- extras_requested = ("",)
261
- if self.markers is not None:
262
- return any(
263
- self.markers.evaluate({"extra": extra}) for extra in extras_requested
264
- )
265
- else:
266
- return True
267
-
268
- @property
269
- def has_hash_options(self) -> bool:
270
- """Return whether any known-good hashes are specified as options.
271
-
272
- These activate --require-hashes mode; hashes specified as part of a
273
- URL do not.
274
-
275
- """
276
- return bool(self.hash_options)
277
-
278
- def hashes(self, trust_internet: bool = True) -> Hashes:
279
- """Return a hash-comparer that considers my option- and URL-based
280
- hashes to be known-good.
281
-
282
- Hashes in URLs--ones embedded in the requirements file, not ones
283
- downloaded from an index server--are almost peers with ones from
284
- flags. They satisfy --require-hashes (whether it was implicitly or
285
- explicitly activated) but do not activate it. md5 and sha224 are not
286
- allowed in flags, which should nudge people toward good algos. We
287
- always OR all hashes together, even ones from URLs.
288
-
289
- :param trust_internet: Whether to trust URL-based (#md5=...) hashes
290
- downloaded from the internet, as by populate_link()
291
-
292
- """
293
- good_hashes = self.hash_options.copy()
294
- if trust_internet:
295
- link = self.link
296
- elif self.original_link and self.user_supplied:
297
- link = self.original_link
298
- else:
299
- link = None
300
- if link and link.hash:
301
- good_hashes.setdefault(link.hash_name, []).append(link.hash)
302
- return Hashes(good_hashes)
303
-
304
- def from_path(self) -> Optional[str]:
305
- """Format a nice indicator to show where this "comes from" """
306
- if self.req is None:
307
- return None
308
- s = str(self.req)
309
- if self.comes_from:
310
- if isinstance(self.comes_from, str):
311
- comes_from = self.comes_from
312
- else:
313
- comes_from = self.comes_from.from_path()
314
- if comes_from:
315
- s += "->" + comes_from
316
- return s
317
-
318
- def ensure_build_location(
319
- self, build_dir: str, autodelete: bool, parallel_builds: bool
320
- ) -> str:
321
- assert build_dir is not None
322
- if self._temp_build_dir is not None:
323
- assert self._temp_build_dir.path
324
- return self._temp_build_dir.path
325
- if self.req is None:
326
- # Some systems have /tmp as a symlink which confuses custom
327
- # builds (such as numpy). Thus, we ensure that the real path
328
- # is returned.
329
- self._temp_build_dir = TempDirectory(
330
- kind=tempdir_kinds.REQ_BUILD, globally_managed=True
331
- )
332
-
333
- return self._temp_build_dir.path
334
-
335
- # This is the only remaining place where we manually determine the path
336
- # for the temporary directory. It is only needed for editables where
337
- # it is the value of the --src option.
338
-
339
- # When parallel builds are enabled, add a UUID to the build directory
340
- # name so multiple builds do not interfere with each other.
341
- dir_name: str = canonicalize_name(self.name)
342
- if parallel_builds:
343
- dir_name = f"{dir_name}_{uuid.uuid4().hex}"
344
-
345
- # FIXME: Is there a better place to create the build_dir? (hg and bzr
346
- # need this)
347
- if not os.path.exists(build_dir):
348
- logger.debug("Creating directory %s", build_dir)
349
- os.makedirs(build_dir)
350
- actual_build_dir = os.path.join(build_dir, dir_name)
351
- # `None` indicates that we respect the globally-configured deletion
352
- # settings, which is what we actually want when auto-deleting.
353
- delete_arg = None if autodelete else False
354
- return TempDirectory(
355
- path=actual_build_dir,
356
- delete=delete_arg,
357
- kind=tempdir_kinds.REQ_BUILD,
358
- globally_managed=True,
359
- ).path
360
-
361
- def _set_requirement(self) -> None:
362
- """Set requirement after generating metadata."""
363
- assert self.req is None
364
- assert self.metadata is not None
365
- assert self.source_dir is not None
366
-
367
- # Construct a Requirement object from the generated metadata
368
- if isinstance(parse_version(self.metadata["Version"]), Version):
369
- op = "=="
370
- else:
371
- op = "==="
372
-
373
- self.req = Requirement(
374
- "".join(
375
- [
376
- self.metadata["Name"],
377
- op,
378
- self.metadata["Version"],
379
- ]
380
- )
381
- )
382
-
383
- def warn_on_mismatching_name(self) -> None:
384
- metadata_name = canonicalize_name(self.metadata["Name"])
385
- if canonicalize_name(self.req.name) == metadata_name:
386
- # Everything is fine.
387
- return
388
-
389
- # If we're here, there's a mismatch. Log a warning about it.
390
- logger.warning(
391
- "Generating metadata for package %s "
392
- "produced metadata for project name %s. Fix your "
393
- "#egg=%s fragments.",
394
- self.name,
395
- metadata_name,
396
- self.name,
397
- )
398
- self.req = Requirement(metadata_name)
399
-
400
- def check_if_exists(self, use_user_site: bool) -> None:
401
- """Find an installed distribution that satisfies or conflicts
402
- with this requirement, and set self.satisfied_by or
403
- self.should_reinstall appropriately.
404
- """
405
- if self.req is None:
406
- return
407
- existing_dist = get_default_environment().get_distribution(self.req.name)
408
- if not existing_dist:
409
- return
410
-
411
- version_compatible = self.req.specifier.contains(
412
- existing_dist.version,
413
- prereleases=True,
414
- )
415
- if not version_compatible:
416
- self.satisfied_by = None
417
- if use_user_site:
418
- if existing_dist.in_usersite:
419
- self.should_reinstall = True
420
- elif running_under_virtualenv() and existing_dist.in_site_packages:
421
- raise InstallationError(
422
- f"Will not install to the user site because it will "
423
- f"lack sys.path precedence to {existing_dist.raw_name} "
424
- f"in {existing_dist.location}"
425
- )
426
- else:
427
- self.should_reinstall = True
428
- else:
429
- if self.editable:
430
- self.should_reinstall = True
431
- # when installing editables, nothing pre-existing should ever
432
- # satisfy
433
- self.satisfied_by = None
434
- else:
435
- self.satisfied_by = existing_dist
436
-
437
- # Things valid for wheels
438
- @property
439
- def is_wheel(self) -> bool:
440
- if not self.link:
441
- return False
442
- return self.link.is_wheel
443
-
444
- @property
445
- def is_wheel_from_cache(self) -> bool:
446
- # When True, it means that this InstallRequirement is a local wheel file in the
447
- # cache of locally built wheels.
448
- return self.cached_wheel_source_link is not None
449
-
450
- # Things valid for sdists
451
- @property
452
- def unpacked_source_directory(self) -> str:
453
- return os.path.join(
454
- self.source_dir, self.link and self.link.subdirectory_fragment or ""
455
- )
456
-
457
- @property
458
- def setup_py_path(self) -> str:
459
- assert self.source_dir, f"No source dir for {self}"
460
- setup_py = os.path.join(self.unpacked_source_directory, "setup.py")
461
-
462
- return setup_py
463
-
464
- @property
465
- def setup_cfg_path(self) -> str:
466
- assert self.source_dir, f"No source dir for {self}"
467
- setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg")
468
-
469
- return setup_cfg
470
-
471
- @property
472
- def pyproject_toml_path(self) -> str:
473
- assert self.source_dir, f"No source dir for {self}"
474
- return make_pyproject_path(self.unpacked_source_directory)
475
-
476
- def load_pyproject_toml(self) -> None:
477
- """Load the pyproject.toml file.
478
-
479
- After calling this routine, all of the attributes related to PEP 517
480
- processing for this requirement have been set. In particular, the
481
- use_pep517 attribute can be used to determine whether we should
482
- follow the PEP 517 or legacy (setup.py) code path.
483
- """
484
- pyproject_toml_data = load_pyproject_toml(
485
- self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self)
486
- )
487
-
488
- if pyproject_toml_data is None:
489
- if self.config_settings:
490
- deprecated(
491
- reason=f"Config settings are ignored for project {self}.",
492
- replacement=(
493
- "to use --use-pep517 or add a "
494
- "pyproject.toml file to the project"
495
- ),
496
- gone_in="23.3",
497
- )
498
- self.use_pep517 = False
499
- return
500
-
501
- self.use_pep517 = True
502
- requires, backend, check, backend_path = pyproject_toml_data
503
- self.requirements_to_check = check
504
- self.pyproject_requires = requires
505
- self.pep517_backend = ConfiguredBuildBackendHookCaller(
506
- self,
507
- self.unpacked_source_directory,
508
- backend,
509
- backend_path=backend_path,
510
- )
511
-
512
- def isolated_editable_sanity_check(self) -> None:
513
- """Check that an editable requirement if valid for use with PEP 517/518.
514
-
515
- This verifies that an editable that has a pyproject.toml either supports PEP 660
516
- or has a setup.py or a setup.cfg
517
- """
518
- if (
519
- self.editable
520
- and self.use_pep517
521
- and not self.supports_pyproject_editable()
522
- and not os.path.isfile(self.setup_py_path)
523
- and not os.path.isfile(self.setup_cfg_path)
524
- ):
525
- raise InstallationError(
526
- f"Project {self} has a 'pyproject.toml' and its build "
527
- f"backend is missing the 'build_editable' hook. Since it does not "
528
- f"have a 'setup.py' nor a 'setup.cfg', "
529
- f"it cannot be installed in editable mode. "
530
- f"Consider using a build backend that supports PEP 660."
531
- )
532
-
533
- def prepare_metadata(self) -> None:
534
- """Ensure that project metadata is available.
535
-
536
- Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.
537
- Under legacy processing, call setup.py egg-info.
538
- """
539
- assert self.source_dir
540
- details = self.name or f"from {self.link}"
541
-
542
- if self.use_pep517:
543
- assert self.pep517_backend is not None
544
- if (
545
- self.editable
546
- and self.permit_editable_wheels
547
- and self.supports_pyproject_editable()
548
- ):
549
- self.metadata_directory = generate_editable_metadata(
550
- build_env=self.build_env,
551
- backend=self.pep517_backend,
552
- details=details,
553
- )
554
- else:
555
- self.metadata_directory = generate_metadata(
556
- build_env=self.build_env,
557
- backend=self.pep517_backend,
558
- details=details,
559
- )
560
- else:
561
- self.metadata_directory = generate_metadata_legacy(
562
- build_env=self.build_env,
563
- setup_py_path=self.setup_py_path,
564
- source_dir=self.unpacked_source_directory,
565
- isolated=self.isolated,
566
- details=details,
567
- )
568
-
569
- # Act on the newly generated metadata, based on the name and version.
570
- if not self.name:
571
- self._set_requirement()
572
- else:
573
- self.warn_on_mismatching_name()
574
-
575
- self.assert_source_matches_version()
576
-
577
- @property
578
- def metadata(self) -> Any:
579
- if not hasattr(self, "_metadata"):
580
- self._metadata = self.get_dist().metadata
581
-
582
- return self._metadata
583
-
584
- def get_dist(self) -> BaseDistribution:
585
- if self.metadata_directory:
586
- return get_directory_distribution(self.metadata_directory)
587
- elif self.local_file_path and self.is_wheel:
588
- return get_wheel_distribution(
589
- FilesystemWheel(self.local_file_path), canonicalize_name(self.name)
590
- )
591
- raise AssertionError(
592
- f"InstallRequirement {self} has no metadata directory and no wheel: "
593
- f"can't make a distribution."
594
- )
595
-
596
- def assert_source_matches_version(self) -> None:
597
- assert self.source_dir
598
- version = self.metadata["version"]
599
- if self.req.specifier and version not in self.req.specifier:
600
- logger.warning(
601
- "Requested %s, but installing version %s",
602
- self,
603
- version,
604
- )
605
- else:
606
- logger.debug(
607
- "Source in %s has version %s, which satisfies requirement %s",
608
- display_path(self.source_dir),
609
- version,
610
- self,
611
- )
612
-
613
- # For both source distributions and editables
614
- def ensure_has_source_dir(
615
- self,
616
- parent_dir: str,
617
- autodelete: bool = False,
618
- parallel_builds: bool = False,
619
- ) -> None:
620
- """Ensure that a source_dir is set.
621
-
622
- This will create a temporary build dir if the name of the requirement
623
- isn't known yet.
624
-
625
- :param parent_dir: The ideal pip parent_dir for the source_dir.
626
- Generally src_dir for editables and build_dir for sdists.
627
- :return: self.source_dir
628
- """
629
- if self.source_dir is None:
630
- self.source_dir = self.ensure_build_location(
631
- parent_dir,
632
- autodelete=autodelete,
633
- parallel_builds=parallel_builds,
634
- )
635
-
636
- # For editable installations
637
- def update_editable(self) -> None:
638
- if not self.link:
639
- logger.debug(
640
- "Cannot update repository at %s; repository location is unknown",
641
- self.source_dir,
642
- )
643
- return
644
- assert self.editable
645
- assert self.source_dir
646
- if self.link.scheme == "file":
647
- # Static paths don't get updated
648
- return
649
- vcs_backend = vcs.get_backend_for_scheme(self.link.scheme)
650
- # Editable requirements are validated in Requirement constructors.
651
- # So here, if it's neither a path nor a valid VCS URL, it's a bug.
652
- assert vcs_backend, f"Unsupported VCS URL {self.link.url}"
653
- hidden_url = hide_url(self.link.url)
654
- vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0)
655
-
656
- # Top-level Actions
657
- def uninstall(
658
- self, auto_confirm: bool = False, verbose: bool = False
659
- ) -> Optional[UninstallPathSet]:
660
- """
661
- Uninstall the distribution currently satisfying this requirement.
662
-
663
- Prompts before removing or modifying files unless
664
- ``auto_confirm`` is True.
665
-
666
- Refuses to delete or modify files outside of ``sys.prefix`` -
667
- thus uninstallation within a virtual environment can only
668
- modify that virtual environment, even if the virtualenv is
669
- linked to global site-packages.
670
-
671
- """
672
- assert self.req
673
- dist = get_default_environment().get_distribution(self.req.name)
674
- if not dist:
675
- logger.warning("Skipping %s as it is not installed.", self.name)
676
- return None
677
- logger.info("Found existing installation: %s", dist)
678
-
679
- uninstalled_pathset = UninstallPathSet.from_dist(dist)
680
- uninstalled_pathset.remove(auto_confirm, verbose)
681
- return uninstalled_pathset
682
-
683
- def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str:
684
- def _clean_zip_name(name: str, prefix: str) -> str:
685
- assert name.startswith(
686
- prefix + os.path.sep
687
- ), f"name {name!r} doesn't start with prefix {prefix!r}"
688
- name = name[len(prefix) + 1 :]
689
- name = name.replace(os.path.sep, "/")
690
- return name
691
-
692
- path = os.path.join(parentdir, path)
693
- name = _clean_zip_name(path, rootdir)
694
- return self.name + "/" + name
695
-
696
- def archive(self, build_dir: Optional[str]) -> None:
697
- """Saves archive to provided build_dir.
698
-
699
- Used for saving downloaded VCS requirements as part of `pip download`.
700
- """
701
- assert self.source_dir
702
- if build_dir is None:
703
- return
704
-
705
- create_archive = True
706
- archive_name = "{}-{}.zip".format(self.name, self.metadata["version"])
707
- archive_path = os.path.join(build_dir, archive_name)
708
-
709
- if os.path.exists(archive_path):
710
- response = ask_path_exists(
711
- "The file {} exists. (i)gnore, (w)ipe, "
712
- "(b)ackup, (a)bort ".format(display_path(archive_path)),
713
- ("i", "w", "b", "a"),
714
- )
715
- if response == "i":
716
- create_archive = False
717
- elif response == "w":
718
- logger.warning("Deleting %s", display_path(archive_path))
719
- os.remove(archive_path)
720
- elif response == "b":
721
- dest_file = backup_dir(archive_path)
722
- logger.warning(
723
- "Backing up %s to %s",
724
- display_path(archive_path),
725
- display_path(dest_file),
726
- )
727
- shutil.move(archive_path, dest_file)
728
- elif response == "a":
729
- sys.exit(-1)
730
-
731
- if not create_archive:
732
- return
733
-
734
- zip_output = zipfile.ZipFile(
735
- archive_path,
736
- "w",
737
- zipfile.ZIP_DEFLATED,
738
- allowZip64=True,
739
- )
740
- with zip_output:
741
- dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory))
742
- for dirpath, dirnames, filenames in os.walk(dir):
743
- for dirname in dirnames:
744
- dir_arcname = self._get_archive_name(
745
- dirname,
746
- parentdir=dirpath,
747
- rootdir=dir,
748
- )
749
- zipdir = zipfile.ZipInfo(dir_arcname + "/")
750
- zipdir.external_attr = 0x1ED << 16 # 0o755
751
- zip_output.writestr(zipdir, "")
752
- for filename in filenames:
753
- file_arcname = self._get_archive_name(
754
- filename,
755
- parentdir=dirpath,
756
- rootdir=dir,
757
- )
758
- filename = os.path.join(dirpath, filename)
759
- zip_output.write(filename, file_arcname)
760
-
761
- logger.info("Saved %s", display_path(archive_path))
762
-
763
- def install(
764
- self,
765
- global_options: Optional[Sequence[str]] = None,
766
- root: Optional[str] = None,
767
- home: Optional[str] = None,
768
- prefix: Optional[str] = None,
769
- warn_script_location: bool = True,
770
- use_user_site: bool = False,
771
- pycompile: bool = True,
772
- ) -> None:
773
- scheme = get_scheme(
774
- self.name,
775
- user=use_user_site,
776
- home=home,
777
- root=root,
778
- isolated=self.isolated,
779
- prefix=prefix,
780
- )
781
-
782
- if self.editable and not self.is_wheel:
783
- install_editable_legacy(
784
- global_options=global_options if global_options is not None else [],
785
- prefix=prefix,
786
- home=home,
787
- use_user_site=use_user_site,
788
- name=self.name,
789
- setup_py_path=self.setup_py_path,
790
- isolated=self.isolated,
791
- build_env=self.build_env,
792
- unpacked_source_directory=self.unpacked_source_directory,
793
- )
794
- self.install_succeeded = True
795
- return
796
-
797
- assert self.is_wheel
798
- assert self.local_file_path
799
-
800
- install_wheel(
801
- self.name,
802
- self.local_file_path,
803
- scheme=scheme,
804
- req_description=str(self.req),
805
- pycompile=pycompile,
806
- warn_script_location=warn_script_location,
807
- direct_url=self.download_info if self.original_link else None,
808
- requested=self.user_supplied,
809
- )
810
- self.install_succeeded = True
811
-
812
-
813
- def check_invalid_constraint_type(req: InstallRequirement) -> str:
814
- # Check for unsupported forms
815
- problem = ""
816
- if not req.name:
817
- problem = "Unnamed requirements are not allowed as constraints"
818
- elif req.editable:
819
- problem = "Editable requirements are not allowed as constraints"
820
- elif req.extras:
821
- problem = "Constraints cannot have extras"
822
-
823
- if problem:
824
- deprecated(
825
- reason=(
826
- "Constraints are only allowed to take the form of a package "
827
- "name and a version specifier. Other forms were originally "
828
- "permitted as an accident of the implementation, but were "
829
- "undocumented. The new implementation of the resolver no "
830
- "longer supports these forms."
831
- ),
832
- replacement="replacing the constraint with a requirement",
833
- # No plan yet for when the new resolver becomes default
834
- gone_in=None,
835
- issue=8210,
836
- )
837
-
838
- return problem
839
-
840
-
841
- def _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool:
842
- if getattr(options, option, None):
843
- return True
844
- for req in reqs:
845
- if getattr(req, option, None):
846
- return True
847
- return False
848
-
849
-
850
- def check_legacy_setup_py_options(
851
- options: Values,
852
- reqs: List[InstallRequirement],
853
- ) -> None:
854
- has_build_options = _has_option(options, reqs, "build_options")
855
- has_global_options = _has_option(options, reqs, "global_options")
856
- if has_build_options or has_global_options:
857
- deprecated(
858
- reason="--build-option and --global-option are deprecated.",
859
- issue=11859,
860
- replacement="to use --config-settings",
861
- gone_in="23.3",
862
- )
863
- logger.warning(
864
- "Implying --no-binary=:all: due to the presence of "
865
- "--build-option / --global-option. "
866
- )
867
- options.format_control.disallow_binaries()
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/poolmanager.py DELETED
@@ -1,537 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- import collections
4
- import functools
5
- import logging
6
-
7
- from ._collections import RecentlyUsedContainer
8
- from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
9
- from .exceptions import (
10
- LocationValueError,
11
- MaxRetryError,
12
- ProxySchemeUnknown,
13
- ProxySchemeUnsupported,
14
- URLSchemeUnknown,
15
- )
16
- from .packages import six
17
- from .packages.six.moves.urllib.parse import urljoin
18
- from .request import RequestMethods
19
- from .util.proxy import connection_requires_http_tunnel
20
- from .util.retry import Retry
21
- from .util.url import parse_url
22
-
23
- __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
24
-
25
-
26
- log = logging.getLogger(__name__)
27
-
28
- SSL_KEYWORDS = (
29
- "key_file",
30
- "cert_file",
31
- "cert_reqs",
32
- "ca_certs",
33
- "ssl_version",
34
- "ca_cert_dir",
35
- "ssl_context",
36
- "key_password",
37
- "server_hostname",
38
- )
39
-
40
- # All known keyword arguments that could be provided to the pool manager, its
41
- # pools, or the underlying connections. This is used to construct a pool key.
42
- _key_fields = (
43
- "key_scheme", # str
44
- "key_host", # str
45
- "key_port", # int
46
- "key_timeout", # int or float or Timeout
47
- "key_retries", # int or Retry
48
- "key_strict", # bool
49
- "key_block", # bool
50
- "key_source_address", # str
51
- "key_key_file", # str
52
- "key_key_password", # str
53
- "key_cert_file", # str
54
- "key_cert_reqs", # str
55
- "key_ca_certs", # str
56
- "key_ssl_version", # str
57
- "key_ca_cert_dir", # str
58
- "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
59
- "key_maxsize", # int
60
- "key_headers", # dict
61
- "key__proxy", # parsed proxy url
62
- "key__proxy_headers", # dict
63
- "key__proxy_config", # class
64
- "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
65
- "key__socks_options", # dict
66
- "key_assert_hostname", # bool or string
67
- "key_assert_fingerprint", # str
68
- "key_server_hostname", # str
69
- )
70
-
71
- #: The namedtuple class used to construct keys for the connection pool.
72
- #: All custom key schemes should include the fields in this key at a minimum.
73
- PoolKey = collections.namedtuple("PoolKey", _key_fields)
74
-
75
- _proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
76
- ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
77
-
78
-
79
- def _default_key_normalizer(key_class, request_context):
80
- """
81
- Create a pool key out of a request context dictionary.
82
-
83
- According to RFC 3986, both the scheme and host are case-insensitive.
84
- Therefore, this function normalizes both before constructing the pool
85
- key for an HTTPS request. If you wish to change this behaviour, provide
86
- alternate callables to ``key_fn_by_scheme``.
87
-
88
- :param key_class:
89
- The class to use when constructing the key. This should be a namedtuple
90
- with the ``scheme`` and ``host`` keys at a minimum.
91
- :type key_class: namedtuple
92
- :param request_context:
93
- A dictionary-like object that contain the context for a request.
94
- :type request_context: dict
95
-
96
- :return: A namedtuple that can be used as a connection pool key.
97
- :rtype: PoolKey
98
- """
99
- # Since we mutate the dictionary, make a copy first
100
- context = request_context.copy()
101
- context["scheme"] = context["scheme"].lower()
102
- context["host"] = context["host"].lower()
103
-
104
- # These are both dictionaries and need to be transformed into frozensets
105
- for key in ("headers", "_proxy_headers", "_socks_options"):
106
- if key in context and context[key] is not None:
107
- context[key] = frozenset(context[key].items())
108
-
109
- # The socket_options key may be a list and needs to be transformed into a
110
- # tuple.
111
- socket_opts = context.get("socket_options")
112
- if socket_opts is not None:
113
- context["socket_options"] = tuple(socket_opts)
114
-
115
- # Map the kwargs to the names in the namedtuple - this is necessary since
116
- # namedtuples can't have fields starting with '_'.
117
- for key in list(context.keys()):
118
- context["key_" + key] = context.pop(key)
119
-
120
- # Default to ``None`` for keys missing from the context
121
- for field in key_class._fields:
122
- if field not in context:
123
- context[field] = None
124
-
125
- return key_class(**context)
126
-
127
-
128
- #: A dictionary that maps a scheme to a callable that creates a pool key.
129
- #: This can be used to alter the way pool keys are constructed, if desired.
130
- #: Each PoolManager makes a copy of this dictionary so they can be configured
131
- #: globally here, or individually on the instance.
132
- key_fn_by_scheme = {
133
- "http": functools.partial(_default_key_normalizer, PoolKey),
134
- "https": functools.partial(_default_key_normalizer, PoolKey),
135
- }
136
-
137
- pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
138
-
139
-
140
- class PoolManager(RequestMethods):
141
- """
142
- Allows for arbitrary requests while transparently keeping track of
143
- necessary connection pools for you.
144
-
145
- :param num_pools:
146
- Number of connection pools to cache before discarding the least
147
- recently used pool.
148
-
149
- :param headers:
150
- Headers to include with all requests, unless other headers are given
151
- explicitly.
152
-
153
- :param \\**connection_pool_kw:
154
- Additional parameters are used to create fresh
155
- :class:`urllib3.connectionpool.ConnectionPool` instances.
156
-
157
- Example::
158
-
159
- >>> manager = PoolManager(num_pools=2)
160
- >>> r = manager.request('GET', 'http://google.com/')
161
- >>> r = manager.request('GET', 'http://google.com/mail')
162
- >>> r = manager.request('GET', 'http://yahoo.com/')
163
- >>> len(manager.pools)
164
- 2
165
-
166
- """
167
-
168
- proxy = None
169
- proxy_config = None
170
-
171
- def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
172
- RequestMethods.__init__(self, headers)
173
- self.connection_pool_kw = connection_pool_kw
174
- self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
175
-
176
- # Locally set the pool classes and keys so other PoolManagers can
177
- # override them.
178
- self.pool_classes_by_scheme = pool_classes_by_scheme
179
- self.key_fn_by_scheme = key_fn_by_scheme.copy()
180
-
181
- def __enter__(self):
182
- return self
183
-
184
- def __exit__(self, exc_type, exc_val, exc_tb):
185
- self.clear()
186
- # Return False to re-raise any potential exceptions
187
- return False
188
-
189
- def _new_pool(self, scheme, host, port, request_context=None):
190
- """
191
- Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
192
- any additional pool keyword arguments.
193
-
194
- If ``request_context`` is provided, it is provided as keyword arguments
195
- to the pool class used. This method is used to actually create the
196
- connection pools handed out by :meth:`connection_from_url` and
197
- companion methods. It is intended to be overridden for customization.
198
- """
199
- pool_cls = self.pool_classes_by_scheme[scheme]
200
- if request_context is None:
201
- request_context = self.connection_pool_kw.copy()
202
-
203
- # Although the context has everything necessary to create the pool,
204
- # this function has historically only used the scheme, host, and port
205
- # in the positional args. When an API change is acceptable these can
206
- # be removed.
207
- for key in ("scheme", "host", "port"):
208
- request_context.pop(key, None)
209
-
210
- if scheme == "http":
211
- for kw in SSL_KEYWORDS:
212
- request_context.pop(kw, None)
213
-
214
- return pool_cls(host, port, **request_context)
215
-
216
- def clear(self):
217
- """
218
- Empty our store of pools and direct them all to close.
219
-
220
- This will not affect in-flight connections, but they will not be
221
- re-used after completion.
222
- """
223
- self.pools.clear()
224
-
225
- def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
226
- """
227
- Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
228
-
229
- If ``port`` isn't given, it will be derived from the ``scheme`` using
230
- ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
231
- provided, it is merged with the instance's ``connection_pool_kw``
232
- variable and used to create the new connection pool, if one is
233
- needed.
234
- """
235
-
236
- if not host:
237
- raise LocationValueError("No host specified.")
238
-
239
- request_context = self._merge_pool_kwargs(pool_kwargs)
240
- request_context["scheme"] = scheme or "http"
241
- if not port:
242
- port = port_by_scheme.get(request_context["scheme"].lower(), 80)
243
- request_context["port"] = port
244
- request_context["host"] = host
245
-
246
- return self.connection_from_context(request_context)
247
-
248
- def connection_from_context(self, request_context):
249
- """
250
- Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
251
-
252
- ``request_context`` must at least contain the ``scheme`` key and its
253
- value must be a key in ``key_fn_by_scheme`` instance variable.
254
- """
255
- scheme = request_context["scheme"].lower()
256
- pool_key_constructor = self.key_fn_by_scheme.get(scheme)
257
- if not pool_key_constructor:
258
- raise URLSchemeUnknown(scheme)
259
- pool_key = pool_key_constructor(request_context)
260
-
261
- return self.connection_from_pool_key(pool_key, request_context=request_context)
262
-
263
- def connection_from_pool_key(self, pool_key, request_context=None):
264
- """
265
- Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
266
-
267
- ``pool_key`` should be a namedtuple that only contains immutable
268
- objects. At a minimum it must have the ``scheme``, ``host``, and
269
- ``port`` fields.
270
- """
271
- with self.pools.lock:
272
- # If the scheme, host, or port doesn't match existing open
273
- # connections, open a new ConnectionPool.
274
- pool = self.pools.get(pool_key)
275
- if pool:
276
- return pool
277
-
278
- # Make a fresh ConnectionPool of the desired type
279
- scheme = request_context["scheme"]
280
- host = request_context["host"]
281
- port = request_context["port"]
282
- pool = self._new_pool(scheme, host, port, request_context=request_context)
283
- self.pools[pool_key] = pool
284
-
285
- return pool
286
-
287
- def connection_from_url(self, url, pool_kwargs=None):
288
- """
289
- Similar to :func:`urllib3.connectionpool.connection_from_url`.
290
-
291
- If ``pool_kwargs`` is not provided and a new pool needs to be
292
- constructed, ``self.connection_pool_kw`` is used to initialize
293
- the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
294
- is provided, it is used instead. Note that if a new pool does not
295
- need to be created for the request, the provided ``pool_kwargs`` are
296
- not used.
297
- """
298
- u = parse_url(url)
299
- return self.connection_from_host(
300
- u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
301
- )
302
-
303
- def _merge_pool_kwargs(self, override):
304
- """
305
- Merge a dictionary of override values for self.connection_pool_kw.
306
-
307
- This does not modify self.connection_pool_kw and returns a new dict.
308
- Any keys in the override dictionary with a value of ``None`` are
309
- removed from the merged dictionary.
310
- """
311
- base_pool_kwargs = self.connection_pool_kw.copy()
312
- if override:
313
- for key, value in override.items():
314
- if value is None:
315
- try:
316
- del base_pool_kwargs[key]
317
- except KeyError:
318
- pass
319
- else:
320
- base_pool_kwargs[key] = value
321
- return base_pool_kwargs
322
-
323
- def _proxy_requires_url_absolute_form(self, parsed_url):
324
- """
325
- Indicates if the proxy requires the complete destination URL in the
326
- request. Normally this is only needed when not using an HTTP CONNECT
327
- tunnel.
328
- """
329
- if self.proxy is None:
330
- return False
331
-
332
- return not connection_requires_http_tunnel(
333
- self.proxy, self.proxy_config, parsed_url.scheme
334
- )
335
-
336
- def _validate_proxy_scheme_url_selection(self, url_scheme):
337
- """
338
- Validates that we're not attempting to do TLS in TLS connections on
339
- Python2 or with unsupported SSL implementations.
340
- """
341
- if self.proxy is None or url_scheme != "https":
342
- return
343
-
344
- if self.proxy.scheme != "https":
345
- return
346
-
347
- if six.PY2 and not self.proxy_config.use_forwarding_for_https:
348
- raise ProxySchemeUnsupported(
349
- "Contacting HTTPS destinations through HTTPS proxies "
350
- "'via CONNECT tunnels' is not supported in Python 2"
351
- )
352
-
353
- def urlopen(self, method, url, redirect=True, **kw):
354
- """
355
- Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
356
- with custom cross-host redirect logic and only sends the request-uri
357
- portion of the ``url``.
358
-
359
- The given ``url`` parameter must be absolute, such that an appropriate
360
- :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
361
- """
362
- u = parse_url(url)
363
- self._validate_proxy_scheme_url_selection(u.scheme)
364
-
365
- conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
366
-
367
- kw["assert_same_host"] = False
368
- kw["redirect"] = False
369
-
370
- if "headers" not in kw:
371
- kw["headers"] = self.headers.copy()
372
-
373
- if self._proxy_requires_url_absolute_form(u):
374
- response = conn.urlopen(method, url, **kw)
375
- else:
376
- response = conn.urlopen(method, u.request_uri, **kw)
377
-
378
- redirect_location = redirect and response.get_redirect_location()
379
- if not redirect_location:
380
- return response
381
-
382
- # Support relative URLs for redirecting.
383
- redirect_location = urljoin(url, redirect_location)
384
-
385
- # RFC 7231, Section 6.4.4
386
- if response.status == 303:
387
- method = "GET"
388
-
389
- retries = kw.get("retries")
390
- if not isinstance(retries, Retry):
391
- retries = Retry.from_int(retries, redirect=redirect)
392
-
393
- # Strip headers marked as unsafe to forward to the redirected location.
394
- # Check remove_headers_on_redirect to avoid a potential network call within
395
- # conn.is_same_host() which may use socket.gethostbyname() in the future.
396
- if retries.remove_headers_on_redirect and not conn.is_same_host(
397
- redirect_location
398
- ):
399
- headers = list(six.iterkeys(kw["headers"]))
400
- for header in headers:
401
- if header.lower() in retries.remove_headers_on_redirect:
402
- kw["headers"].pop(header, None)
403
-
404
- try:
405
- retries = retries.increment(method, url, response=response, _pool=conn)
406
- except MaxRetryError:
407
- if retries.raise_on_redirect:
408
- response.drain_conn()
409
- raise
410
- return response
411
-
412
- kw["retries"] = retries
413
- kw["redirect"] = redirect
414
-
415
- log.info("Redirecting %s -> %s", url, redirect_location)
416
-
417
- response.drain_conn()
418
- return self.urlopen(method, redirect_location, **kw)
419
-
420
-
421
- class ProxyManager(PoolManager):
422
- """
423
- Behaves just like :class:`PoolManager`, but sends all requests through
424
- the defined proxy, using the CONNECT method for HTTPS URLs.
425
-
426
- :param proxy_url:
427
- The URL of the proxy to be used.
428
-
429
- :param proxy_headers:
430
- A dictionary containing headers that will be sent to the proxy. In case
431
- of HTTP they are being sent with each request, while in the
432
- HTTPS/CONNECT case they are sent only once. Could be used for proxy
433
- authentication.
434
-
435
- :param proxy_ssl_context:
436
- The proxy SSL context is used to establish the TLS connection to the
437
- proxy when using HTTPS proxies.
438
-
439
- :param use_forwarding_for_https:
440
- (Defaults to False) If set to True will forward requests to the HTTPS
441
- proxy to be made on behalf of the client instead of creating a TLS
442
- tunnel via the CONNECT method. **Enabling this flag means that request
443
- and response headers and content will be visible from the HTTPS proxy**
444
- whereas tunneling keeps request and response headers and content
445
- private. IP address, target hostname, SNI, and port are always visible
446
- to an HTTPS proxy even when this flag is disabled.
447
-
448
- Example:
449
- >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
450
- >>> r1 = proxy.request('GET', 'http://google.com/')
451
- >>> r2 = proxy.request('GET', 'http://httpbin.org/')
452
- >>> len(proxy.pools)
453
- 1
454
- >>> r3 = proxy.request('GET', 'https://httpbin.org/')
455
- >>> r4 = proxy.request('GET', 'https://twitter.com/')
456
- >>> len(proxy.pools)
457
- 3
458
-
459
- """
460
-
461
- def __init__(
462
- self,
463
- proxy_url,
464
- num_pools=10,
465
- headers=None,
466
- proxy_headers=None,
467
- proxy_ssl_context=None,
468
- use_forwarding_for_https=False,
469
- **connection_pool_kw
470
- ):
471
-
472
- if isinstance(proxy_url, HTTPConnectionPool):
473
- proxy_url = "%s://%s:%i" % (
474
- proxy_url.scheme,
475
- proxy_url.host,
476
- proxy_url.port,
477
- )
478
- proxy = parse_url(proxy_url)
479
-
480
- if proxy.scheme not in ("http", "https"):
481
- raise ProxySchemeUnknown(proxy.scheme)
482
-
483
- if not proxy.port:
484
- port = port_by_scheme.get(proxy.scheme, 80)
485
- proxy = proxy._replace(port=port)
486
-
487
- self.proxy = proxy
488
- self.proxy_headers = proxy_headers or {}
489
- self.proxy_ssl_context = proxy_ssl_context
490
- self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
491
-
492
- connection_pool_kw["_proxy"] = self.proxy
493
- connection_pool_kw["_proxy_headers"] = self.proxy_headers
494
- connection_pool_kw["_proxy_config"] = self.proxy_config
495
-
496
- super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
497
-
498
- def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
499
- if scheme == "https":
500
- return super(ProxyManager, self).connection_from_host(
501
- host, port, scheme, pool_kwargs=pool_kwargs
502
- )
503
-
504
- return super(ProxyManager, self).connection_from_host(
505
- self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
506
- )
507
-
508
- def _set_proxy_headers(self, url, headers=None):
509
- """
510
- Sets headers needed by proxies: specifically, the Accept and Host
511
- headers. Only sets headers not provided by the user.
512
- """
513
- headers_ = {"Accept": "*/*"}
514
-
515
- netloc = parse_url(url).netloc
516
- if netloc:
517
- headers_["Host"] = netloc
518
-
519
- if headers:
520
- headers_.update(headers)
521
- return headers_
522
-
523
- def urlopen(self, method, url, redirect=True, **kw):
524
- "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
525
- u = parse_url(url)
526
- if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
527
- # For connections using HTTP CONNECT, httplib sets the necessary
528
- # headers on the CONNECT to the proxy. If we're not using CONNECT,
529
- # we'll definitely need to set 'Host' at the very least.
530
- headers = kw.get("headers", self.headers)
531
- kw["headers"] = self._set_proxy_headers(url, headers)
532
-
533
- return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
534
-
535
-
536
- def proxy_from_url(url, **kw):
537
- return ProxyManager(proxy_url=url, **kw)
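The file removed above is the vendored copy of urllib3's `poolmanager` module; its docstrings describe how `PoolManager` caches one connection pool per normalized `PoolKey` (scheme, host, port, and related fields) and how `ProxyManager` / `proxy_from_url` route every request through a proxy. A minimal sketch of that behavior against the upstream `urllib3` package (the target URLs and the proxy address are placeholders, and live network access is assumed):

```python
import urllib3

# One pool is cached per (scheme, host, port) key, exactly as the removed
# docstring's example shows: three requests to two hosts -> two pools.
manager = urllib3.PoolManager(num_pools=2)
manager.request("GET", "http://example.com/")
manager.request("GET", "http://example.com/other")   # reuses the cached pool
manager.request("GET", "http://example.org/")        # creates a second pool
print(len(manager.pools))                             # 2

# proxy_from_url builds a ProxyManager; HTTPS targets are tunneled via CONNECT
# by default, so only host, port, and SNI are visible to the proxy.
proxy = urllib3.proxy_from_url("http://localhost:3128/")
response = proxy.request("GET", "https://example.com/")
print(response.status)
```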
 
spaces/Avkash/Satellite_Segmentation_Prediction/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Satellite Segmentation Prediction
3
- emoji: 💻
4
- colorFrom: red
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.8
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Awesimo/jojogan/e4e/options/__init__.py DELETED
File without changes
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/__init__.py DELETED
@@ -1,16 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
-
4
- from .build import META_ARCH_REGISTRY, build_model # isort:skip
5
-
6
- from .panoptic_fpn import PanopticFPN
7
-
8
- # import all the meta_arch, so they will be registered
9
- from .rcnn import GeneralizedRCNN, ProposalNetwork
10
- from .dense_detector import DenseDetector
11
- from .retinanet import RetinaNet
12
- from .fcos import FCOS
13
- from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head
14
-
15
-
16
- __all__ = list(globals().keys())
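This deleted `__init__.py` mostly exists for its side effects: importing each meta-architecture runs its `@META_ARCH_REGISTRY.register()` decorator, so `build_model` can later look the class up from the `MODEL.META_ARCHITECTURE` config key. A rough sketch of how that registry is normally consumed, assuming a working detectron2 installation (the model-zoo config name is just an illustrative choice):

```python
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.modeling import build_model  # importing this package registers GeneralizedRCNN, RetinaNet, FCOS, ...

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.DEVICE = "cpu"      # keep the sketch CPU-only; no pretrained weights are loaded here

model = build_model(cfg)      # roughly META_ARCH_REGISTRY.get(cfg.MODEL.META_ARCHITECTURE)(cfg)
print(type(model).__name__)   # GeneralizedRCNN
```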
 
spaces/AymanKUMA/Speech-Bubbles-detector/app.py DELETED
@@ -1,214 +0,0 @@
1
- from ultralytics import YOLO
2
- import streamlit as st
3
- from PIL import Image
4
-
5
- if __name__ == '__main__':
6
-
7
- st.set_page_config(layout="wide")
8
-
9
- myhtml = '''
10
- <div class="topnav" id="home">
11
- <div class= "logo">
12
- <a href="#">
13
- <h3 style="font-size: 2.3rem;">Bubble-Detector</h3>
14
- </a>
15
- </div>
16
- <div class = "links">
17
- <a href="#speech-bubble-detector">Home</a>
18
- <a href="#speech-bubble-detection">Detector</a>
19
- <a href="#about">About</a>
20
- <a style="color: black; background: white; padding: 0 3em; display: flex; align-items: center"
21
- href="https://github.com/AymanKUMA/yolov8_speech_bubbles_detection">Github</a>
22
- </div>
23
- </div>
24
- <div class="hidden">
25
- </div>
26
- <div class="content">
27
- <div class="home">
28
- <h1 id="home">Speech Bubble Detector</h1>
29
- <p>Revolutionize Manga and Comic Reading Experience with YOLOv8
30
- </br>The Cutting-Edge Model That Detects Speech Bubbles with Unmatched Precision</p>
31
- </div>
32
- </div>
33
- '''
34
-
35
- style = '''
36
- <style>
37
- body {
38
- background-image: url("https://wallpapercave.com/wp/wp8578059.jpg");
39
- background-blend-mode: multiply;
40
- background-color: gray;
41
- background-size: cover;
42
- }
43
-
44
- .topnav {
45
- overflow: hidden;
46
- background-color: #333;
47
- padding: 0rem 1rem;
48
- display: flex;
49
- justify-content: space-between;
50
- position: fixed;
51
- top: 0;
52
- width: 100%;
53
- z-index: 9999999999;
54
- }
55
-
56
- .logo a{
57
- text-decoration: none;
58
- }
59
-
60
- .links{
61
- width: 40%;
62
- display: flex;
63
- justify-content: space-around;
64
- }
65
-
66
- .links a{
67
- display: block;
68
- color: #f2f2f2;
69
- text-align: center;
70
- padding: 20px 10px;
71
- text-decoration: none;
72
- }
73
-
74
- .hidden{
75
- height: 5rem;
76
- }
77
-
78
- .content{
79
- display: flex;
80
- align-items: center;
81
- justify-content: center;
82
- }
83
-
84
- .home{
85
- width: 80%;
86
- background-color: #00000080;
87
- padding: 60px;
88
- }
89
-
90
- .home h1{
91
- font-size: 6rem;
92
- }
93
-
94
- .home p{
95
- font-size: 1.5rem;
96
- }
97
-
98
- main {
99
- margin: 20px;
100
- width: 80%;
101
- background-color: #00000080;
102
- padding: 60px;
103
- }
104
-
105
- .down{
106
- display: flex;
107
- align-items: center;
108
- justify-content: center;
109
- }
110
-
111
- section {
112
- margin-bottom: 40px;
113
- }
114
-
115
- .gallery {
116
- display: flex;
117
- flex-wrap: wrap;
118
- justify-content: center;
119
- }
120
-
121
- .gallery img {
122
- margin: 10px;
123
- max-width: 400px;
124
- max-height: 400px;
125
- }
126
-
127
- .e19lei0e1{
128
- display: none;
129
- }
130
-
131
- .e8zbici2{
132
- display: none;
133
- }
134
- .e1tzin5v0{
135
- gap: 0rem;
136
- }
137
- .egzxvld4{
138
- padding: 0rem 0rem;
139
- }
140
- .egzxvld1{
141
- display: none;
142
- }
143
- .css-ffhzg2{
144
- background: none;
145
- }
146
- .css-keje6w{
147
- padding: 2rem 5rem;
148
- flex: 1 1 calc(100% - 1rem);
149
- }
150
-
151
- .e16nr0p32{
152
- display: none;
153
- }
154
-
155
- .footer{
156
- bottom: 0;
157
- width: 100%;
158
- padding: 1.3rem;
159
- text-align: center;
160
- color: white;
161
- background-color: #333;
162
- z-index: 9999999;
163
- }
164
- </style>
165
- '''
166
-
167
- st.markdown(style, unsafe_allow_html=True)
168
- st.markdown(myhtml, unsafe_allow_html=True)
169
- col1, col2, col3 = st.columns((2, 1, 1))
170
-
171
- with col1:
172
- st.title('Speech bubble detection ')
173
- uploaded_file = st.file_uploader("Load image", type=["jpg", "jpeg", "png"])
174
-
175
- if uploaded_file is not None:
176
- is_valid = True
177
- with st.spinner(text='Uploading image...'):
178
- with col2:
179
- st.image(uploaded_file, caption="Input Page", use_column_width=True)
180
- picture = Image.open(uploaded_file)
181
- else:
182
- is_valid = False
183
- if is_valid:
184
- with col3:
185
- with st.spinner(text='Processing image...'):
186
- model = YOLO('best.pt')
187
- results = model.predict(task="detect", source=picture, conf=0.85)
188
- img = results[0].plot()
189
- st.image(img, caption="Detected Objects", use_column_width=True)
190
-
191
- my2ndHtml = '''
192
- <div class="down">
193
- <main>
194
- <section>
195
- <h2>About</h2>
196
- <p>Our model detects speech bubbles from manga and comics using YOLOv8 by ultralytics. With a custom dataset of 2000 images, our model is able to accurately detect and classify speech bubbles in a wide range of styles and formats.</p>
197
- <p>Speech bubbles are an essential part of comic books and manga, allowing characters to speak and express emotions. Our model makes it easy to extract speech bubbles from images, making it a valuable tool for researchers, artists, and publishers alike.</p>
198
- <p>This model is for academic use ONLY. Do not use it for any commercial purpose.</p>
199
- </section>
200
- <section>
201
- <h2>Examples</h2>
202
- <div class="gallery">
203
- <img src=" https://drive.google.com/uc?id=1KJYsh3OX-WGAq5o_1P5k9ElvgOkt978w" alt="Example 1">
204
- <img src="https://drive.google.com/uc?id=1fVvDcxzI46PTn0qRZwBTT6ReaVMNZIbI" alt="Example 2">
205
- <img src="https://drive.google.com/uc?id=1pRBYx4P7_iTsNp3uV8ZeUQHvapWvBkO4" alt="Example 3">
206
- <img src="https://drive.google.com/uc?id=1oxx4rrlHAa0nXU2vRXjaumghdN2czia-" alt="Example 4">
207
- </div>
208
- </section>
209
- </main>
210
- </div>
211
- '''
212
- st.markdown(my2ndHtml, unsafe_allow_html=True)
213
- my3rdHtml = '''<div class="footer">&#169; Speech Bubble Detector</div>'''
214
- st.markdown(my3rdHtml, unsafe_allow_html=True)
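Underneath the Streamlit layout and the inlined CSS, the detection path of this removed app is a single Ultralytics YOLOv8 call on the uploaded page. A stripped-down sketch of that same inference step, assuming the `ultralytics` package and the Space's custom `best.pt` weights are available locally (the input filename is a placeholder):

```python
from PIL import Image
from ultralytics import YOLO

model = YOLO("best.pt")                        # custom speech-bubble weights from the Space
page = Image.open("manga_page.jpg")            # any comic or manga page image
results = model.predict(source=page, conf=0.85)

annotated = results[0].plot()                  # BGR ndarray with boxes drawn on it
Image.fromarray(annotated[..., ::-1]).save("detected.png")  # convert BGR -> RGB before saving

for box in results[0].boxes:                   # one entry per detected bubble
    print(box.xyxy.tolist(), float(box.conf))
```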
 
spaces/Bart92/RVC_HF/infer/modules/vc/modules.py DELETED
@@ -1,526 +0,0 @@
1
- import os, sys
2
- import traceback
3
- import logging
4
- now_dir = os.getcwd()
5
- sys.path.append(now_dir)
6
- logger = logging.getLogger(__name__)
7
- import lib.globals.globals as rvc_globals
8
- import numpy as np
9
- import soundfile as sf
10
- import torch
11
- from io import BytesIO
12
- from infer.lib.audio import load_audio
13
- from infer.lib.audio import wav2
14
- from infer.lib.infer_pack.models import (
15
- SynthesizerTrnMs256NSFsid,
16
- SynthesizerTrnMs256NSFsid_nono,
17
- SynthesizerTrnMs768NSFsid,
18
- SynthesizerTrnMs768NSFsid_nono,
19
- )
20
- from infer.modules.vc.pipeline import Pipeline
21
- from infer.modules.vc.utils import *
22
- import time
23
- import scipy.io.wavfile as wavfile
24
-
25
- def note_to_hz(note_name):
26
- SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2}
27
- pitch_class, octave = note_name[:-1], int(note_name[-1])
28
- semitone = SEMITONES[pitch_class]
29
- note_number = 12 * (octave - 4) + semitone
30
- frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number
31
- return frequency
32
-
33
- class VC:
34
- def __init__(self, config):
35
- self.n_spk = None
36
- self.tgt_sr = None
37
- self.net_g = None
38
- self.pipeline = None
39
- self.cpt = None
40
- self.version = None
41
- self.if_f0 = None
42
- self.version = None
43
- self.hubert_model = None
44
-
45
- self.config = config
46
-
47
- def get_vc(self, sid, *to_return_protect):
48
- logger.info("Get sid: " + sid)
49
-
50
- to_return_protect0 = {
51
- "visible": self.if_f0 != 0,
52
- "value": to_return_protect[0]
53
- if self.if_f0 != 0 and to_return_protect
54
- else 0.5,
55
- "__type__": "update",
56
- }
57
- to_return_protect1 = {
58
- "visible": self.if_f0 != 0,
59
- "value": to_return_protect[1]
60
- if self.if_f0 != 0 and to_return_protect
61
- else 0.33,
62
- "__type__": "update",
63
- }
64
-
65
- if not sid:
66
- if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
67
- logger.info("Clean model cache")
68
- del (
69
- self.net_g,
70
- self.n_spk,
71
- self.vc,
72
- self.hubert_model,
73
- self.tgt_sr,
74
- ) # ,cpt
75
- self.hubert_model = (
76
- self.net_g
77
- ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None
78
- if torch.cuda.is_available():
79
- torch.cuda.empty_cache()
80
- ###楼下不这么折腾清理不干净
81
- self.if_f0 = self.cpt.get("f0", 1)
82
- self.version = self.cpt.get("version", "v1")
83
- if self.version == "v1":
84
- if self.if_f0 == 1:
85
- self.net_g = SynthesizerTrnMs256NSFsid(
86
- *self.cpt["config"], is_half=self.config.is_half
87
- )
88
- else:
89
- self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
90
- elif self.version == "v2":
91
- if self.if_f0 == 1:
92
- self.net_g = SynthesizerTrnMs768NSFsid(
93
- *self.cpt["config"], is_half=self.config.is_half
94
- )
95
- else:
96
- self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"])
97
- del self.net_g, self.cpt
98
- if torch.cuda.is_available():
99
- torch.cuda.empty_cache()
100
- return (
101
- {"visible": False, "__type__": "update"},
102
- {
103
- "visible": True,
104
- "value": to_return_protect0,
105
- "__type__": "update",
106
- },
107
- {
108
- "visible": True,
109
- "value": to_return_protect1,
110
- "__type__": "update",
111
- },
112
- "",
113
- "",
114
- )
115
- #person = f'{os.getenv("weight_root")}/{sid}'
116
- person = f'{sid}'
117
- #logger.info(f"Loading: {person}")
118
- logger.info(f"Loading...")
119
- self.cpt = torch.load(person, map_location="cpu")
120
- self.tgt_sr = self.cpt["config"][-1]
121
- self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk
122
- self.if_f0 = self.cpt.get("f0", 1)
123
- self.version = self.cpt.get("version", "v1")
124
-
125
- synthesizer_class = {
126
- ("v1", 1): SynthesizerTrnMs256NSFsid,
127
- ("v1", 0): SynthesizerTrnMs256NSFsid_nono,
128
- ("v2", 1): SynthesizerTrnMs768NSFsid,
129
- ("v2", 0): SynthesizerTrnMs768NSFsid_nono,
130
- }
131
-
132
- self.net_g = synthesizer_class.get(
133
- (self.version, self.if_f0), SynthesizerTrnMs256NSFsid
134
- )(*self.cpt["config"], is_half=self.config.is_half)
135
-
136
- del self.net_g.enc_q
137
-
138
- self.net_g.load_state_dict(self.cpt["weight"], strict=False)
139
- self.net_g.eval().to(self.config.device)
140
- if self.config.is_half:
141
- self.net_g = self.net_g.half()
142
- else:
143
- self.net_g = self.net_g.float()
144
-
145
- self.pipeline = Pipeline(self.tgt_sr, self.config)
146
- n_spk = self.cpt["config"][-3]
147
- index = {"value": get_index_path_from_model(sid), "__type__": "update"}
148
- logger.info("Select index: " + index["value"])
149
-
150
- return (
151
- (
152
- {"visible": False, "maximum": n_spk, "__type__": "update"},
153
- to_return_protect0,
154
- to_return_protect1
155
- )
156
- if to_return_protect
157
- else {"visible": False, "maximum": n_spk, "__type__": "update"}
158
- )
159
-
160
-
161
- def vc_single(
162
- self,
163
- sid,
164
- input_audio_path0,
165
- input_audio_path1,
166
- f0_up_key,
167
- f0_file,
168
- f0_method,
169
- file_index,
170
- file_index2,
171
- index_rate,
172
- filter_radius,
173
- resample_sr,
174
- rms_mix_rate,
175
- protect,
176
- crepe_hop_length,
177
- f0_min,
178
- note_min,
179
- f0_max,
180
- note_max,
181
- f0_autotune,
182
- ):
183
- global total_time
184
- total_time = 0
185
- start_time = time.time()
186
- if not input_audio_path0 and not input_audio_path1:
187
- return "You need to upload an audio", None
188
-
189
- if (not os.path.exists(input_audio_path0)) and (not os.path.exists(os.path.join(now_dir, input_audio_path0))):
190
- return "Audio was not properly selected or doesn't exist", None
191
-
192
- input_audio_path1 = input_audio_path1 or input_audio_path0
193
- print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'")
194
- print("-------------------")
195
- f0_up_key = int(f0_up_key)
196
- if rvc_globals.NotesOrHertz and f0_method != 'rmvpe':
197
- f0_min = note_to_hz(note_min) if note_min else 50
198
- f0_max = note_to_hz(note_max) if note_max else 1100
199
- print(f"Converted Min pitch: freq - {f0_min}\n"
200
- f"Converted Max pitch: freq - {f0_max}")
201
- else:
202
- f0_min = f0_min or 50
203
- f0_max = f0_max or 1100
204
- try:
205
- input_audio_path1 = input_audio_path1 or input_audio_path0
206
- print(f"Attempting to load {input_audio_path1}....")
207
- audio = load_audio(file=input_audio_path1,
208
- sr=16000,
209
- DoFormant=rvc_globals.DoFormant,
210
- Quefrency=rvc_globals.Quefrency,
211
- Timbre=rvc_globals.Timbre)
212
-
213
- audio_max = np.abs(audio).max() / 0.95
214
- if audio_max > 1:
215
- audio /= audio_max
216
- times = [0, 0, 0]
217
-
218
- if self.hubert_model is None:
219
- self.hubert_model = load_hubert(self.config)
220
-
221
- try:
222
- self.if_f0 = self.cpt.get("f0", 1)
223
- except NameError:
224
- message = "Model was not properly selected"
225
- print(message)
226
- return message, None
227
-
228
- file_index = (
229
- (
230
- file_index.strip(" ")
231
- .strip('"')
232
- .strip("\n")
233
- .strip('"')
234
- .strip(" ")
235
- .replace("trained", "added")
236
- )
237
- if file_index != ""
238
- else file_index2
239
- ) # 防止小白写错,自动帮他替换掉
240
-
241
- try:
242
- audio_opt = self.pipeline.pipeline(
243
- self.hubert_model,
244
- self.net_g,
245
- sid,
246
- audio,
247
- input_audio_path1,
248
- times,
249
- f0_up_key,
250
- f0_method,
251
- file_index,
252
- index_rate,
253
- self.if_f0,
254
- filter_radius,
255
- self.tgt_sr,
256
- resample_sr,
257
- rms_mix_rate,
258
- self.version,
259
- protect,
260
- crepe_hop_length,
261
- f0_autotune,
262
- f0_file=f0_file,
263
- f0_min=f0_min,
264
- f0_max=f0_max
265
- )
266
- except AssertionError:
267
- message = "Mismatching index version detected (v1 with v2, or v2 with v1)."
268
- print(message)
269
- return message, None
270
- except NameError:
271
- message = "RVC libraries are still loading. Please try again in a few seconds."
272
- print(message)
273
- return message, None
274
-
275
- if self.tgt_sr != resample_sr >= 16000:
276
- self.tgt_sr = resample_sr
277
- index_info = (
278
- "Index:\n%s." % file_index
279
- if os.path.exists(file_index)
280
- else "Index not used."
281
- )
282
- end_time = time.time()
283
- total_time = end_time - start_time
284
-
285
- output_folder = "audio-outputs"
286
- os.makedirs(output_folder, exist_ok=True)
287
- output_filename = "generated_audio_{}.wav"
288
- output_count = 1
289
- while True:
290
- current_output_path = os.path.join(output_folder, output_filename.format(output_count))
291
- if not os.path.exists(current_output_path):
292
- break
293
- output_count += 1
294
-
295
- wavfile.write(current_output_path, self.tgt_sr, audio_opt)
296
- print(f"Generated audio saved to: {current_output_path}")
297
- return f"Success.\n {index_info}\nTime:\n npy:{times[0]}, f0:{times[1]}, infer:{times[2]}\nTotal Time: {total_time} seconds", (self.tgt_sr, audio_opt)
298
- except:
299
- info = traceback.format_exc()
300
- logger.warn(info)
301
- return info, (None, None)
302
-
303
- def vc_single_dont_save(
304
- self,
305
- sid,
306
- input_audio_path0,
307
- input_audio_path1,
308
- f0_up_key,
309
- f0_file,
310
- f0_method,
311
- file_index,
312
- file_index2,
313
- index_rate,
314
- filter_radius,
315
- resample_sr,
316
- rms_mix_rate,
317
- protect,
318
- crepe_hop_length,
319
- f0_min,
320
- note_min,
321
- f0_max,
322
- note_max,
323
- f0_autotune,
324
- ):
325
- global total_time
326
- total_time = 0
327
- start_time = time.time()
328
- if not input_audio_path0 and not input_audio_path1:
329
- return "You need to upload an audio", None
330
-
331
- if (not os.path.exists(input_audio_path0)) and (not os.path.exists(os.path.join(now_dir, input_audio_path0))):
332
- return "Audio was not properly selected or doesn't exist", None
333
-
334
- input_audio_path1 = input_audio_path1 or input_audio_path0
335
- print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'")
336
- print("-------------------")
337
- f0_up_key = int(f0_up_key)
338
- if rvc_globals.NotesOrHertz and f0_method != 'rmvpe':
339
- f0_min = note_to_hz(note_min) if note_min else 50
340
- f0_max = note_to_hz(note_max) if note_max else 1100
341
- print(f"Converted Min pitch: freq - {f0_min}\n"
342
- f"Converted Max pitch: freq - {f0_max}")
343
- else:
344
- f0_min = f0_min or 50
345
- f0_max = f0_max or 1100
346
- try:
347
- input_audio_path1 = input_audio_path1 or input_audio_path0
348
- print(f"Attempting to load {input_audio_path1}....")
349
- audio = load_audio(file=input_audio_path1,
350
- sr=16000,
351
- DoFormant=rvc_globals.DoFormant,
352
- Quefrency=rvc_globals.Quefrency,
353
- Timbre=rvc_globals.Timbre)
354
-
355
- audio_max = np.abs(audio).max() / 0.95
356
- if audio_max > 1:
357
- audio /= audio_max
358
- times = [0, 0, 0]
359
-
360
- if self.hubert_model is None:
361
- self.hubert_model = load_hubert(self.config)
362
-
363
- try:
364
- self.if_f0 = self.cpt.get("f0", 1)
365
- except NameError:
366
- message = "Model was not properly selected"
367
- print(message)
368
- return message, None
369
-
370
- file_index = (
371
- (
372
- file_index.strip(" ")
373
- .strip('"')
374
- .strip("\n")
375
- .strip('"')
376
- .strip(" ")
377
- .replace("trained", "added")
378
- )
379
- if file_index != ""
380
- else file_index2
381
- ) # 防止小白写错,自动帮他替换掉
382
-
383
- try:
384
- audio_opt = self.pipeline.pipeline(
385
- self.hubert_model,
386
- self.net_g,
387
- sid,
388
- audio,
389
- input_audio_path1,
390
- times,
391
- f0_up_key,
392
- f0_method,
393
- file_index,
394
- index_rate,
395
- self.if_f0,
396
- filter_radius,
397
- self.tgt_sr,
398
- resample_sr,
399
- rms_mix_rate,
400
- self.version,
401
- protect,
402
- crepe_hop_length,
403
- f0_autotune,
404
- f0_file=f0_file,
405
- f0_min=f0_min,
406
- f0_max=f0_max
407
- )
408
- except AssertionError:
409
- message = "Mismatching index version detected (v1 with v2, or v2 with v1)."
410
- print(message)
411
- return message, None
412
- except NameError:
413
- message = "RVC libraries are still loading. Please try again in a few seconds."
414
- print(message)
415
- return message, None
416
-
417
- if self.tgt_sr != resample_sr >= 16000:
418
- self.tgt_sr = resample_sr
419
- index_info = (
420
- "Index:\n%s." % file_index
421
- if os.path.exists(file_index)
422
- else "Index not used."
423
- )
424
- end_time = time.time()
425
- total_time = end_time - start_time
426
-
427
- return f"Success.\n {index_info}\nTime:\n npy:{times[0]}, f0:{times[1]}, infer:{times[2]}\nTotal Time: {total_time} seconds", (self.tgt_sr, audio_opt)
428
- except:
429
- info = traceback.format_exc()
430
- logger.warn(info)
431
- return info, (None, None)
432
-
433
-
434
- def vc_multi(
435
- self,
436
- sid,
437
- dir_path,
438
- opt_root,
439
- paths,
440
- f0_up_key,
441
- f0_method,
442
- file_index,
443
- file_index2,
444
- index_rate,
445
- filter_radius,
446
- resample_sr,
447
- rms_mix_rate,
448
- protect,
449
- format1,
450
- crepe_hop_length,
451
- f0_min,
452
- note_min,
453
- f0_max,
454
- note_max,
455
- f0_autotune,
456
- ):
457
- if rvc_globals.NotesOrHertz and f0_method != 'rmvpe':
458
- f0_min = note_to_hz(note_min) if note_min else 50
459
- f0_max = note_to_hz(note_max) if note_max else 1100
460
- print(f"Converted Min pitch: freq - {f0_min}\n"
461
- f"Converted Max pitch: freq - {f0_max}")
462
- else:
463
- f0_min = f0_min or 50
464
- f0_max = f0_max or 1100
465
- try:
466
- dir_path = (
467
- dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
468
- ) # 防止小白拷路径头尾带了空格和"和回车
469
- opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
470
- os.makedirs(opt_root, exist_ok=True)
471
- try:
472
- if dir_path != "":
473
- paths = [
474
- os.path.join(dir_path, name) for name in os.listdir(dir_path)
475
- ]
476
- else:
477
- paths = [path.name for path in paths]
478
- except:
479
- traceback.print_exc()
480
- paths = [path.name for path in paths]
481
- infos = []
482
- for path in paths:
483
- info, opt = self.vc_single(
484
- sid,
485
- path,
486
- f0_up_key,
487
- None,
488
- f0_method,
489
- file_index,
490
- file_index2,
491
- # file_big_npy,
492
- index_rate,
493
- filter_radius,
494
- resample_sr,
495
- rms_mix_rate,
496
- protect,
497
- )
498
- if "Success" in info:
499
- try:
500
- tgt_sr, audio_opt = opt
501
- if format1 in ["wav", "flac"]:
502
- sf.write(
503
- "%s/%s.%s"
504
- % (opt_root, os.path.basename(path), format1),
505
- audio_opt,
506
- tgt_sr,
507
- )
508
- else:
509
- path = "%s/%s.%s" % (opt_root, os.path.basename(path), format1)
510
- with BytesIO() as wavf:
511
- sf.write(
512
- wavf,
513
- audio_opt,
514
- tgt_sr,
515
- format="wav"
516
- )
517
- wavf.seek(0, 0)
518
- with open(path, "wb") as outf:
519
- wav2(wavf, outf, format1)
520
- except:
521
- info += traceback.format_exc()
522
- infos.append("%s->%s" % (os.path.basename(path), info))
523
- yield "\n".join(infos)
524
- yield "\n".join(infos)
525
- except:
526
- yield traceback.format_exc()
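The `note_to_hz` helper at the top of this removed module converts a note name such as "C5" into an equal-temperament frequency, f = 440 * 2^(n/12), where n counts semitones away from A4; the rest of the class then passes that range to the pitch extractor as `f0_min` / `f0_max`. A small standalone restatement of just that conversion (pure math, no RVC dependencies), with a couple of sanity checks:

```python
SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4,
             'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2}

def note_to_hz(note_name: str) -> float:
    """Equal temperament: f = 440 * 2 ** (n / 12), n = semitones from A4."""
    pitch_class, octave = note_name[:-1], int(note_name[-1])
    n = 12 * (octave - 4) + SEMITONES[pitch_class]
    return 440.0 * 2.0 ** (n / 12)

print(round(note_to_hz("A4"), 2))   # 440.0   (reference pitch)
print(round(note_to_hz("C5"), 2))   # 523.25
print(round(note_to_hz("A3"), 2))   # 220.0   (one octave down halves the frequency)
```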
 
spaces/Bart92/RVC_HF/infer/modules/vc/utils.py DELETED
@@ -1,42 +0,0 @@
1
- import os
2
- import re
3
- from fairseq import checkpoint_utils
4
-
5
-
6
- def get_index_path_from_model(sid):
7
- sid0strip = re.sub(r'\.pth|\.onnx$', '', sid)
8
- sid0name = os.path.split(sid0strip)[-1] # Extract only the name, not the directory
9
-
10
- # Check if the sid0strip has the specific ending format _eXXX_sXXX
11
- if re.match(r'.+_e\d+_s\d+$', sid0name):
12
- base_model_name = sid0name.rsplit('_', 2)[0]
13
- else:
14
- base_model_name = sid0name
15
-
16
- return next(
17
- (
18
- f
19
- for f in [
20
- os.path.join(root, name)
21
- for root, _, files in os.walk(os.getenv("index_root"), topdown=False)
22
- for name in files
23
- if name.endswith(".index") and "trained" not in name
24
- ]
25
- if base_model_name in f
26
- ),
27
- "",
28
- )
29
-
30
-
31
- def load_hubert(config):
32
- models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
33
- ["assets/hubert/hubert_base.pt"],
34
- suffix="",
35
- )
36
- hubert_model = models[0]
37
- hubert_model = hubert_model.to(config.device)
38
- if config.is_half:
39
- hubert_model = hubert_model.half()
40
- else:
41
- hubert_model = hubert_model.float()
42
- return hubert_model.eval()
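`get_index_path_from_model` above first normalizes the model filename (stripping the `.pth`/`.onnx` extension and an optional trailing `_e<epoch>_s<step>` training suffix) and then walks the `index_root` directory for a matching `.index` file. A small sketch of just that name-normalization step, mirroring the regexes in the removed helper; the example filenames are made up:

```python
import os
import re

def base_model_name(sid: str) -> str:
    """Strip the extension and an optional _e<epoch>_s<step> training suffix."""
    stripped = re.sub(r'\.pth|\.onnx$', '', sid)
    name = os.path.split(stripped)[-1]
    if re.match(r'.+_e\d+_s\d+$', name):
        return name.rsplit('_', 2)[0]
    return name

print(base_model_name("weights/MyVoice_e120_s38400.pth"))  # MyVoice
print(base_model_name("weights/OtherVoice.onnx"))          # OtherVoice
```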
 
spaces/Benson/text-generation/Examples/Cmo Descargar El Juego Taxi Simulator.md DELETED
@@ -1,80 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar el juego Taxi Simulator</h1>
3
- <p>Si te gusta conducir y quieres experimentar la vida de un taxista, entonces es posible que esté interesado en jugar juegos de simulador de taxi. Estos son juegos que te permiten conducir por diferentes ciudades, recoger pasajeros, completar misiones y ganar dinero. En este artículo, te mostraremos cuáles son los juegos de simulador de taxi, por qué deberías jugarlos, cómo elegir el mejor para ti, cómo descargarlos en diferentes dispositivos y cómo jugarlos. ¡Vamos a empezar! </p>
4
- <h2>Cómo descargar el juego taxi simulator</h2><br /><p><b><b>Download</b> &#10040;&#10040;&#10040; <a href="https://bltlly.com/2v6LqW">https://bltlly.com/2v6LqW</a></b></p><br /><br />
5
- <h2>¿Qué es un juego de simulador de taxi? </h2>
6
- <p>Un juego de simulador de taxi es un tipo de juego de conducción que simula el trabajo de un taxista. Puede elegir entre una variedad de coches, personalizarlos y conducirlos en entornos realistas. También puede interactuar con los clientes, seguir las reglas de tráfico, evitar accidentes y administrar su negocio. Algunos ejemplos de juegos de simulador de taxi populares son <a href="( 1 )">Taxi Sim 2022 Evolution</a>, <a href="( 3 )">Taxi Life: A City Driving Simulator</a>, y <a href="">Crazy Taxi<a>. </p>
7
- <h2>¿Por qué jugar juegos de simulador de taxi? </h2>
8
- <p>Los juegos de simulador de taxi son divertidos y desafiantes por muchas razones. Aquí están algunos de los beneficios y características de jugarlos:</p>
9
- <ul>
10
- <li>Puedes explorar diferentes ciudades y lugares de interés, como Nueva York, Miami, Roma, Los Ángeles, Barcelona y más. </li>
11
- <li> Puede aprender sobre las reglas de tráfico, las señales de tráfico y la etiqueta de conducción en diferentes países. </li>
12
- <li> Puedes mejorar tus habilidades de conducción, reflejos y coordinación. </li>
13
- <li> Puedes disfrutar de gráficos realistas, sonidos y física. </li>
14
- <li>Puede personalizar su coche con varias opciones, como color, motor, ruedas, accesorios y más. </li>
15
- <li>Usted puede ganar dinero y actualizar su coche o comprar nuevos. </li>
16
- <li>Puedes competir con otros jugadores online o offline. </li>
17
- <li>Puedes divertirte con diferentes tipos de clientes, misiones y escenarios. </li>
18
- </ul>
19
- <h2>¿Cómo elegir el mejor juego de simulador de taxi para usted? </h2>
20
-
21
- <ul>
22
- <li>La plataforma: Debes elegir un juego que sea compatible con tu dispositivo, ya sea PC, Android o iOS. </li>
23
- <li>El género: Debes elegir un juego que coincida con tu preferencia, ya sea realista, árcade, casual o simulación. </li>
24
- <li>La calificación: Usted debe elegir un juego que tiene buenas críticas y calificaciones de otros usuarios y críticos. </li>
25
- <li>Las características: Usted debe elegir un juego que tiene las características que desea, como el modo multijugador, opciones de personalización, variedad de coches y ciudades, etc.</li>
26
- <li>El precio: Usted debe elegir un juego que se ajuste a su presupuesto, ya sea gratuito o de pago. </li>
27
- </ul>
28
- <h2>¿Cómo descargar juegos de simulador de taxi en diferentes dispositivos? </h2>
29
- <h3>PC</h3>
30
- <p>Si quieres jugar juegos de simulador de taxi en tu PC, tienes dos opciones:</p>
31
- <ol>
32
- <li>Descárgalos de sitios web oficiales o tiendas en línea. Por ejemplo, puedes descargar <a href="( 3 )">Taxi Life: A City Driving Simulator</a> de Steam o <a href="">Crazy Taxi</a> de Sega.</li>
33
- <li <li>Descárgalos de sitios web o torrents de terceros. Sin embargo, esta opción es arriesgada e ilegal, ya que podría descargar virus, malware o juegos piratas. No recomendamos esta opción y le aconsejamos que la utilice bajo su propio riesgo. </li>
34
- </ol>
35
- <h3>Android</h3>
36
- <p>Si quieres jugar juegos de simulador de taxi en tu dispositivo Android, tienes dos opciones:</p>
37
- <ol>
38
- <li>Descargar desde Google Play Store. Por ejemplo, puede descargar <a href="">Taxi Sim 2022 Evolution</a> o <a href="">Taxi Driver 3D: Hill Station</a> desde allí. </li>
39
- <li>Descárgalos de otras fuentes. Por ejemplo, puedes descargar <a href=">Taxi Sim 2020</a> desde APKPure o <a href="">Taxi Game 2</a> desde APKMonk. Sin embargo, esta opción también es arriesgada y podría exponer su dispositivo a aplicaciones o virus dañinos. Le sugerimos que compruebe los permisos y comentarios antes de instalar cualquier aplicación de fuentes desconocidas. </li>
40
- </ol>
41
- <h3>iOS</h3>
42
-
43
- <ol>
44
- <li>Descárgalos desde App Store. Por ejemplo, puedes descargar <a href="">Taxi Sim 2020</a> o <a href="">Taxi Game 2</a> desde allí. </li>
45
- </ol>
46
- <h2>Cómo jugar juegos de simulador de taxi? </h2>
47
- <p>Una vez que haya descargado e instalado el juego de simulador de taxi elegido, puede comenzar a jugarlo. Aquí hay algunos consejos y trucos para ayudarle a disfrutar del juego:</p>
48
- <p></p>
49
- <ul>
50
- <li>Lee las instrucciones y tutoriales cuidadosamente. Te enseñarán los controles básicos, objetivos y características del juego. </li>
51
- <li>Elija su coche y personalícelo según su preferencia. Puede cambiar el color, el motor, las ruedas, los accesorios y más. </li>
52
- <li>Seleccione su ciudad y modo. Puede elegir entre diferentes ciudades y lugares de interés, como Nueva York, Miami, Roma, Los Ángeles, Barcelona y más. También puedes elegir entre diferentes modos, como carrera, roaming gratuito, multijugador, etc.</li>
53
- <li>Conduce por la ciudad y recoge pasajeros. Puedes usar el mapa y el GPS para encontrar tu destino. También puede seguir las reglas de tráfico y evitar accidentes. </li>
54
- <li>Gana dinero y reputación. Puedes ganar dinero completando misiones, entregando clientes y realizando acrobacias. También puedes ganar reputación siendo un buen conductor, obteniendo comentarios positivos y valoraciones de los clientes. </li>
55
- <li>Actualizar su coche o comprar nuevos. Puede utilizar el dinero que ganó para actualizar su coche o comprar nuevos. También puede desbloquear nuevos coches completando logros o desafíos. </li>
56
- <li>Compite con otros jugadores online o offline. Puedes unirte a carreras online o a desafíos con otros jugadores de todo el mundo. También puedes jugar sin conexión con tus amigos o familiares usando el modo multijugador local. </li>
57
-
58
- </ul>
59
- <h2>Conclusión</h2>
60
- <p>Los juegos de simulador de taxi son una gran manera de experimentar la vida de un taxista. Son divertidos y desafiantes para cualquiera que ame conducir. En este artículo, te mostramos cuáles son los juegos de simulador de taxi, por qué deberías jugarlos, cómo elegir el mejor para ti, cómo descargarlos en diferentes dispositivos y cómo jugarlos. Esperamos que haya encontrado este artículo útil e informativo. Ahora que sabe cómo descargar juegos de simulador de taxi,<b> ¿por qué no intentarlo? </b></p>
61
- <h2>Preguntas frecuentes</h2>
62
- <h3>¿Cuáles son los mejores juegos de simulador de taxi? </h3>
63
- <p>La respuesta a esta pregunta depende de sus preferencias y gustos personales. Sin embargo, algunos de los juegos de simulador de taxi más populares y altamente calificados son <a href="">Taxi Sim 2022 Evolution</a>, <a href="">Taxi Life: A City Driving Simulator</a>, <a href="">Crazy Taxi</a>, <a href=">Taxi Sim 2020/a>, y <a>href="Game<2/a. </p>
64
- <h3>¿Cuánto cuestan los juegos de simuladores de taxi? </h3>
65
- <p>El costo de los juegos de simulador de taxi varía dependiendo de la plataforma y el juego en sí. Algunos juegos son gratuitos para descargar y jugar, pero pueden contener anuncios o compras en la aplicación. Algunos juegos se pagan y requieren un pago único o una cuota de suscripción para descargar y jugar. Puedes consultar el precio del juego en la plataforma que estás utilizando, como Google Play Store, App Store, Steam, etc.</p>
66
- <h3>¿Son realistas los juegos de simulador de taxi? </h3>
67
-
68
- <h3>¿Puedo jugar juegos de simulador de taxi en línea con amigos? </h3>
69
- <p>Sí, puede jugar juegos de simulador de taxi en línea con amigos. Algunos juegos tienen un modo multijugador que le permite unirse a carreras en línea o desafíos con otros jugadores de todo el mundo. También puedes chatear con ellos, enviarles mensajes o invitarlos a tu juego. Algunos juegos también tienen un modo multijugador local que te permite jugar sin conexión con tus amigos o familiares usando el mismo dispositivo o una red local. </p>
70
- <h3>¿Cómo puedo mejorar mis habilidades de conducción de taxi en juegos de simulador de taxi? </h3>
71
- <p>Si quieres mejorar tus habilidades de conducción de taxi en los juegos de simulador de taxi, aquí hay algunos consejos que pueden ayudarte:</p>
72
- <ul>
73
- <li>Practica regularmente. Cuanto más juegues, más aprenderás y mejorarás. </li>
74
- <li>Vea tutoriales y guías. Puede encontrar muchos videos y artículos en línea que le enseñan cómo jugar mejor los juegos de simulador de taxi. </li>
75
- <li>Aprende de tus errores. Puedes revisar tu rendimiento y ver qué hiciste mal o qué puedes hacer mejor. </li>
76
- <li>Ponte a prueba. Puedes probar diferentes modos, niveles, misiones y escenarios que ponen a prueba tus habilidades y habilidades. </li>
77
- <li>Diviértete. No te tomes el juego demasiado en serio ni te estreses. Disfruta del juego y diviértete con él. </li>
78
- </ul></p> 64aa2da5cf<br />
79
- <br />
80
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/session.py DELETED
@@ -1,1229 +0,0 @@
1
- # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License"). You
5
- # may not use this file except in compliance with the License. A copy of
6
- # the License is located at
7
- #
8
- # http://aws.amazon.com/apache2.0/
9
- #
10
- # or in the "license" file accompanying this file. This file is
11
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12
- # ANY KIND, either express or implied. See the License for the specific
13
- # language governing permissions and limitations under the License.
14
- """
15
- This module contains the main interface to the botocore package, the
16
- Session object.
17
- """
18
-
19
- import copy
20
- import logging
21
- import os
22
- import platform
23
- import socket
24
- import warnings
25
-
26
- import botocore.client
27
- import botocore.configloader
28
- import botocore.credentials
29
- import botocore.tokens
30
- from botocore import (
31
- UNSIGNED,
32
- __version__,
33
- handlers,
34
- invoke_initializers,
35
- monitoring,
36
- paginate,
37
- retryhandler,
38
- translate,
39
- waiter,
40
- )
41
- from botocore.compat import HAS_CRT, MutableMapping
42
- from botocore.configprovider import (
43
- BOTOCORE_DEFAUT_SESSION_VARIABLES,
44
- ConfigChainFactory,
45
- ConfigValueStore,
46
- DefaultConfigResolver,
47
- SmartDefaultsConfigStoreFactory,
48
- create_botocore_default_config_mapping,
49
- )
50
- from botocore.errorfactory import ClientExceptionsFactory
51
- from botocore.exceptions import (
52
- ConfigNotFound,
53
- InvalidDefaultsMode,
54
- PartialCredentialsError,
55
- ProfileNotFound,
56
- UnknownServiceError,
57
- )
58
- from botocore.hooks import (
59
- EventAliaser,
60
- HierarchicalEmitter,
61
- first_non_none_response,
62
- )
63
- from botocore.loaders import create_loader
64
- from botocore.model import ServiceModel
65
- from botocore.parsers import ResponseParserFactory
66
- from botocore.regions import EndpointResolver
67
- from botocore.utils import (
68
- EVENT_ALIASES,
69
- IMDSRegionProvider,
70
- validate_region_name,
71
- )
72
-
73
- logger = logging.getLogger(__name__)
74
-
75
-
76
- class Session:
77
- """
78
- The Session object collects together useful functionality
79
- from `botocore` as well as important data such as configuration
80
- information and credentials into a single, easy-to-use object.
81
-
82
- :ivar available_profiles: A list of profiles defined in the config
83
- file associated with this session.
84
- :ivar profile: The current profile.
85
- """
86
-
87
- SESSION_VARIABLES = copy.copy(BOTOCORE_DEFAUT_SESSION_VARIABLES)
88
-
89
- #: The default format string to use when configuring the botocore logger.
90
- LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
91
-
92
- def __init__(
93
- self,
94
- session_vars=None,
95
- event_hooks=None,
96
- include_builtin_handlers=True,
97
- profile=None,
98
- ):
99
- """
100
- Create a new Session object.
101
-
102
- :type session_vars: dict
103
- :param session_vars: A dictionary that is used to override some or all
104
- of the environment variables associated with this session. The
105
- key/value pairs defined in this dictionary will override the
106
- corresponding variables defined in ``SESSION_VARIABLES``.
107
-
108
- :type event_hooks: BaseEventHooks
109
- :param event_hooks: The event hooks object to use. If one is not
110
- provided, an event hooks object will be automatically created
111
- for you.
112
-
113
- :type include_builtin_handlers: bool
114
- :param include_builtin_handlers: Indicates whether or not to
115
- automatically register builtin handlers.
116
-
117
- :type profile: str
118
- :param profile: The name of the profile to use for this
119
- session. Note that the profile can only be set when
120
- the session is created.
121
-
122
- """
123
- if event_hooks is None:
124
- self._original_handler = HierarchicalEmitter()
125
- else:
126
- self._original_handler = event_hooks
127
- self._events = EventAliaser(self._original_handler)
128
- if include_builtin_handlers:
129
- self._register_builtin_handlers(self._events)
130
- self.user_agent_name = 'Botocore'
131
- self.user_agent_version = __version__
132
- self.user_agent_extra = ''
133
- # The _profile attribute is just used to cache the value
134
- # of the current profile to avoid going through the normal
135
- # config lookup process each access time.
136
- self._profile = None
137
- self._config = None
138
- self._credentials = None
139
- self._auth_token = None
140
- self._profile_map = None
141
- # This is a dict that stores per session specific config variable
142
- # overrides via set_config_variable().
143
- self._session_instance_vars = {}
144
- if profile is not None:
145
- self._session_instance_vars['profile'] = profile
146
- self._client_config = None
147
- self._last_client_region_used = None
148
- self._components = ComponentLocator()
149
- self._internal_components = ComponentLocator()
150
- self._register_components()
151
- self.session_var_map = SessionVarDict(self, self.SESSION_VARIABLES)
152
- if session_vars is not None:
153
- self.session_var_map.update(session_vars)
154
- invoke_initializers(self)
155
-
156
- def _register_components(self):
157
- self._register_credential_provider()
158
- self._register_token_provider()
159
- self._register_data_loader()
160
- self._register_endpoint_resolver()
161
- self._register_event_emitter()
162
- self._register_response_parser_factory()
163
- self._register_exceptions_factory()
164
- self._register_config_store()
165
- self._register_monitor()
166
- self._register_default_config_resolver()
167
- self._register_smart_defaults_factory()
168
-
169
- def _register_event_emitter(self):
170
- self._components.register_component('event_emitter', self._events)
171
-
172
- def _register_token_provider(self):
173
- self._components.lazy_register_component(
174
- 'token_provider', self._create_token_resolver
175
- )
176
-
177
- def _create_token_resolver(self):
178
- return botocore.tokens.create_token_resolver(self)
179
-
180
- def _register_credential_provider(self):
181
- self._components.lazy_register_component(
182
- 'credential_provider', self._create_credential_resolver
183
- )
184
-
185
- def _create_credential_resolver(self):
186
- return botocore.credentials.create_credential_resolver(
187
- self, region_name=self._last_client_region_used
188
- )
189
-
190
- def _register_data_loader(self):
191
- self._components.lazy_register_component(
192
- 'data_loader',
193
- lambda: create_loader(self.get_config_variable('data_path')),
194
- )
195
-
196
- def _register_endpoint_resolver(self):
197
- def create_default_resolver():
198
- loader = self.get_component('data_loader')
199
- endpoints, path = loader.load_data_with_path('endpoints')
200
- uses_builtin = loader.is_builtin_path(path)
201
- return EndpointResolver(endpoints, uses_builtin_data=uses_builtin)
202
-
203
- self._internal_components.lazy_register_component(
204
- 'endpoint_resolver', create_default_resolver
205
- )
206
-
207
- def _register_default_config_resolver(self):
208
- def create_default_config_resolver():
209
- loader = self.get_component('data_loader')
210
- defaults = loader.load_data('sdk-default-configuration')
211
- return DefaultConfigResolver(defaults)
212
-
213
- self._internal_components.lazy_register_component(
214
- 'default_config_resolver', create_default_config_resolver
215
- )
216
-
217
- def _register_smart_defaults_factory(self):
218
- def create_smart_defaults_factory():
219
- default_config_resolver = self._get_internal_component(
220
- 'default_config_resolver'
221
- )
222
- imds_region_provider = IMDSRegionProvider(session=self)
223
- return SmartDefaultsConfigStoreFactory(
224
- default_config_resolver, imds_region_provider
225
- )
226
-
227
- self._internal_components.lazy_register_component(
228
- 'smart_defaults_factory', create_smart_defaults_factory
229
- )
230
-
231
- def _register_response_parser_factory(self):
232
- self._components.register_component(
233
- 'response_parser_factory', ResponseParserFactory()
234
- )
235
-
236
- def _register_exceptions_factory(self):
237
- self._internal_components.register_component(
238
- 'exceptions_factory', ClientExceptionsFactory()
239
- )
240
-
241
- def _register_builtin_handlers(self, events):
242
- for spec in handlers.BUILTIN_HANDLERS:
243
- if len(spec) == 2:
244
- event_name, handler = spec
245
- self.register(event_name, handler)
246
- else:
247
- event_name, handler, register_type = spec
248
- if register_type is handlers.REGISTER_FIRST:
249
- self._events.register_first(event_name, handler)
250
- elif register_type is handlers.REGISTER_LAST:
251
- self._events.register_last(event_name, handler)
252
-
253
- def _register_config_store(self):
254
- config_store_component = ConfigValueStore(
255
- mapping=create_botocore_default_config_mapping(self)
256
- )
257
- self._components.register_component(
258
- 'config_store', config_store_component
259
- )
260
-
261
- def _register_monitor(self):
262
- self._internal_components.lazy_register_component(
263
- 'monitor', self._create_csm_monitor
264
- )
265
-
266
- def _create_csm_monitor(self):
267
- if self.get_config_variable('csm_enabled'):
268
- client_id = self.get_config_variable('csm_client_id')
269
- host = self.get_config_variable('csm_host')
270
- port = self.get_config_variable('csm_port')
271
- handler = monitoring.Monitor(
272
- adapter=monitoring.MonitorEventAdapter(),
273
- publisher=monitoring.SocketPublisher(
274
- socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
275
- host=host,
276
- port=port,
277
- serializer=monitoring.CSMSerializer(
278
- csm_client_id=client_id
279
- ),
280
- ),
281
- )
282
- return handler
283
- return None
284
-
285
- def _get_crt_version(self):
286
- try:
287
- import awscrt
288
-
289
- return awscrt.__version__
290
- except AttributeError:
291
- return "Unknown"
292
-
293
- @property
294
- def available_profiles(self):
295
- return list(self._build_profile_map().keys())
296
-
297
- def _build_profile_map(self):
298
- # This will build the profile map if it has not been created,
299
- # otherwise it will return the cached value. The profile map
300
- # is a mapping of profile names to the config values for each profile.
301
- if self._profile_map is None:
302
- self._profile_map = self.full_config['profiles']
303
- return self._profile_map
304
-
305
- @property
306
- def profile(self):
307
- if self._profile is None:
308
- profile = self.get_config_variable('profile')
309
- self._profile = profile
310
- return self._profile
311
-
312
- def get_config_variable(self, logical_name, methods=None):
313
- if methods is not None:
314
- return self._get_config_variable_with_custom_methods(
315
- logical_name, methods
316
- )
317
- return self.get_component('config_store').get_config_variable(
318
- logical_name
319
- )
320
-
321
- def _get_config_variable_with_custom_methods(self, logical_name, methods):
322
- # If a custom list of methods was supplied we need to preserve the
323
- # behavior with the new system. To do so a new chain that is a copy of
324
- # the old one will be constructed, but only with the supplied methods
325
- # being added to the chain. This chain will be consulted for a value
326
- # and then thrown out. This is not efficient, nor is the methods arg
327
- # used in botocore; this is just for backwards compatibility.
328
- chain_builder = SubsetChainConfigFactory(session=self, methods=methods)
329
- mapping = create_botocore_default_config_mapping(self)
330
- for name, config_options in self.session_var_map.items():
331
- config_name, env_vars, default, typecast = config_options
332
- build_chain_config_args = {
333
- 'conversion_func': typecast,
334
- 'default': default,
335
- }
336
- if 'instance' in methods:
337
- build_chain_config_args['instance_name'] = name
338
- if 'env' in methods:
339
- build_chain_config_args['env_var_names'] = env_vars
340
- if 'config' in methods:
341
- build_chain_config_args['config_property_name'] = config_name
342
- mapping[name] = chain_builder.create_config_chain(
343
- **build_chain_config_args
344
- )
345
- config_store_component = ConfigValueStore(mapping=mapping)
346
- value = config_store_component.get_config_variable(logical_name)
347
- return value
348
-
349
- def set_config_variable(self, logical_name, value):
350
- """Set a configuration variable to a specific value.
351
-
352
- By using this method, you can override the normal lookup
353
- process used in ``get_config_variable`` by explicitly setting
354
- a value. Subsequent calls to ``get_config_variable`` will
355
- use the ``value``. This gives you per-session specific
356
- configuration values.
357
-
358
- ::
359
- >>> # Assume logical name 'foo' maps to env var 'FOO'
360
- >>> os.environ['FOO'] = 'myvalue'
361
- >>> s.get_config_variable('foo')
362
- 'myvalue'
363
- >>> s.set_config_variable('foo', 'othervalue')
364
- >>> s.get_config_variable('foo')
365
- 'othervalue'
366
-
367
- :type logical_name: str
368
- :param logical_name: The logical name of the session variable
369
- you want to set. These are the keys in ``SESSION_VARIABLES``.
370
- :param value: The value to associate with the config variable.
371
-
372
- """
373
- logger.debug(
374
- "Setting config variable for %s to %r",
375
- logical_name,
376
- value,
377
- )
378
- self._session_instance_vars[logical_name] = value
379
-
380
- def instance_variables(self):
381
- return copy.copy(self._session_instance_vars)
382
-
383
- def get_scoped_config(self):
384
- """
385
- Returns the config values from the config file scoped to the current
386
- profile.
387
-
388
- The configuration data is loaded **only** from the config file.
389
- It does not resolve variables based on different locations
390
- (e.g. first from the session instance, then from environment
391
- variables, then from the config file). If you want this lookup
392
- behavior, use the ``get_config_variable`` method instead.
393
-
394
- Note that this configuration is specific to a single profile (the
395
- ``profile`` session variable).
396
-
397
- If the ``profile`` session variable is set and the profile does
398
- not exist in the config file, a ``ProfileNotFound`` exception
399
- will be raised.
400
-
401
- :raises: ConfigNotFound, ConfigParseError, ProfileNotFound
402
- :rtype: dict
403
-
404
- """
405
- profile_name = self.get_config_variable('profile')
406
- profile_map = self._build_profile_map()
407
- # If a profile is not explicitly set return the default
408
- # profile config or an empty config dict if we don't have
409
- # a default profile.
410
- if profile_name is None:
411
- return profile_map.get('default', {})
412
- elif profile_name not in profile_map:
413
- # Otherwise if they specified a profile, it has to
414
- # exist (even if it's the default profile) otherwise
415
- # we complain.
416
- raise ProfileNotFound(profile=profile_name)
417
- else:
418
- return profile_map[profile_name]
419
-
420
- @property
421
- def full_config(self):
422
- """Return the parsed config file.
423
-
424
- The ``get_config`` method returns the config associated with the
425
- specified profile. This property returns the contents of the
426
- **entire** config file.
427
-
428
- :rtype: dict
429
- """
430
- if self._config is None:
431
- try:
432
- config_file = self.get_config_variable('config_file')
433
- self._config = botocore.configloader.load_config(config_file)
434
- except ConfigNotFound:
435
- self._config = {'profiles': {}}
436
- try:
437
- # Now we need to inject the profiles from the
438
- # credentials file. We don't actually need the values
439
- # in the creds file, only the profile names so that we
440
- # can validate the user is not referring to a nonexistent
441
- # profile.
442
- cred_file = self.get_config_variable('credentials_file')
443
- cred_profiles = botocore.configloader.raw_config_parse(
444
- cred_file
445
- )
446
- for profile in cred_profiles:
447
- cred_vars = cred_profiles[profile]
448
- if profile not in self._config['profiles']:
449
- self._config['profiles'][profile] = cred_vars
450
- else:
451
- self._config['profiles'][profile].update(cred_vars)
452
- except ConfigNotFound:
453
- pass
454
- return self._config
455
-
456
- def get_default_client_config(self):
457
- """Retrieves the default config for creating clients
458
-
459
- :rtype: botocore.client.Config
460
- :returns: The default client config object when creating clients. If
461
- the value is ``None`` then there is no default config object
462
- attached to the session.
463
- """
464
- return self._client_config
465
-
466
- def set_default_client_config(self, client_config):
467
- """Sets the default config for creating clients
468
-
469
- :type client_config: botocore.client.Config
470
- :param client_config: The default client config object when creating
471
- clients. If the value is ``None`` then there is no default config
472
- object attached to the session.
473
- """
474
- self._client_config = client_config
475
-
476
- def set_credentials(self, access_key, secret_key, token=None):
477
- """
478
- Manually create credentials for this session. If you would
479
- prefer to use botocore without a config file, environment variables,
480
- or IAM roles, you can pass explicit credentials into this
481
- method to establish credentials for this session.
482
-
483
- :type access_key: str
484
- :param access_key: The access key part of the credentials.
485
-
486
- :type secret_key: str
487
- :param secret_key: The secret key part of the credentials.
488
-
489
- :type token: str
490
- :param token: An optional session token used by STS session
491
- credentials.
492
- """
493
- self._credentials = botocore.credentials.Credentials(
494
- access_key, secret_key, token
495
- )
496
-
497
- def get_credentials(self):
498
- """
499
- Return the :class:`botocore.credentials.Credentials` object
500
- associated with this session. If the credentials have not
501
- yet been loaded, this will attempt to load them. If they
502
- have already been loaded, this will return the cached
503
- credentials.
504
-
505
- """
506
- if self._credentials is None:
507
- self._credentials = self._components.get_component(
508
- 'credential_provider'
509
- ).load_credentials()
510
- return self._credentials
511
-
512
- def get_auth_token(self):
513
- """
514
- Return the :class:`botocore.tokens.AuthToken` object associated with
515
- this session. If the authorization token has not yet been loaded, this
516
- will attempt to load it. If it has already been loaded, this will
517
- return the cached authorization token.
518
-
519
- """
520
- if self._auth_token is None:
521
- provider = self._components.get_component('token_provider')
522
- self._auth_token = provider.load_token()
523
- return self._auth_token
524
-
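The credential helpers above are easiest to see together. A minimal sketch, assuming botocore is installed and using obviously fake key material:

```python
# Minimal sketch of set_credentials() / get_credentials() shown above.
# The key values are placeholders, not real credentials.
import botocore.session

session = botocore.session.get_session()
session.set_credentials('AKIDEXAMPLE', 'wJalrXUtnFEMI/EXAMPLEKEY')

creds = session.get_credentials()   # returns the cached Credentials object
print(creds.access_key, creds.secret_key)
```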
525
- def user_agent(self):
526
- """
527
- Return a string suitable for use as a User-Agent header.
528
- The string will be of the form:
529
-
530
- <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver> <exec_env>
531
-
532
- Where:
533
-
534
- - agent_name is the value of the `user_agent_name` attribute
535
- of the session object (`Botocore` by default).
536
- - agent_version is the value of the `user_agent_version`
537
- attribute of the session object (the botocore version by default).
538
- - py_ver is the version of the Python interpreter being used.
540
- - plat_name is the name of the platform (e.g. Darwin)
541
- - plat_ver is the version of the platform
542
- - exec_env is exec-env/$AWS_EXECUTION_ENV
543
-
544
- If ``user_agent_extra`` is not empty, then this value will be
545
- appended to the end of the user agent string.
546
-
547
- """
548
- base = (
549
- f'{self.user_agent_name}/{self.user_agent_version} '
550
- f'Python/{platform.python_version()} '
551
- f'{platform.system()}/{platform.release()}'
552
- )
553
- if HAS_CRT:
554
- base += ' awscrt/%s' % self._get_crt_version()
555
- if os.environ.get('AWS_EXECUTION_ENV') is not None:
556
- base += ' exec-env/%s' % os.environ.get('AWS_EXECUTION_ENV')
557
- if self.user_agent_extra:
558
- base += ' %s' % self.user_agent_extra
559
-
560
- return base
561
-
562
- def get_data(self, data_path):
563
- """
564
- Retrieve the data associated with `data_path`.
565
-
566
- :type data_path: str
567
- :param data_path: The path to the data you wish to retrieve.
568
- """
569
- return self.get_component('data_loader').load_data(data_path)
570
-
571
- def get_service_model(self, service_name, api_version=None):
572
- """Get the service model object.
573
-
574
- :type service_name: string
575
- :param service_name: The service name
576
-
577
- :type api_version: string
578
- :param api_version: The API version of the service. If none is
579
- provided, then the latest API version will be used.
580
-
581
- :rtype: L{botocore.model.ServiceModel}
582
- :return: The botocore service model for the service.
583
-
584
- """
585
- service_description = self.get_service_data(service_name, api_version)
586
- return ServiceModel(service_description, service_name=service_name)
587
-
588
- def get_waiter_model(self, service_name, api_version=None):
589
- loader = self.get_component('data_loader')
590
- waiter_config = loader.load_service_model(
591
- service_name, 'waiters-2', api_version
592
- )
593
- return waiter.WaiterModel(waiter_config)
594
-
595
- def get_paginator_model(self, service_name, api_version=None):
596
- loader = self.get_component('data_loader')
597
- paginator_config = loader.load_service_model(
598
- service_name, 'paginators-1', api_version
599
- )
600
- return paginate.PaginatorModel(paginator_config)
601
-
602
- def get_service_data(self, service_name, api_version=None):
603
- """
604
- Retrieve the fully merged data associated with a service.
605
- """
606
- data_path = service_name
607
- service_data = self.get_component('data_loader').load_service_model(
608
- data_path, type_name='service-2', api_version=api_version
609
- )
610
- service_id = EVENT_ALIASES.get(service_name, service_name)
611
- self._events.emit(
612
- 'service-data-loaded.%s' % service_id,
613
- service_data=service_data,
614
- service_name=service_name,
615
- session=self,
616
- )
617
- return service_data
618
-
619
- def get_available_services(self):
620
- """
621
- Return a list of names of available services.
622
- """
623
- return self.get_component('data_loader').list_available_services(
624
- type_name='service-2'
625
- )
626
-
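To make the model-loading helpers above concrete, the sketch below lists available services and inspects one service model. It assumes botocore's bundled data files are present (they ship with the package):

```python
# Sketch: enumerate services and introspect a service model.
import botocore.session

session = botocore.session.get_session()
print(session.get_available_services()[:5])   # first few service names

model = session.get_service_model('s3')
print(model.service_name)                     # 's3'
print(list(model.operation_names)[:3])        # a few S3 operation names
```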
627
- def set_debug_logger(self, logger_name='botocore'):
628
- """
629
- Convenience function to quickly configure full debug output
630
- to go to the console.
631
- """
632
- self.set_stream_logger(logger_name, logging.DEBUG)
633
-
634
- def set_stream_logger(
635
- self, logger_name, log_level, stream=None, format_string=None
636
- ):
637
- """
638
- Convenience method to configure a stream logger.
639
-
640
- :type logger_name: str
641
- :param logger_name: The name of the logger to configure
642
-
643
- :type log_level: str
644
- :param log_level: The log level to set for the logger. This
645
- is any param supported by the ``.setLevel()`` method of
646
- a ``Logger`` object.
647
-
648
- :type stream: file
649
- :param stream: A file like object to log to. If none is provided
650
- then sys.stderr will be used.
651
-
652
- :type format_string: str
653
- :param format_string: The format string to use for the log
654
- formatter. If none is provided this will default to
655
- ``self.LOG_FORMAT``.
656
-
657
- """
658
- log = logging.getLogger(logger_name)
659
- log.setLevel(logging.DEBUG)
660
-
661
- ch = logging.StreamHandler(stream)
662
- ch.setLevel(log_level)
663
-
664
- # create formatter
665
- if format_string is None:
666
- format_string = self.LOG_FORMAT
667
- formatter = logging.Formatter(format_string)
668
-
669
- # add formatter to ch
670
- ch.setFormatter(formatter)
671
-
672
- # add ch to logger
673
- log.addHandler(ch)
674
-
675
- def set_file_logger(self, log_level, path, logger_name='botocore'):
676
- """
677
- Convenience function to quickly configure any level of logging
678
- to a file.
679
-
680
- :type log_level: int
681
- :param log_level: A log level as specified in the `logging` module
682
-
683
- :type path: string
684
- :param path: Path to the log file. The file will be created
685
- if it doesn't already exist.
686
- """
687
- log = logging.getLogger(logger_name)
688
- log.setLevel(logging.DEBUG)
689
-
690
- # create console handler and set level to debug
691
- ch = logging.FileHandler(path)
692
- ch.setLevel(log_level)
693
-
694
- # create formatter
695
- formatter = logging.Formatter(self.LOG_FORMAT)
696
-
697
- # add formatter to ch
698
- ch.setFormatter(formatter)
699
-
700
- # add ch to logger
701
- log.addHandler(ch)
702
-
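A quick sketch of the two logging conveniences above; the log file path is only an example location:

```python
# Sketch: route full botocore debug output to stderr and to a file.
import logging
import botocore.session

session = botocore.session.get_session()
session.set_debug_logger()                                   # DEBUG to stderr
session.set_file_logger(logging.DEBUG, '/tmp/botocore.log')  # example path
```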
703
- def register(
704
- self, event_name, handler, unique_id=None, unique_id_uses_count=False
705
- ):
706
- """Register a handler with an event.
707
-
708
- :type event_name: str
709
- :param event_name: The name of the event.
710
-
711
- :type handler: callable
712
- :param handler: The callback to invoke when the event
713
- is emitted. This object must be callable, and must
714
- accept ``**kwargs``. If either of these preconditions are
715
- not met, a ``ValueError`` will be raised.
716
-
717
- :type unique_id: str
718
- :param unique_id: An optional identifier to associate with the
719
- registration. A unique_id can only be used once for
720
- the entire session registration (unless it is unregistered).
721
- This can be used to prevent an event handler from being
722
- registered twice.
723
-
724
- :type unique_id_uses_count: boolean
725
- :param unique_id_uses_count: Specifies if the event should maintain
726
- a count when a ``unique_id`` is registered and unregistered. The
727
- event can only be completely unregistered once every register call
728
- using the unique id has been matched by an ``unregister`` call.
729
- If ``unique_id`` is specified, subsequent ``register``
730
- calls must use the same value for ``unique_id_uses_count``
731
- as the ``register`` call that first registered the event.
732
-
733
- :raises ValueError: If the call to ``register`` uses ``unique_id``
734
- but the value for ``unique_id_uses_count`` differs from the
735
- ``unique_id_uses_count`` value declared by the very first
736
- ``register`` call for that ``unique_id``.
737
- """
738
- self._events.register(
739
- event_name,
740
- handler,
741
- unique_id,
742
- unique_id_uses_count=unique_id_uses_count,
743
- )
744
-
745
- def unregister(
746
- self,
747
- event_name,
748
- handler=None,
749
- unique_id=None,
750
- unique_id_uses_count=False,
751
- ):
752
- """Unregister a handler with an event.
753
-
754
- :type event_name: str
755
- :param event_name: The name of the event.
756
-
757
- :type handler: callable
758
- :param handler: The callback to unregister.
759
-
760
- :type unique_id: str
761
- :param unique_id: A unique identifier identifying the callback
762
- to unregister. You can provide either the handler or the
763
- unique_id; you do not have to provide both.
764
-
765
- :type unique_id_uses_count: boolean
766
- :param unique_id_uses_count: Specifies if the event should maintain
767
- a count when a ``unique_id`` is registered and unregistered. The
768
- event can only be completely unregistered once every ``register``
769
- call using the ``unique_id`` has been matched by an ``unregister``
770
- call. If the ``unique_id`` is specified, subsequent
771
- ``unregister`` calls must use the same value for
772
- ``unique_id_uses_count`` as the ``register`` call that first
773
- registered the event.
774
-
775
- :raises ValueError: If the call to ``unregister`` uses ``unique_id``
776
- but the value for ``unique_id_uses_count`` differs from the
777
- ``unique_id_uses_count`` value declared by the very first
778
- ``register`` call for that ``unique_id``.
779
- """
780
- self._events.unregister(
781
- event_name,
782
- handler=handler,
783
- unique_id=unique_id,
784
- unique_id_uses_count=unique_id_uses_count,
785
- )
786
-
787
- def emit(self, event_name, **kwargs):
788
- return self._events.emit(event_name, **kwargs)
789
-
790
- def emit_first_non_none_response(self, event_name, **kwargs):
791
- responses = self._events.emit(event_name, **kwargs)
792
- return first_non_none_response(responses)
793
-
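The register/unregister/emit methods above form the session's event system. A small sketch with a made-up event name; as the docstrings note, handlers must be callables that accept ``**kwargs``:

```python
# Sketch: register a handler on the session's event emitter and fire it.
import botocore.session

def on_custom_event(**kwargs):
    # Handlers must accept **kwargs; emitted arguments are passed through.
    print('event fired with', kwargs)

session = botocore.session.get_session()
session.register('my-custom-event', on_custom_event)   # made-up event name
session.emit('my-custom-event', detail='example')
session.unregister('my-custom-event', on_custom_event)
```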
794
- def get_component(self, name):
795
- try:
796
- return self._components.get_component(name)
797
- except ValueError:
798
- if name in ['endpoint_resolver', 'exceptions_factory']:
799
- warnings.warn(
800
- 'Fetching the %s component with the get_component() '
801
- 'method is deprecated as the component has always been '
802
- 'considered an internal interface of botocore' % name,
803
- DeprecationWarning,
804
- )
805
- return self._internal_components.get_component(name)
806
- raise
807
-
808
- def _get_internal_component(self, name):
809
- # While this method may be called by botocore classes outside of the
810
- # Session, this method should **never** be used by a class that lives
811
- # outside of botocore.
812
- return self._internal_components.get_component(name)
813
-
814
- def _register_internal_component(self, name, component):
815
- # While this method may be called by botocore classes outside of the
816
- # Session, this method should **never** be used by a class that lives
817
- # outside of botocore.
818
- return self._internal_components.register_component(name, component)
819
-
820
- def register_component(self, name, component):
821
- self._components.register_component(name, component)
822
-
823
- def lazy_register_component(self, name, component):
824
- self._components.lazy_register_component(name, component)
825
-
826
- def create_client(
827
- self,
828
- service_name,
829
- region_name=None,
830
- api_version=None,
831
- use_ssl=True,
832
- verify=None,
833
- endpoint_url=None,
834
- aws_access_key_id=None,
835
- aws_secret_access_key=None,
836
- aws_session_token=None,
837
- config=None,
838
- ):
839
- """Create a botocore client.
840
-
841
- :type service_name: string
842
- :param service_name: The name of the service for which a client will
843
- be created. You can use the ``Session.get_available_services()``
844
- method to get a list of all available service names.
845
-
846
- :type region_name: string
847
- :param region_name: The name of the region associated with the client.
848
- A client is associated with a single region.
849
-
850
- :type api_version: string
851
- :param api_version: The API version to use. By default, botocore will
852
- use the latest API version when creating a client. You only need
853
- to specify this parameter if you want to use a previous API version
854
- of the client.
855
-
856
- :type use_ssl: boolean
857
- :param use_ssl: Whether or not to use SSL. By default, SSL is used.
858
- Note that not all services support non-SSL connections.
859
-
860
- :type verify: boolean/string
861
- :param verify: Whether or not to verify SSL certificates.
862
- By default SSL certificates are verified. You can provide the
863
- following values:
864
-
865
- * False - do not validate SSL certificates. SSL will still be
866
- used (unless use_ssl is False), but SSL certificates
867
- will not be verified.
868
- * path/to/cert/bundle.pem - A filename of the CA cert bundle to
869
- use. You can specify this argument if you want to use a
870
- different CA cert bundle than the one used by botocore.
871
-
872
- :type endpoint_url: string
873
- :param endpoint_url: The complete URL to use for the constructed
874
- client. Normally, botocore will automatically construct the
875
- appropriate URL to use when communicating with a service. You can
876
- specify a complete URL (including the "http/https" scheme) to
877
- override this behavior. If this value is provided, then
878
- ``use_ssl`` is ignored.
879
-
880
- :type aws_access_key_id: string
881
- :param aws_access_key_id: The access key to use when creating
882
- the client. This is entirely optional, and if not provided,
883
- the credentials configured for the session will automatically
884
- be used. You only need to provide this argument if you want
885
- to override the credentials used for this specific client.
886
-
887
- :type aws_secret_access_key: string
888
- :param aws_secret_access_key: The secret key to use when creating
889
- the client. Same semantics as aws_access_key_id above.
890
-
891
- :type aws_session_token: string
892
- :param aws_session_token: The session token to use when creating
893
- the client. Same semantics as aws_access_key_id above.
894
-
895
- :type config: botocore.client.Config
896
- :param config: Advanced client configuration options. If a value
897
- is specified in the client config, its value will take precedence
898
- over environment variables and configuration values, but not over
899
- a value passed explicitly to the method. If a default config
900
- object is set on the session, the config object used when creating
901
- the client will be the result of calling ``merge()`` on the
902
- default config with the config provided to this call.
903
-
904
- :rtype: botocore.client.BaseClient
905
- :return: A botocore client instance
906
-
907
- """
908
- default_client_config = self.get_default_client_config()
909
- # If a config is provided and a default config is set, then
910
- # use the config resulting from merging the two.
911
- if config is not None and default_client_config is not None:
912
- config = default_client_config.merge(config)
913
- # If a config was not provided then use the default
914
- # client config from the session
915
- elif default_client_config is not None:
916
- config = default_client_config
917
-
918
- region_name = self._resolve_region_name(region_name, config)
919
-
920
- # Figure out the verify value base on the various
921
- # configuration options.
922
- if verify is None:
923
- verify = self.get_config_variable('ca_bundle')
924
-
925
- if api_version is None:
926
- api_version = self.get_config_variable('api_versions').get(
927
- service_name, None
928
- )
929
-
930
- loader = self.get_component('data_loader')
931
- event_emitter = self.get_component('event_emitter')
932
- response_parser_factory = self.get_component('response_parser_factory')
933
- if config is not None and config.signature_version is UNSIGNED:
934
- credentials = None
935
- elif (
936
- aws_access_key_id is not None and aws_secret_access_key is not None
937
- ):
938
- credentials = botocore.credentials.Credentials(
939
- access_key=aws_access_key_id,
940
- secret_key=aws_secret_access_key,
941
- token=aws_session_token,
942
- )
943
- elif self._missing_cred_vars(aws_access_key_id, aws_secret_access_key):
944
- raise PartialCredentialsError(
945
- provider='explicit',
946
- cred_var=self._missing_cred_vars(
947
- aws_access_key_id, aws_secret_access_key
948
- ),
949
- )
950
- else:
951
- credentials = self.get_credentials()
952
- auth_token = self.get_auth_token()
953
- endpoint_resolver = self._get_internal_component('endpoint_resolver')
954
- exceptions_factory = self._get_internal_component('exceptions_factory')
955
- config_store = self.get_component('config_store')
956
- defaults_mode = self._resolve_defaults_mode(config, config_store)
957
- if defaults_mode != 'legacy':
958
- smart_defaults_factory = self._get_internal_component(
959
- 'smart_defaults_factory'
960
- )
961
- config_store = copy.deepcopy(config_store)
962
- smart_defaults_factory.merge_smart_defaults(
963
- config_store, defaults_mode, region_name
964
- )
965
- client_creator = botocore.client.ClientCreator(
966
- loader,
967
- endpoint_resolver,
968
- self.user_agent(),
969
- event_emitter,
970
- retryhandler,
971
- translate,
972
- response_parser_factory,
973
- exceptions_factory,
974
- config_store,
975
- )
976
- client = client_creator.create_client(
977
- service_name=service_name,
978
- region_name=region_name,
979
- is_secure=use_ssl,
980
- endpoint_url=endpoint_url,
981
- verify=verify,
982
- credentials=credentials,
983
- scoped_config=self.get_scoped_config(),
984
- client_config=config,
985
- api_version=api_version,
986
- auth_token=auth_token,
987
- )
988
- monitor = self._get_internal_component('monitor')
989
- if monitor is not None:
990
- monitor.register(client.meta.events)
991
- return client
992
-
993
- def _resolve_region_name(self, region_name, config):
994
- # Figure out the user-provided region based on the various
995
- # configuration options.
996
- if region_name is None:
997
- if config and config.region_name is not None:
998
- region_name = config.region_name
999
- else:
1000
- region_name = self.get_config_variable('region')
1001
-
1002
- validate_region_name(region_name)
1003
- # For any client that we create in retrieving credentials
1004
- # we want to create it using the same region as specified in
1005
- # creating this client. It is important to note though that the
1006
- # credentials client is only created once per session. So if a new
1007
- # client is created with a different region, its credential resolver
1008
- # will use the region of the first client. However, that is not an
1009
- # issue as of now because the credential resolver uses only STS and
1010
- # the credentials returned at regional endpoints are valid across
1011
- # all regions in the partition.
1012
- self._last_client_region_used = region_name
1013
- return region_name
1014
-
1015
- def _resolve_defaults_mode(self, client_config, config_store):
1016
- mode = config_store.get_config_variable('defaults_mode')
1017
-
1018
- if client_config and client_config.defaults_mode:
1019
- mode = client_config.defaults_mode
1020
-
1021
- default_config_resolver = self._get_internal_component(
1022
- 'default_config_resolver'
1023
- )
1024
- default_modes = default_config_resolver.get_default_modes()
1025
- lmode = mode.lower()
1026
- if lmode not in default_modes:
1027
- raise InvalidDefaultsMode(
1028
- mode=mode, valid_modes=', '.join(default_modes)
1029
- )
1030
-
1031
- return lmode
1032
-
1033
- def _missing_cred_vars(self, access_key, secret_key):
1034
- if access_key is not None and secret_key is None:
1035
- return 'aws_secret_access_key'
1036
- if secret_key is not None and access_key is None:
1037
- return 'aws_access_key_id'
1038
- return None
1039
-
1040
- def get_available_partitions(self):
1041
- """Lists the available partitions found on disk
1042
-
1043
- :rtype: list
1044
- :return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
1045
- """
1046
- resolver = self._get_internal_component('endpoint_resolver')
1047
- return resolver.get_available_partitions()
1048
-
1049
- def get_partition_for_region(self, region_name):
1050
- """Lists the partition name of a particular region.
1051
-
1052
- :type region_name: string
1053
- :param region_name: Name of the region to list partition for (e.g.,
1054
- us-east-1).
1055
-
1056
- :rtype: string
1057
- :return: Returns the respective partition name (e.g., aws).
1058
- """
1059
- resolver = self._get_internal_component('endpoint_resolver')
1060
- return resolver.get_partition_for_region(region_name)
1061
-
1062
- def get_available_regions(
1063
- self, service_name, partition_name='aws', allow_non_regional=False
1064
- ):
1065
- """Lists the region and endpoint names of a particular partition.
1066
-
1067
- :type service_name: string
1068
- :param service_name: Name of a service to list endpoint for (e.g., s3).
1069
- This parameter accepts a service name (e.g., "elb") or endpoint
1070
- prefix (e.g., "elasticloadbalancing").
1071
-
1072
- :type partition_name: string
1073
- :param partition_name: Name of the partition to limit endpoints to.
1074
- (e.g., aws for the public AWS endpoints, aws-cn for AWS China
1075
- endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.).
1076
-
1077
- :type allow_non_regional: bool
1078
- :param allow_non_regional: Set to True to include endpoints that are
1079
- not regional endpoints (e.g., s3-external-1,
1080
- fips-us-gov-west-1, etc).
1081
- :return: Returns a list of endpoint names (e.g., ["us-east-1"]).
1082
- """
1083
- resolver = self._get_internal_component('endpoint_resolver')
1084
- results = []
1085
- try:
1086
- service_data = self.get_service_data(service_name)
1087
- endpoint_prefix = service_data['metadata'].get(
1088
- 'endpointPrefix', service_name
1089
- )
1090
- results = resolver.get_available_endpoints(
1091
- endpoint_prefix, partition_name, allow_non_regional
1092
- )
1093
- except UnknownServiceError:
1094
- pass
1095
- return results
1096
-
1097
-
1098
- class ComponentLocator:
1099
- """Service locator for session components."""
1100
-
1101
- def __init__(self):
1102
- self._components = {}
1103
- self._deferred = {}
1104
-
1105
- def get_component(self, name):
1106
- if name in self._deferred:
1107
- factory = self._deferred[name]
1108
- self._components[name] = factory()
1109
- # Only delete the component from the deferred dict after
1110
- # successfully creating the object from the factory as well as
1111
- # injecting the instantiated value into the _components dict.
1112
- del self._deferred[name]
1113
- try:
1114
- return self._components[name]
1115
- except KeyError:
1116
- raise ValueError("Unknown component: %s" % name)
1117
-
1118
- def register_component(self, name, component):
1119
- self._components[name] = component
1120
- try:
1121
- del self._deferred[name]
1122
- except KeyError:
1123
- pass
1124
-
1125
- def lazy_register_component(self, name, no_arg_factory):
1126
- self._deferred[name] = no_arg_factory
1127
- try:
1128
- del self._components[name]
1129
- except KeyError:
1130
- pass
1131
-
1132
-
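``ComponentLocator`` is a small service locator: eager registrations are stored directly, while lazy registrations hold a no-argument factory that runs on first access and is then cached. A standalone sketch, assuming the class above is in scope and using invented component names:

```python
# Sketch: eager vs. lazy registration in ComponentLocator.
locator = ComponentLocator()

locator.register_component('settings', {'region': 'us-east-1'})
locator.lazy_register_component('expensive', lambda: print('building...') or 42)

print(locator.get_component('settings'))    # {'region': 'us-east-1'}
print(locator.get_component('expensive'))   # prints 'building...', returns 42
print(locator.get_component('expensive'))   # cached value; factory not re-run
```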
1133
- class SessionVarDict(MutableMapping):
1134
- def __init__(self, session, session_vars):
1135
- self._session = session
1136
- self._store = copy.copy(session_vars)
1137
-
1138
- def __getitem__(self, key):
1139
- return self._store[key]
1140
-
1141
- def __setitem__(self, key, value):
1142
- self._store[key] = value
1143
- self._update_config_store_from_session_vars(key, value)
1144
-
1145
- def __delitem__(self, key):
1146
- del self._store[key]
1147
-
1148
- def __iter__(self):
1149
- return iter(self._store)
1150
-
1151
- def __len__(self):
1152
- return len(self._store)
1153
-
1154
- def _update_config_store_from_session_vars(
1155
- self, logical_name, config_options
1156
- ):
1157
- # This is for backwards compatibility. The new preferred way to
1158
- # modify configuration logic is to use the component system to get
1159
- # the config_store component from the session, and then update
1160
- # a key with a custom config provider(s).
1161
- # This backwards compatibility method takes the old session_vars
1162
- # list of tuples and transforms that into a set of updates to
1163
- # the config_store component.
1164
- config_chain_builder = ConfigChainFactory(session=self._session)
1165
- config_name, env_vars, default, typecast = config_options
1166
- config_store = self._session.get_component('config_store')
1167
- config_store.set_config_provider(
1168
- logical_name,
1169
- config_chain_builder.create_config_chain(
1170
- instance_name=logical_name,
1171
- env_var_names=env_vars,
1172
- config_property_names=config_name,
1173
- default=default,
1174
- conversion_func=typecast,
1175
- ),
1176
- )
1177
-
1178
-
1179
- class SubsetChainConfigFactory:
1180
- """A class for creating backwards compatible configuration chains.
1181
-
1182
- This class can be used instead of
1183
- :class:`botocore.configprovider.ConfigChainFactory` to make it honor the
1184
- methods argument to get_config_variable. This class can be used to filter
1185
- out providers that are not in the methods tuple when creating a new config
1186
- chain.
1187
- """
1188
-
1189
- def __init__(self, session, methods, environ=None):
1190
- self._factory = ConfigChainFactory(session, environ)
1191
- self._supported_methods = methods
1192
-
1193
- def create_config_chain(
1194
- self,
1195
- instance_name=None,
1196
- env_var_names=None,
1197
- config_property_name=None,
1198
- default=None,
1199
- conversion_func=None,
1200
- ):
1201
- """Build a config chain following the standard botocore pattern.
1202
-
1203
- This config chain factory will omit any providers not in the methods
1204
- tuple provided at initialization. For example if given the tuple
1205
- ('instance', 'config',) it will not inject the environment provider
1206
- into the standard config chain. This lets the botocore session support
1207
- the custom ``methods`` argument for all the default botocore config
1208
- variables when calling ``get_config_variable``.
1209
- """
1210
- if 'instance' not in self._supported_methods:
1211
- instance_name = None
1212
- if 'env' not in self._supported_methods:
1213
- env_var_names = None
1214
- if 'config' not in self._supported_methods:
1215
- config_property_name = None
1216
- return self._factory.create_config_chain(
1217
- instance_name=instance_name,
1218
- env_var_names=env_var_names,
1219
- config_property_names=config_property_name,
1220
- default=default,
1221
- conversion_func=conversion_func,
1222
- )
1223
-
1224
-
1225
- def get_session(env_vars=None):
1226
- """
1227
- Return a new session object.
1228
- """
1229
- return Session(env_vars)
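Putting the module together, a typical end-to-end use of the ``Session`` shown above looks roughly like the sketch below. It assumes botocore is installed; no request is actually sent, so missing credentials are not a problem at this point.

```python
# Sketch: create a session, pin a region, and build a client from it.
import botocore.session

session = botocore.session.get_session()
session.set_config_variable('region', 'us-east-1')

print(session.get_config_variable('region'))   # 'us-east-1'
print(session.available_profiles)              # profiles from the config file

# Region, credentials and endpoint are resolved as documented in create_client();
# request signing only happens once an actual API call is made.
s3 = session.create_client('s3')
print(s3.meta.region_name)                     # 'us-east-1'
```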
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/manifest.py DELETED
@@ -1,393 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- #
3
- # Copyright (C) 2012-2013 Python Software Foundation.
4
- # See LICENSE.txt and CONTRIBUTORS.txt.
5
- #
6
- """
7
- Class representing the list of files in a distribution.
8
-
9
- Equivalent to distutils.filelist, but fixes some problems.
10
- """
11
- import fnmatch
12
- import logging
13
- import os
14
- import re
15
- import sys
16
-
17
- from . import DistlibException
18
- from .compat import fsdecode
19
- from .util import convert_path
20
-
21
-
22
- __all__ = ['Manifest']
23
-
24
- logger = logging.getLogger(__name__)
25
-
26
- # a \ followed by some spaces + EOL
27
- _COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M)
28
- _COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
29
-
30
- #
31
- # Due to the different results returned by fnmatch.translate, we need
32
- # to do slightly different processing for Python 2.7 and 3.2 ... this needed
33
- # to be brought in for Python 3.6 onwards.
34
- #
35
- _PYTHON_VERSION = sys.version_info[:2]
36
-
37
- class Manifest(object):
38
- """A list of files built by exploring the filesystem and filtered by
39
- applying various patterns to what we find there.
40
- """
41
-
42
- def __init__(self, base=None):
43
- """
44
- Initialise an instance.
45
-
46
- :param base: The base directory to explore under.
47
- """
48
- self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
49
- self.prefix = self.base + os.sep
50
- self.allfiles = None
51
- self.files = set()
52
-
53
- #
54
- # Public API
55
- #
56
-
57
- def findall(self):
58
- """Find all files under the base and set ``allfiles`` to the absolute
59
- pathnames of files found.
60
- """
61
- from stat import S_ISREG, S_ISDIR, S_ISLNK
62
-
63
- self.allfiles = allfiles = []
64
- root = self.base
65
- stack = [root]
66
- pop = stack.pop
67
- push = stack.append
68
-
69
- while stack:
70
- root = pop()
71
- names = os.listdir(root)
72
-
73
- for name in names:
74
- fullname = os.path.join(root, name)
75
-
76
- # Avoid excess stat calls -- just one will do, thank you!
77
- stat = os.stat(fullname)
78
- mode = stat.st_mode
79
- if S_ISREG(mode):
80
- allfiles.append(fsdecode(fullname))
81
- elif S_ISDIR(mode) and not S_ISLNK(mode):
82
- push(fullname)
83
-
84
- def add(self, item):
85
- """
86
- Add a file to the manifest.
87
-
88
- :param item: The pathname to add. This can be relative to the base.
89
- """
90
- if not item.startswith(self.prefix):
91
- item = os.path.join(self.base, item)
92
- self.files.add(os.path.normpath(item))
93
-
94
- def add_many(self, items):
95
- """
96
- Add a list of files to the manifest.
97
-
98
- :param items: The pathnames to add. These can be relative to the base.
99
- """
100
- for item in items:
101
- self.add(item)
102
-
103
- def sorted(self, wantdirs=False):
104
- """
105
- Return sorted files in directory order
106
- """
107
-
108
- def add_dir(dirs, d):
109
- dirs.add(d)
110
- logger.debug('add_dir added %s', d)
111
- if d != self.base:
112
- parent, _ = os.path.split(d)
113
- assert parent not in ('', '/')
114
- add_dir(dirs, parent)
115
-
116
- result = set(self.files) # make a copy!
117
- if wantdirs:
118
- dirs = set()
119
- for f in result:
120
- add_dir(dirs, os.path.dirname(f))
121
- result |= dirs
122
- return [os.path.join(*path_tuple) for path_tuple in
123
- sorted(os.path.split(path) for path in result)]
124
-
125
- def clear(self):
126
- """Clear all collected files."""
127
- self.files = set()
128
- self.allfiles = []
129
-
130
- def process_directive(self, directive):
131
- """
132
- Process a directive which either adds some files from ``allfiles`` to
133
- ``files``, or removes some files from ``files``.
134
-
135
- :param directive: The directive to process. This should be in a format
136
- compatible with distutils ``MANIFEST.in`` files:
137
-
138
- http://docs.python.org/distutils/sourcedist.html#commands
139
- """
140
- # Parse the line: split it up, make sure the right number of words
141
- # is there, and return the relevant words. 'action' is always
142
- # defined: it's the first word of the line. Which of the other
143
- # three are defined depends on the action; it'll be either
144
- # patterns, (dir and patterns), or (dirpattern).
145
- action, patterns, thedir, dirpattern = self._parse_directive(directive)
146
-
147
- # OK, now we know that the action is valid and we have the
148
- # right number of words on the line for that action -- so we
149
- # can proceed with minimal error-checking.
150
- if action == 'include':
151
- for pattern in patterns:
152
- if not self._include_pattern(pattern, anchor=True):
153
- logger.warning('no files found matching %r', pattern)
154
-
155
- elif action == 'exclude':
156
- for pattern in patterns:
157
- found = self._exclude_pattern(pattern, anchor=True)
158
- #if not found:
159
- # logger.warning('no previously-included files '
160
- # 'found matching %r', pattern)
161
-
162
- elif action == 'global-include':
163
- for pattern in patterns:
164
- if not self._include_pattern(pattern, anchor=False):
165
- logger.warning('no files found matching %r '
166
- 'anywhere in distribution', pattern)
167
-
168
- elif action == 'global-exclude':
169
- for pattern in patterns:
170
- found = self._exclude_pattern(pattern, anchor=False)
171
- #if not found:
172
- # logger.warning('no previously-included files '
173
- # 'matching %r found anywhere in '
174
- # 'distribution', pattern)
175
-
176
- elif action == 'recursive-include':
177
- for pattern in patterns:
178
- if not self._include_pattern(pattern, prefix=thedir):
179
- logger.warning('no files found matching %r '
180
- 'under directory %r', pattern, thedir)
181
-
182
- elif action == 'recursive-exclude':
183
- for pattern in patterns:
184
- found = self._exclude_pattern(pattern, prefix=thedir)
185
- #if not found:
186
- # logger.warning('no previously-included files '
187
- # 'matching %r found under directory %r',
188
- # pattern, thedir)
189
-
190
- elif action == 'graft':
191
- if not self._include_pattern(None, prefix=dirpattern):
192
- logger.warning('no directories found matching %r',
193
- dirpattern)
194
-
195
- elif action == 'prune':
196
- if not self._exclude_pattern(None, prefix=dirpattern):
197
- logger.warning('no previously-included directories found '
198
- 'matching %r', dirpattern)
199
- else: # pragma: no cover
200
- # This should never happen, as it should be caught in
201
- # _parse_template_line
202
- raise DistlibException(
203
- 'invalid action %r' % action)
204
-
205
- #
206
- # Private API
207
- #
208
-
209
- def _parse_directive(self, directive):
210
- """
211
- Validate a directive.
212
- :param directive: The directive to validate.
213
- :return: A tuple of action, patterns, thedir, dir_patterns
214
- """
215
- words = directive.split()
216
- if len(words) == 1 and words[0] not in ('include', 'exclude',
217
- 'global-include',
218
- 'global-exclude',
219
- 'recursive-include',
220
- 'recursive-exclude',
221
- 'graft', 'prune'):
222
- # no action given, let's use the default 'include'
223
- words.insert(0, 'include')
224
-
225
- action = words[0]
226
- patterns = thedir = dir_pattern = None
227
-
228
- if action in ('include', 'exclude',
229
- 'global-include', 'global-exclude'):
230
- if len(words) < 2:
231
- raise DistlibException(
232
- '%r expects <pattern1> <pattern2> ...' % action)
233
-
234
- patterns = [convert_path(word) for word in words[1:]]
235
-
236
- elif action in ('recursive-include', 'recursive-exclude'):
237
- if len(words) < 3:
238
- raise DistlibException(
239
- '%r expects <dir> <pattern1> <pattern2> ...' % action)
240
-
241
- thedir = convert_path(words[1])
242
- patterns = [convert_path(word) for word in words[2:]]
243
-
244
- elif action in ('graft', 'prune'):
245
- if len(words) != 2:
246
- raise DistlibException(
247
- '%r expects a single <dir_pattern>' % action)
248
-
249
- dir_pattern = convert_path(words[1])
250
-
251
- else:
252
- raise DistlibException('unknown action %r' % action)
253
-
254
- return action, patterns, thedir, dir_pattern
255
-
256
- def _include_pattern(self, pattern, anchor=True, prefix=None,
257
- is_regex=False):
258
- """Select strings (presumably filenames) from 'self.allfiles' that
259
- match 'pattern', a Unix-style wildcard (glob) pattern.
260
-
261
- Patterns are not quite the same as implemented by the 'fnmatch'
262
- module: '*' and '?' match non-special characters, where "special"
263
- is platform-dependent: slash on Unix; colon, slash, and backslash on
264
- DOS/Windows; and colon on Mac OS.
265
-
266
- If 'anchor' is true (the default), then the pattern match is more
267
- stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
268
- 'anchor' is false, both of these will match.
269
-
270
- If 'prefix' is supplied, then only filenames starting with 'prefix'
271
- (itself a pattern) and ending with 'pattern', with anything in between
272
- them, will match. 'anchor' is ignored in this case.
273
-
274
- If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
275
- 'pattern' is assumed to be either a string containing a regex or a
276
- regex object -- no translation is done, the regex is just compiled
277
- and used as-is.
278
-
279
- Selected strings will be added to self.files.
280
-
281
- Return True if files are found.
282
- """
283
- # XXX docstring lying about what the special chars are?
284
- found = False
285
- pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
286
-
287
- # delayed loading of allfiles list
288
- if self.allfiles is None:
289
- self.findall()
290
-
291
- for name in self.allfiles:
292
- if pattern_re.search(name):
293
- self.files.add(name)
294
- found = True
295
- return found
296
-
297
- def _exclude_pattern(self, pattern, anchor=True, prefix=None,
298
- is_regex=False):
299
- """Remove strings (presumably filenames) from 'files' that match
300
- 'pattern'.
301
-
302
- Other parameters are the same as for 'include_pattern()', above.
303
- The list 'self.files' is modified in place. Return True if files are
304
- found.
305
-
306
- This API is public to allow e.g. exclusion of SCM subdirs when
307
- packaging source distributions
308
- """
309
- found = False
310
- pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
311
- for f in list(self.files):
312
- if pattern_re.search(f):
313
- self.files.remove(f)
314
- found = True
315
- return found
316
-
317
- def _translate_pattern(self, pattern, anchor=True, prefix=None,
318
- is_regex=False):
319
- """Translate a shell-like wildcard pattern to a compiled regular
320
- expression.
321
-
322
- Return the compiled regex. If 'is_regex' true,
323
- then 'pattern' is directly compiled to a regex (if it's a string)
324
- or just returned as-is (assumes it's a regex object).
325
- """
326
- if is_regex:
327
- if isinstance(pattern, str):
328
- return re.compile(pattern)
329
- else:
330
- return pattern
331
-
332
- if _PYTHON_VERSION > (3, 2):
333
- # ditch start and end characters
334
- start, _, end = self._glob_to_re('_').partition('_')
335
-
336
- if pattern:
337
- pattern_re = self._glob_to_re(pattern)
338
- if _PYTHON_VERSION > (3, 2):
339
- assert pattern_re.startswith(start) and pattern_re.endswith(end)
340
- else:
341
- pattern_re = ''
342
-
343
- base = re.escape(os.path.join(self.base, ''))
344
- if prefix is not None:
345
- # ditch end of pattern character
346
- if _PYTHON_VERSION <= (3, 2):
347
- empty_pattern = self._glob_to_re('')
348
- prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
349
- else:
350
- prefix_re = self._glob_to_re(prefix)
351
- assert prefix_re.startswith(start) and prefix_re.endswith(end)
352
- prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
353
- sep = os.sep
354
- if os.sep == '\\':
355
- sep = r'\\'
356
- if _PYTHON_VERSION <= (3, 2):
357
- pattern_re = '^' + base + sep.join((prefix_re,
358
- '.*' + pattern_re))
359
- else:
360
- pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
361
- pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
362
- pattern_re, end)
363
- else: # no prefix -- respect anchor flag
364
- if anchor:
365
- if _PYTHON_VERSION <= (3, 2):
366
- pattern_re = '^' + base + pattern_re
367
- else:
368
- pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
369
-
370
- return re.compile(pattern_re)
371
-
372
- def _glob_to_re(self, pattern):
373
- """Translate a shell-like glob pattern to a regular expression.
374
-
375
- Return a string containing the regex. Differs from
376
- 'fnmatch.translate()' in that '*' does not match "special characters"
377
- (which are platform-specific).
378
- """
379
- pattern_re = fnmatch.translate(pattern)
380
-
381
- # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
382
- # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
383
- # and by extension they shouldn't match such "special characters" under
384
- # any OS. So change all non-escaped dots in the RE to match any
385
- # character except the special characters (currently: just os.sep).
386
- sep = os.sep
387
- if os.sep == '\\':
388
- # we're using a regex to manipulate a regex, so we need
389
- # to escape the backslash twice
390
- sep = r'\\\\'
391
- escaped = r'\1[^%s]' % sep
392
- pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
393
- return pattern_re
 
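For context, the ``Manifest`` class above is driven with MANIFEST.in-style directives. A hedged sketch follows; the project path and patterns are invented, and ``distlib`` must be importable (outside pip's vendored copy it is available via ``pip install distlib``):

```python
# Sketch: collect files under a base directory using MANIFEST.in-style rules.
from distlib.manifest import Manifest

manifest = Manifest(base='/path/to/project')       # hypothetical project dir
manifest.process_directive('include *.py')         # anchored at the base
manifest.process_directive('recursive-include docs *.md')
manifest.process_directive('global-exclude *.pyc')

for path in manifest.sorted():
    print(path)
```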
spaces/ChrisCaviar/ControlNet-v1-1/cv_utils.py DELETED
@@ -1,17 +0,0 @@
1
- import cv2
2
- import numpy as np
3
-
4
-
5
- def resize_image(input_image, resolution, interpolation=None):
6
- H, W, C = input_image.shape
7
- H = float(H)
8
- W = float(W)
9
- k = float(resolution) / max(H, W)
10
- H *= k
11
- W *= k
12
- H = int(np.round(H / 64.0)) * 64
13
- W = int(np.round(W / 64.0)) * 64
14
- if interpolation is None:
15
- interpolation = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
16
- img = cv2.resize(input_image, (W, H), interpolation=interpolation)
17
- return img
 
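A quick usage sketch of ``resize_image``: the longer side is scaled toward the requested resolution and both sides are snapped to multiples of 64. The input array is a synthetic stand-in, and the function above is assumed to be in scope with OpenCV installed.

```python
# Sketch: resize a dummy 720x1280 image so both sides are multiples of 64.
import numpy as np

dummy = np.zeros((720, 1280, 3), dtype=np.uint8)   # synthetic input image
out = resize_image(dummy, resolution=512)
print(out.shape)   # (256, 512, 3): long side ~512, both sides divisible by 64
```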
spaces/CikeyQI/Yunzai/Yunzai/renderers/puppeteer/index.js DELETED
@@ -1,14 +0,0 @@
1
- import Puppeteer from './lib/puppeteer.js'
2
-
3
- /**
4
- *
5
- * @param config Contents of the local config.yaml file
6
- * @returns renderer The renderer object
7
- * @returns renderer.id Renderer ID, matching the id selected in the renderer config
8
- * @returns renderer.type Render type; reserved field, currently only "image" is supported
9
- * @returns renderer.render Render entry point
10
- */
11
- export default function (config) {
12
- // TODO Puppeteer still needs to be simplified and refactored
13
- return new Puppeteer(config)
14
- }
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/module-a3cf0cc4.js DELETED
@@ -1,2 +0,0 @@
1
- const w=t=>n=>{const e=t(n);return n.add(e),e},N=t=>(n,e)=>(t.set(n,e),e),f=Number.MAX_SAFE_INTEGER===void 0?9007199254740991:Number.MAX_SAFE_INTEGER,g=536870912,_=g*2,O=(t,n)=>e=>{const r=n.get(e);let s=r===void 0?e.size:r<_?r+1:0;if(!e.has(s))return t(e,s);if(e.size<g){for(;e.has(s);)s=Math.floor(Math.random()*_);return t(e,s)}if(e.size>f)throw new Error("Congratulations, you created a collection of unique numbers which uses all available integers!");for(;e.has(s);)s=Math.floor(Math.random()*f);return t(e,s)},M=new WeakMap,m=N(M),h=O(m,M),I=w(h),R=t=>typeof t.start=="function",p=new WeakMap,A=t=>({...t,connect:({call:n})=>async()=>{const{port1:e,port2:r}=new MessageChannel,s=await n("connect",{port:e},[e]);return p.set(r,s),r},disconnect:({call:n})=>async e=>{const r=p.get(e);if(r===void 0)throw new Error("The given port is not connected.");await n("disconnect",{portId:r})},isSupported:({call:n})=>()=>n("isSupported")}),E=new WeakMap,b=t=>{if(E.has(t))return E.get(t);const n=new Map;return E.set(t,n),n},W=t=>{const n=A(t);return e=>{const r=b(e);e.addEventListener("message",({data:o})=>{const{id:a}=o;if(a!==null&&r.has(a)){const{reject:u,resolve:c}=r.get(a);r.delete(a),o.error===void 0?c(o.result):u(new Error(o.error.message))}}),R(e)&&e.start();const s=(o,a=null,u=[])=>new Promise((c,l)=>{const d=h(r);r.set(d,{reject:l,resolve:c}),a===null?e.postMessage({id:d,method:o},u):e.postMessage({id:d,method:o,params:a},u)}),T=(o,a,u=[])=>{e.postMessage({id:null,method:o,params:a},u)};let i={};for(const[o,a]of Object.entries(n))i={...i,[o]:a({call:s,notify:T})};return{...i}}};export{I as a,W as c,h as g};
- //# sourceMappingURL=module-a3cf0cc4.js.map
 
 
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/filter.py DELETED
@@ -1,90 +0,0 @@
- """
- @date: 2021/7/5
- @description:
- """
- import json
- import math
- import shutil
-
- import numpy as np
- from utils.boundary import *
- import dataset
- import os
- from tqdm import tqdm
- from PIL import Image
- from visualization.boundary import *
- from visualization.floorplan import *
- from shapely.geometry import Polygon, Point
-
-
- def filter_center(ceil_corners):
-     xyz = uv2xyz(ceil_corners, plan_y=1.6)
-     xz = xyz[:, ::2]
-     poly = Polygon(xz).buffer(-0.01)
-     return poly.contains(Point(0, 0))
-
-
- def filter_boundary(corners):
-     if is_ceil_boundary(corners):
-         return True
-     elif is_floor_boundary(corners):
-         return True
-     else:
-         # An intersection occurs and an exception is considered
-         return False
-
-
- def filter_self_intersection(corners):
-     xz = uv2xyz(corners)[:, ::2]
-     poly = Polygon(xz)
-     return poly.is_valid
-
-
- def filter_dataset(dataset, show=False, output_dir=None):
-     if output_dir is None:
-         output_dir = os.path.join(dataset.root_dir, dataset.mode)
-         output_img_dir = os.path.join(output_dir, 'img_align')
-         output_label_dir = os.path.join(output_dir, 'label_cor_align')
-     else:
-         output_dir = os.path.join(output_dir, dataset.mode)
-         output_img_dir = os.path.join(output_dir, 'img')
-         output_label_dir = os.path.join(output_dir, 'label_cor')
-
-     if not os.path.exists(output_img_dir):
-         os.makedirs(output_img_dir)
-
-     if not os.path.exists(output_label_dir):
-         os.makedirs(output_label_dir)
-
-     bar = tqdm(dataset, total=len(dataset))
-     for data in bar:
-         name = data['name']
-         bar.set_description(f"Processing {name}")
-         img = data['img']
-         corners = data['corners']
-
-         if not filter_center(corners[1::2]):
-             if show:
-                 draw_boundaries(img, corners_list=[corners[0::2], corners[1::2]], show=True)
-             if not os.path.exists(data['img_path']):
-                 print("already remove")
-             else:
-                 print(f"move {name}")
-                 shutil.move(data['img_path'], os.path.join(output_img_dir, os.path.basename(data['img_path'])))
-                 shutil.move(data['label_path'], os.path.join(output_label_dir, os.path.basename(data['label_path'])))
-
-
- def execute_filter_dataset(root_dir, dataset_name="PanoS2D3DDataset", modes=None, output_dir=None):
-     if modes is None:
-         modes = ["train", "test", "valid"]
-
-     for mode in modes:
-         print("mode: {}".format(mode))
-
-         filter_dataset(getattr(dataset, dataset_name)(root_dir, mode), show=False, output_dir=output_dir)
-
-
- if __name__ == '__main__':
-     execute_filter_dataset(root_dir='/root/data/hd/hnet_dataset',
-                            dataset_name="PanoS2D3DDataset", modes=['train', "test", "valid"],
-                            output_dir='/root/data/hd/hnet_dataset_close')
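
In the deleted filter script, filter_center projects the ceiling corners onto the floor plane, shrinks the resulting footprint slightly with a negative buffer, and keeps a panorama only if the camera (the origin) lies strictly inside the room. A small self-contained sketch of that containment test, using a hand-made square floor plan instead of uv2xyz-projected corners (shapely is the only dependency; the room coordinates are invented):

from shapely.geometry import Point, Polygon

# 4 m x 4 m room in floor-plane (x, z) coordinates, centred on the camera at the origin.
room_xz = [(-2.0, -2.0), (2.0, -2.0), (2.0, 2.0), (-2.0, 2.0)]

# buffer(-0.01) shrinks the footprint by 1 cm, so a camera sitting exactly on a wall is rejected.
footprint = Polygon(room_xz).buffer(-0.01)

print(footprint.contains(Point(0.0, 0.0)))  # True  -> camera inside the room, keep the sample
print(footprint.contains(Point(2.0, 0.0)))  # False -> camera on/outside a wall, filter it out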
 
spaces/Dauzy/whisper-webui/docs/colab.md DELETED
@@ -1,20 +0,0 @@
- # Running Whisper on Google Colab
-
- If you don't have a decent GPU or any experience in running command-line applications, you might want to try this Google Colab instead:
-
- * [Google Colab - Whisper WebUI GPU](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing)
- * [Screenshots](https://imgur.com/a/ZfY6uBO)
-
- The runtime (Runtime -> Change runtime type -> Hardware accelerator) should already be set to GPU. If not, change it to GPU.
-
- Then, sign in to Google if you haven't already. Next, click on "Connect" at the top right.
-
- Under "Checking out WebUI from Git", click on the [play icon](https://imgur.com/a/81gOLyD) that appears in "[ ]" at the left. If you get a warning, click "Run anyway".
-
- After this step has completed, it should get a green check mark. Then move on to the next section under "Installing dependencies", and click on "[ ]" again. This might take approximately 30 seconds.
-
- Once this has completed, scroll down to the "Run WebUI" section, and click on "[ ]". This will launch the WebUI on a shared link (which expires in 72 hours). To open the UI, click on the link next to "Running on public URL", which will be something like https://12xxx.gradio.app/
-
- The audio length in this version is not restricted, and it will run much faster as it is backed by a GPU. You can also run it using the "Large" model. Also note that it might take some time to load the model the first time, as it may need to download a 2.8 GB file onto Google's servers.
-
- Once you're done, you can close the WebUI session by clicking the animated close button under "Run WebUI". You can also do this if you encounter any errors and need to restart the UI. You should also go to "Manage Sessions" and terminate the session, otherwise you may end up using all your free compute credits.
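
For context, the "Running on public URL" link that the notebook prints comes from Gradio's share mode. A minimal sketch of how such a temporary public link is produced, assuming only that the gradio package is installed (the echo app below is a stand-in, not the Whisper WebUI itself):

import gradio as gr

def echo(text: str) -> str:
    return text

demo = gr.Interface(fn=echo, inputs="text", outputs="text")

# share=True tunnels the app through a temporary public URL (e.g. https://xxxxx.gradio.app or .gradio.live),
# which is what the Colab notebook surfaces as "Running on public URL".
demo.launch(share=True)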
 
spaces/DianXian/Real-CUGAN/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Real CUGAN
- emoji: 🐢
- colorFrom: gray
- colorTo: green
- sdk: gradio
- sdk_version: 3.6
- app_file: app.py
- pinned: false
- license: gpl-3.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/tflib/optimizer.py DELETED
@@ -1,214 +0,0 @@
- # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
- #
- # This work is licensed under the Creative Commons Attribution-NonCommercial
- # 4.0 International License. To view a copy of this license, visit
- # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
- # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
-
- """Helper wrapper for a Tensorflow optimizer."""
-
- import numpy as np
- import tensorflow as tf
-
- from collections import OrderedDict
- from typing import List, Union
-
- from . import autosummary
- from . import tfutil
- from .. import util
-
- from .tfutil import TfExpression, TfExpressionEx
-
- try:
-     # TensorFlow 1.13
-     from tensorflow.python.ops import nccl_ops
- except:
-     # Older TensorFlow versions
-     import tensorflow.contrib.nccl as nccl_ops
-
- class Optimizer:
-     """A Wrapper for tf.train.Optimizer.
-
-     Automatically takes care of:
-     - Gradient averaging for multi-GPU training.
-     - Dynamic loss scaling and typecasts for FP16 training.
-     - Ignoring corrupted gradients that contain NaNs/Infs.
-     - Reporting statistics.
-     - Well-chosen default settings.
-     """
-
-     def __init__(self,
-                  name: str = "Train",
-                  tf_optimizer: str = "tf.train.AdamOptimizer",
-                  learning_rate: TfExpressionEx = 0.001,
-                  use_loss_scaling: bool = False,
-                  loss_scaling_init: float = 64.0,
-                  loss_scaling_inc: float = 0.0005,
-                  loss_scaling_dec: float = 1.0,
-                  **kwargs):
-
-         # Init fields.
-         self.name = name
-         self.learning_rate = tf.convert_to_tensor(learning_rate)
-         self.id = self.name.replace("/", ".")
-         self.scope = tf.get_default_graph().unique_name(self.id)
-         self.optimizer_class = util.get_obj_by_name(tf_optimizer)
-         self.optimizer_kwargs = dict(kwargs)
-         self.use_loss_scaling = use_loss_scaling
-         self.loss_scaling_init = loss_scaling_init
-         self.loss_scaling_inc = loss_scaling_inc
-         self.loss_scaling_dec = loss_scaling_dec
-         self._grad_shapes = None  # [shape, ...]
-         self._dev_opt = OrderedDict()  # device => optimizer
-         self._dev_grads = OrderedDict()  # device => [[(grad, var), ...], ...]
-         self._dev_ls_var = OrderedDict()  # device => variable (log2 of loss scaling factor)
-         self._updates_applied = False
-
-     def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
-         """Register the gradients of the given loss function with respect to the given variables.
-         Intended to be called once per GPU."""
-         assert not self._updates_applied
-
-         # Validate arguments.
-         if isinstance(trainable_vars, dict):
-             trainable_vars = list(trainable_vars.values())  # allow passing in Network.trainables as vars
-
-         assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
-         assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])
-
-         if self._grad_shapes is None:
-             self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars]
-
-         assert len(trainable_vars) == len(self._grad_shapes)
-         assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes))
-
-         dev = loss.device
-
-         assert all(var.device == dev for var in trainable_vars)
-
-         # Register device and compute gradients.
-         with tf.name_scope(self.id + "_grad"), tf.device(dev):
-             if dev not in self._dev_opt:
-                 opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt)
-                 assert callable(self.optimizer_class)
-                 self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
-                 self._dev_grads[dev] = []
-
-             loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
-             grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE)  # disable gating to reduce memory usage
-             grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads]  # replace disconnected gradients with zeros
-             self._dev_grads[dev].append(grads)
-
-     def apply_updates(self) -> tf.Operation:
-         """Construct training op to update the registered variables based on their gradients."""
-         tfutil.assert_tf_initialized()
-         assert not self._updates_applied
-         self._updates_applied = True
-         devices = list(self._dev_grads.keys())
-         total_grads = sum(len(grads) for grads in self._dev_grads.values())
-         assert len(devices) >= 1 and total_grads >= 1
-         ops = []
-
-         with tfutil.absolute_name_scope(self.scope):
-             # Cast gradients to FP32 and calculate partial sum within each device.
-             dev_grads = OrderedDict()  # device => [(grad, var), ...]
-
-             for dev_idx, dev in enumerate(devices):
-                 with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev):
-                     sums = []
-
-                     for gv in zip(*self._dev_grads[dev]):
-                         assert all(v is gv[0][1] for g, v in gv)
-                         g = [tf.cast(g, tf.float32) for g, v in gv]
-                         g = g[0] if len(g) == 1 else tf.add_n(g)
-                         sums.append((g, gv[0][1]))
-
-                     dev_grads[dev] = sums
-
-             # Sum gradients across devices.
-             if len(devices) > 1:
-                 with tf.name_scope("SumAcrossGPUs"), tf.device(None):
-                     for var_idx, grad_shape in enumerate(self._grad_shapes):
-                         g = [dev_grads[dev][var_idx][0] for dev in devices]
-
-                         if np.prod(grad_shape):  # nccl does not support zero-sized tensors
-                             g = nccl_ops.all_sum(g)
-
-                         for dev, gg in zip(devices, g):
-                             dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
-
-             # Apply updates separately on each device.
-             for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
-                 with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev):
-                     # Scale gradients as needed.
-                     if self.use_loss_scaling or total_grads > 1:
-                         with tf.name_scope("Scale"):
-                             coef = tf.constant(np.float32(1.0 / total_grads), name="coef")
-                             coef = self.undo_loss_scaling(coef)
-                             grads = [(g * coef, v) for g, v in grads]
-
-                     # Check for overflows.
-                     with tf.name_scope("CheckOverflow"):
-                         grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
-
-                     # Update weights and adjust loss scaling.
-                     with tf.name_scope("UpdateWeights"):
-                         # pylint: disable=cell-var-from-loop
-                         opt = self._dev_opt[dev]
-                         ls_var = self.get_loss_scaling_var(dev)
-
-                         if not self.use_loss_scaling:
-                             ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
-                         else:
-                             ops.append(tf.cond(grad_ok,
-                                 lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
-                                 lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
-
-                     # Report statistics on the last device.
-                     if dev == devices[-1]:
-                         with tf.name_scope("Statistics"):
-                             ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate))
-                             ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1)))
-
-                             if self.use_loss_scaling:
-                                 ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var))
-
-             # Initialize variables and group everything into a single op.
-             self.reset_optimizer_state()
-             tfutil.init_uninitialized_vars(list(self._dev_ls_var.values()))
-
-             return tf.group(*ops, name="TrainingOp")
-
-     def reset_optimizer_state(self) -> None:
-         """Reset internal state of the underlying optimizer."""
-         tfutil.assert_tf_initialized()
-         tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
-
-     def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
-         """Get or create variable representing log2 of the current dynamic loss scaling factor."""
-         if not self.use_loss_scaling:
-             return None
-
-         if device not in self._dev_ls_var:
-             with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None):
-                 self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var")
-
-         return self._dev_ls_var[device]
-
-     def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
-         """Apply dynamic loss scaling for the given expression."""
-         assert tfutil.is_tf_expression(value)
-
-         if not self.use_loss_scaling:
-             return value
-
-         return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
-
-     def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
-         """Undo the effect of dynamic loss scaling for the given expression."""
-         assert tfutil.is_tf_expression(value)
-
-         if not self.use_loss_scaling:
-             return value
-
-         return value * tfutil.exp2(-self.get_loss_scaling_var(value.device))  # pylint: disable=invalid-unary-operand-type
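
The FP16 path of this wrapper is driven by a per-device log2 loss-scale variable: the loss is multiplied by 2^scale before gradients are computed, the gradients are divided back, the scale creeps up by loss_scaling_inc after every step with finite gradients, and it drops by loss_scaling_dec (while the update is skipped) whenever a NaN/Inf gradient is detected. A framework-free sketch of that control loop, using the wrapper's default constants and invented gradients, assuming only numpy:

import numpy as np

class LossScaler:
    # Minimal stand-in for the dynamic loss-scaling bookkeeping above (not the TF implementation).
    def __init__(self, init_log2=64.0, inc=0.0005, dec=1.0):
        self.log2_scale = init_log2
        self.inc = inc
        self.dec = dec

    def scale_loss(self, loss):
        # Multiply the loss so small FP16 gradients do not underflow to zero.
        return loss * 2.0 ** self.log2_scale

    def step(self, grads):
        # Undo the scaling, then decide whether this step is safe to apply.
        grads = [g / 2.0 ** self.log2_scale for g in grads]
        if all(np.all(np.isfinite(g)) for g in grads):
            self.log2_scale += self.inc   # grow slowly while training is stable
            return grads                  # caller applies these unscaled gradients
        self.log2_scale -= self.dec       # back off quickly on overflow and skip the update
        return None

scaler = LossScaler()
print(scaler.step([np.array([1.0, 2.0])]) is not None)  # True: finite gradients, update applied
print(scaler.step([np.array([np.inf])]) is not None)    # False: overflow detected, update skipped
print(round(scaler.log2_scale, 4))                      # 63.0005 after one increase and one decrease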
 
spaces/Ekimetrics/climate-question-answering/climateqa/chat.py DELETED
@@ -1,39 +0,0 @@
- # LANGCHAIN IMPORTS
- from langchain import PromptTemplate, LLMChain
- from langchain.embeddings import HuggingFaceEmbeddings
- from langchain.chains import RetrievalQAWithSourcesChain
- from langchain.chains.qa_with_sources import load_qa_with_sources_chain
-
-
- # CLIMATEQA
- from climateqa.retriever import ClimateQARetriever
- from climateqa.vectorstore import get_pinecone_vectorstore
- from climateqa.chains import load_climateqa_chain
-
-
- class ClimateQA:
-     def __init__(self, hf_embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
-                  show_progress_bar=False, batch_size=1, max_tokens=1024, **kwargs):
-
-         self.llm = self.get_llm(max_tokens=max_tokens, **kwargs)
-         self.embeddings_function = HuggingFaceEmbeddings(
-             model_name=hf_embedding_model,
-             encode_kwargs={"show_progress_bar": show_progress_bar, "batch_size": batch_size}
-         )
-
-
-
-     def get_vectorstore(self):
-         pass
-
-
-     def reformulate(self):
-         pass
-
-
-     def retrieve(self):
-         pass
-
-
-     def ask(self):
-         pass
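
The skeleton above wires a HuggingFace sentence-transformer in as the embedding function for retrieval. A small sketch of what that embedding step computes, calling sentence-transformers directly instead of the LangChain wrapper (assumes the sentence-transformers package is installed and downloads the model on first use; the query and passages are invented examples):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/multi-qa-mpnet-base-dot-v1")

query = "What does the IPCC say about sea level rise?"
passages = [
    "Global mean sea level rose by roughly 0.2 m between 1901 and 2018.",
    "Electric vehicles produce no tailpipe emissions while driving.",
]

# encode() returns numpy vectors (768-dimensional for this model).
q_emb = model.encode(query)
p_embs = model.encode(passages)

# multi-qa-mpnet-base-dot-v1 is trained for dot-product similarity: higher score means more relevant.
scores = p_embs @ q_emb
print(scores.argmax())  # expected to pick the sea-level passage (index 0)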