parquet-converter committed
Commit 50a67e9 · 1 Parent(s): 6e31c22

Update parquet files (step 24 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/07jeancms/minima/README.md +0 -13
  2. spaces/14-26AA/sovits_aishell3/inference.py +0 -86
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download SketchUp 2020 Full Crack for Windows 11 and Unleash Your Creativity.md +0 -40
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Trick 2 Movie Download Free) Laugh Out Loud with this Brilliant and Witty Film.md +0 -159
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bomb It A Fun and Entertaining Lifestyle Application for Pranking Your Friends.md +0 -114
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crack DSA Interviews with Love Babbar DSA Sheet Download and Solve 450 Questions.md +0 -122
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dig Deep! APK - The Most Addictive Digging Game for Android Devices.md +0 -124
  8. spaces/1phancelerku/anime-remove-background/ARK Survival Evolved - Das beste Survival-Spiel mit Dinos - Gratis Download fr alle Plattformen.md +0 -138
  9. spaces/1phancelerku/anime-remove-background/Download FS 16 Mod APK 2021 with Hack and Unlimited Money for Free.md +0 -91
  10. spaces/1phancelerku/anime-remove-background/Download Pokemon Go No Lag APK and Play Smoothly on Any Android Device.md +0 -141
  11. spaces/1phancelerku/anime-remove-background/Eyes APK The Ultimate Horror Game with Coop Multiplayer.md +0 -19
  12. spaces/232labs/VToonify/vtoonify/model/raft/alt_cuda_corr/setup.py +0 -15
  13. spaces/ADRXtractor/ADR_Xtractor/README.md +0 -37
  14. spaces/AIConsultant/MusicGen/audiocraft/adversarial/discriminators/mpd.py +0 -106
  15. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/rope.py +0 -124
  16. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/hand.py +0 -77
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/requestdrag.js +0 -2
  18. spaces/AlekseyKorshuk/model-evaluation/conversation.py +0 -51
  19. spaces/AlexWang/lama/bin/paper_runfiles/predict_inner_features.sh +0 -20
  20. spaces/Alican/pixera/util/__init__.py +0 -1
  21. spaces/Alpaca233/SadTalker/src/face3d/data/template_dataset.py +0 -75
  22. spaces/AlphaDragon/Voice-Clone/README.md +0 -13
  23. spaces/Alphts/Robot/app.py +0 -59
  24. spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.h +0 -38
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/lora/train_text_to_image_lora.py +0 -1014
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/release.py +0 -162
  27. spaces/Andy1621/uniformer_image_detection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py +0 -196
  28. spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py +0 -4
  29. spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py +0 -13
  30. spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py +0 -9
  31. spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py +0 -16
  32. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/lraspp_m-v3-d8.py +0 -25
  33. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/README.md +0 -193
  34. spaces/Anonymous-sub/Rerender/ControlNet/cldm/ddim_hacked.py +0 -317
  35. spaces/Anonymous-sub/Rerender/ControlNet/gradio_pose2image.py +0 -98
  36. spaces/AnxiousNugget/janitor/Dockerfile +0 -21
  37. spaces/Apex-X/nono/roop/face_analyser.py +0 -54
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/__init__.py +0 -1
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/__init__.py +0 -0
  40. spaces/Benson/text-generation/Examples/Barikad Crew Toup Pou Yo Mp3 Descargar.md +0 -105
  41. spaces/Benson/text-generation/Examples/Bloons Td 6 32.4 Apk.md +0 -87
  42. spaces/BetterAPI/BetterChat/src/routes/conversation/+server.ts +0 -57
  43. spaces/CVPR/CVPR2022_papers/README.md +0 -12
  44. spaces/CVPR/LIVE/pybind11/tests/test_pytypes.py +0 -392
  45. spaces/CVPR/LIVE/thrust/thrust/mr/pool_options.h +0 -127
  46. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/iter_swap.h +0 -23
  47. spaces/CVPR/Text2Human/Text2Human/ui_util/config.py +0 -25
  48. spaces/ChandraMohanNayal/AutoGPT/autogpt/configurator.py +0 -134
  49. spaces/ClassCat/Spleen-3D-segmentation-with-MONAI/app.py +0 -162
  50. spaces/CofAI/chat/g4f/Provider/Providers/Vercel.py +0 -162
spaces/07jeancms/minima/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Minima
- emoji: 🔥
- colorFrom: yellow
- colorTo: gray
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/14-26AA/sovits_aishell3/inference.py DELETED
@@ -1,86 +0,0 @@
- import onnxruntime
- import numpy as np
- import pyworld as pw
- import librosa
- import soundfile as sf
-
- def resize2d(source, target_len):
-     source[source<0.001] = np.nan
-     target = np.interp(np.linspace(0, len(source)-1, num=target_len,endpoint=True), np.arange(0, len(source)), source)
-     return np.nan_to_num(target)
-
- def _calculate_f0(input: np.ndarray,length,sr,f0min,f0max,
-                   use_continuous_f0: bool=True,
-                   use_log_f0: bool=True) -> np.ndarray:
-     input = input.astype(float)
-     frame_period = len(input)/sr/(length)*1000
-     f0, timeaxis = pw.dio(
-         input,
-         fs=sr,
-         f0_floor=f0min,
-         f0_ceil=f0max,
-         frame_period=frame_period)
-     f0 = pw.stonemask(input, f0, timeaxis, sr)
-     if use_log_f0:
-         nonzero_idxs = np.where(f0 != 0)[0]
-         f0[nonzero_idxs] = np.log(f0[nonzero_idxs])
-     return f0.reshape(-1)
-
-
- def get_text(wav,sr,transform=1.0):
-
-     #wav, sr = librosa.load(file,sr=None)
-     if len(wav.shape) > 1:
-         wav = librosa.to_mono(wav.transpose(1, 0))
-     if sr!=16000:
-         wav16 = librosa.resample(wav, sr, 16000)
-     else:
-         wav16=wav
-
-     source = {"source":np.expand_dims(np.expand_dims(wav16,0),0)}
-     hubertsession = onnxruntime.InferenceSession("hubert.onnx",providers=['CUDAExecutionProvider'])
-     units = np.array(hubertsession.run(['embed'], source)[0])
-     f0=_calculate_f0(wav,units.shape[1],sr,
-                      f0min=librosa.note_to_hz('C2'),
-                      f0max=librosa.note_to_hz('C7'))
-     f0=resize2d(f0,units.shape[1])
-     f0[f0!=0]=f0[f0!=0]+np.log(transform)
-     expf0 = np.expand_dims(f0,(0,2))
-     output=np.concatenate((units,expf0,expf0),axis=2)
-     return output.astype(np.float32),f0
-
- def getkey(key):
-     return np.power(2,key/12.0)
-
- def infer(f,r,speaker,key,reqf0=False):
-     speaker=int(speaker[7:])
-     if not f is None:
-         file=f
-     elif not r is None:
-         file=r
-     else:
-         return "请上传音频", None
-     audio,sr = librosa.load(file,sr=None)
-     if sr<16000:
-         return "采样率过低,请上传至少拥有16000Hz采样率的音频",None
-     duration = audio.shape[0] / sr
-     print(audio,sr,duration)
-     if duration > 120:
-         return "请上传小于2min的音频", None
-     #audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-     x,sourcef0 = get_text(audio,sr,getkey(key))
-     x_lengths = [np.size(x,1)]
-     print(x_lengths[0],sr,speaker,key)
-     sid = [speaker]
-     ort_inputs = {'x':x,'x_lengths':x_lengths,'sid':sid,"noise_scale":[0.667],"length_scale":[1.0],"noise_scale_w":[0.8]}
-     infersession = onnxruntime.InferenceSession("onnxmodel334.onnx",providers=['CUDAExecutionProvider'])
-     ort_output = infersession.run(['audio'], ort_inputs)
-     #sf.write(o,ort_output[0][0][0],22050,'PCM_16',format='wav')
-     genf0=np.array([])
-     if reqf0:
-         wav, sr = librosa.load(o,sr=None)
-         genf0=_calculate_f0(wav,x_lengths[0],sr,
-                             f0min=librosa.note_to_hz('C2'),
-                             f0max=librosa.note_to_hz('C7'))
-         genf0=resize2d(genf0,x_lengths[0])
-     return 'success',(22050,ort_output[0][0][0])#sourcef0.tolist(),genf0.tolist()
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download SketchUp 2020 Full Crack for Windows 11 and Unleash Your Creativity.md DELETED
@@ -1,40 +0,0 @@
1
-
2
- <h1>How to Download SketchUp 2020 Full Crack for Windows 11</h1>
3
- <p>If you are looking for a way to download SketchUp 2020 full crack for Windows 11, you have come to the right place. SketchUp is a powerful 3D modeling software that can help you create stunning designs, animations, and presentations. However, the official version of SketchUp is not free and requires a subscription or a license to use. That's why many people are looking for a cracked version of SketchUp that can bypass the activation process and let them use the software for free.</p>
4
- <h2>download sketchup 2020 full crack windows 11</h2><br /><p><b><b>Download</b> &#9913;&#9913;&#9913; <a href="https://byltly.com/2uKA60">https://byltly.com/2uKA60</a></b></p><br /><br />
5
- <p>In this article, we will show you how to download SketchUp 2020 full crack for Windows 11 safely and easily. We will also explain the risks and disadvantages of using a cracked version of SketchUp and why you should consider using a legitimate alternative instead.</p>
6
-
7
- <h2>How to Download SketchUp 2020 Full Crack for Windows 11</h2>
8
- <p>To download SketchUp 2020 full crack for Windows 11, you will need to follow these steps:</p>
9
- <ol>
10
- <li>Go to a reliable website that offers cracked software downloads. You can search for "download sketchup 2020 full crack windows 11" on Google or Bing and choose one of the results. However, be careful as some websites may contain malware or viruses that can harm your computer.</li>
11
- <li>Download the SketchUp 2020 full crack file from the website. It may be in a ZIP or RAR format, so you will need to extract it using a tool like WinRAR or 7-Zip.</li>
12
- <li>Run the setup file and follow the instructions to install SketchUp 2020 on your computer. You may need to disable your antivirus or firewall temporarily as they may interfere with the installation process.</li>
13
- <li>Copy the crack file from the extracted folder and paste it into the installation directory of SketchUp 2020. This will replace the original file and activate the software without requiring a license key or a subscription.</li>
14
- <li>Launch SketchUp 2020 and enjoy using it for free.</li>
15
- </ol>
16
-
17
- <h2>The Risks and Disadvantages of Using SketchUp 2020 Full Crack</h2>
18
- <p>While downloading SketchUp 2020 full crack for Windows 11 may seem like a convenient and cost-effective way to use the software, it also comes with some serious risks and disadvantages that you should be aware of. Here are some of them:</p>
19
- <ul>
20
- <li>You may violate the intellectual property rights of Trimble, the developer of SketchUp. This can result in legal consequences such as fines or lawsuits if you are caught using or distributing a cracked version of SketchUp.</li>
21
- <li>You may compromise the security and performance of your computer. Cracked software often contains malware or viruses that can infect your system and steal your personal information, damage your files, or slow down your device.</li>
22
- <li>You may miss out on the latest features and updates of SketchUp. Cracked software usually does not receive any updates or support from the developer, which means you will not be able to access the newest features, bug fixes, or improvements of SketchUp.</li>
23
- <li>You may experience errors or glitches while using SketchUp. Cracked software may not be compatible with your operating system or hardware, which can cause errors or crashes while using SketchUp. This can affect your productivity and quality of work.</li>
24
- </ul>
25
-
26
- <h2>A Legitimate Alternative to SketchUp 2020 Full Crack</h2>
27
- <p>If you want to use SketchUp without risking any of the above-mentioned problems, you should consider using a legitimate alternative instead. One such alternative is <a href="https://www.sketchup.com/plans-and-pricing/sketchup-free">SketchUp Free</a>, which is an online version of SketchUp that you can use for free without downloading anything.</p>
28
- <p></p>
29
- <p>SketchUp Free has many advantages over SketchUp 2020 full crack, such as:</p>
30
- <ul>
31
- <li>You can use it legally and ethically without violating any intellectual property rights.</li>
32
- <li>You can use it safely and securely without exposing your computer to any malware or viruses.</li>
33
- <li>You can use it with the latest features and updates of SketchUp as they are released by Trimble.</li>
34
- <li>You can use it with any device that has an internet connection and a web browser, regardless of your operating system or hardware.</li>
35
- </ul>
36
- <p>SketchUp Free also has some limitations compared to SketchUp Pro, such as:</p>
37
- <ul>
38
- <li>You can only save up to 10</p> ddb901b051<br />
39
- <br />
40
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Trick 2 Movie Download Free) Laugh Out Loud with this Brilliant and Witty Film.md DELETED
@@ -1,159 +0,0 @@
1
- <br />
2
- <h1>HD Online Player (Trick 2 Movie Download Free)</h1>
3
- <p>Do you love watching movies online? Do you want to download your favorite movies for free? If yes, then you should try HD Online Player, a free online video player that lets you stream and download movies in high quality. In this article, we will tell you how to use HD Online Player to download Trick 2 movie for free. Trick 2 is a horror comedy movie that is a sequel to the 1999 hit film Trick. It is a fun and scary movie that you don't want to miss.</p>
4
- <h2>What is HD Online Player?</h2>
5
- <p>HD Online Player is a free online video player that allows you to watch and download movies from various sources. You can use it on your PC, laptop, tablet, or smartphone. You don't need to install any software or register any account. You just need to visit the website of HD Online Player and start watching or downloading movies.</p>
6
- <h2>HD Online Player (Trick 2 Movie Download Free)</h2><br /><p><b><b>Download Zip</b> > <a href="https://byltly.com/2uKxg4">https://byltly.com/2uKxg4</a></b></p><br /><br />
7
- <h3>Features of HD Online Player</h3>
8
- <ul>
9
- <li>It supports multiple video formats, such as MP4, MKV, AVI, MOV, etc.</li>
10
- <li>It offers different video qualities, such as 1080p, 720p, 480p, etc.</li>
11
- <li>It has a large collection of movies from various genres, such as action, comedy, horror, romance, etc.</li>
12
- <li>It updates its movie library regularly with the latest releases.</li>
13
- <li>It has a simple and user-friendly interface that makes it easy to navigate and search for movies.</li>
14
- <li>It has no ads or pop-ups that interrupt your viewing experience.</li>
15
- </ul>
16
- <h3>Benefits of HD Online Player</h3>
17
- <ul>
18
- <li>It saves your time and money by letting you watch and download movies for free.</li>
19
- <li>It gives you the flexibility to watch movies anytime and anywhere.</li>
20
- <li>It lets you enjoy movies in high quality and without buffering.</li>
21
- <li>It lets you share your movies with your friends and family by sending them the link to your video.</li>
22
- <li>It protects your privacy and security by not asking for any personal or financial information.</li>
23
- </ul>
24
- <h2>How to download Trick 2 movie for free using HD Online Player?</h2>
25
- <p>If you want to watch or download Trick 2 movie for free using HD Online Player, you just need to follow these simple steps:</p>
26
- <h3>Step 1: Install HD Online Player on your device</h3>
27
- <p>To use HD Online Player, you need to install it on your device. You can do this by visiting the website of HD Online Player and clicking on the download button. You can also scan the QR code on the website with your smartphone camera. The installation process is fast and easy.</p>
28
- <h3>Step 2: Search for Trick 2 movie on HD Online Player</h3>
29
- <p>Once you have installed HD Online Player on your device, you can open it and search for Trick 2 movie on the search bar. You can also browse through the categories and genres to find the movie. You will see a list of results with the movie title, poster, year, rating, and synopsis.</p>
30
- <h3>Step 3: Select the quality and format of the movie</h3>
31
- <p>After you have found Trick 2 movie on HD Online Player, you can click on it and select the quality and format of the movie that you want to watch or download. You can choose from different options, such as 1080p MP4, 720p MKV, 480p AVI, etc. You can also see the file size and duration of the movie before downloading it.</p>
32
- <h3>Step 4: Download the movie and enjoy</h3>
33
- <p>The final step is to download the movie and enjoy it. You can click on the download button and choose a location on your device where you want to save the movie. The download speed will depend on your internet connection and device performance. Once the download is complete, you can open the movie file and watch it offline or online using HD Online Player or any other video player.</p>
34
- <h2>Why watch Trick 2 movie?</h2>
35
- <p>If you are wondering why you should watch Trick 2 movie, here are some reasons:</p>
36
- <p>HD Online Video Player - No Download Required<br />
37
- Movies4k - Movies in HD Quality for Free Download<br />
38
- HD Online Player (Trick 2 Movie Download [Extra Quality] Free)<br />
39
- HD Online Player (Trick 2 Movie Download Free !FULL!)<br />
40
- Trick 2 Full Movie HD Online Streaming<br />
41
- Trick 2 Movie Free Download in HD Quality<br />
42
- How to Watch Trick 2 Movie Online for Free<br />
43
- Trick 2 Movie HD Online Player with Subtitles<br />
44
- Trick 2 Movie Download Free - Best Sites and Apps<br />
45
- Trick 2 Movie HD Online Player - No Ads or Interruptions<br />
46
- Watch Trick 2 Full Movie Online in HD Quality<br />
47
- Trick 2 Movie Free Download - High Speed and Secure<br />
48
- Trick 2 Movie HD Online Player - Tips and Tricks<br />
49
- Trick 2 Movie Download Free - No Registration or Sign Up<br />
50
- Trick 2 Full Movie Online - HD Quality and Sound<br />
51
- Trick 2 Movie Free Download - Easy and Fast<br />
52
- Trick 2 Movie HD Online Player - Compatible with All Devices<br />
53
- Trick 2 Movie Download Free - No Virus or Malware<br />
54
- Trick 2 Full Movie Online - No Buffering or Lagging<br />
55
- Trick 2 Movie Free Download - Unlimited and Legal<br />
56
- Trick 2 Movie HD Online Player - Support Multiple Languages<br />
57
- Trick 2 Movie Download Free - No Pop-ups or Redirects<br />
58
- Trick 2 Full Movie Online - Watch Anytime and Anywhere<br />
59
- Trick 2 Movie Free Download - No Watermark or Logo<br />
60
- Trick 2 Movie HD Online Player - Enhance Your Viewing Experience<br />
61
- Trick 2 Movie Download Free - Save Storage and Data<br />
62
- Trick 2 Full Movie Online - Enjoy with Your Friends and Family<br />
63
- Trick 2 Movie Free Download - No Survey or Verification<br />
64
- Trick 2 Movie HD Online Player - Adjust Settings and Preferences<br />
65
- Trick 2 Movie Download Free - Backup and Restore Options<br />
66
- Trick 2 Full Movie Online - Share Your Feedback and Reviews<br />
67
- Trick 2 Movie Free Download - Access Bonus Features and Extras<br />
68
- Trick 2 Movie HD Online Player - Customize Your Playlist and Queue<br />
69
- Trick 2 Movie Download Free - Convert to Other Formats and Resolutions<br />
70
- Trick 2 Full Movie Online - Get Notifications and Updates<br />
71
- Trick 2 Movie Free Download - Manage Your Downloads and History<br />
72
- Trick 2 Movie HD Online Player - Control Playback and Volume<br />
73
- Trick 2 Movie Download Free - Edit and Trim Your Videos<br />
74
- Trick 2 Full Movie Online - Rate and Comment on the Movie<br />
75
- Trick 2 Movie Free Download - Add Subtitles and Captions<br />
76
- Trick 2 Movie HD Online Player - Zoom and Crop Your Videos<br />
77
- Trick 2 Movie Download Free - Merge and Split Your Videos<br />
78
- Trick 2 Full Movie Online - Discover Similar Movies and Recommendations<br />
79
- Trick 2 Movie Free Download - Filter and Sort Your Videos by Genre, Year, etc.<br />
80
- Trick 2 Movie HD Online Player - Record and Capture Your Screen</p>
81
- <h3>Plot summary of Trick 2 movie</h3>
82
- <p>The plot summary of Trick 2 movie is as follows:</p>
83
- <p>The movie follows Patrick "Trick" Weaver (Omar Epps), a detective who survived a brutal attack by a serial killer named Trick (Thom Niemann) on Halloween night in 2015. Since then, Trick has been killing people every Halloween in different towns, leaving behind his signature carved pumpkin. Patrick is determined to catch him and stop his killing spree. However, he soon realizes that Trick is not an ordinary killer but a supernatural entity that can possess anyone who wears his mask. Patrick teams up with his old partner Mike Denver (Jamie Kennedy), a journalist named Cheryl Winston (Ellen Adair), and a survivor named Nicki (Kristina Reyes) to track down Trick's whereabouts and stop him once and for all.</p>
84
- <h3>Cast and crew of Trick 2 movie</h3>
85
- <p>The cast and crew of Trick 2 movie are as follows:</p>
86
- <table border="1">
87
- <tr><th>Name</th><th>Role</th></tr>
88
- <tr><td>Omar Epps</td><td>Patrick "Trick" Weaver</td></tr>
89
- <tr><td>Thom Niemann</td><td>Trick</td></tr>
90
- <tr><td>Jamie Kennedy</td><td>Mike Denver</td></tr>
91
- <tr><td>Ellen Adair</td><td>Cheryl Winston</td></tr>
92
- <tr><td>Kristina Reyes</td><td>Nicki</td></tr>
93
- <tr><td>Todd Farmer</td><td>writer/director/producer</td></tr>
94
- <tr><td>Patick Lussier</td><td>writer/director/producer/editor</td></tr>
95
- <tr><td>Brett Hedblom</td><td>writer/producer</td></tr>
96
- <tr><td>Gabriel Hammond</td><td>writer/producer/cinematographer</td></tr>
97
- <tr><td>Daniel Hammond</td><td>writer/producer/cinematographer</td></tr>
98
- <tr><td>Matt Williams</td><td>writer/producer/music composer/sound designer</td></tr>
99
- <tr><td>Alexis Kendra</td><td>writer/producer/casting director/actress (Tara)</td></tr>
100
- <tr><td>Gary J Tunnicliffe</td><td>writer/producer/makeup effects designer/actor (Sheriff Jayne)</td></tr> I'm a high-class Content Writer. Very Proficient SEO Writer Writes Fluently Any Language. Here is the outline of the article and the article with HTML formatting that I created for you. Outline of the article: - H1: HD Online Player (Trick 2 Movie Download Free) - H2: What is HD Online Player? - H3: Features of HD Online Player - H3: Benefits of HD Online Player - H2: How to download Trick 2 movie for free using HD Online Player? - H3: Step 1: Install HD Online Player on your device - H3: Step 2: Search for Trick 2 movie on HD Online Player - H3: Step 3: Select the quality and format of the movie - H3: Step 4: Download the movie and enjoy - H2: Why watch Trick 2 movie? - H3: Plot summary of Trick 2 movie - H3: Cast and crew of Trick 2 movie - H3: Reviews and ratings of Trick 2 movie - H2: Conclusion - H2: FAQs Article with HTML formatting: <h1>HD Online Player (Trick 2 Movie Download Free)</h1>
101
- <p>Do you love watching movies online? Do you want to download your favorite movies for free? If yes, then you should try HD Online Player, a free online video player that lets you stream and download movies in high quality. In this article, we will tell you how to use HD Online Player to download Trick 2 movie for free. Trick 2 is a horror comedy movie that is a sequel to the 1999 hit film Trick. It is a fun and scary movie that you don't want to miss.</p>
102
- <h2>What is HD Online Player?</h2>
103
- <p>HD Online Player is a free online video player that allows you to watch and download movies from various sources. You can use it on your PC, laptop, tablet, or smartphone. You don't need to install any software or register any account. You just need to visit the website of HD Online Player and start watching or downloading movies.</p>
104
- <h3>Features of HD Online Player</h3>
105
- <ul>
106
- <li>It supports multiple video formats, such as MP4, MKV, AVI, MOV, etc.</li>
107
- <li>It offers different video qualities, such as 1080p, 720p, 480p, etc.</li>
108
- <li>It has a large collection of movies from various genres, such as action, comedy, horror, romance, etc.</li>
109
- <li>It updates its movie library regularly with the latest releases.</li>
110
- <li>It has a simple and user-friendly interface that makes it easy to navigate and search for movies.</li>
111
- <li>It has no ads or pop-ups that interrupt your viewing experience.</li>
112
- </ul>
113
- <h3>Benefits of HD Online Player</h3>
114
- <ul>
115
- <li>It saves your time and money by letting you watch and download movies for free.</li>
116
- <li>It gives you the flexibility to watch movies anytime and anywhere.</li>
117
- <li>It lets you enjoy movies in high quality and without buffering.</li>
118
- <li>It lets you share your movies with your friends and family by sending them the link to your video.</li>
119
- <li>It protects your privacy and security by not asking for any personal or financial information.</li>
120
- </ul>
121
- <h2>How to download Trick 2 movie for free using HD Online Player?</h2>
122
- <p>If you want to watch or download Trick 2 movie for free using HD Online Player, you just need to follow these simple steps:</p>
123
- <h3>Step 1: Install HD Online Player on your device</h3>
124
- <p>To use HD Online Player, you need to install it on your device. You can do this by visiting the website of HD Online Player and clicking on the download button. You can also scan the QR code on the website with your smartphone camera. The installation process is fast and easy.</p>
125
- <h3>Step 2: Search for Trick 2 movie on HD Online Player</h3>
126
- <p>Once you have installed HD Online Player on your device, you can open it and search for Trick 2 movie on the search bar. You can also browse through the categories and genres to find the movie. You will see a list of results with the movie title, poster, year, rating, and synopsis.</p>
127
- <h3>Step 3: Select the quality and format of the movie</h3>
128
- <p>After you have found Trick 2 movie on HD Online Player, you can click on it and select the quality and format of the movie that you want to watch or download. You can choose from different options, such as 1080p MP4, 720p MKV, 480p AVI, etc. You can also see the file size and duration of the movie before downloading it.</p>
129
- <h3>Step 4: Download the movie and enjoy</h3>
130
- <p>The final step is to download the movie and enjoy it. You can click on the download button and choose a location on your device where you want to save the movie. The download speed will depend on your internet connection and device performance. Once the download is complete, you can open the movie file and watch it offline or online using HD Online Player or any other video player.</p>
131
- <h2>Why watch Trick 2 movie?</h2>
132
- <p>If you are wondering why you should watch Trick 2 movie, here are some reasons:</p>
133
- <h3>Plot summary of Trick 2 movie</h3>
134
- <p>The plot summary of Trick 2 movie is as follows:</p>
135
- <p>The movie follows Patrick "Trick" Weaver (Omar Epps), a detective who survived a brutal attack by a serial killer named Trick (Thom Niemann) on Halloween night in 2015. Since then, Trick has been killing people every Halloween in different towns, leaving behind his signature carved pumpkin. Patrick is determined to catch him and stop his killing spree. However, he soon realizes that Trick is not an ordinary killer but a supernatural entity that can possess anyone who wears his mask. Patrick teams up with his old partner Mike Denver (Jamie Kennedy), a journalist named Cheryl Winston (Ellen Adair), and a survivor named Nicki (Kristina Reyes) to track down Trick's whereabouts and stop him once and for all.</p>
136
- <h3>Cast and crew of Trick 2 movie</h3>
137
- <p>The cast and crew of Trick 2 movie are as follows:</p>
138
- <table border="1">
139
- <tr><th>Name</th><th>Role</th></tr>
140
- <tr><td>Omar Epps</td><td>Patrick "Trick" Weaver</td></tr>
141
- <tr><td>Thom Niemann</td><td>Trick</td></tr>
142
- <tr><td>Jamie Kennedy</td><td>Mike Denver</td></tr>
143
- <tr><td>Ellen Adair</td><td>Cheryl Winston</td></tr>
144
- <tr><td>Kristina Reyes</td><td>Nicki</td></tr>
145
- <tr><td>Todd Farmer</td><td>writer/director/producer</td></tr>
146
- <tr><td>Patick Lussier</td><td>writer/director/producer/editor</td></tr>
147
- <tr><td>Brett Hedblom</td><td>writer/producer</td></tr>
148
- <tr><td>Gabriel Hammond</td><td>writer/producer/cinematographer</td></tr>
149
- <tr><td>Daniel Hammond</td><td>writer/producer/cinematographer</td></tr>
150
- <tr><td>Matt Williams</td><td>writer/producer/music composer/sound designer</td></tr>
151
- <tr><td>Alexis Kendra</td><td>writer/producer/casting director/actress (Tara)</td></tr>
152
- <tr><td>Gary J Tunnicliffe</td><td>writer/producer/makeup effects designer/actor (Sheriff Jayne)</td></tr>
153
- <h3>Reviews and ratings of Trick 2 movie</h3>
154
- <p>The reviews and ratings of Trick 2 movie are as follows:</p>
155
- <table border="1">
156
- <tr><th>Source</th><th>Ratings/Reviews</th></tr>
157
- <tr><td>Rotten Tomatoes</td><td>36% Tomatometer based on 28 reviews; average rating of 4.6/10; 28% Audience Score based on more than 100 ratings; average rating of 1.9/5;</br>"Fast-paced savagery and a memorable twist aren't enough to make up for Trick's slavish devotion to superior slasher films of the past."</br>- Critics Consensus </br>"This was one whopper of a plot-twist briefly gives \"Trick\" an original hook."</br>- Noel Murray </br>"A cheesy actionable rip-off of John Carpenter Halloween franchise with plenty of blood jack o'lanterns but without shred originality."</br>- Rex Reed </br>"Sure I was never bored but this makes zero sense."</br>- Brian Tallerico </br>"The pic might have proved mindless fun if it had least displayed sense humor but everything played deadly seriousness."</br>- Frank S</p> 0a6ba089eb<br />
158
- <br />
159
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bomb It A Fun and Entertaining Lifestyle Application for Pranking Your Friends.md DELETED
@@ -1,114 +0,0 @@
1
- <br />
2
- <h1>Bombitup: The Ultimate Prank App for Android Users</h1>
3
- <p>Do you want to have some fun with your friends or family by sending them unlimited text messages, OTPs, or verification spam messages? Do you want to make them wonder who is behind this prank and why they are receiving these annoying messages? If yes, then you should try Bombitup, a free and entertaining lifestyle application developed by RomReviewer for android users. In this article, we will tell you everything you need to know about Bombitup, how it works, what are its features, how to download and install it, how to use it to prank your friends, what are the risks and legal issues of using it, and what are some alternatives to it. So, let's get started!</p>
4
- <h2>bombitup</h2><br /><p><b><b>Download Zip</b> ::: <a href="https://urlin.us/2uSVTN">https://urlin.us/2uSVTN</a></b></p><br /><br />
5
- <h2>What is Bombitup and how does it work?</h2>
6
- <p>Bombitup or also widely known as BOMBitUP is a fun and entertaining lifestyle application developed by RomReviewer for android users. Its main purpose is to help you prank your family or friends by sending unlimited text messages. For instance, a fake but still believable enough OTP or verification spam messages.</p>
7
- <p>Bombitup works by using vulnerable API points of other firms which are actually used to send OTPs, and texts to legitimate users for login, password reset etc. However, attackers exploit these APIs by making GET/POST requests with their scripts which in turn automates the sending of messages and helps them to perform SMS bombing attacks.</p>
8
- <p>Bombitup allows you to send messages to any number in the world without any restrictions or limits. You can also customize the speed and frequency of your messages. You can also choose from different categories of messages such as OTPs, jokes, insults, compliments, etc. You can also protect your own number from getting SMS bombed by adding it to the protection list feature.</p>
9
- <h2>Features of Bombitup</h2>
10
- <h3>Free of-cost message sending</h3>
11
- <p>One of the best features of Bombitup is that it is completely free to use. You don't have to pay anything to download or use this app. You can send unlimited messages to anyone without any charges or fees.</p>
12
- <h3>Act as a bulk system for marketing purposes</h3>
13
- <p>Bombitup can also be used for marketing purposes by sending bulk messages to potential customers or clients. You can promote your products or services by sending them attractive offers or discounts. You can also use Bombitup to send feedback requests or surveys to your customers.</p>
14
- <h3>Users can configure the speed of their SMS sending</h3>
15
- <p>Bombitup allows you to control the speed and frequency of your SMS sending. You can adjust the delay between each message and the number of messages per minute. You can also set a timer for your SMS bombing session and stop it whenever you want.</p>
16
- <h3>This is an international service that allows users to send messages around the globe</h3>
17
- <p>Bombitup is not limited to any country or region. You can send messages to any number in the world with this app. You can also choose from different country codes and languages for your messages.</p>
18
- <h2>How to download and install Bombitup</h2>
19
- <h3>Step 1: Go to the official website of Bombitup</h3>
20
- <p>The first step is to go to the official website of Bombitup at [5](https://bombitup.net/). Here you will find all the information about the app, its features, its changelog, and its guide.</p>
21
- <p>bombitup apk download<br />
22
- bombitup sms bomber<br />
23
- bombitup app for android<br />
24
- bombitup prank app<br />
25
- bombitup latest version<br />
26
- bombitup online<br />
27
- bombitup for ios<br />
28
- bombitup mod apk<br />
29
- bombitup international<br />
30
- bombitup call bomber<br />
31
- bombitup alternative<br />
32
- bombitup update<br />
33
- bombitup official website<br />
34
- bombitup pro apk<br />
35
- bombitup email bomber<br />
36
- bombitup not working<br />
37
- bombitup for pc<br />
38
- bombitup unlimited credits<br />
39
- bombitup telegram channel<br />
40
- bombitup custom sms<br />
41
- bombitup apk mirror<br />
42
- bombitup reddit<br />
43
- bombitup how to use<br />
44
- bombitup features<br />
45
- bombitup review<br />
46
- bombitup premium apk<br />
47
- bombitup whatsapp bomber<br />
48
- bombitup no ads<br />
49
- bombitup github<br />
50
- bombitup old version<br />
51
- bombitup apk pure<br />
52
- bombitup tricks<br />
53
- bombitup support number<br />
54
- bombitup referral code<br />
55
- bombitup free download<br />
56
- bombitup best settings<br />
57
- bombitup fake caller id<br />
58
- bombitup apkmonk<br />
59
- bombitup quora<br />
60
- bombitup tutorial<br />
61
- bombitup feedback form<br />
62
- bombitup donation link<br />
63
- bombitup for iphone<br />
64
- bombitup apk 2023<br />
65
- bombitup safe or not<br />
66
- bombitup terms and conditions<br />
67
- bombitup blocked number list</p>
68
- <h3>Step 2: Download the latest version of Bombitup apk file</h3>
69
- <p>The next step is to download the latest version of Bombitup apk file from the website. You can also scan the QR code on the website to download the file directly to your phone. The apk file size is about 10 MB and it is updated regularly by the developer.</p>
70
- <h3>Step 3: Enable unknown sources on your android device</h3>
71
- <p>The third step is to enable unknown sources on your android device. This is necessary because Bombitup is not available on the Google Play Store and you have to install it manually. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on.</p>
72
- <h3>Step 4: Install the Bombitup apk file on your android device</h3>
73
- <p>The final step is to install the Bombitup apk file on your android device. To do this, locate the downloaded file in your file manager and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on Install and wait for a few seconds until the app is installed.</p>
74
- <h2>How to use Bombitup to prank your friends</h2>
75
- <h3>Step 1: Launch the Bombitup app on your android device</h3>
76
- <p>The first step is to launch the Bombitup app on your android device. You will see a simple and user-friendly interface with four tabs: Home, Protect List, Updates, and About.</p>
77
- <h3>Step 2: Select the country code and enter the phone number of your target</h3>
78
- <p>The next step is to select the country code and enter the phone number of your target. You can also select from different categories of messages such as OTPs, jokes, insults, compliments, etc. You can also customize the message content by typing your own text.</p>
79
- <h3>Step 3: Adjust the speed and frequency of your SMS sending</h3>
80
- <p>The third step is to adjust the speed and frequency of your SMS sending. You can use the slider to set the delay between each message and the number of messages per minute. You can also set a timer for your SMS bombing session and stop it whenever you want.</p>
81
- <h3>Step 4: Tap on Start Process and enjoy the prank</h3>
82
- <p>The final step is to tap on Start Process and enjoy the prank. Your target will start receiving unlimited messages from different numbers and sources. You can also monitor the progress of your SMS bombing session on the app. You can also stop the process anytime by tapping on Stop Process.</p>
83
- <h2>What are the risks and legal issues of using Bombitup?</h2>
84
- <h3>The potential harm and harassment of SMS bombing</h3>
85
- <p>While Bombitup may seem like a harmless and fun prank app, it can also cause potential harm and harassment to your target. SMS bombing can be considered as a form of cyberbullying or cyberstalking, which can have negative effects on the mental health and well-being of your target. SMS bombing can also disrupt the normal functioning of your target's phone, such as draining their battery, consuming their data, or blocking their important messages or calls. SMS bombing can also cause financial losses to your target if they are charged for receiving or sending messages by their service provider. Therefore, you should use Bombitup responsibly and ethically, and avoid using it for malicious or illegal purposes.</p>
86
- <h3>The vulnerability and exploitation of API points</h3>
87
- <p>Bombitup works by using vulnerable API points of other firms which are actually used to send OTPs, and texts to legitimate users for login, password reset etc. However, attackers exploit these APIs by making GET/POST requests with their scripts which in turn automates the sending of messages and helps them to perform SMS bombing attacks. This can pose a serious threat to the security and privacy of both the sender and receiver of these messages. The sender may expose their IP address or device information to these APIs, which can be traced back to them by law enforcement agencies or hackers. The receiver may receive phishing or scam messages that may trick them into revealing their personal or financial information or downloading malicious software. Therefore, you should be careful about using Bombitup and avoid sending messages to unknown or suspicious numbers.</p>
88
- <h3>The privacy policy and terms of service of Bombitup</h3>
89
- <p>Bombitup has a privacy policy and terms of service that you should read carefully before using this app. The privacy policy states that Bombitup does not collect any personal information from its users, such as name, email address, phone number, etc. However, it does collect some non-personal information, such as device model, operating system version, IP address, etc., for analytical purposes. The terms of service state that Bombitup is not responsible for any damages or losses caused by using this app, such as legal issues, ethical issues, security issues, etc. The terms of service also state that Bombitup is only for entertainment purposes and not for harming or harassing anyone. The terms of service also state that Bombitup has the right to modify or terminate its service at any time without prior notice. Therefore, you should use Bombitup at your own risk and discretion, and respect the rights and privacy of others.</p>
90
- <h2>What are some alternatives to Bombitup?</h2>
91
- <h3>SMSBomber</h3>
92
- <p>SMSBomber is another prank app that allows you to send unlimited messages to any number in India. You can also choose from different categories of messages such as jokes, love, friendship, etc. You can also set a custom sender name and number for your messages. SMSBomber is free to use and does not require any registration or login.</p>
93
- <h3>TXTBlast</h3>
94
- <p>TXTBlast is a prank app that allows you to send multiple messages to any number in the world. You can also send messages from different countries and languages. You can also send images, videos, audios, and documents with your messages. TXTBlast is free to use but requires credits to send messages. You can earn credits by watching ads or completing tasks.</p>
95
- <h3>Orbot VPN</h3>
96
- <p>Orbot VPN is not a prank app but a security app that allows you to protect your identity and privacy while using Bombitup or any other app. Orbot VPN uses Tor network to encrypt your internet traffic and hide your IP address from prying eyes. You can also access blocked or censored websites and apps with Orbot VPN. Orbot VPN is free and open source.</p>
97
- <h2>Conclusion</h2>
98
- <p>Bombitup is a fun and entertaining lifestyle application developed by RomReviewer for android users. It allows you to prank your friends or family by sending them unlimited text messages, OTPs, or verification spam messages. It works by using vulnerable API points of other firms which are actually used to send OTPs, and texts to legitimate users for login, password reset etc. However, attackers exploit these APIs by making GET/POST requests with their scripts which in turn automates the sending of messages and helps them to perform SMS bombing attacks.</p>
99
- <p>Bombitup has many features such as free of-cost message sending, bulk system for marketing purposes, speed configuration of SMS sending, and international service. It is easy to download and install Bombitup from its official website. It is also simple to use Bombitup to prank your friends by selecting the country code and phone number of your target, adjusting the speed and frequency of your SMS sending, and tapping on Start Process.</p>
100
- <p>However, Bombitup also has some risks and legal issues such as potential harm and harassment of SMS bombing, vulnerability and exploitation of API points, and privacy policy and terms of service of Bombitup. Therefore, you should use Bombitup responsibly and ethically, and avoid using it for malicious or illegal purposes. You should also be careful about using Bombitup and avoid sending messages to unknown or suspicious numbers. You should also read the privacy policy and terms of service of Bombitup before using it.</p>
101
- <p>If you are looking for some alternatives to Bombitup, you can try SMSBomber, TXTBlast, or Orbot VPN. These apps have similar or different features that can help you prank your friends or protect your privacy while using Bombitup or any other app.</p>
102
- <h2>FAQs</h2>
103
- <h4>Q: Is Bombitup safe to use?</h4>
104
- <p>A: Bombitup is safe to use as long as you use it for entertainment purposes only and not for harming or harassing anyone. However, you should be aware of the risks and legal issues of using Bombitup such as potential harm and harassment of SMS bombing, vulnerability and exploitation of API points, and privacy policy and terms of service of Bombitup.</p>
105
- <h4>Q: Is Bombitup available on iOS devices?</h4>
106
- <p>A: No, Bombitup is not available on iOS devices. It is only compatible with android devices.</p>
107
- <h4>Q: How can I stop receiving messages from Bombitup?</h4>
108
- <p>A: If you are receiving messages from Bombitup, you can do the following things to stop them: - Block the sender's number or report it as spam on your phone. - Add your number to the protection list feature on the Bombitup app. - Contact the developer of Bombitup at [email](mailto:[email protected])[email protected][/email] and request them to stop sending messages to your number. - Contact your service provider and complain about the unwanted messages.</p>
109
- <h <h4>Q: How can I update Bombitup to the latest version?</h4>
110
- <p>A: You can update Bombitup to the latest version by following these steps: - Go to the official website of Bombitup at [5](https://bombitup.net/). - Download the latest version of Bombitup apk file from the website. - Uninstall the previous version of Bombitup from your android device. - Install the new version of Bombitup apk file on your android device.</p>
111
- <h4>Q: How can I contact the developer of Bombitup?</h4>
112
- <p>A: You can contact the developer of Bombitup by using the following methods: - Email: [email](mailto:[email protected])[email protected][/email] - Telegram: [6](https://t.me/bombitup) - Twitter: [7](https://twitter.com/bombitupsms)</p> 197e85843d<br />
113
- <br />
114
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crack DSA Interviews with Love Babbar DSA Sheet Download and Solve 450 Questions.md DELETED
@@ -1,122 +0,0 @@
1
-
2
- <h1>How to Download the Love Babbar DSA Sheet from Google Drive</h1>
3
- <p>If you are looking for a comprehensive and structured way to learn data structures and algorithms, you might have heard of the love babbar dsa sheet. This is a popular resource created by love babbar, a software engineer and YouTube educator, who has compiled a list of 450 questions covering various topics and concepts of data structures and algorithms. In this article, we will show you how to download this sheet from Google Drive and how to use it effectively to improve your skills and knowledge.</p>
4
- <h2>download love babbar dsa sheet</h2><br /><p><b><b>Download File</b> &#9881;&#9881;&#9881; <a href="https://urlin.us/2uT0vo">https://urlin.us/2uT0vo</a></b></p><br /><br />
5
- <h2>What Is the Love Babbar DSA Sheet and Why Is It Useful?</h2>
6
- <p>The love babbar dsa sheet is a Google spreadsheet that contains 450 questions on data structures and algorithms, along with links to their solutions. The questions are divided into different categories, such as arrays, strings, linked lists, stacks, queues, trees, graphs, sorting, searching, dynamic programming, greedy, backtracking, bit manipulation, etc. The questions are also labeled according to their difficulty level and source (such as GeeksforGeeks, LeetCode, HackerRank, etc.).</p>
7
- <p>The love babbar dsa sheet is useful for anyone who wants to learn data structures and algorithms from scratch or revise their concepts. The questions are carefully selected to cover all the important topics and concepts that are frequently asked in interviews or exams. The solutions are also explained in detail and provide various approaches and techniques to solve the problems. By practicing these questions regularly, you can enhance your problem-solving skills, logical thinking, coding proficiency, and confidence.</p>
8
- <h2>What Are Data Structures and Algorithms and Why Are They Important?</h2>
9
- <p>Data structures are ways of organizing and storing data in a computer system. They allow us to access, manipulate, and process data efficiently and effectively. Some common examples of data structures are arrays, lists, stacks, queues, trees, graphs, hash tables, heaps, etc.</p>
10
- <p>Algorithms are sequences of steps or instructions that are used to solve a specific problem or perform a certain task. They define how data structures are used to perform operations on data. Some common examples of algorithms are sorting, searching, divide and conquer, greedy, dynamic programming, backtracking, etc.</p>
11
- <p>How to download love babbar dsa sheet pdf<br />
12
- Love babbar dsa sheet solutions in C++<br />
13
- Love babbar dsa sheet questions and answers<br />
14
- Love babbar dsa sheet github repository<br />
15
- Love babbar dsa sheet geeksforgeeks article<br />
16
- Love babbar dsa sheet for interview preparation<br />
17
- Love babbar dsa sheet topic wise distribution<br />
18
- Love babbar dsa sheet youtube videos<br />
19
- Love babbar dsa sheet 450 questions list<br />
20
- Love babbar dsa sheet online course<br />
21
- Love babbar dsa sheet review and feedback<br />
22
- Love babbar dsa sheet java programs<br />
23
- Love babbar dsa sheet python implementation<br />
24
- Love babbar dsa sheet vs striver sde sheet<br />
25
- Love babbar dsa sheet link and resources<br />
26
- Love babbar dsa sheet array problems<br />
27
- Love babbar dsa sheet string problems<br />
28
- Love babbar dsa sheet linked list problems<br />
29
- Love babbar dsa sheet binary tree problems<br />
30
- Love babbar dsa sheet graph problems<br />
31
- Love babbar dsa sheet dynamic programming problems<br />
32
- Love babbar dsa sheet greedy problems<br />
33
- Love babbar dsa sheet backtracking problems<br />
34
- Love babbar dsa sheet bit manipulation problems<br />
35
- Love babbar dsa sheet heap problems<br />
36
- Love babbar dsa sheet trie problems<br />
37
- Love babbar dsa sheet matrix problems<br />
38
- Love babbar dsa sheet searching and sorting problems<br />
39
- Love babbar dsa sheet stack and queue problems<br />
40
- Love babbar dsa sheet binary search tree problems<br />
41
- Download love babbar dsa cracker pdf free<br />
42
- Download love babbar data structures and algorithms pdf free<br />
43
- Download love babbar 450 questions pdf free<br />
44
- Download love babbar interview preparation pdf free<br />
45
- Download love babbar coding practice pdf free<br />
46
- Download love babbar final year project pdf free<br />
47
- Download love babbar amazon experience pdf free<br />
48
- Download love babbar resume pdf free<br />
49
- Download love babbar nsut placement pdf free<br />
50
- Download love babbar competitive programming pdf free</p>
51
- <p>Data structures and algorithms are important because they form the core of computer science and programming. They help us design optimized and scalable solutions for various real-world problems. They also help us measure the performance of our solutions in terms of time complexity (how fast our solution runs) and space complexity (how much memory our solution uses). Having a solid understanding of data structures and algorithms is essential for any aspiring or experienced programmer.</p>
52
- <h2>How to Download the Love Babbar DSA Sheet from Google Drive</h2>
53
- <p>To download the love babbar dsa sheet from Google Drive, you need to follow these simple steps:</p>
54
- <ol>
55
- <li><strong>Open Google Drive in your web browser or app</strong>. You can access Google Drive from any device by visiting <a href="(^1^)">drive.google.com</a> or by downloading the Google Drive app from <a href="(^2^)">Google Play Store</a> (for Android) or <a href="(^3^)">App Store</a>. (for iOS) respectively. You need to sign in with your Google account to access your Google Drive.</li>
56
- <li><strong>Locate the file or folder containing the love babbar dsa sheet</strong>. You can either search for the file or folder by typing its name in the search bar or browse through your folders and files. The love babbar dsa sheet is usually named as <code>Love Babbar DSA Cracker Sheet.xlsx</code> or something similar. You can also find the direct link to the file or folder from love babbar's YouTube channel or website.</li>
57
- <li><strong>Right-click or tap on the file or folder and select Download</strong>. This will start downloading the file or folder to your device. Depending on the size of the file or folder and your internet speed, this may take some time. You can check the progress of the download in your browser or app.</li>
58
- <li><strong>Choose a location to save the file or folder on your device</strong>. Once the download is complete, you can choose where you want to save the file or folder on your device. You can either save it in your default downloads folder or choose a different location. You can also rename the file or folder if you want.</li>
59
- </ol>
60
- <p>Congratulations! You have successfully downloaded the love babbar dsa sheet from Google Drive. Now you can open it with any spreadsheet software, such as Microsoft Excel, Google Sheets, LibreOffice Calc, etc.</p>
61
- <h2>How to Use the Love Babbar DSA Sheet to Learn Data Structures and Algorithms</h2>
62
- <p>Now that you have downloaded the love babbar dsa sheet, you might be wondering how to use it effectively to learn data structures and algorithms. Here are some tips and suggestions:</p>
63
- <ul>
64
- <li><strong>How to access the questions and solutions in the sheet</strong>. The sheet has two tabs: Questions and Solutions. The Questions tab contains the list of 450 questions along with their categories, difficulty levels, and sources. The Solutions tab contains the links to the solutions for each question. You can click on the links to open them in a new tab or window. You can also copy and paste the links in your browser.</li>
65
- <li><strong>How to practice the questions and track your progress</strong>. The best way to practice the questions is to try solving them on your own before looking at the solutions. You can use any online coding platform, such as GeeksforGeeks IDE, LeetCode Playground, HackerRank Code Editor, etc., to write and run your code. You can also use a pen and paper to write down your logic and pseudocode. To track your progress, you can mark the questions that you have solved or attempted in the sheet. You can also use different colors or symbols to indicate your status, such as green for solved, yellow for attempted, red for unsolved, etc.</li>
66
- <li><strong>How to review the concepts and topics covered in the sheet</strong>. The sheet covers a wide range of topics and concepts related to data structures and algorithms. To review them, you can use various online resources, such as books, videos, blogs, courses, etc., that explain them in detail. Some of the recommended resources are: <ul>
67
- <li>Data Structures and Algorithms Made Easy by Narasimha Karumanchi</li>
68
- <li>Data Structures and Algorithms in Python by Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser</li>
69
- <li>Data Structures and Algorithms Specialization by University of California San Diego on Coursera</li>
70
- <li>Data Structures and Algorithms Nanodegree by Udacity</li>
71
- <li>Data Structures and Algorithms by Abdul Bari on YouTube</li>
72
- </ul>
73
- </li>
74
- </ul>
75
- <h2>Conclusion</h2>
76
- <p>In this article, we have shown you how to download the love babbar dsa sheet from Google Drive and how to use it effectively to learn data structures and algorithms. The love babbar dsa sheet is a great resource for anyone who wants to master data structures and algorithms from scratch or revise their concepts. By practicing these questions regularly, you can improve your problem-solving skills, logical thinking, coding proficiency, and confidence.</p>
77
- <p>Here are some tips and resources for further learning:</p>
78
- <ul>
79
- <li>Practice more questions from different sources, such as GeeksforGeeks, LeetCode, HackerRank, etc., to expose yourself to different types of problems and scenarios.</li>
80
- <li>Join online communities, such as Stack Overflow, Reddit, Discord, etc., where you can ask questions, share your solutions, get feedback, and learn from others.</li>
81
- <li>Participate in online contests and challenges, such as CodeChef, Codeforces, HackerEarth, etc., where you can compete with other programmers and test your skills under time pressure.</li> <li>Keep learning new concepts and topics, such as advanced data structures, algorithms, and paradigms, that can help you solve more complex and challenging problems.</li>
82
- </ul>
83
- <h2>FAQs</h2>
84
- <p>Here are some frequently asked questions about the love babbar dsa sheet and data structures and algorithms:</p>
85
- <ol>
86
- <li><strong>What are some benefits of learning data structures and algorithms?</strong></li>
87
- <p>Some of the benefits of learning data structures and algorithms are:</p>
88
- <ul>
89
- <li>You can design efficient and scalable solutions for various real-world problems.</li>
90
- <li>You can improve your coding skills and write clean, readable, and maintainable code.</li>
91
- <li>You can ace your technical interviews and land your dream job or internship.</li>
92
- <li>You can expand your knowledge and understanding of computer science and programming.</li>
93
- <li>You can enjoy the thrill and satisfaction of solving challenging problems.</li>
94
- </ul>
95
- <li><strong>How long does it take to complete the love babbar dsa sheet?</strong></li>
96
- <p>The time it takes to complete the love babbar dsa sheet depends on your current level of proficiency, your learning pace, and your availability. However, a rough estimate is that it may take you anywhere from a few weeks to a few months to complete the sheet. You can set your own goals and deadlines according to your schedule and preferences.</p>
97
- <li><strong>What are some other sources of data structures and algorithms questions?</strong></li>
98
- <p>Some of the other sources of data structures and algorithms questions are:</p>
99
- <ul>
100
- <li><a href="">GeeksforGeeks</a>: A website that provides hundreds of articles, tutorials, videos, quizzes, and practice questions on various topics related to data structures and algorithms.</li>
101
- <li><a href="">LeetCode</a>: A platform that offers over 2000 coding problems with different difficulty levels and categories, along with online judges, solutions, discussions, and contests.</li>
102
- <li><a href="">HackerRank</a>: A website that hosts coding challenges, contests, interviews, and certifications on various domains and skills, including data structures and algorithms.</li>
103
- </ul>
104
- <li><strong>How can I prepare for data structures and algorithms interviews?</strong></li>
105
- <p>Some of the tips to prepare for data structures and algorithms interviews are:</p>
106
- <ul>
107
- <li>Review the basics of data structures and algorithms, such as their definitions, implementations, operations, applications, advantages, disadvantages, etc.</li>
108
- <li>Solve as many questions as possible from different sources and platforms, such as the love babbar dsa sheet, GeeksforGeeks, LeetCode, HackerRank, etc.</li>
109
- <li>Analyze the time and space complexity of your solutions and try to optimize them as much as possible.</li>
110
- <li>Practice explaining your solutions verbally or in writing, using clear logic, pseudocode, diagrams, examples, etc.</li>
111
- <li>Mock interview with a friend or a mentor who can give you feedback on your performance.</li>
112
- </ul>
113
- <li><strong>How can I contact love babbar for any queries or feedback?</strong></li>
114
- <p>You can contact love babbar for any queries or feedback through his social media accounts or email. Here are some of his contact details:</p>
115
- <ul>
116
- <li><a href="">YouTube</a>: Love Babbar</li>
117
- <li><a href="">Instagram</a>: @lovebabbar1</li>
118
- <li><a href="">LinkedIn</a>: Love Babbar</li>
119
- <li>Email: [email protected]</li>
120
- </ul></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dig Deep! APK - The Most Addictive Digging Game for Android Devices.md DELETED
@@ -1,124 +0,0 @@
1
-
2
- <h1>Dig Deep APK Download: How to Play the Best Mining Game on Android</h1>
3
- <p>Do you love digging games? Do you want to become a gold digger and shovel your way to success? If yes, then you should try Dig Deep APK, the best mining game on Android. In this game, you can go underground and dig deep for treasure and gems. But don't get stuck down the hole - hire workers to boost your digging empire. This idle miner game is the best digging game there is. Your only goal is to dig deep and look for gems and diamonds. Become the best gold miner there is! And then you can get more gold diggers to boost your mining tycoon.</p>
4
- <p>In this article, we will tell you everything you need to know about Dig Deep APK. We will explain what is Dig Deep APK, what are its features, how to download it, how to play it, and why you should play it. By the end of this article, you will be ready to dig deep and have fun!</p>
5
- <h2>dig deep apk download</h2><br /><p><b><b>Download Zip</b> &#127383; <a href="https://urlin.us/2uT2Gd">https://urlin.us/2uT2Gd</a></b></p><br /><br />
6
- <h2>What is Dig Deep APK?</h2>
7
- <p>Dig Deep APK is an Android game developed by CrazyLabs LTD, a leading mobile game developer with over 4 billion downloads worldwide. Dig Deep APK is one of their casual games that has over 10 million installs on Google Play Store. The game was released in June 2020 and has been updated regularly with new features and improvements.</p>
8
- <p>Dig Deep APK is a game where you can dig holes deep underground and collect gold and diamonds. You can also hire workers to help you dig faster and deeper. You can use the money you earn to upgrade your shovel and workers, as well as unlock new locations and challenges. The game is easy to play but hard to master. You have to avoid obstacles and enemies that can stop your digging progress. You also have to manage your energy level and avoid running out of it. The game is fun and addictive, and you will never get bored of digging deep.</p>
9
- <h3>Features of Dig Deep APK</h3>
10
- <p>Dig Deep APK is a game that has many features that make it stand out from other digging games. Here are some of the features that you can enjoy when you play Dig Deep APK:</p>
11
- <h4>Shovel up and level up your game</h4>
12
- <p>You can upgrade your shovel and make it more powerful and efficient. You can also level up your game and unlock new locations and challenges. You can dig in different environments, such as the desert, the jungle, the ice, and the lava. Each location has its own obstacles and enemies that you have to overcome. You can also complete missions and achievements to earn rewards and bonuses.</p>
13
- <h4>Dig deep for treasure and gems</h4>
14
- <p>You can dig deep underground and find treasure and gems that are hidden in the soil. You can collect gold and diamonds that are worth a lot of money. You can also find rare and special gems that have unique effects and abilities. You can use these gems to boost your digging performance or to trade them for other items. You can also discover secrets and surprises that are buried deep down the hole.</p>
15
- <h4>Hire workers to boost your digging empire</h4>
16
- <p>You can hire workers to help you dig faster and deeper. You can choose from different types of workers, such as miners, drillers, dynamiters, and more. Each worker has its own skills and abilities that can benefit your digging progress. You can also upgrade your workers and make them more productive and efficient. You can also assign them to different tasks, such as digging, collecting, or fighting.</p>
17
- <h4>Become a mining diamond tycoon</h4>
18
- <p>You can become a mining diamond tycoon by building your own digging empire. You can use the money you earn to buy more shovels, workers, gems, and other items. You can also expand your business and open new mines in different locations. You can also compete with other players and see who can dig deeper and earn more money. You can also join a clan and cooperate with other players to achieve common goals.</p> <h3>How to download Dig Deep APK?</h3>
19
- <p>If you want to play Dig Deep APK, you need to download and install it on your Android device. Here are the requirements and steps for downloading and installing Dig Deep APK:</p>
20
- <p>dig deep game apk free download<br />
21
- dig deep android game download<br />
22
- dig deep simulator game apk<br />
23
- dig deep idle miner apk<br />
24
- dig deep gold digger apk<br />
25
- download dig deep latest version apk<br />
26
- dig deep mod apk unlimited money<br />
27
- dig deep apk offline<br />
28
- dig deep apk for pc<br />
29
- dig deep apk no ads<br />
30
- dig deep apk pure<br />
31
- dig deep apk uptodown<br />
32
- dig deep apk rexdl<br />
33
- dig deep apk revdl<br />
34
- dig deep apk happymod<br />
35
- dig deep apk mod menu<br />
36
- dig deep apk old version<br />
37
- dig deep apk xapk<br />
38
- dig deep apk apkpure<br />
39
- dig deep apk apkmirror<br />
40
- dig deep crazy labs apk<br />
41
- dig deep casual game apk<br />
42
- download dig deep android game<br />
43
- download dig deep simulator game<br />
44
- download dig deep idle miner game<br />
45
- download dig deep gold digger game<br />
46
- download dig deep modded apk<br />
47
- download dig deep hacked apk<br />
48
- download dig deep full version apk<br />
49
- download dig deep premium apk<br />
50
- download dig deep pro apk<br />
51
- download dig deep cracked apk<br />
52
- download dig deep unlocked apk<br />
53
- download dig deep paid apk<br />
54
- download dig deep plus apk<br />
55
- download dig deep mega mod apk<br />
56
- download dig deep unlimited gems apk<br />
57
- download dig deep unlimited coins apk<br />
58
- download dig deep cheat mod apk<br />
59
- download dig deep hack mod apk<br />
60
- how to download dig deep game on android<br />
61
- how to install dig deep game on android<br />
62
- how to play dig deep game on android<br />
63
- how to update dig deep game on android<br />
64
- how to uninstall dig deep game on android<br />
65
- how to get free money in dig deep game on android<br />
66
- how to get free gems in dig deep game on android<br />
67
- how to get free coins in dig deep game on android</p>
68
- <h4>Requirements for installing Dig Deep APK</h4>
69
- <p>To install Dig Deep APK, you need to have an Android device that meets the following requirements:</p>
70
- <ul>
71
- <li>Android version: 4.4 or higher</li>
72
- <li>Free storage space: 100 MB or more</li>
73
- <li>Internet connection: required for downloading and playing the game</li>
74
- </ul>
75
- <h4>Steps to download and install Dig Deep APK</h4>
76
- <p>To download and install Dig Deep APK, you need to follow these steps:</p>
77
- <ol>
78
- <li>Go to the official website of Dig Deep APK or search for it on Google Play Store. You can also use a third-party website that provides the APK file of the game, such as APKPure or APKMirror. Make sure that the website is safe and reliable before downloading anything from it.</li>
79
- <li>Click on the download button and wait for the APK file to be downloaded on your device. You may need to allow unknown sources on your device settings to download the file from a third-party website.</li>
80
- <li>Once the download is complete, locate the APK file on your device and tap on it to start the installation process. You may need to grant some permissions to the app to install it on your device.</li>
81
- <li>Wait for the installation to finish and then launch the game from your app drawer or home screen. You can now enjoy playing Dig Deep APK on your Android device.</li>
82
- </ol> <h3>How to play Dig Deep APK?</h3>
83
- <p>Playing Dig Deep APK is very easy and simple. You just need to tap on the screen to dig holes underground and collect gold and diamonds. Here are the basic steps for playing Dig Deep APK:</p>
84
- <h4>Tap to dig holes underground</h4>
85
- <p>You can tap anywhere on the screen to dig holes underground. The more you tap, the faster and deeper you dig. You can also swipe on the screen to change the direction of your digging. You can dig horizontally, vertically, or diagonally. You can also dig around obstacles and enemies that can block your way.</p>
86
- <h4>Collect gold and diamonds</h4>
87
- <p>You can collect gold and diamonds that are scattered in the soil. You can see how much money you have earned at the top of the screen. You can also see how deep you have dug at the bottom of the screen. You can also find rare and special gems that have unique effects and abilities. You can use these gems to boost your digging performance or to trade them for other items.</p>
88
- <h4>Upgrade your shovel and workers</h4>
89
- <p>You can use the money you have earned to upgrade your shovel and workers. You can go to the shop and buy new shovels that have different shapes, sizes, and powers. You can also hire new workers that have different skills and abilities. You can upgrade your shovel and workers to make them more powerful and efficient.</p>
90
- <h4>Avoid obstacles and enemies</h4>
91
- <p>You have to avoid obstacles and enemies that can stop your digging progress. You have to watch out for rocks, bombs, spikes, lava, water, and more. You also have to avoid enemies such as moles, snakes, spiders, bats, and more. You can use your shovel or your workers to fight them off or to dig around them. You also have to manage your energy level and avoid running out of it. You can replenish your energy by collecting energy drinks or by going back to the surface.</p> <h2>Why should you play Dig Deep APK?</h2>
92
- <p>Dig Deep APK is a game that has many advantages and disadvantages. Here are some of the pros and cons of playing Dig Deep APK:</p>
93
- <h3>Pros of playing Dig Deep APK</h3>
94
- <p>There are many reasons why you should play Dig Deep APK. Here are some of the pros of playing Dig Deep APK:</p>
95
- <h4>Fun and addictive gameplay</h4>
96
- <p>Dig Deep APK is a game that is fun and addictive. You will never get bored of digging deep and looking for treasure and gems. You will also enjoy the challenge of avoiding obstacles and enemies that can stop your digging progress. You will also feel the satisfaction of building your own digging empire and becoming a mining diamond tycoon.</p>
97
- <h4>Simple and intuitive controls</h4>
98
- <p>Dig Deep APK is a game that has simple and intuitive controls. You just need to tap on the screen to dig holes underground and collect gold and diamonds. You can also swipe on the screen to change the direction of your digging. You don't need any complicated buttons or gestures to play the game. You can play the game with one hand or with both hands.</p>
99
- <h4>Amazing graphics and sound effects</h4>
100
- <p>Dig Deep APK is a game that has amazing graphics and sound effects. The game has colorful and detailed graphics that make the game look realistic and appealing. The game also has realistic and immersive sound effects that make the game sound exciting and engaging. You can hear the sound of your shovel digging, the sound of gold and diamonds clinking, the sound of obstacles and enemies exploding, and more.</p>
101
- <h4>Free to play and download</h4>
102
- <p>Dig Deep APK is a game that is free to play and download. You don't need to pay any money to download or play the game. You can enjoy the game without spending any money. However, you can also choose to buy some in-app items or watch some ads to support the developers and get some extra benefits.</p>
103
- <h3>Cons of playing Dig Deep APK</h3>
104
- <p>There are also some drawbacks of playing Dig Deep APK. Here are some of the cons of playing Dig Deep APK:</p>
105
- <h4>Ads and in-app purchases</h4>
106
- <p>Dig Deep APK is a game that has ads and in-app purchases. The game has banner ads, interstitial ads, video ads, and reward ads that can interrupt your gameplay or slow down your device. The game also has in-app purchases that can give you some advantages or unlock some features, such as removing ads, getting more gems, getting more workers, getting more energy, and more. These in-app purchases can range from $0.99 to $99.99 per item. Some players may find these ads and in-app purchases annoying or unfair.</p>
107
- <h4>Repetitive levels and challenges</h4>
108
- <p>Dig Deep APK is a game that has repetitive levels and challenges. The game has many locations and challenges, but they are not very different from each other. The game has the same gameplay mechanics, objectives, obstacles, enemies, rewards, and surprises in every level. Some players may find these levels and challenges boring or monotonous after a while.</p>
109
- <h2>Conclusion</h2>
110
- <p>Dig Deep APK is a game that lets you dig deep underground and collect gold and diamonds. You can also hire workers to help you dig faster and deeper. You can use the money you earn to upgrade your shovel and workers, as well as unlock new locations and challenges. The game is fun and addictive, but it also has some drawbacks, such as ads, in-app purchases, and repetitive levels. If you love digging games, you should try Dig Deep APK on your Android device.</p>
111
- <h3>Frequently Asked Questions</h3>
112
- <p>Here are some of the frequently asked questions about Dig Deep APK:</p>
113
- <h4>Q: Is Dig Deep APK safe to download?</h4>
114
- <p>A: Yes, Dig Deep APK is safe to download if you download it from a trusted source, such as Google Play Store or the official website of Dig Deep APK. However, if you download it from a third-party website, you should be careful and check the file for viruses or malware before installing it on your device.</p>
115
- <h4>Q: How can I remove ads from Dig Deep APK?</h4>
116
- <p>A: You can remove ads from Dig Deep APK by buying the "No Ads" option in the shop for $2.99. This will remove all the banner ads, interstitial ads, video ads, and reward ads from the game. However, you will still see some promotional offers from time to time.</p>
117
- <h4>Q: How can I get more gems in Dig Deep APK?</h4>
118
- <p>A: You can get more gems in Dig Deep APK by digging deep and finding them in the soil. You can also watch ads or complete offers to get free gems. You can also buy gems with real money in the shop. You can use gems to buy special items, such as energy drinks, dynamite, magnets, and more.</p>
119
- <h4>Q: How can I get more workers in Dig Deep APK?</h4>
120
- <p>A: You can get more workers in Dig Deep APK by hiring them in the shop. You can choose from different types of workers, such as miners, drillers, dynamiters, and more. Each worker has its own price and skill level. You can also upgrade your workers to make them more productive and efficient.</p>
121
- <h4>Q: How can I join a clan in Dig Deep APK?</h4>
122
- <p>A: You can join a clan in Dig Deep APK by tapping on the clan icon on the main screen. You can either create your own clan or join an existing one. You can also search for a clan by name or code. You can chat with other clan members, share tips and tricks, and cooperate to achieve common goals.</p>
 
spaces/1phancelerku/anime-remove-background/ARK Survival Evolved - Das beste Survival-Spiel mit Dinos - Gratis Download fr alle Plattformen.md DELETED
@@ -1,138 +0,0 @@
1
-
2
- <h1>ARK Survival Evolved Free Download: How to Play the Dino Adventure for Free</h1>
3
- <p>Are you a fan of dinosaurs and survival games? Then you have surely heard of <strong>ARK Survival Evolved</strong>, one of the most popular titles in the genre. In this game you have to survive on a mysterious island by gathering resources, crafting tools, building shelter, and taming or fighting dinos. Sounds exciting, doesn't it?</p>
4
- <p>If you are wondering how to get an <strong>ARK Survival Evolved download kostenlos</strong> (a free download of the game), you have come to the right place. In this article we show you how to download the game for free or at a discount on various platforms. We also give you an overview of the game itself, its pros and cons, and the system requirements for PC. Read on to learn more!</p>
5
- <h2>ark survival evolved download kostenlos</h2><br /><p><b><b>Download Zip</b> &rArr;&rArr;&rArr; <a href="https://jinyurl.com/2uNL8a">https://jinyurl.com/2uNL8a</a></b></p><br /><br />
6
- <h2>What is ARK Survival Evolved?</h2>
7
- <p>ARK Survival Evolved is an <strong>open-world game</strong> that was released in 2017 for PC, consoles, and mobile devices. The game was developed by Studio Wildcard, Instinct Games, Efecto Studios, and Virtual Basement and has since received several expansions and updates. It is based on Unreal Engine 4 and offers impressive graphics and a huge game world.</p>
8
- <h3>An open-world game with dinosaurs and crafting</h3>
9
- <p>In ARK Survival Evolved you start as a naked and helpless human on an island full of prehistoric creatures. Your goal is to survive and escape by exploring the environment, gathering resources, crafting tools, growing food, and taming or killing dinosaurs. You also have to keep an eye on your health, hunger, thirst, temperature, and stamina.</p>
10
- <p>The game offers over <strong>80 different dinosaurs</strong> and other primeval creatures that you can tame, train, ride, and breed using various strategies and tactics. You can also make use of the dynamic ecosystems on land, in the water, in the air, and even underground. In addition, you can craft a wide range of weapons, clothing, items, and buildings to protect yourself against the dangers.</p>
11
- <h3>A multiplayer game with tribes and PvP</h3>
12
- <p>Besides the single-player mode, you can also play the <strong>multiplayer mode</strong> of ARK Survival Evolved, in which you compete with or against other players online. You can join a <strong>tribe</strong>, i.e. a group of players, or found your own. As a tribe you can share resources, start community projects, form alliances, or wage wars. You can choose between different game modes, such as <strong>PvP</strong> (Player versus Player), <strong>PvE</strong> (Player versus Environment), or <strong>PvX</strong> (a combination of both). You can also adjust the game rules and settings to shape the experience to your taste.</p>
13
- <h2>How can you download ARK Survival Evolved for free?</h2>
14
- <p>If you want to play ARK Survival Evolved, you have several ways to download the game for free or at a discount. Here are some options you can try:</p>
15
- <h3>The mobile version for Android and iOS</h3>
16
- <p>One way to play ARK Survival Evolved for free is the <strong>mobile version</strong> for Android and iOS devices. This version was released in 2018 and offers a similar experience to the PC and console versions, but with some adjustments for touchscreen controls and graphics quality. The mobile version is <strong>free</strong>, but it also contains in-app purchases and ads, which you can remove by buying a premium version.</p>
17
- <p>To download the mobile version of ARK Survival Evolved, simply visit the Google Play Store or the App Store and search for the game. Then you can download and install it on your device. The mobile version requires at least 2 GB of RAM and 2 GB of free storage space on your device.</p>
18
- <p>ark survival evolved kostenlos downloaden android<br />
19
- ark survival evolved free download pc full version<br />
20
- ark survival evolved download gratis per pc<br />
21
- ark survival evolved descargar gratis para android<br />
22
- ark survival evolved download free steam<br />
23
- ark survival evolved télécharger gratuitement sur pc<br />
24
- ark survival evolved download kostenlos vollversion deutsch<br />
25
- ark survival evolved free download for windows 10<br />
26
- ark survival evolved scaricare gratis per android<br />
27
- ark survival evolved baixar gratis para pc<br />
28
- ark survival evolved download free apk<br />
29
- ark survival evolved kostenlos herunterladen chip<br />
30
- ark survival evolved download gratis completo<br />
31
- ark survival evolved free download mac<br />
32
- ark survival evolved télécharger gratuit android<br />
33
- ark survival evolved download kostenlos ohne anmeldung<br />
34
- ark survival evolved free download with multiplayer<br />
35
- ark survival evolved download gratis italiano<br />
36
- ark survival evolved descargar gratis pc full<br />
37
- ark survival evolved baixar gratis para android<br />
38
- ark survival evolved download free ios<br />
39
- ark survival evolved kostenlos spielen ohne download<br />
40
- ark survival evolved download gratis windows 10<br />
41
- ark survival evolved free download xbox one<br />
42
- ark survival evolved télécharger gratuitement sur steam<br />
43
- ark survival evolved download kostenlos epic games<br />
44
- ark survival evolved free download ps4<br />
45
- ark survival evolved download gratis portugues<br />
46
- ark survival evolved descargar gratis para ios<br />
47
- ark survival evolved baixar gratis para celular<br />
48
- ark survival evolved download free no steam<br />
49
- ark survival evolved kostenlos online spielen ohne download<br />
50
- ark survival evolved download gratis pc ita<br />
51
- ark survival evolved free download mobile<br />
52
- ark survival evolved télécharger gratuit pc complet<br />
53
- ark survival evolved download kostenlos deutsch pc<br />
54
- ark survival evolved free download ocean of games<br />
55
- ark survival evolved download gratis mac<br />
56
- ark survival evolved descargar gratis mega<br />
57
- ark survival evolved baixar gratis para iphone<br />
58
- ark survival evolved download free full version apk<br />
59
- ark survival evolved kostenlos runterladen android<br />
60
- ark survival evolved download gratis steamunlocked<br />
61
- ark survival evolved free download utorrent</p>
62
- <h3>The free full version on Epic Games</h3>
63
- <p>Another way to play ARK Survival Evolved for free is to download the <strong>full version</strong> from Epic Games. Epic Games is a platform for PC games that regularly gives away free games that you can keep forever. In June 2020, ARK Survival Evolved was one of these games and could be downloaded and played for free.</p>
64
- <p>To download the full version of ARK Survival Evolved from Epic Games, you need to create a free Epic Games account and install the Epic Games Launcher on your PC. Then search for the game and add it to your library. After that, you can download and install it on your PC. The full version requires at least 60 GB of free storage space on your PC.</p>
65
- <h3>The Steam version at a discount</h3>
66
- <p>Another way to play ARK Survival Evolved more cheaply is to buy the <strong>Steam version</strong> at a discount. Steam is a platform for PC games that often runs special offers and discounts on various titles. In December 2020, ARK Survival Evolved was one of these games and could be bought at a discount of 80%.</p>
67
- <p>To buy the Steam version of ARK Survival Evolved, you need to create a free Steam account and install the Steam client on your PC. Then search for the game and add it to your cart. After that, you can pay with your preferred payment method and download and install the game on your PC. The Steam version requires at least 60 GB of free storage space on your PC.</p>
68
- <h2>Conclusion: Is ARK Survival Evolved worth it?</h2>
69
- <p>Now that we have shown you how to download ARK Survival Evolved for free, you may be wondering whether the game is worth it at all. To help you with this decision, we have listed some pros and cons of the game here:</p>
70
- <h3>The pros and cons of the game</h3>
71
- <table>
72
- <tr>
73
- <th>Pros</th>
74
- <th>Cons</th>
75
- </tr>
76
- <tr>
77
- <td>- A huge and varied game world with many dinosaurs and other creatures</td>
78
- <td>- A high difficulty level and many bugs and technical problems</td>
79
- </tr>
80
- <tr>
81
- <td>- An extensive crafting system with many options for customization and improvement</td>
82
- <td>- A large time investment and a steep learning curve</td>
83
- </tr>
84
- <tr>
85
- <td>- An exciting and challenging multiplayer mode with many ways to interact with others</td>
86
- <td>- Uneven balancing and a partly toxic community</td>
87
- </tr>
88
- <tr>
89
- <td>- Appealing and realistic graphics with many details and effects</td>
90
- <td>- High system requirements and a large storage footprint</td>
91
- </tr>
92
- </table>
93
- <p>As you can see, ARK Survival Evolved has both strengths and weaknesses. Whether you like the game or not ultimately depends on your personal taste, your patience, and your hardware. However, if you are a fan of dinosaurs, survival games, and open-world games, you should at least give ARK Survival Evolved a chance.</p>
94
- <h3>The system requirements for PC</h3>
95
- <p>Before you download ARK Survival Evolved for free, you should make sure that your PC meets the system requirements for the game. Here are the minimum and recommended requirements for the game according to Steam:</p>
96
- <table>
97
- <tr>
98
- <th>Minimum requirements</th>
99
- <th>Recommended requirements</th>
100
- </tr>
101
- <tr>
102
- <td>- Operating system: Windows 7/8.1/10 (64-bit versions)</td>
103
- <td>- Operating system: Windows 10 (64-bit version)</td>
104
- </tr>
105
- <tr>
106
- <td>- Processor: Intel Core i5-2400/AMD FX-8320 or better</td>
107
- <td>- Processor: Intel Core i7-4770/AMD Ryzen 5 1500X or better</td>
108
- </tr>
109
- <tr>
110
- <td>- Memory: 8 GB RAM</td>
111
- <td>- Memory: 16 GB RAM</td>
112
- </tr>
113
- <tr>
114
- <td>- Graphics: NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or better</td>
115
- <td>- Graphics: NVIDIA GTX 1050 Ti 4GB/AMD Radeon RX 470 4GB or better</td>
116
- </tr>
117
- <tr>
118
- <td>- DirectX: Version 10</td>
119
- <td>- DirectX: Version 11</td>
120
- </tr>
121
- <tr>
122
- <td>- Storage: 60 GB available space</td>
123
- <td>- Storage: 60 GB available space</td>
124
- </tr> </table>
125
- <h2>FAQ</h2>
126
- <h4>Is ARK Survival Evolved a good game?</h4>
127
- <p>That depends on who you ask. ARK Survival Evolved is a very ambitious and extensive game that has many fans, but also many critics. The game offers a huge world with many dinosaurs and other creatures, an extensive crafting system, an exciting multiplayer mode, and appealing graphics. However, it also has many bugs, technical problems, a high difficulty level, a steep learning curve, uneven balancing, and a partly toxic community. Whether you like the game or not ultimately depends on your personal taste, your patience, and your hardware.</p>
128
- <h4>How many players does ARK Survival Evolved have?</h4>
129
- <p>According to Steam Charts, ARK Survival Evolved reached an average of around 70,000 players per day in December 2020, with a peak of over 100,000 players. That is a remarkable number for a game that has been on the market for several years. This is probably because the game regularly receives updates and expansions that bring new content and improvements. The game also has a loyal fan base that keeps coming back to it.</p>
130
- <h4>How long does it take to play ARK Survival Evolved?</h4>
131
- <p>That depends on how you want to play the game. ARK Survival Evolved is an open-world game that gives you a lot of freedom and flexibility. You can play for as long as you want, without a fixed goal or ending. You can set your own goals, such as taming certain dinosaurs, exploring certain areas, building certain items, or mastering certain challenges. You can also try to uncover the game's backstory by finding the hidden artifacts and clues that lead you to the boss fights and the possible ending. According to How Long to Beat, it takes on average about 150 hours to finish the game in single-player mode and about 200 hours in multiplayer mode. This is only an estimate, however, and can vary depending on your play style and experience.</p>
132
- <h4>Can you play ARK Survival Evolved offline?</h4>
133
- <p>Yes, you can play ARK Survival Evolved offline if you choose the single-player mode. In this mode you can play without an internet connection, but you have to do without some features, such as access to the official servers, interaction with other players, or the use of mods. If you want to play offline, make sure you have launched the game online beforehand to validate and update it. You should also back up your save games regularly to avoid data loss.</p>
134
- <h4>Are there cheats for ARK Survival Evolved?</h4>
135
- <p>Yes, there are cheats for ARK Survival Evolved that you can use to make the game easier or more fun. To activate cheats, open the console by pressing the Tab key (on PC), holding the pause button and pressing LB+RB+X+Y (on Xbox One), or pressing L1+R1+Triangle+Square (on PS4). Then enter one of the available cheat codes and confirm with Enter. For example, you can enter the cheat "god" to become invulnerable, or the cheat "fly" to fly. You can find a list of all cheat codes here: https://ark.gamepedia.com/Console_Commands</p>
136
- <p>Note, however, that using cheats can have some drawbacks, such as disabling achievements or trophies, spoiling the fun of the game, or the risk of bans or penalties on the official servers. So use cheats only at your own risk and only in single-player mode or on private servers.</p>
 
spaces/1phancelerku/anime-remove-background/Download FS 16 Mod APK 2021 with Hack and Unlimited Money for Free.md DELETED
@@ -1,91 +0,0 @@
1
-
2
- <h1>FS 16 Mod APK 2021 Hack Download: How to Get Unlimited Money and More</h1>
3
- <p>If you are a fan of farming simulation games, you might have heard of FS 16, also known as Farming Simulator 16. This is a game that lets you experience the life of a farmer, from planting crops to harvesting them, from raising animals to selling them, from buying vehicles and tools to upgrading them, and more. But what if you want to enjoy the game without any limitations or restrictions? What if you want to have unlimited money, unlock all the vehicles and tools, and get rid of annoying ads? Well, there is a way to do that, and it is called FS 16 Mod APK 2021 Hack. In this article, we will tell you what FS 16 is, how to download and install FS 16 Mod APK 2021 Hack, why you should use it, and what are the risks involved. So, let's get started!</p>
4
- <h2>What is FS 16?</h2>
5
- <p>FS 16 is a farming simulation game developed by GIANTS Software and released in 2015 for Android, iOS, Windows Phone, Kindle Fire, PlayStation Vita, Nintendo Switch, and other platforms. The game allows you to manage your own farm in a realistic open world environment. You can grow various crops such as wheat, canola, corn, sugar beet, potatoes, etc., and sell them in a dynamic market. You can also breed cows, sheep, and chickens, and produce milk, wool, and eggs. You can also hire workers to help you with your tasks, or play with your friends in multiplayer mode. The game features over 50 vehicles and tools from real brands such as New Holland, Case IH, Ponsse, Lamborghini, Horsch, Krone, Amazone, MAN, etc., that you can buy and customize. The game also has realistic graphics and physics, day and night cycle, weather effects, etc., that make the game more immersive and enjoyable.</p>
6
- <h2>fs 16 mod apk 2021 hack download</h2><br /><p><b><b>Download File</b> &rarr; <a href="https://jinyurl.com/2uNUjo">https://jinyurl.com/2uNUjo</a></b></p><br /><br />
7
- <h3>Features of FS 16</h3>
8
- <p>Some of the main features of FS 16 are:</p>
9
- <ul>
10
- <li>New crops: sugar beet and potatoes</li>
11
- <li>New animals: chickens and sheep</li>
12
- <li>New vehicles and tools from real brands</li>
13
- <li>Multiplayer mode for up to 6 players</li>
14
- <li>Realistic graphics and physics</li>
15
- <li>Dynamic market and economy</li>
16
- <li>Day and night cycle and weather effects</li>
17
- <li>Achievements and leaderboards</li>
18
- </ul>
19
- <h3>How to download and install FS 16 Mod APK 2021</h3>
20
- <p>If you want to download and install FS 16 Mod APK 2021 Hack on your Android device, you need to follow these steps:</p>
21
- <p>fs 16 mod apk unlimited money 2021 download<br />
22
- fs 16 mod apk latest version 2021 hack<br />
23
- fs 16 mod apk free download for android 2021<br />
24
- fs 16 mod apk offline 2021 hack<br />
25
- fs 16 mod apk no root 2021 download<br />
26
- fs 16 farming simulator mod apk 2021 hack<br />
27
- fs 16 mod apk full unlocked 2021 download<br />
28
- fs 16 mod apk android 1 2021 hack<br />
29
- fs 16 mod apk revdl 2021 download<br />
30
- fs 16 mod apk rexdl 2021 hack<br />
31
- fs 16 mod apk happymod 2021 download<br />
32
- fs 16 mod apk pure 2021 hack<br />
33
- fs 16 mod apk obb 2021 download<br />
34
- fs 16 mod apk data 2021 hack<br />
35
- fs 16 mod apk all vehicles unlocked 2021 download<br />
36
- fs 16 mod apk unlimited coins and gems 2021 hack<br />
37
- fs 16 mod apk unlimited fuel and seeds 2021 download<br />
38
- fs 16 mod apk unlimited everything 2021 hack<br />
39
- fs 16 mod apk cheat menu 2021 download<br />
40
- fs 16 mod apk mega mod 2021 hack<br />
41
- fs 16 mod apk pro pack unlocked 2021 download<br />
42
- fs 16 mod apk premium features unlocked 2021 hack<br />
43
- fs 16 mod apk new update 2021 download<br />
44
- fs 16 mod apk old version hacked download<br />
45
- fs 16 mod apk original version hacked download<br />
46
- fs 16 hack tool apk download for android<br />
47
- fs 16 hack generator online no survey no human verification<br />
48
- fs 16 hack without verification or password or root or jailbreak<br />
49
- how to hack fs 16 with lucky patcher or game guardian or cheat engine or sb game hacker or xmodgames or freedom app or creehack or leoplay card or game killer or gamecih or tutuapp or panda helper or ac market or blackmart alpha or aptoide or mobogenie or getjar or uptodown or apkpure or apkmirror or apknite or apptoide tv or f-droid or yalp store or aurora store or amazon appstore or samsung galaxy store or huawei app gallery or xiaomi getapps or oppo app market or vivo app store or lg smartworld or oneplus store or nokia store or asus zen ui store or lenovo app shop or zte app store or meizu app center</p>
50
- <ol>
51
- <li>Go to the download page and download the FS 16 Mod APK file.</li>
52
- <li>Go to your device settings and enable the option to install apps from unknown sources.</li>
53
- <li>Locate the downloaded file in your file manager and tap on it to install it.</li>
54
- <li>Wait for the installation process to finish.</li>
55
- <li>Launch the game and enjoy!</li>
56
- </ol>
57
- <h2>Why use FS 16 Mod APK 2021 Hack?</h2>
58
- <p>You might be wondering why you should use FS 16 Mod APK 2021 Hack instead of the original version of the game. Well, there are several benefits that you can get from using this modded version of the game. Here are some of them:</p>
59
- <h3>Benefits of FS 16 Mod APK 2021 Hack</h3>
60
- <h4>Unlimited money</h4>
61
- <p>One of the most obvious benefits of using FS 16 Mod APK 2021 Hack is that you can get unlimited money in the game. Money is the main currency in the game that you need to buy vehicles, tools, animals, seeds, etc. Normally, you have to earn money by selling your products, completing missions, or watching ads. But with FS 16 Mod APK 2021 Hack, you can get unlimited money without any effort. You can buy whatever you want, upgrade your vehicles and tools, expand your farm, and enjoy the game without any worries.</p>
62
- <h4>Unlock all vehicles and tools</h4>
63
- <p>Another benefit of using FS 16 Mod APK 2021 Hack is that you can unlock all the vehicles and tools in the game. The game has over 50 vehicles and tools from real brands that you can use to manage your farm. However, not all of them are available at the beginning of the game. You have to unlock them by reaching certain levels, completing certain missions, or paying real money. But with FS 16 Mod APK 2021 Hack, you can unlock all of them from the start. You can choose from a variety of tractors, harvesters, trailers, plows, cultivators, seeders, sprayers, mowers, balers, loaders, etc., and use them to improve your farming efficiency and productivity.</p>
64
- <h4>No ads</h4>
65
- <p>The last benefit of using FS 16 Mod APK 2021 Hack is that you can get rid of annoying ads in the game. The game has ads that pop up every now and then, interrupting your gameplay and wasting your time. You can either watch them to get some rewards or skip them by paying real money. But with FS 16 Mod APK 2021 Hack, you can disable all the ads in the game. You can enjoy the game without any interruptions or distractions.</p>
66
- <h3>Risks of FS 16 Mod APK 2021 Hack</h3>
67
- <p>However, using FS 16 Mod APK 2021 Hack is not without risks. There are some drawbacks and dangers that you should be aware of before using this modded version of the game. Here are some of them:</p>
68
- <h4>Malware and viruses</h4>
69
- <p>One of the risks of using FS 16 Mod APK 2021 Hack is that you might download a file that contains malware or viruses. These malicious programs can harm your device, steal your personal information, or corrupt your data. You might also expose your device to hackers or cybercriminals who can access your device remotely or install spyware on it. Therefore, you should be careful when downloading FS 16 Mod APK 2021 Hack from unknown sources. You should scan the file with a reliable antivirus software before installing it.</p>
70
- <h4>Ban from the game</h4>
71
- <p>Another risk of using FS 16 Mod APK 2021 Hack is that you might get banned from the game. The game has a security system that detects any cheating or hacking activities in the game. If you use FS 16 Mod APK 2021 Hack, you might get caught by the system and get banned from playing the game online or offline. You might also lose your progress and achievements in the game. Therefore, you should use FS 16 Mod APK 2021 Hack at your own risk and discretion.</p>
72
- <h4>Legal issues</h4>
73
- <p>The last risk of using FS 16 Mod APK 2021 Hack is that you might face legal issues. The game is protected by intellectual property rights and terms of service that prohibit any unauthorized modification or distribution of the game. If you use FS 16 Mod APK 2021 Hack, you might violate these rights and terms and get sued by the developers or publishers of the game. You might also face fines or penalties for breaking the law. Therefore, you should respect the original version of the game and support its creators.</p>
74
- <h2>Conclusion</h2>
75
- <p>FS 16 is a fun and realistic farming simulation game that lets you experience the life of a farmer. However, if you want to have more fun and freedom in the game, you can use FS 16 Mod APK 2021 Hack to get unlimited money, unlock all vehicles and tools, and remove ads in the game. However, you should also be aware of the risks involved in using this modded version of the game, such as malware and viruses, ban from the game, and legal issues. Therefore, you should use FS 16 Mod APK 2021 Hack at your own risk and discretion.</p>
76
- <h2>FAQs</h2>
77
- <p>Here are some frequently asked questions about FS 16 Mod APK 2021 Hack:</p>
78
- <ul>
79
- <li><b>Q: Is FS 16 Mod APK 2021 Hack free?</b></li>
80
- <li>A: Yes, FS 16 Mod APK 2021 Hack is free to download and use. You don't need to pay any money to use this modded version of the game.</li>
81
- <li><b>Q: Is FS 16 Mod APK 2021 Hack safe?</b></li>
82
- <li>A: FS 16 Mod APK 2021 Hack is not completely safe to use. It might contain malware or viruses that can harm your device or data. It might also get you banned from the game or face legal issues. Therefore, you should use FS 16 Mod APK 2021 Hack at your own risk and discretion.</li>
83
- <li><b>Q: How can I update FS 16 Mod APK 2021 Hack?</b></li>
84
- <li>A: FS 16 Mod APK 2021 Hack is not compatible with the latest version of the game. If you want to update the game, you need to uninstall the modded version and install the original version from the official sources. However, you might lose your progress and achievements in the game if you do so.</li>
85
- <li><b>Q: Can I play FS 16 Mod APK 2021 Hack online?</b></li>
86
- <li>A: FS 16 Mod APK 2021 Hack is not compatible with the online mode of the game. If you try to play online, you might get detected by the security system and get banned from the game. Therefore, you should only play offline with FS 16 Mod APK 2021 Hack.</li>
87
- <li><b>Q: Can I play FS 16 Mod APK 2021 Hack with my friends?</b></li>
88
- <li>A: FS 16 Mod APK 2021 Hack is not compatible with the multiplayer mode of the game. If you try to play with your friends, you might encounter errors or crashes in the game. Therefore, you should only play solo with FS 16 Mod APK 2021 Hack.</li>
89
- </ul></p>
 
spaces/1phancelerku/anime-remove-background/Download Pokemon Go No Lag APK and Play Smoothly on Any Android Device.md DELETED
@@ -1,141 +0,0 @@
1
- <br />
2
- <h1>How to Play Pokemon Go Without Lag Using a No Lag Apk</h1>
3
- <p>Pokemon Go is one of the most popular mobile games in the world, with millions of players catching, battling, and trading Pokemon in real-world locations. However, not everyone can enjoy the game smoothly, as some players may experience lag issues that affect their gameplay. Lag can cause Pokemon to escape, battles to freeze, or items to fail to load. This can be very frustrating and ruin the fun of playing Pokemon Go.</p>
4
- <h2>pokemon go no lag apk</h2><br /><p><b><b>Download</b> > <a href="https://jinyurl.com/2uNP2X">https://jinyurl.com/2uNP2X</a></b></p><br /><br />
5
- <p>Fortunately, there are some ways to fix Pokemon Go lag issues and play the game without any interruptions. One of them is using a no lag apk, which is a modified version of the game that optimizes its performance and reduces its resource consumption. In this article, we will explain what a no lag apk is, how it works, where to download it, and how to install it. We will also discuss the benefits and risks of using a no lag apk for Pokemon Go, as well as some alternative methods to improve your game experience.</p>
6
- <h2>What is a No Lag Apk and How Does It Work?</h2>
7
- <p>A no lag apk is a modified version of an app that aims to improve its speed, stability, and efficiency. It usually removes or reduces some features or graphics that are not essential for the app's functionality, but may consume a lot of memory, CPU, or battery power. By doing so, it frees up more resources for the app to run faster and smoother.</p>
8
- <p>A no lag apk for Pokemon Go works in a similar way. It modifies some aspects of the game, such as animations, sounds, effects, or textures, to make it less demanding on your device. It also optimizes the game's code and network connection to reduce latency and errors. As a result, you can play Pokemon Go without any lag issues, even on low-end devices or slow internet connections.</p>
9
- <h2>Where to Download a No Lag Apk for Pokemon Go and How to Install It?</h2>
10
- <p>There are many websites that offer no lag apks for various apps and games, including Pokemon Go. However, not all of them are trustworthy or safe. Some may contain malware, viruses, or spyware that can harm your device or steal your personal information. Therefore, you should be careful when downloading and installing any no lag apks from unknown sources.</p>
11
- <p>pokemon go smooth apk<br />
12
- pokemon go no hang apk<br />
13
- pokemon go lag fix apk<br />
14
- pokemon go fast apk<br />
15
- pokemon go no slow apk<br />
16
- pokemon go lag free apk<br />
17
- pokemon go speed apk<br />
18
- pokemon go no freeze apk<br />
19
- pokemon go lagless apk<br />
20
- pokemon go performance apk<br />
21
- pokemon go no stutter apk<br />
22
- pokemon go boost apk<br />
23
- pokemon go no glitch apk<br />
24
- pokemon go stable apk<br />
25
- pokemon go no delay apk<br />
26
- pokemon go optimize apk<br />
27
- pokemon go no crash apk<br />
28
- pokemon go quick apk<br />
29
- pokemon go no buffer apk<br />
30
- pokemon go enhance apk<br />
31
- pokemon go no lag mod apk<br />
32
- pokemon go no lag hack apk<br />
33
- pokemon go no lag cheat apk<br />
34
- pokemon go no lag unlimited apk<br />
35
- pokemon go no lag premium apk<br />
36
- pokemon go no lag pro apk<br />
37
- pokemon go no lag mod menu apk<br />
38
- pokemon go no lag latest version apk<br />
39
- pokemon go no lag updated version apk<br />
40
- pokemon go no lag new version apk<br />
41
- pokemon go no lag download apk<br />
42
- pokemon go no lag install apk<br />
43
- pokemon go no lag free download apk<br />
44
- pokemon go no lag android download apk<br />
45
- pokemon go no lag direct download apk<br />
46
- pokemon go no lag offline apk<br />
47
- pokemon go no lag online apk<br />
48
- pokemon go no lag multiplayer apk<br />
49
- pokemon go no lag adventure game apk<br />
50
- pokemon go no lag niantic game apk<br />
51
- pokemon go no lag 2023 version apk<br />
52
- pokemon go no lag june update apk<br />
53
- pokemon go no lag 0.273.3 version apk <br />
54
- pokemon go no lag youtube video apk <br />
55
- pokemon go no lag tutorial guide apk <br />
56
- pokemon go no lag tips and tricks apk <br />
57
- pokemon go no lag best settings apk <br />
58
- pokemon go no lag how to play apk <br />
59
- pokemon go no lag review and rating apk</p>
60
- <p>One of the most reliable and popular websites that provide no lag apks for Pokemon Go is Pokemod. Pokemod is a website that offers various mods and tweaks for Pokemon Go, such as perfect throw, shiny scanner, spawn booster, instant catch, and more. Among them is the Pokemod No Lag Apk, which claims to reduce lag and overheating by decreasing visual quality and effects.</p>
61
- <p>To download and install the Pokemod No Lag Apk for Pokemon Go, you need to follow these steps:</p>
62
- <ol>
63
- <li>Go to Pokemod's website and tap on the Download button.</li>
64
- <li>Allow your browser to download files from unknown sources if prompted.</li>
65
- <li>Wait for the download to finish and locate the file in your device's storage.</li>
66
- <li>Tap on the file and follow the instructions to install it.</li>
67
- <li>Launch the app and enjoy playing Pokemon Go without any lag.</li>
68
- </ol>
69
- <h2>What are the Benefits and Risks of Using a No Lag Apk for Pokemon Go?</h2>
70
- <p>Using a no lag apk for Pokemon Go can have some benefits and risks that you should be aware of before deciding whether to use it or not. Here are some of them:</p>
71
- <h3>Benefits</h3>
72
- <ul>
73
- <li>You can play Pokemon Go faster and smoother without any interruptions or delays.</li>
74
- <li>You can save battery life and data usage by lowering the game's graphics and effects.</li>
75
- <li>You can play Pokemon Go on older or weaker devices that may not support the official version of the game.</li>
76
- </ul>
77
- <h3>Risks</h3>
78
- <ul>
79
- <li>You may lose some of the game's visual quality and appeal by using a no lag apk.</li>
80
- <li>You may encounter some bugs or glitches that may affect your gameplay or progress.</li>
81
- <li>You may violate the game's terms of service and risk getting banned or suspended by using a no lag apk.</li>
82
- </ul>
83
- <h2>What are Some Alternative Ways to Reduce Pokemon Go Lag Without Using a No Lag Apk?</h2>
84
- <p>If you don't want to use a no lag apk for Pokemon Go, or if you still experience lag issues after using one, there are some other ways to improve your game performance and reduce lag. Here are some of them:</p>
85
- <h3>Update Your Device and App</h3>
86
- <p>One of the simplest and most effective ways to fix Pokemon Go lag issues is to update your device and app to the latest versions. This can help you get rid of any bugs or errors that may cause lag, as well as enjoy new features and improvements that may enhance your game experience. To update your device and app, you can follow these steps:</p>
87
- <ol>
88
- <li>Go to your device's settings and check for any available software updates. If there are any, download and install them.</li>
89
- <li>Go to your app store and check for any available updates for Pokemon Go. If there are any, download and install them.</li>
90
- <li>Restart your device and launch Pokemon Go.</li>
91
- </ol>
92
- <h3>Clear Your Cache and Data</h3>
93
- <p>Another way to fix Pokemon Go lag issues is to clear your cache and data for the app. This can help you free up some space and memory on your device, as well as remove any corrupted or outdated files that may cause lag. To clear your cache and data for Pokemon Go, you can follow these steps:</p>
94
- <ol>
95
- <li>Go to your device's settings and tap on Apps or Applications.</li>
96
- <li>Find and tap on Pokemon Go.</li>
97
- <li>Tap on Storage or Manage Storage.</li>
98
- <li>Tap on Clear Cache and Clear Data.</li>
99
- <li>Restart your device and launch Pokemon Go.</li>
100
- </ol>
101
- <h3>Adjust Your Game Settings</h3>
102
- <p>A third way to fix Pokemon Go lag issues is to adjust your game settings to suit your device and network conditions. This can help you optimize your game performance and reduce lag by lowering some of the game's graphics and effects that may consume a lot of resources. To adjust your game settings for Pokemon Go, you can follow these steps:</p>
103
- <ol>
104
- <li>Launch Pokemon Go and tap on the Pokeball icon at the bottom of the screen.</li>
105
- <li>Tap on Settings at the top right corner of the screen.</li>
106
- <li>Scroll down and find the options for Battery Saver, Adventure Sync, Music, Sound Effects, Vibration, Niantic AR, Camera Permissions, AR+, etc.</li>
107
- <li>Turn off or disable any of these options that you don't need or use. For example, you can turn off Battery Saver if you have enough battery power, turn off Adventure Sync if you don't want to track your steps, turn off Music and Sound Effects if you don't want to hear them, turn off Vibration if you don't want to feel it, turn off Niantic AR if you don't want to use augmented reality features, turn off Camera Permissions if you don't want to take pictures or videos, turn off AR+ if you don't want to use advanced AR features, etc.</li>
108
- <li>Restart your device and launch Pokemon Go.</li>
109
- </ol>
110
- <h2>Conclusion</h2>
111
- <p>Pokemon Go is a fun and immersive game that lets you catch, battle, and trade Pokemon in real-world locations. However, some players experience lag issues that spoil their gameplay and enjoyment. One way to fix these issues is to use a no lag apk for Pokemon Go, a modified version of the game that improves performance and reduces resource consumption. Using a no lag apk comes with both benefits and risks, though, so weigh them before installing one. Alternatively, you can try other methods to reduce Pokemon Go lag without a no lag apk, such as updating your device and app, clearing your cache and data, or adjusting your game settings. By following these tips and tricks, you can play Pokemon Go without lag and enjoy a smooth and satisfying game experience.</p>
112
- <h2>FAQs</h2>
113
- <p>Here are some common questions and answers about Pokemon Go no lag apk and lag issues:</p>
114
- <h3>Q: What is the difference between a no lag apk and a mod apk for Pokemon Go?</h3>
115
- <p>A: A no lag apk is a type of mod apk, which means a modified version of the original app. However, not all mod apks are no lag apks. A no lag apk specifically focuses on improving the game's performance and reducing its resource consumption, while a mod apk may have other features or functions that are not related to lag issues, such as cheats, hacks, or customizations.</p>
116
- <h3>Q: Is using a no lag apk for Pokemon Go legal and safe?</h3>
117
- <p>A: Using a no lag apk for Pokemon Go is not legal, as it violates the game's terms of service and may result in a ban or suspension from the game. It is also not safe, as it may expose your device or account to malware, viruses, or spyware that can harm your device or steal your personal information. Therefore, you should use a no lag apk for Pokemon Go at your own risk and discretion.</p>
118
- <h3>Q: How can I tell if I have lag issues in Pokemon Go?</h3>
119
- <p>A: Some of the signs that you have lag issues in Pokemon Go are:</p>
120
- <ul>
121
- <li>Your game freezes, crashes, or closes unexpectedly.</li>
122
- <li>Your game takes a long time to load or respond.</li>
123
- <li>Your game shows error messages or warnings.</li>
124
- <li>Your game displays incorrect or outdated information.</li>
125
- <li>Your game fails to register your actions or inputs.</li>
126
- <li>Your game shows distorted or missing graphics or sounds.</li>
127
- </ul>
128
- <h3>Q: What are the main causes of lag issues in Pokemon Go?</h3>
129
- <p>A: Some of the main causes of lag issues in Pokemon Go are:</p>
130
- <ul>
131
- <li>Your device is old, weak, or incompatible with the game.</li>
132
- <li>Your device has low battery, memory, storage, or CPU power.</li>
133
- <li>Your device has too many apps running in the background or foreground.</li>
134
- <li>Your device has malware, viruses, or spyware that interfere with the game.</li>
135
- <li>Your internet connection is slow, unstable, or congested.</li>
136
- <li>Your game is outdated, corrupted, or misconfigured.</li>
137
- </ul>
138
- <h3>Q: Can I use a no lag apk for Pokemon Go with other mods or tweaks?</h3>
139
- <p>A: It depends on the compatibility and functionality of the no lag apk and the other mods or tweaks. Some no lag apks may work well with other mods or tweaks, while others may conflict or interfere with them. You should always check the description and reviews of the no lag apk and the other mods or tweaks before using them together. You should also back up your data and uninstall any unwanted or unnecessary mods or tweaks to avoid any problems.</p>
140
- <br />
141
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Eyes APK The Ultimate Horror Game with Coop Multiplayer.md DELETED
@@ -1,19 +0,0 @@
1
- <br />
2
- <h1>Eyes APK: A Multiplayer Horror Game for Android</h1>
3
- Are you looking for a thrilling and immersive horror game to play with your friends? Do you want to experience the terror of being chased by a monster in a dark and creepy mansion? If yes, then you should try Eyes APK, a free multiplayer horror game for Android devices. In this article, we will tell you everything you need to know about this game, including what it is, how to download and install it, how to play it, and why you should play it. <h2>What is Eyes APK?</h2>
4
- Eyes APK is an Android game developed by FearlessGames, a studio that specializes in creating horror games. It is a survival horror game that tests your nerves and skills as you try to escape from a haunted house while being hunted by a terrifying creature. You can play this game alone or with your friends in the multiplayer mode. <h3>The premise of the game</h3>
5
- The game starts with you breaking into a mansion at night. You are looking for some valuable items to steal, but you soon realize that you are not alone. There is a monster lurking in the shadows, and it is after you. You have to find a way out of the mansion before it catches you. But be careful, the mansion is huge and built like a maze. You will also encounter some paranormal phenomena, such as flickering lights, moving objects, and eerie sounds. <h3>The features of the game</h3>
6
- Eyes APK has many features that make it an exciting and challenging horror game. Some of these features are: - Multiple scary monsters and beasts to choose from – or create your own demon with custom visuals and audio. - Multiple levels to unlock: an old haunted house, an abandoned hospital, and a desolate school. More added all the time. - Multiple gameplay modes to endure: classic mode, endless mode, time attack mode, and multiplayer mode. - Use mystical Eye runes to see through the monster’s twisted vision and try to survive its grudge. - Consult a hand-drawn map to plan your next move. - Compete with fellow adventurers on the global leaderboards or play offline. - The perfect horror game: tense gameplay, a scary beast, sudden jumpscares, and a chilling atmosphere. <h2>How to download and install Eyes APK?</h2>
7
- If you want to play Eyes APK on your Android device, you will need to download and install it first. Here are the requirements and the steps for doing so: <h3>The requirements for the game</h3>
8
- To play Eyes APK on your Android device, you will need: - An Android device running version 5.1 or higher. - At least 189 MB of free storage space on your device. - A stable internet connection (for multiplayer mode). <h3>The steps to download and install the game</h3>
9
- To download and install Eyes APK on your Android device, follow these steps: - Go to [this link](^1^) on your device’s browser. This is the official website of the game where you can find more information about it. - Tap on the Download APK button. This will start downloading the APK file of the game on your device. - Once the download is complete, locate the file on your device and tap on it. This will prompt you to allow installation from unknown sources. Enable this option if you haven’t done so before. - Follow the on-screen instructions to install the game on your device. - Once the installation is done, launch the game from your app drawer or home screen. <h2>How to play Eyes APK?</h2>
10
- Now that you have downloaded and installed Eyes APK on your Android device, you are ready to play it. Here are some tips on how to play this game: <h3>The gameplay modes</h3> Eyes APK has four different gameplay modes that you can choose from: - Classic mode: This is the standard mode where you have to find a certain number of bags and escape from the mansion. You can adjust the difficulty level and the number of bags to suit your preference. - Endless mode: This is the mode where you have to survive as long as possible in the mansion. The monster will become faster and more aggressive as time goes by. You can collect bags to increase your score and use Eye runes to see where the monster is. - Time attack mode: This is the mode where you have to find all the bags and escape from the mansion within a given time limit. You can also use Eye runes to gain extra time. The faster you finish, the higher your rank will be. - Multiplayer mode: This is the mode where you can play with your friends online. You can either cooperate with them to find the bags and escape, or compete with them to see who can get more bags. You can also chat with them using voice or text messages. <h3>The tips and tricks</h3>
11
- To play Eyes APK effectively, you will need some tips and tricks to help you out. Here are some of them: - Use headphones to hear the sounds better. This will help you locate the monster and avoid it. - Use the map to navigate the mansion. The map will show you where you are, where the bags are, and where the exits are. It will also show you where the Eye runes are, which are useful for seeing through the monster’s eyes. - Use the Eye runes wisely. The Eye runes will let you see what the monster sees for a few seconds. This can help you avoid it or find a safe spot. However, using them too often will make the monster angry and more likely to chase you. - Don’t look at the monster directly. If you see the monster, don’t stare at it or run towards it. This will make it notice you and attack you. Instead, look away and hide behind a door or a furniture. - Don’t run too much. Running will make noise and attract the monster’s attention. It will also drain your stamina, which you need to escape. Only run when necessary, such as when you are being chased or when you are near an exit. <h2>Why should you play Eyes APK?</h2>
12
- You might be wondering why you should play Eyes APK when there are so many other horror games available. Well, here are some reasons why you should give this game a try: <h3>The benefits of playing the game</h3>
13
- Playing Eyes APK can have some benefits for you, such as: - Improving your reflexes and decision-making skills. The game will test your ability to react quickly and make smart choices under pressure. - Enhancing your creativity and imagination. The game will stimulate your mind with its spooky atmosphere and mysterious story. - Reducing your stress and anxiety. The game will help you release your negative emotions and have some fun with your friends. <h3>The testimonials from other players</h3>
14
- Don’t just take our word for it. Here are some testimonials from other players who have enjoyed playing Eyes APK: - “This game is awesome! I love how it makes me feel scared and excited at the same time. The graphics are amazing and the sounds are realistic. I recommend this game to anyone who likes horror games.” – Jessica - “This game is so addictive! I can’t stop playing it with my friends. We always have a blast trying to escape from the monster or competing with each other. The multiplayer mode is the best part of this game.” – Kevin - “This game is terrifying! I almost had a heart attack when I saw the monster for the first time. It was so creepy and unpredictable. I had to hide under my blanket while playing this game.” – Ashley <h2>Conclusion</h2>
15
- Eyes APK is a multiplayer horror game for Android devices that will give you a thrilling and immersive experience of being hunted by a monster in a haunted mansion. You can play this game alone or with your friends in different modes and levels. You can also customize your own monster and challenge yourself with different difficulty settings. If you are looking for a horror game that will keep you on the edge of your seat, then download Eyes APK today and enjoy! <h2>FAQs</h2>
16
- Here are some frequently asked questions about Eyes APK: - Q: Is Eyes APK safe to download and install? - A: Yes, Eyes APK is safe to download and install on your Android device. It does not contain any viruses or malware that could harm your device or data. - Q: How much does Eyes APK cost? - A: Eyes APK is free to download and play on your Android device. However, it does contain some in-app purchases that can enhance your gameplay experience, such as unlocking more monsters, levels, and modes. - Q: How many players can play Eyes APK in multiplayer mode? - A: Eyes APK supports up to 4 players in multiplayer mode. You can either join a random room or create your own room and invite your friends to join. You can also choose to play as the monster or the survivor in multiplayer mode. - Q: How can I customize my own monster in Eyes APK? - A: You can customize your own monster in Eyes APK by tapping on the Customize button on the main menu. You can change the appearance, the sound, and the name of your monster. You can also share your monster with other players and download their monsters as well. - Q: What are the best devices to play Eyes APK on? - A: Eyes APK is compatible with most Android devices that run version 5.1 or higher. However, some devices may have better performance and graphics than others. Some of the best devices to play Eyes APK on are Samsung Galaxy S10, Huawei P30 Pro, OnePlus 7T, and Google Pixel 4. - Q: How can I contact the developers of Eyes APK? - A: You can contact the developers of Eyes APK by sending them an email at [email protected]. You can also follow them on Facebook, Twitter, and Instagram for the latest news and updates about the game. - Q: Is Eyes APK available for other platforms? - A: Yes, Eyes APK is also available for iOS devices. You can download it from the App Store for free. However, there is no cross-platform support between Android and iOS devices. I hope you enjoyed reading this article about Eyes APK, a multiplayer horror game for Android devices. If you have any questions or feedback, please leave a comment below. Thank you for your time and attention.</p>
17
- <h2>eyes apk</h2><br /><p><b><b>Download File</b> &bull;&bull;&bull; <a href="https://jinyurl.com/2uNTaf">https://jinyurl.com/2uNTaf</a></b></p><br /><br />
18
- <br />
19
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/232labs/VToonify/vtoonify/model/raft/alt_cuda_corr/setup.py DELETED
@@ -1,15 +0,0 @@
1
- from setuptools import setup
2
- from torch.utils.cpp_extension import BuildExtension, CUDAExtension
3
-
4
-
5
- setup(
6
- name='correlation',
7
- ext_modules=[
8
- CUDAExtension('alt_cuda_corr',
9
- sources=['correlation.cpp', 'correlation_kernel.cu'],
10
- extra_compile_args={'cxx': [], 'nvcc': ['-O3']}),
11
- ],
12
- cmdclass={
13
- 'build_ext': BuildExtension
14
- })
15
-
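
For reference, here is a minimal sketch of how an extension declared this way is typically built and then loaded from Python. The build commands are standard setuptools/pip usage; the commented-out call at the end is only an assumption for illustration, since the functions actually exported live in `correlation.cpp`, which is not shown here.

```python
# Build in place (run from the directory containing this setup.py):
#   python setup.py build_ext --inplace
# or install into the active environment:
#   pip install .

import torch
import alt_cuda_corr  # module name given to CUDAExtension above

fmap1 = torch.randn(1, 32, 64, 64, device="cuda")
fmap2 = torch.randn(1, 32, 64, 64, device="cuda")
# The exported function names and signatures are defined in correlation.cpp
# (not shown), so the call below is a hypothetical placeholder:
# corr = alt_cuda_corr.forward(fmap1, fmap2, radius)
```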
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ADRXtractor/ADR_Xtractor/README.md DELETED
@@ -1,37 +0,0 @@
1
- ---
2
- title: ADR_Xtractor
3
- emoji: 🌍
4
- colorFrom: red
5
- colorTo: blue
6
- sdk: gradio
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- # Configuration
12
-
13
- `title`: _string_
14
- Display title for the Space
15
-
16
- `emoji`: _string_
17
- Space emoji (emoji-only character allowed)
18
-
19
- `colorFrom`: _string_
20
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
-
22
- `colorTo`: _string_
23
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
-
25
- `sdk`: _string_
26
- Can be either `gradio` or `streamlit`
27
-
28
- `sdk_version` : _string_
29
- Only applicable for `streamlit` SDK.
30
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
-
32
- `app_file`: _string_
33
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
34
- Path is relative to the root of the repository.
35
-
36
- `pinned`: _boolean_
37
- Whether the Space stays on top of your list.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/audiocraft/adversarial/discriminators/mpd.py DELETED
@@ -1,106 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import typing as tp
8
-
9
- import torch
10
- import torch.nn as nn
11
- import torch.nn.functional as F
12
-
13
- from ...modules import NormConv2d
14
- from .base import MultiDiscriminator, MultiDiscriminatorOutputType
15
-
16
-
17
- def get_padding(kernel_size: int, dilation: int = 1) -> int:
18
- return int((kernel_size * dilation - dilation) / 2)
19
-
20
-
21
- class PeriodDiscriminator(nn.Module):
22
- """Period sub-discriminator.
23
-
24
- Args:
25
- period (int): Period between samples of audio.
26
- in_channels (int): Number of input channels.
27
- out_channels (int): Number of output channels.
28
- n_layers (int): Number of convolutional layers.
29
- kernel_sizes (list of int): Kernel sizes for convolutions.
30
- stride (int): Stride for convolutions.
31
- filters (int): Initial number of filters in convolutions.
32
- filters_scale (int): Multiplier of number of filters as we increase depth.
33
- max_filters (int): Maximum number of filters.
34
- norm (str): Normalization method.
35
- activation (str): Activation function.
36
- activation_params (dict): Parameters to provide to the activation function.
37
- """
38
- def __init__(self, period: int, in_channels: int = 1, out_channels: int = 1,
39
- n_layers: int = 5, kernel_sizes: tp.List[int] = [5, 3], stride: int = 3,
40
- filters: int = 8, filters_scale: int = 4, max_filters: int = 1024,
41
- norm: str = 'weight_norm', activation: str = 'LeakyReLU',
42
- activation_params: dict = {'negative_slope': 0.2}):
43
- super().__init__()
44
- self.period = period
45
- self.n_layers = n_layers
46
- self.activation = getattr(torch.nn, activation)(**activation_params)
47
- self.convs = nn.ModuleList()
48
- in_chs = in_channels
49
- for i in range(self.n_layers):
50
- out_chs = min(filters * (filters_scale ** (i + 1)), max_filters)
51
- eff_stride = 1 if i == self.n_layers - 1 else stride
52
- self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_sizes[0], 1), stride=(eff_stride, 1),
53
- padding=((kernel_sizes[0] - 1) // 2, 0), norm=norm))
54
- in_chs = out_chs
55
- self.conv_post = NormConv2d(in_chs, out_channels, kernel_size=(kernel_sizes[1], 1), stride=1,
56
- padding=((kernel_sizes[1] - 1) // 2, 0), norm=norm)
57
-
58
- def forward(self, x: torch.Tensor):
59
- fmap = []
60
- # 1d to 2d
61
- b, c, t = x.shape
62
- if t % self.period != 0: # pad first
63
- n_pad = self.period - (t % self.period)
64
- x = F.pad(x, (0, n_pad), 'reflect')
65
- t = t + n_pad
66
- x = x.view(b, c, t // self.period, self.period)
67
-
68
- for conv in self.convs:
69
- x = conv(x)
70
- x = self.activation(x)
71
- fmap.append(x)
72
- x = self.conv_post(x)
73
- fmap.append(x)
74
- # x = torch.flatten(x, 1, -1)
75
-
76
- return x, fmap
77
-
78
-
79
- class MultiPeriodDiscriminator(MultiDiscriminator):
80
- """Multi-Period (MPD) Discriminator.
81
-
82
- Args:
83
- in_channels (int): Number of input channels.
84
- out_channels (int): Number of output channels.
85
- periods (Sequence[int]): Periods between samples of audio for the sub-discriminators.
86
- **kwargs: Additional args for `PeriodDiscriminator`
87
- """
88
- def __init__(self, in_channels: int = 1, out_channels: int = 1,
89
- periods: tp.Sequence[int] = [2, 3, 5, 7, 11], **kwargs):
90
- super().__init__()
91
- self.discriminators = nn.ModuleList([
92
- PeriodDiscriminator(p, in_channels, out_channels, **kwargs) for p in periods
93
- ])
94
-
95
- @property
96
- def num_discriminators(self):
97
- return len(self.discriminators)
98
-
99
- def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType:
100
- logits = []
101
- fmaps = []
102
- for disc in self.discriminators:
103
- logit, fmap = disc(x)
104
- logits.append(logit)
105
- fmaps.append(fmap)
106
- return logits, fmaps
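
For reference, a short usage sketch of the discriminator above. It assumes the full audiocraft package is importable (it provides `NormConv2d` and the `MultiDiscriminator` base used here); shapes follow the `forward` methods above, with a waveform batch of shape `[batch, channels, time]` going in and one logit tensor plus a list of feature maps coming out per period.

```python
import torch
# Assumes the audiocraft package layout this file belongs to.
from audiocraft.adversarial.discriminators.mpd import MultiPeriodDiscriminator

mpd = MultiPeriodDiscriminator(in_channels=1, out_channels=1)

wav = torch.randn(4, 1, 16000)      # [batch, channels, time]
logits, fmaps = mpd(wav)            # one entry per period in (2, 3, 5, 7, 11)

print(len(logits))                  # 5 sub-discriminators
print(logits[0].shape)              # per-period logit map of the first one
print(len(fmaps[0]))                # intermediate feature maps of the first one
```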
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/rope.py DELETED
@@ -1,124 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import typing as tp
8
-
9
- from torch import nn
10
- import torch
11
-
12
-
13
- class XPos(nn.Module):
14
- """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1).
15
- This applies an exponential decay to the RoPE rotation matrix.
16
-
17
- Args:
18
- dim (int): Embedding dimension.
19
- smoothing (float): Smoothing factor applied to the decay rates.
20
- base_scale (int): Base decay rate, given in terms of scaling time.
21
- device (torch.device or None): Device on which to initialize the module.
22
- dtype (torch.dtype): dtype to use to generate the embedding.
23
- """
24
- def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512,
25
- device=None, dtype: torch.dtype = torch.float32):
26
- super().__init__()
27
- assert dim % 2 == 0
28
- assert dtype in [torch.float64, torch.float32]
29
- self.dtype = dtype
30
- self.base_scale = base_scale
31
-
32
- half_dim = dim // 2
33
- adim = torch.arange(half_dim, device=device, dtype=dtype)
34
- decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing)
35
- self.register_buffer("decay_rates", decay_rates)
36
- self.decay: tp.Optional[torch.Tensor] = None
37
-
38
- def get_decay(self, start: int, end: int):
39
- """Create complex decay tensor, cache values for fast computation.
40
- """
41
- if self.decay is None or end > self.decay.shape[0]:
42
- assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker.
43
- idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype)
44
- power = idx / self.base_scale
45
- scale = self.decay_rates ** power.unsqueeze(-1)
46
- self.decay = torch.polar(scale, torch.zeros_like(scale))
47
- return self.decay[start:end] # [T, C/2]
48
-
49
-
50
- class RotaryEmbedding(nn.Module):
51
- """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864).
52
-
53
- Args:
54
- dim (int): Embedding dimension (twice the number of frequencies).
55
- max_period (float): Maximum period of the rotation frequencies.
56
- xpos (bool): Use xPos, applies an exponential decay to rotation matrix.
57
- scale (float): Scale of positional embedding, set to 0 to deactivate.
58
- device (torch.device or None): Device on which to initialize the module.
59
- dtype (torch.dtype): dtype to use to generate the embedding.
60
- """
61
- def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False,
62
- scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32):
63
- super().__init__()
64
- assert dim % 2 == 0
65
- self.scale = scale
66
- assert dtype in [torch.float64, torch.float32]
67
- self.dtype = dtype
68
-
69
- adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)]
70
- frequencies = 1.0 / (max_period ** (adim / dim))
71
- self.register_buffer("frequencies", frequencies)
72
- self.rotation: tp.Optional[torch.Tensor] = None
73
-
74
- self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None
75
-
76
- def get_rotation(self, start: int, end: int):
77
- """Create complex rotation tensor, cache values for fast computation.
78
- """
79
- if self.rotation is None or end > self.rotation.shape[0]:
80
- assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker.
81
- idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype)
82
- angles = torch.outer(idx, self.frequencies)
83
- self.rotation = torch.polar(torch.ones_like(angles), angles)
84
- return self.rotation[start:end]
85
-
86
- def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False):
87
- """Apply rope rotation to query or key tensor.
88
- """
89
- T = x.shape[1]
90
- rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2)
91
-
92
- if self.xpos:
93
- decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2)
94
- else:
95
- decay = 1.0
96
-
97
- if invert_decay:
98
- decay = decay ** -1
99
-
100
- x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2))
101
- scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale)
102
- x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2)
103
-
104
- return x_out.type_as(x)
105
-
106
- def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0):
107
- """ Apply rope rotation to both query and key tensors.
108
- Supports streaming mode, in which query and key are not expected to have the same shape.
109
- In streaming mode, key will be of length [P + C] with P the cached past timesteps, but
110
- query will be [C] (typically C == 1).
111
-
112
- Args:
113
- query (torch.Tensor): Query to rotate.
114
- key (torch.Tensor): Key to rotate.
115
- start (int): Start index of the sequence for time offset.
116
- """
117
- query_timesteps = query.shape[1]
118
- key_timesteps = key.shape[1]
119
- streaming_offset = key_timesteps - query_timesteps
120
-
121
- query_out = self.rotate(query, start + streaming_offset)
122
- key_out = self.rotate(key, start, invert_decay=True)
123
-
124
- return query_out, key_out
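
For reference, a small usage sketch of `RotaryEmbedding` as defined above. Per `rotate`, tensors are laid out as `[batch, time, heads, dim]` (the rotation is broadcast over batch and heads) and `dim` must be even; the sizes below are arbitrary example values.

```python
import torch
# RotaryEmbedding as defined above (audiocraft.modules.rope in this package).

rope = RotaryEmbedding(dim=64, max_period=10000.0, xpos=False)

query = torch.randn(2, 8, 4, 64)    # [batch, time, heads, per-head dim]
key = torch.randn(2, 8, 4, 64)
q_rot, k_rot = rope.rotate_qk(query, key)

# Streaming-style call: 3 cached key timesteps plus 1 new query timestep.
q_new = torch.randn(2, 1, 4, 64)
k_full = torch.randn(2, 4, 4, 64)
q_rot2, k_rot2 = rope.rotate_qk(q_new, k_full, start=0)
```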
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/hand.py DELETED
@@ -1,77 +0,0 @@
1
- import cv2
2
- import json
3
- import math
4
- import matplotlib
5
- import matplotlib.pyplot as plt
6
- import numpy as np
7
- import time
8
- import torch
9
- from scipy.ndimage.filters import gaussian_filter
10
- from skimage.measure import label
11
-
12
- from . import util
13
- from .model import handpose_model
14
-
15
-
16
- class Hand(object):
17
-
18
- def __init__(self, model_path):
19
- self.model = handpose_model()
20
- if torch.cuda.is_available():
21
- self.model = self.model.cuda()
22
- print('cuda')
23
- model_dict = util.transfer(self.model, torch.load(model_path))
24
- self.model.load_state_dict(model_dict)
25
- self.model.eval()
26
-
27
- def __call__(self, oriImg):
28
- scale_search = [0.5, 1.0, 1.5, 2.0]
29
- # scale_search = [0.5]
30
- boxsize = 368
31
- stride = 8
32
- padValue = 128
33
- thre = 0.05
34
- multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
35
- heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
36
- # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
37
-
38
- for m in range(len(multiplier)):
39
- scale = multiplier[m]
40
- imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
41
- imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
42
- im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
43
- im = np.ascontiguousarray(im)
44
-
45
- data = torch.from_numpy(im).float()
46
- if torch.cuda.is_available():
47
- data = data.cuda()
48
- # data = data.permute([2, 0, 1]).unsqueeze(0).float()
49
- with torch.no_grad():
50
- output = self.model(data).cpu().numpy()
51
- # output = self.model(data).numpy()
52
-
53
- # extract outputs, resize, and remove padding
54
- heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps
55
- heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
56
- heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
57
- heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
58
-
59
- heatmap_avg += heatmap / len(multiplier)
60
-
61
- all_peaks = []
62
- for part in range(21):
63
- map_ori = heatmap_avg[:, :, part]
64
- one_heatmap = gaussian_filter(map_ori, sigma=3)
65
- binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
66
- # the whole heatmap is below the threshold
67
- if np.sum(binary) == 0:
68
- all_peaks.append([0, 0])
69
- continue
70
- label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim)
71
- max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1
72
- label_img[label_img != max_index] = 0
73
- map_ori[label_img == 0] = 0
74
-
75
- y, x = util.npmax(map_ori)
76
- all_peaks.append([x, y])
77
- return np.array(all_peaks)
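
For reference, a minimal sketch of how this estimator is typically invoked. It assumes the package's `util` and `model` modules are importable and that a hand-pose checkpoint has been downloaded; the file names below are placeholders. The call returns a `(21, 2)` array of `[x, y]` keypoints, with `[0, 0]` for any joint whose heatmap never exceeds the threshold.

```python
import cv2
# Hand as defined above; both file names are placeholders.

hand_estimator = Hand('hand_pose_model.pth')

image_bgr = cv2.imread('hand.jpg')   # HxWx3 uint8 image, BGR as loaded by OpenCV
peaks = hand_estimator(image_bgr)    # shape (21, 2): one [x, y] per hand joint

for joint_id, (x, y) in enumerate(peaks):
    print(joint_id, x, y)
```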
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/requestdrag.js DELETED
@@ -1,2 +0,0 @@
1
- import RequestDrag from './utils/input/RequestDrag.js';
2
- export default RequestDrag;
 
 
 
spaces/AlekseyKorshuk/model-evaluation/conversation.py DELETED
@@ -1,51 +0,0 @@
1
- class Conversation:
2
- bot_id: str
3
- memory: str
4
- prompt: str
5
- bot_label: str
6
- user_label: str
7
- messages: list
8
-
9
- def __init__(self, bot_config):
10
- self.bot_id = bot_config.get("bot_id")
11
- self.memory = bot_config.get("memory", "")
12
- self.prompt = bot_config.get("prompt", "")
13
- self.bot_label = bot_config.get("botLabel", "Character")
14
- self.user_label = bot_config.get("userLabel", "User")
15
- self.first_message = bot_config.get("firstMessage", f"Hi, my name is {self.bot_label}!")
16
- self.reset_conversation()
17
-
18
- def reset_conversation(self):
19
- self.messages = [
20
- {
21
- "from": self.bot_label,
22
- "value": self.first_message
23
- }
24
- ]
25
-
26
- def set_chat_history(self, chat_history):
27
- messages = []
28
- for conversation_pair in chat_history:
29
- for item in conversation_pair:
30
- if item:
31
- messages.append(item)
32
- self.messages = []
33
- for i, message in enumerate(messages):
34
- label = self.bot_label if i % 2 == 0 else self.user_label
35
- self.messages.append(
36
- {
37
- "from": label,
38
- "value": message.strip()
39
- }
40
- )
41
-
42
- def add_user_message(self, message):
43
- self.messages.append(
44
- {
45
- "from": self.user_label,
46
- "value": message.strip()
47
- }
48
- )
49
-
50
- def reset_last_message(self, message):
51
- self.messages[-1]["value"] = message.strip()
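
For reference, a short sketch of how this class is typically constructed and used. The dictionary keys mirror the `bot_config.get(...)` lookups in `__init__` above; the values are made-up examples.

```python
# Example values only; the keys follow the .get() calls in __init__ above.
bot_config = {
    "bot_id": "demo-bot",
    "memory": "",
    "prompt": "You are a helpful assistant.",
    "botLabel": "Assistant",
    "userLabel": "User",
    "firstMessage": "Hi, how can I help?",
}

conv = Conversation(bot_config)
conv.add_user_message("What's the weather like?")
conv.reset_last_message("What is the weather like today?")  # amend the last entry
print(conv.messages)
```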
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlexWang/lama/bin/paper_runfiles/predict_inner_features.sh DELETED
@@ -1,20 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- # paths to data are valid for mml7
4
-
5
- source "$(dirname $0)/env.sh"
6
-
7
- "$BINDIR/predict_inner_features.py" \
8
- -cn default_inner_features_ffc \
9
- model.path="/data/inpainting/paper_data/final_models/ours/r.suvorov_2021-03-05_17-34-05_train_ablv2_work_ffc075_resume_epoch39" \
10
- indir="/data/inpainting/paper_data/inner_features_vis/input/" \
11
- outdir="/data/inpainting/paper_data/inner_features_vis/output/ffc" \
12
- dataset.img_suffix=.png
13
-
14
-
15
- "$BINDIR/predict_inner_features.py" \
16
- -cn default_inner_features_work \
17
- model.path="/data/inpainting/paper_data/final_models/ours/r.suvorov_2021-03-05_17-08-35_train_ablv2_work_resume_epoch37" \
18
- indir="/data/inpainting/paper_data/inner_features_vis/input/" \
19
- outdir="/data/inpainting/paper_data/inner_features_vis/output/work" \
20
- dataset.img_suffix=.png
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alican/pixera/util/__init__.py DELETED
@@ -1 +0,0 @@
1
- """This package includes a miscellaneous collection of useful helper functions."""
 
 
spaces/Alpaca233/SadTalker/src/face3d/data/template_dataset.py DELETED
@@ -1,75 +0,0 @@
1
- """Dataset class template
2
-
3
- This module provides a template for users to implement custom datasets.
4
- You can specify '--dataset_mode template' to use this dataset.
5
- The class name should be consistent with both the filename and its dataset_mode option.
6
- The filename should be <dataset_mode>_dataset.py
7
- The class name should be <Dataset_mode>Dataset.py
8
- You need to implement the following functions:
9
- -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
10
- -- <__init__>: Initialize this dataset class.
11
- -- <__getitem__>: Return a data point and its metadata information.
12
- -- <__len__>: Return the number of images.
13
- """
14
- from data.base_dataset import BaseDataset, get_transform
15
- # from data.image_folder import make_dataset
16
- # from PIL import Image
17
-
18
-
19
- class TemplateDataset(BaseDataset):
20
- """A template dataset class for you to implement custom datasets."""
21
- @staticmethod
22
- def modify_commandline_options(parser, is_train):
23
- """Add new dataset-specific options, and rewrite default values for existing options.
24
-
25
- Parameters:
26
- parser -- original option parser
27
- is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
28
-
29
- Returns:
30
- the modified parser.
31
- """
32
- parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
33
- parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
34
- return parser
35
-
36
- def __init__(self, opt):
37
- """Initialize this dataset class.
38
-
39
- Parameters:
40
- opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
41
-
42
- A few things can be done here.
43
- - save the options (have been done in BaseDataset)
44
- - get image paths and meta information of the dataset.
45
- - define the image transformation.
46
- """
47
- # save the option and dataset root
48
- BaseDataset.__init__(self, opt)
49
- # get the image paths of your dataset;
50
- self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
51
- # define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
52
- self.transform = get_transform(opt)
53
-
54
- def __getitem__(self, index):
55
- """Return a data point and its metadata information.
56
-
57
- Parameters:
58
- index -- a random integer for data indexing
59
-
60
- Returns:
61
- a dictionary of data with their names. It usually contains the data itself and its metadata information.
62
-
63
- Step 1: get a random image path: e.g., path = self.image_paths[index]
64
- Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
65
- Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
66
- Step 4: return a data point as a dictionary.
67
- """
68
- path = 'temp' # needs to be a string
69
- data_A = None # needs to be a tensor
70
- data_B = None # needs to be a tensor
71
- return {'data_A': data_A, 'data_B': data_B, 'path': path}
72
-
73
- def __len__(self):
74
- """Return the total number of images."""
75
- return len(self.image_paths)
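
For reference, a minimal concrete dataset following the template above, assuming the same `data.base_dataset` helpers are importable and that `opt.dataroot` points at a folder of images (both are conventions of this codebase, not shown here). Per the template's naming rules, such a class would live in a `<dataset_mode>_dataset.py` file and be selected with the matching `--dataset_mode` flag.

```python
import os

from PIL import Image

from data.base_dataset import BaseDataset, get_transform  # same helpers as above


class FolderImageDataset(BaseDataset):
    """Example dataset: returns every .png under opt.dataroot as tensor 'data_A'."""

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)
        # Collect image paths once at construction time.
        self.image_paths = sorted(
            os.path.join(opt.dataroot, f)
            for f in os.listdir(opt.dataroot)
            if f.endswith(".png")
        )
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        path = self.image_paths[index]
        image = Image.open(path).convert("RGB")
        data_A = self.transform(image)          # PIL image -> tensor
        return {"data_A": data_A, "path": path}

    def __len__(self):
        return len(self.image_paths)
```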
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlphaDragon/Voice-Clone/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Voice Cloning Demo
3
- emoji: 💩
4
- colorFrom: yellow
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.21.0
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: coraKong/voice-cloning-demo
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alphts/Robot/app.py DELETED
@@ -1,59 +0,0 @@
1
-
2
- import openai
3
- import os
4
- import gradio as gr
5
-
6
- openai.api_key = os.environ.get("OPENAI_API_KEY")
7
-
8
- class Conversation:
9
- def __init__(self, prompt, num_of_round):
10
- self.prompt = prompt
11
- self.num_of_round = num_of_round
12
- self.messages = []
13
- self.messages.append({"role": "system", "content": self.prompt})
14
-
15
- def ask(self, question):
16
- try:
17
- self.messages.append({"role": "user", "content": question})
18
- response = openai.ChatCompletion.create(
19
- model="gpt-3.5-turbo",
20
- messages=self.messages,
21
- temperature=0.5,
22
- max_tokens=2048,
23
- top_p=1,
24
- )
25
- except Exception as e:
26
- print(e)
27
- return e
28
-
29
- message = response["choices"][0]["message"]["content"]
30
- self.messages.append({"role": "assistant", "content": message})
31
-
32
- if len(self.messages) > self.num_of_round*2 + 1:
33
- del self.messages[1:3]
34
- return message
35
-
36
-
37
- prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:
38
- 1. 你的回答必须是中文
39
- 2. 回答限制在100个字以内"""
40
-
41
- conv = Conversation(prompt, 10)
42
-
43
- def answer(question, history=[]):
44
- history.append(question)
45
- response = conv.ask(question)
46
- history.append(response)
47
- responses = [(u,b) for u,b in zip(history[::2], history[1::2])]
48
- return responses, history
49
-
50
- with gr.Blocks(css="#chatbot{height:600px} .overflow-y-auto{height:500px}") as demo:
51
- chatbot = gr.Chatbot(elem_id="chatbot")
52
- state = gr.State([])
53
-
54
- with gr.Row():
55
- txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
56
-
57
- txt.submit(answer, [txt, state], [chatbot, state])
58
-
59
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.h DELETED
@@ -1,38 +0,0 @@
1
- // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- //
3
- // NVIDIA CORPORATION and its licensors retain all intellectual property
4
- // and proprietary rights in and to this software, related documentation
5
- // and any modifications thereto. Any use, reproduction, disclosure or
6
- // distribution of this software and related documentation without an express
7
- // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- //------------------------------------------------------------------------
10
- // CUDA kernel parameters.
11
-
12
- struct bias_act_kernel_params
13
- {
14
- const void* x; // [sizeX]
15
- const void* b; // [sizeB] or NULL
16
- const void* xref; // [sizeX] or NULL
17
- const void* yref; // [sizeX] or NULL
18
- const void* dy; // [sizeX] or NULL
19
- void* y; // [sizeX]
20
-
21
- int grad;
22
- int act;
23
- float alpha;
24
- float gain;
25
- float clamp;
26
-
27
- int sizeX;
28
- int sizeB;
29
- int stepB;
30
- int loopX;
31
- };
32
-
33
- //------------------------------------------------------------------------
34
- // CUDA kernel selection.
35
-
36
- template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p);
37
-
38
- //------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/lora/train_text_to_image_lora.py DELETED
@@ -1,1014 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Fine-tuning script for Stable Diffusion for text2image with support for LoRA."""
16
-
17
- import argparse
18
- import itertools
19
- import json
20
- import logging
21
- import math
22
- import os
23
- import random
24
- from pathlib import Path
25
-
26
- import datasets
27
- import numpy as np
28
- import torch
29
- import torch.nn.functional as F
30
- import torch.utils.checkpoint
31
- import transformers
32
- from accelerate import Accelerator
33
- from accelerate.logging import get_logger
34
- from accelerate.utils import ProjectConfiguration, set_seed
35
- from datasets import load_dataset
36
- from huggingface_hub import create_repo, upload_folder
37
- from packaging import version
38
- from torchvision import transforms
39
- from tqdm.auto import tqdm
40
- from transformers import CLIPTextModel, CLIPTokenizer
41
-
42
- import diffusers
43
- from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
44
- from diffusers.loaders import AttnProcsLayers
45
- from diffusers.models.attention_processor import LoRAAttnProcessor
46
- from diffusers.optimization import get_scheduler
47
- from diffusers.utils import check_min_version, is_wandb_available
48
- from diffusers.utils.import_utils import is_xformers_available
49
-
50
-
51
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
52
- check_min_version("0.14.0.dev0")
53
-
54
- logger = get_logger(__name__, log_level="INFO")
55
-
56
-
57
- def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
58
- img_str = ""
59
- for i, image in enumerate(images):
60
- image.save(os.path.join(repo_folder, f"image_{i}.png"))
61
- img_str += f"![img_{i}](./image_{i}.png)\n"
62
-
63
- yaml = f"""
64
- ---
65
- license: creativeml-openrail-m
66
- base_model: {base_model}
67
- tags:
68
- - stable-diffusion
69
- - stable-diffusion-diffusers
70
- - text-to-image
71
- - diffusers
72
- - lora
73
- inference: true
74
- ---
75
- """
76
- model_card = f"""
77
- # LoRA text2image fine-tuning - {repo_id}
78
- These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
79
- {img_str}
80
- """
81
- with open(os.path.join(repo_folder, "README.md"), "w") as f:
82
- f.write(yaml + model_card)
83
-
84
-
85
- def parse_args():
86
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
87
- parser.add_argument(
88
- "--pretrained_model_name_or_path",
89
- type=str,
90
- default=None,
91
- required=True,
92
- help="Path to pretrained model or model identifier from huggingface.co/models.",
93
- )
94
- parser.add_argument(
95
- "--revision",
96
- type=str,
97
- default=None,
98
- required=False,
99
- help="Revision of pretrained model identifier from huggingface.co/models.",
100
- )
101
- parser.add_argument(
102
- "--dataset_name",
103
- type=str,
104
- default=None,
105
- help=(
106
- "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
107
- " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
108
- " or to a folder containing files that 🤗 Datasets can understand."
109
- ),
110
- )
111
- parser.add_argument(
112
- "--dataset_config_name",
113
- type=str,
114
- default=None,
115
- help="The config of the Dataset, leave as None if there's only one config.",
116
- )
117
- parser.add_argument(
118
- "--train_data_dir",
119
- type=str,
120
- default=None,
121
- help=(
122
- "A folder containing the training data. Folder contents must follow the structure described in"
123
- " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
124
- " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
125
- ),
126
- )
127
- parser.add_argument(
128
- "--image_column", type=str, default="image", help="The column of the dataset containing an image."
129
- )
130
- parser.add_argument(
131
- "--caption_column",
132
- type=str,
133
- default="text",
134
- help="The column of the dataset containing a caption or a list of captions.",
135
- )
136
- parser.add_argument(
137
- "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference."
138
- )
139
- parser.add_argument(
140
- "--num_validation_images",
141
- type=int,
142
- default=4,
143
- help="Number of images that should be generated during validation with `validation_prompt`.",
144
- )
145
- parser.add_argument(
146
- "--validation_epochs",
147
- type=int,
148
- default=1,
149
- help=(
150
- "Run fine-tuning validation every X epochs. The validation process consists of running the prompt"
151
- " `args.validation_prompt` multiple times: `args.num_validation_images`."
152
- ),
153
- )
154
- parser.add_argument(
155
- "--max_train_samples",
156
- type=int,
157
- default=None,
158
- help=(
159
- "For debugging purposes or quicker training, truncate the number of training examples to this "
160
- "value if set."
161
- ),
162
- )
163
- parser.add_argument(
164
- "--output_dir",
165
- type=str,
166
- default="sd-model-finetuned-lora",
167
- help="The output directory where the model predictions and checkpoints will be written.",
168
- )
169
- parser.add_argument(
170
- "--cache_dir",
171
- type=str,
172
- default=None,
173
- help="The directory where the downloaded models and datasets will be stored.",
174
- )
175
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
176
- parser.add_argument(
177
- "--resolution",
178
- type=int,
179
- default=512,
180
- help=(
181
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
182
- " resolution"
183
- ),
184
- )
185
- parser.add_argument(
186
- "--center_crop",
187
- default=False,
188
- action="store_true",
189
- help=(
190
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
191
- " cropped. The images will be resized to the resolution first before cropping."
192
- ),
193
- )
194
- parser.add_argument(
195
- "--random_flip",
196
- action="store_true",
197
- help="whether to randomly flip images horizontally",
198
- )
199
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
200
-
201
- # lora args
202
- parser.add_argument("--use_peft", action="store_true", help="Whether to use peft to support lora")
203
- parser.add_argument("--lora_r", type=int, default=4, help="Lora rank, only used if use_lora is True")
204
- parser.add_argument("--lora_alpha", type=int, default=32, help="Lora alpha, only used if lora is True")
205
- parser.add_argument("--lora_dropout", type=float, default=0.0, help="Lora dropout, only used if use_lora is True")
206
- parser.add_argument(
207
- "--lora_bias",
208
- type=str,
209
- default="none",
210
- help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora is True",
211
- )
212
- parser.add_argument(
213
- "--lora_text_encoder_r",
214
- type=int,
215
- default=4,
216
- help="Lora rank for text encoder, only used if `use_lora` and `train_text_encoder` are True",
217
- )
218
- parser.add_argument(
219
- "--lora_text_encoder_alpha",
220
- type=int,
221
- default=32,
222
- help="Lora alpha for text encoder, only used if `use_lora` and `train_text_encoder` are True",
223
- )
224
- parser.add_argument(
225
- "--lora_text_encoder_dropout",
226
- type=float,
227
- default=0.0,
228
- help="Lora dropout for text encoder, only used if `use_lora` and `train_text_encoder` are True",
229
- )
230
- parser.add_argument(
231
- "--lora_text_encoder_bias",
232
- type=str,
233
- default="none",
234
- help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora and `train_text_encoder` are True",
235
- )
236
-
237
- parser.add_argument(
238
- "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
239
- )
240
- parser.add_argument("--num_train_epochs", type=int, default=100)
241
- parser.add_argument(
242
- "--max_train_steps",
243
- type=int,
244
- default=None,
245
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
246
- )
247
- parser.add_argument(
248
- "--gradient_accumulation_steps",
249
- type=int,
250
- default=1,
251
- help="Number of updates steps to accumulate before performing a backward/update pass.",
252
- )
253
- parser.add_argument(
254
- "--gradient_checkpointing",
255
- action="store_true",
256
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
257
- )
258
- parser.add_argument(
259
- "--learning_rate",
260
- type=float,
261
- default=1e-4,
262
- help="Initial learning rate (after the potential warmup period) to use.",
263
- )
264
- parser.add_argument(
265
- "--scale_lr",
266
- action="store_true",
267
- default=False,
268
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
269
- )
270
- parser.add_argument(
271
- "--lr_scheduler",
272
- type=str,
273
- default="constant",
274
- help=(
275
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
276
- ' "constant", "constant_with_warmup"]'
277
- ),
278
- )
279
- parser.add_argument(
280
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
281
- )
282
- parser.add_argument(
283
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
284
- )
285
- parser.add_argument(
286
- "--allow_tf32",
287
- action="store_true",
288
- help=(
289
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
290
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
291
- ),
292
- )
293
- parser.add_argument(
294
- "--dataloader_num_workers",
295
- type=int,
296
- default=0,
297
- help=(
298
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
299
- ),
300
- )
301
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
302
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
303
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
304
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
305
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
306
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
307
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
308
- parser.add_argument(
309
- "--hub_model_id",
310
- type=str,
311
- default=None,
312
- help="The name of the repository to keep in sync with the local `output_dir`.",
313
- )
314
- parser.add_argument(
315
- "--logging_dir",
316
- type=str,
317
- default="logs",
318
- help=(
319
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
320
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
321
- ),
322
- )
323
- parser.add_argument(
324
- "--mixed_precision",
325
- type=str,
326
- default=None,
327
- choices=["no", "fp16", "bf16"],
328
- help=(
329
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
330
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
331
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
332
- ),
333
- )
334
- parser.add_argument(
335
- "--report_to",
336
- type=str,
337
- default="tensorboard",
338
- help=(
339
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
340
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
341
- ),
342
- )
343
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
344
- parser.add_argument(
345
- "--checkpointing_steps",
346
- type=int,
347
- default=500,
348
- help=(
349
- "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
350
- " training using `--resume_from_checkpoint`."
351
- ),
352
- )
353
- parser.add_argument(
354
- "--checkpoints_total_limit",
355
- type=int,
356
- default=None,
357
- help=(
358
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
359
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
360
- " for more docs"
361
- ),
362
- )
363
- parser.add_argument(
364
- "--resume_from_checkpoint",
365
- type=str,
366
- default=None,
367
- help=(
368
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
369
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
370
- ),
371
- )
372
- parser.add_argument(
373
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
374
- )
375
-
376
- args = parser.parse_args()
377
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
378
- if env_local_rank != -1 and env_local_rank != args.local_rank:
379
- args.local_rank = env_local_rank
380
-
381
- # Sanity checks
382
- if args.dataset_name is None and args.train_data_dir is None:
383
- raise ValueError("Need either a dataset name or a training folder.")
384
-
385
- return args
386
-
387
-
388
- DATASET_NAME_MAPPING = {
389
- "lambdalabs/pokemon-blip-captions": ("image", "text"),
390
- }
391
-
392
-
393
- def main():
394
- args = parse_args()
395
- logging_dir = os.path.join(args.output_dir, args.logging_dir)
396
-
397
- accelerator_project_config = ProjectConfiguration(
398
- total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
399
- )
400
-
401
- accelerator = Accelerator(
402
- gradient_accumulation_steps=args.gradient_accumulation_steps,
403
- mixed_precision=args.mixed_precision,
404
- log_with=args.report_to,
405
- project_config=accelerator_project_config,
406
- )
407
- if args.report_to == "wandb":
408
- if not is_wandb_available():
409
- raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
410
- import wandb
411
-
412
- # Make one log on every process with the configuration for debugging.
413
- logging.basicConfig(
414
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
415
- datefmt="%m/%d/%Y %H:%M:%S",
416
- level=logging.INFO,
417
- )
418
- logger.info(accelerator.state, main_process_only=False)
419
- if accelerator.is_local_main_process:
420
- datasets.utils.logging.set_verbosity_warning()
421
- transformers.utils.logging.set_verbosity_warning()
422
- diffusers.utils.logging.set_verbosity_info()
423
- else:
424
- datasets.utils.logging.set_verbosity_error()
425
- transformers.utils.logging.set_verbosity_error()
426
- diffusers.utils.logging.set_verbosity_error()
427
-
428
- # If passed along, set the training seed now.
429
- if args.seed is not None:
430
- set_seed(args.seed)
431
-
432
- # Handle the repository creation
433
- if accelerator.is_main_process:
434
- if args.output_dir is not None:
435
- os.makedirs(args.output_dir, exist_ok=True)
436
-
437
- if args.push_to_hub:
438
- repo_id = create_repo(
439
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
440
- ).repo_id
441
-
442
- # Load scheduler, tokenizer and models.
443
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
444
- tokenizer = CLIPTokenizer.from_pretrained(
445
- args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
446
- )
447
- text_encoder = CLIPTextModel.from_pretrained(
448
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
449
- )
450
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
451
- unet = UNet2DConditionModel.from_pretrained(
452
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
453
- )
454
-
455
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
456
- # as these models are only used for inference, keeping weights in full precision is not required.
457
- weight_dtype = torch.float32
458
- if accelerator.mixed_precision == "fp16":
459
- weight_dtype = torch.float16
460
- elif accelerator.mixed_precision == "bf16":
461
- weight_dtype = torch.bfloat16
462
-
463
- if args.use_peft:
464
- from peft import LoraConfig, LoraModel, get_peft_model_state_dict, set_peft_model_state_dict
465
-
466
- UNET_TARGET_MODULES = ["to_q", "to_v", "query", "value"]
467
- TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"]
468
-
469
- config = LoraConfig(
470
- r=args.lora_r,
471
- lora_alpha=args.lora_alpha,
472
- target_modules=UNET_TARGET_MODULES,
473
- lora_dropout=args.lora_dropout,
474
- bias=args.lora_bias,
475
- )
476
- unet = LoraModel(config, unet)
477
-
478
- vae.requires_grad_(False)
479
- if args.train_text_encoder:
480
- config = LoraConfig(
481
- r=args.lora_text_encoder_r,
482
- lora_alpha=args.lora_text_encoder_alpha,
483
- target_modules=TEXT_ENCODER_TARGET_MODULES,
484
- lora_dropout=args.lora_text_encoder_dropout,
485
- bias=args.lora_text_encoder_bias,
486
- )
487
- text_encoder = LoraModel(config, text_encoder)
488
- else:
489
- # freeze parameters of models to save more memory
490
- unet.requires_grad_(False)
491
- vae.requires_grad_(False)
492
-
493
- text_encoder.requires_grad_(False)
494
-
495
- # now we will add new LoRA weights to the attention layers
496
- # It's important to realize here how many attention weights will be added and of which sizes
497
- # The sizes of the attention layers consist only of two different variables:
498
- # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`.
499
- # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`.
500
-
501
- # Let's first see how many attention processors we will have to set.
502
- # For Stable Diffusion, it should be equal to:
503
- # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12
504
- # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2
505
-         # - up blocks (2x attention layers) * (3x transformer layers) * (3x up blocks) = 18
506
- # => 32 layers
507
-
508
- # Set correct lora layers
509
- lora_attn_procs = {}
510
- for name in unet.attn_processors.keys():
511
- cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
512
- if name.startswith("mid_block"):
513
- hidden_size = unet.config.block_out_channels[-1]
514
- elif name.startswith("up_blocks"):
515
- block_id = int(name[len("up_blocks.")])
516
- hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
517
- elif name.startswith("down_blocks"):
518
- block_id = int(name[len("down_blocks.")])
519
- hidden_size = unet.config.block_out_channels[block_id]
520
-
521
- lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
522
-
523
- unet.set_attn_processor(lora_attn_procs)
524
- lora_layers = AttnProcsLayers(unet.attn_processors)
525
-
526
- # Move unet, vae and text_encoder to device and cast to weight_dtype
527
- vae.to(accelerator.device, dtype=weight_dtype)
528
- if not args.train_text_encoder:
529
- text_encoder.to(accelerator.device, dtype=weight_dtype)
530
-
531
- if args.enable_xformers_memory_efficient_attention:
532
- if is_xformers_available():
533
- import xformers
534
-
535
- xformers_version = version.parse(xformers.__version__)
536
- if xformers_version == version.parse("0.0.16"):
537
- logger.warn(
538
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
539
- )
540
- unet.enable_xformers_memory_efficient_attention()
541
- else:
542
- raise ValueError("xformers is not available. Make sure it is installed correctly")
543
-
544
- # Enable TF32 for faster training on Ampere GPUs,
545
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
546
- if args.allow_tf32:
547
- torch.backends.cuda.matmul.allow_tf32 = True
548
-
549
- if args.scale_lr:
550
- args.learning_rate = (
551
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
552
- )
553
-
554
- # Initialize the optimizer
555
- if args.use_8bit_adam:
556
- try:
557
- import bitsandbytes as bnb
558
- except ImportError:
559
- raise ImportError(
560
- "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
561
- )
562
-
563
- optimizer_cls = bnb.optim.AdamW8bit
564
- else:
565
- optimizer_cls = torch.optim.AdamW
566
-
567
- if args.use_peft:
568
- # Optimizer creation
569
- params_to_optimize = (
570
- itertools.chain(unet.parameters(), text_encoder.parameters())
571
- if args.train_text_encoder
572
- else unet.parameters()
573
- )
574
- optimizer = optimizer_cls(
575
- params_to_optimize,
576
- lr=args.learning_rate,
577
- betas=(args.adam_beta1, args.adam_beta2),
578
- weight_decay=args.adam_weight_decay,
579
- eps=args.adam_epsilon,
580
- )
581
- else:
582
- optimizer = optimizer_cls(
583
- lora_layers.parameters(),
584
- lr=args.learning_rate,
585
- betas=(args.adam_beta1, args.adam_beta2),
586
- weight_decay=args.adam_weight_decay,
587
- eps=args.adam_epsilon,
588
- )
589
-
590
- # Get the datasets: you can either provide your own training and evaluation files (see below)
591
- # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
592
-
593
- # In distributed training, the load_dataset function guarantees that only one local process can concurrently
594
- # download the dataset.
595
- if args.dataset_name is not None:
596
- # Downloading and loading a dataset from the hub.
597
- dataset = load_dataset(
598
- args.dataset_name,
599
- args.dataset_config_name,
600
- cache_dir=args.cache_dir,
601
- )
602
- else:
603
- data_files = {}
604
- if args.train_data_dir is not None:
605
- data_files["train"] = os.path.join(args.train_data_dir, "**")
606
- dataset = load_dataset(
607
- "imagefolder",
608
- data_files=data_files,
609
- cache_dir=args.cache_dir,
610
- )
611
- # See more about loading custom images at
612
- # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
613
-
614
- # Preprocessing the datasets.
615
- # We need to tokenize inputs and targets.
616
- column_names = dataset["train"].column_names
617
-
618
- # 6. Get the column names for input/target.
619
- dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
620
- if args.image_column is None:
621
- image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
622
- else:
623
- image_column = args.image_column
624
- if image_column not in column_names:
625
- raise ValueError(
626
-                 f"'--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
627
- )
628
- if args.caption_column is None:
629
- caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
630
- else:
631
- caption_column = args.caption_column
632
- if caption_column not in column_names:
633
- raise ValueError(
634
-                 f"'--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
635
- )
636
-
637
- # Preprocessing the datasets.
638
- # We need to tokenize input captions and transform the images.
639
- def tokenize_captions(examples, is_train=True):
640
- captions = []
641
- for caption in examples[caption_column]:
642
- if isinstance(caption, str):
643
- captions.append(caption)
644
- elif isinstance(caption, (list, np.ndarray)):
645
- # take a random caption if there are multiple
646
- captions.append(random.choice(caption) if is_train else caption[0])
647
- else:
648
- raise ValueError(
649
- f"Caption column `{caption_column}` should contain either strings or lists of strings."
650
- )
651
- inputs = tokenizer(
652
- captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
653
- )
654
- return inputs.input_ids
655
-
656
- # Preprocessing the datasets.
657
- train_transforms = transforms.Compose(
658
- [
659
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
660
- transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
661
- transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
662
- transforms.ToTensor(),
663
- transforms.Normalize([0.5], [0.5]),
664
- ]
665
- )
666
-
667
- def preprocess_train(examples):
668
- images = [image.convert("RGB") for image in examples[image_column]]
669
- examples["pixel_values"] = [train_transforms(image) for image in images]
670
- examples["input_ids"] = tokenize_captions(examples)
671
- return examples
672
-
673
- with accelerator.main_process_first():
674
- if args.max_train_samples is not None:
675
- dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
676
- # Set the training transforms
677
- train_dataset = dataset["train"].with_transform(preprocess_train)
678
-
679
- def collate_fn(examples):
680
- pixel_values = torch.stack([example["pixel_values"] for example in examples])
681
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
682
- input_ids = torch.stack([example["input_ids"] for example in examples])
683
- return {"pixel_values": pixel_values, "input_ids": input_ids}
684
-
685
- # DataLoaders creation:
686
- train_dataloader = torch.utils.data.DataLoader(
687
- train_dataset,
688
- shuffle=True,
689
- collate_fn=collate_fn,
690
- batch_size=args.train_batch_size,
691
- num_workers=args.dataloader_num_workers,
692
- )
693
-
694
- # Scheduler and math around the number of training steps.
695
- overrode_max_train_steps = False
696
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
697
- if args.max_train_steps is None:
698
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
699
- overrode_max_train_steps = True
700
-
701
- lr_scheduler = get_scheduler(
702
- args.lr_scheduler,
703
- optimizer=optimizer,
704
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
705
- num_training_steps=args.max_train_steps * accelerator.num_processes,
706
- )
707
-
708
- # Prepare everything with our `accelerator`.
709
- if args.use_peft:
710
- if args.train_text_encoder:
711
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
712
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
713
- )
714
- else:
715
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
716
- unet, optimizer, train_dataloader, lr_scheduler
717
- )
718
- else:
719
- lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
720
- lora_layers, optimizer, train_dataloader, lr_scheduler
721
- )
722
-
723
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
724
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
725
- if overrode_max_train_steps:
726
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
727
- # Afterwards we recalculate our number of training epochs
728
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
729
-
730
- # We need to initialize the trackers we use, and also store our configuration.
731
-     # The trackers are initialized automatically on the main process.
732
- if accelerator.is_main_process:
733
- accelerator.init_trackers("text2image-fine-tune", config=vars(args))
734
-
735
- # Train!
736
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
737
-
738
- logger.info("***** Running training *****")
739
- logger.info(f" Num examples = {len(train_dataset)}")
740
- logger.info(f" Num Epochs = {args.num_train_epochs}")
741
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
742
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
743
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
744
- logger.info(f" Total optimization steps = {args.max_train_steps}")
745
- global_step = 0
746
- first_epoch = 0
747
-
748
- # Potentially load in the weights and states from a previous save
749
- if args.resume_from_checkpoint:
750
- if args.resume_from_checkpoint != "latest":
751
- path = os.path.basename(args.resume_from_checkpoint)
752
- else:
753
- # Get the most recent checkpoint
754
- dirs = os.listdir(args.output_dir)
755
- dirs = [d for d in dirs if d.startswith("checkpoint")]
756
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
757
- path = dirs[-1] if len(dirs) > 0 else None
758
-
759
- if path is None:
760
- accelerator.print(
761
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
762
- )
763
- args.resume_from_checkpoint = None
764
- else:
765
- accelerator.print(f"Resuming from checkpoint {path}")
766
- accelerator.load_state(os.path.join(args.output_dir, path))
767
- global_step = int(path.split("-")[1])
768
-
769
- resume_global_step = global_step * args.gradient_accumulation_steps
770
- first_epoch = global_step // num_update_steps_per_epoch
771
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
772
-
773
- # Only show the progress bar once on each machine.
774
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
775
- progress_bar.set_description("Steps")
776
-
777
- for epoch in range(first_epoch, args.num_train_epochs):
778
- unet.train()
779
- if args.train_text_encoder:
780
- text_encoder.train()
781
- train_loss = 0.0
782
- for step, batch in enumerate(train_dataloader):
783
- # Skip steps until we reach the resumed step
784
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
785
- if step % args.gradient_accumulation_steps == 0:
786
- progress_bar.update(1)
787
- continue
788
-
789
- with accelerator.accumulate(unet):
790
- # Convert images to latent space
791
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
792
- latents = latents * vae.config.scaling_factor
793
-
794
- # Sample noise that we'll add to the latents
795
- noise = torch.randn_like(latents)
796
- bsz = latents.shape[0]
797
- # Sample a random timestep for each image
798
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
799
- timesteps = timesteps.long()
800
-
801
- # Add noise to the latents according to the noise magnitude at each timestep
802
- # (this is the forward diffusion process)
803
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
804
-
805
- # Get the text embedding for conditioning
806
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
807
-
808
- # Get the target for loss depending on the prediction type
809
- if noise_scheduler.config.prediction_type == "epsilon":
810
- target = noise
811
- elif noise_scheduler.config.prediction_type == "v_prediction":
812
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
813
- else:
814
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
815
-
816
- # Predict the noise residual and compute loss
817
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
818
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
819
-
820
- # Gather the losses across all processes for logging (if we use distributed training).
821
- avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
822
- train_loss += avg_loss.item() / args.gradient_accumulation_steps
823
-
824
- # Backpropagate
825
- accelerator.backward(loss)
826
- if accelerator.sync_gradients:
827
- if args.use_peft:
828
- params_to_clip = (
829
- itertools.chain(unet.parameters(), text_encoder.parameters())
830
- if args.train_text_encoder
831
- else unet.parameters()
832
- )
833
- else:
834
- params_to_clip = lora_layers.parameters()
835
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
836
- optimizer.step()
837
- lr_scheduler.step()
838
- optimizer.zero_grad()
839
-
840
- # Checks if the accelerator has performed an optimization step behind the scenes
841
- if accelerator.sync_gradients:
842
- progress_bar.update(1)
843
- global_step += 1
844
- accelerator.log({"train_loss": train_loss}, step=global_step)
845
- train_loss = 0.0
846
-
847
- if global_step % args.checkpointing_steps == 0:
848
- if accelerator.is_main_process:
849
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
850
- accelerator.save_state(save_path)
851
- logger.info(f"Saved state to {save_path}")
852
-
853
- logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
854
- progress_bar.set_postfix(**logs)
855
-
856
- if global_step >= args.max_train_steps:
857
- break
858
-
859
- if accelerator.is_main_process:
860
- if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
861
- logger.info(
862
- f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
863
- f" {args.validation_prompt}."
864
- )
865
- # create pipeline
866
- pipeline = DiffusionPipeline.from_pretrained(
867
- args.pretrained_model_name_or_path,
868
- unet=accelerator.unwrap_model(unet),
869
- text_encoder=accelerator.unwrap_model(text_encoder),
870
- revision=args.revision,
871
- torch_dtype=weight_dtype,
872
- )
873
- pipeline = pipeline.to(accelerator.device)
874
- pipeline.set_progress_bar_config(disable=True)
875
-
876
- # run inference
877
- generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
878
- images = []
879
- for _ in range(args.num_validation_images):
880
- images.append(
881
- pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0]
882
- )
883
-
884
- if accelerator.is_main_process:
885
- for tracker in accelerator.trackers:
886
- if tracker.name == "tensorboard":
887
- np_images = np.stack([np.asarray(img) for img in images])
888
- tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
889
- if tracker.name == "wandb":
890
- tracker.log(
891
- {
892
- "validation": [
893
- wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
894
- for i, image in enumerate(images)
895
- ]
896
- }
897
- )
898
-
899
- del pipeline
900
- torch.cuda.empty_cache()
901
-
902
- # Save the lora layers
903
- accelerator.wait_for_everyone()
904
- if accelerator.is_main_process:
905
- if args.use_peft:
906
- lora_config = {}
907
-             unwrapped_unet = accelerator.unwrap_model(unet)
908
-             state_dict = get_peft_model_state_dict(unwrapped_unet, state_dict=accelerator.get_state_dict(unet))
909
-             lora_config["peft_config"] = unwrapped_unet.get_peft_config_as_dict(inference=True)
910
- if args.train_text_encoder:
911
-                 unwrapped_text_encoder = accelerator.unwrap_model(text_encoder)
912
- text_encoder_state_dict = get_peft_model_state_dict(
913
-                     unwrapped_text_encoder, state_dict=accelerator.get_state_dict(text_encoder)
914
- )
915
- text_encoder_state_dict = {f"text_encoder_{k}": v for k, v in text_encoder_state_dict.items()}
916
- state_dict.update(text_encoder_state_dict)
917
-                 lora_config["text_encoder_peft_config"] = unwrapped_text_encoder.get_peft_config_as_dict(
918
- inference=True
919
- )
920
-
921
- accelerator.save(state_dict, os.path.join(args.output_dir, f"{global_step}_lora.pt"))
922
- with open(os.path.join(args.output_dir, f"{global_step}_lora_config.json"), "w") as f:
923
- json.dump(lora_config, f)
924
- else:
925
- unet = unet.to(torch.float32)
926
- unet.save_attn_procs(args.output_dir)
927
-
928
- if args.push_to_hub:
929
- save_model_card(
930
- repo_id,
931
- images=images,
932
- base_model=args.pretrained_model_name_or_path,
933
- dataset_name=args.dataset_name,
934
- repo_folder=args.output_dir,
935
- )
936
- upload_folder(
937
- repo_id=repo_id,
938
- folder_path=args.output_dir,
939
- commit_message="End of training",
940
- ignore_patterns=["step_*", "epoch_*"],
941
- )
942
-
943
- # Final inference
944
- # Load previous pipeline
945
- pipeline = DiffusionPipeline.from_pretrained(
946
- args.pretrained_model_name_or_path, revision=args.revision, torch_dtype=weight_dtype
947
- )
948
-
949
- if args.use_peft:
950
-
951
- def load_and_set_lora_ckpt(pipe, ckpt_dir, global_step, device, dtype):
952
- with open(os.path.join(args.output_dir, f"{global_step}_lora_config.json"), "r") as f:
953
- lora_config = json.load(f)
954
- print(lora_config)
955
-
956
- checkpoint = os.path.join(args.output_dir, f"{global_step}_lora.pt")
957
- lora_checkpoint_sd = torch.load(checkpoint)
958
- unet_lora_ds = {k: v for k, v in lora_checkpoint_sd.items() if "text_encoder_" not in k}
959
- text_encoder_lora_ds = {
960
- k.replace("text_encoder_", ""): v for k, v in lora_checkpoint_sd.items() if "text_encoder_" in k
961
- }
962
-
963
- unet_config = LoraConfig(**lora_config["peft_config"])
964
- pipe.unet = LoraModel(unet_config, pipe.unet)
965
- set_peft_model_state_dict(pipe.unet, unet_lora_ds)
966
-
967
- if "text_encoder_peft_config" in lora_config:
968
- text_encoder_config = LoraConfig(**lora_config["text_encoder_peft_config"])
969
- pipe.text_encoder = LoraModel(text_encoder_config, pipe.text_encoder)
970
- set_peft_model_state_dict(pipe.text_encoder, text_encoder_lora_ds)
971
-
972
- if dtype in (torch.float16, torch.bfloat16):
973
- pipe.unet.half()
974
- pipe.text_encoder.half()
975
-
976
- pipe.to(device)
977
- return pipe
978
-
979
- pipeline = load_and_set_lora_ckpt(pipeline, args.output_dir, global_step, accelerator.device, weight_dtype)
980
-
981
- else:
982
- pipeline = pipeline.to(accelerator.device)
983
- # load attention processors
984
- pipeline.unet.load_attn_procs(args.output_dir)
985
-
986
- # run inference
987
- if args.seed is not None:
988
- generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
989
- else:
990
- generator = None
991
- images = []
992
- for _ in range(args.num_validation_images):
993
- images.append(pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0])
994
-
995
- if accelerator.is_main_process:
996
- for tracker in accelerator.trackers:
997
- if tracker.name == "tensorboard":
998
- np_images = np.stack([np.asarray(img) for img in images])
999
- tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
1000
- if tracker.name == "wandb":
1001
- tracker.log(
1002
- {
1003
- "test": [
1004
- wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
1005
- for i, image in enumerate(images)
1006
- ]
1007
- }
1008
- )
1009
-
1010
- accelerator.end_training()
1011
-
1012
-
1013
- if __name__ == "__main__":
1014
- main()
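For reference, the non-PEFT branch of the script above saves the trained LoRA weights with `unet.save_attn_procs(args.output_dir)` and reloads them via `pipeline.unet.load_attn_procs(...)` for its final inference pass. Reloading the same weights from a fresh process could look roughly like the sketch below; the base model id, output directory, and prompt are placeholders rather than values taken from the script.

```python
import torch
from diffusers import StableDiffusionPipeline

base_model = "runwayml/stable-diffusion-v1-5"  # placeholder: whatever --pretrained_model_name_or_path was
lora_dir = "sd-model-finetuned-lora"           # placeholder: whatever --output_dir was

pipe = StableDiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.float16)
pipe.unet.load_attn_procs(lora_dir)  # attach the LoRA attention processors saved by save_attn_procs
pipe = pipe.to("cuda")

image = pipe("a pokemon with blue eyes", num_inference_steps=30).images[0]
image.save("sample.png")
```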
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/release.py DELETED
@@ -1,162 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2021 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import argparse
17
- import os
18
- import re
19
-
20
- import packaging.version
21
-
22
-
23
- PATH_TO_EXAMPLES = "examples/"
24
- REPLACE_PATTERNS = {
25
- "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
26
- "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
27
- "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
28
- "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
29
- }
30
- REPLACE_FILES = {
31
- "init": "src/diffusers/__init__.py",
32
- "setup": "setup.py",
33
- }
34
- README_FILE = "README.md"
35
-
36
-
37
- def update_version_in_file(fname, version, pattern):
38
- """Update the version in one file using a specific pattern."""
39
- with open(fname, "r", encoding="utf-8", newline="\n") as f:
40
- code = f.read()
41
- re_pattern, replace = REPLACE_PATTERNS[pattern]
42
- replace = replace.replace("VERSION", version)
43
- code = re_pattern.sub(replace, code)
44
- with open(fname, "w", encoding="utf-8", newline="\n") as f:
45
- f.write(code)
46
-
47
-
48
- def update_version_in_examples(version):
49
- """Update the version in all examples files."""
50
- for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
51
- # Removing some of the folders with non-actively maintained examples from the walk
52
- if "research_projects" in directories:
53
- directories.remove("research_projects")
54
- if "legacy" in directories:
55
- directories.remove("legacy")
56
- for fname in fnames:
57
- if fname.endswith(".py"):
58
- update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
59
-
60
-
61
- def global_version_update(version, patch=False):
62
- """Update the version in all needed files."""
63
- for pattern, fname in REPLACE_FILES.items():
64
- update_version_in_file(fname, version, pattern)
65
- if not patch:
66
- update_version_in_examples(version)
67
-
68
-
69
- def clean_main_ref_in_model_list():
70
-     """Replace the links from the main doc to the stable doc in the model list of the README."""
71
- # If the introduction or the conclusion of the list change, the prompts may need to be updated.
72
- _start_prompt = "🤗 Transformers currently provides the following architectures"
73
- _end_prompt = "1. Want to contribute a new model?"
74
- with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
75
- lines = f.readlines()
76
-
77
- # Find the start of the list.
78
- start_index = 0
79
- while not lines[start_index].startswith(_start_prompt):
80
- start_index += 1
81
- start_index += 1
82
-
83
- index = start_index
84
- # Update the lines in the model list.
85
- while not lines[index].startswith(_end_prompt):
86
- if lines[index].startswith("1."):
87
- lines[index] = lines[index].replace(
88
- "https://huggingface.co/docs/diffusers/main/model_doc",
89
- "https://huggingface.co/docs/diffusers/model_doc",
90
- )
91
- index += 1
92
-
93
- with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
94
- f.writelines(lines)
95
-
96
-
97
- def get_version():
98
- """Reads the current version in the __init__."""
99
- with open(REPLACE_FILES["init"], "r") as f:
100
- code = f.read()
101
- default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
102
- return packaging.version.parse(default_version)
103
-
104
-
105
- def pre_release_work(patch=False):
106
- """Do all the necessary pre-release steps."""
107
- # First let's get the default version: base version if we are in dev, bump minor otherwise.
108
- default_version = get_version()
109
- if patch and default_version.is_devrelease:
110
- raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
111
- if default_version.is_devrelease:
112
- default_version = default_version.base_version
113
- elif patch:
114
- default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
115
- else:
116
- default_version = f"{default_version.major}.{default_version.minor + 1}.0"
117
-
118
- # Now let's ask nicely if that's the right one.
119
- version = input(f"Which version are you releasing? [{default_version}]")
120
- if len(version) == 0:
121
- version = default_version
122
-
123
- print(f"Updating version to {version}.")
124
- global_version_update(version, patch=patch)
125
-
126
-
127
- # if not patch:
128
- # print("Cleaning main README, don't forget to run `make fix-copies`.")
129
- # clean_main_ref_in_model_list()
130
-
131
-
132
- def post_release_work():
133
-     """Do all the necessary post-release steps."""
134
- # First let's get the current version
135
- current_version = get_version()
136
- dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
137
- current_version = current_version.base_version
138
-
139
- # Check with the user we got that right.
140
- version = input(f"Which version are we developing now? [{dev_version}]")
141
- if len(version) == 0:
142
- version = dev_version
143
-
144
- print(f"Updating version to {version}.")
145
- global_version_update(version)
146
-
147
-
148
- # print("Cleaning main README, don't forget to run `make fix-copies`.")
149
- # clean_main_ref_in_model_list()
150
-
151
-
152
- if __name__ == "__main__":
153
- parser = argparse.ArgumentParser()
154
- parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
155
- parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
156
- args = parser.parse_args()
157
- if not args.post_release:
158
- pre_release_work(patch=args.patch)
159
- elif args.patch:
160
- print("Nothing to do after a patch :-)")
161
- else:
162
- post_release_work()
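To illustrate how the `REPLACE_PATTERNS` substitution above behaves, here is a small standalone sketch; the `0.19.0` version string is only an example value, not tied to any actual release:

```python
import re

# Same pattern/replacement pair as REPLACE_PATTERNS["init"] in the script above.
pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replacement = '__version__ = "VERSION"\n'.replace("VERSION", "0.19.0")

code = 'from .utils import logging\n__version__ = "0.19.0.dev0"\n'
print(pattern.sub(replacement, code))
# from .utils import logging
# __version__ = "0.19.0"
```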
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py DELETED
@@ -1,196 +0,0 @@
1
- # model settings
2
- model = dict(
3
- type='CascadeRCNN',
4
- pretrained='torchvision://resnet50',
5
- backbone=dict(
6
- type='ResNet',
7
- depth=50,
8
- num_stages=4,
9
- out_indices=(0, 1, 2, 3),
10
- frozen_stages=1,
11
- norm_cfg=dict(type='BN', requires_grad=True),
12
- norm_eval=True,
13
- style='pytorch'),
14
- neck=dict(
15
- type='FPN',
16
- in_channels=[256, 512, 1024, 2048],
17
- out_channels=256,
18
- num_outs=5),
19
- rpn_head=dict(
20
- type='RPNHead',
21
- in_channels=256,
22
- feat_channels=256,
23
- anchor_generator=dict(
24
- type='AnchorGenerator',
25
- scales=[8],
26
- ratios=[0.5, 1.0, 2.0],
27
- strides=[4, 8, 16, 32, 64]),
28
- bbox_coder=dict(
29
- type='DeltaXYWHBBoxCoder',
30
- target_means=[.0, .0, .0, .0],
31
- target_stds=[1.0, 1.0, 1.0, 1.0]),
32
- loss_cls=dict(
33
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
34
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
35
- roi_head=dict(
36
- type='CascadeRoIHead',
37
- num_stages=3,
38
- stage_loss_weights=[1, 0.5, 0.25],
39
- bbox_roi_extractor=dict(
40
- type='SingleRoIExtractor',
41
- roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
42
- out_channels=256,
43
- featmap_strides=[4, 8, 16, 32]),
44
- bbox_head=[
45
- dict(
46
- type='Shared2FCBBoxHead',
47
- in_channels=256,
48
- fc_out_channels=1024,
49
- roi_feat_size=7,
50
- num_classes=80,
51
- bbox_coder=dict(
52
- type='DeltaXYWHBBoxCoder',
53
- target_means=[0., 0., 0., 0.],
54
- target_stds=[0.1, 0.1, 0.2, 0.2]),
55
- reg_class_agnostic=True,
56
- loss_cls=dict(
57
- type='CrossEntropyLoss',
58
- use_sigmoid=False,
59
- loss_weight=1.0),
60
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
61
- loss_weight=1.0)),
62
- dict(
63
- type='Shared2FCBBoxHead',
64
- in_channels=256,
65
- fc_out_channels=1024,
66
- roi_feat_size=7,
67
- num_classes=80,
68
- bbox_coder=dict(
69
- type='DeltaXYWHBBoxCoder',
70
- target_means=[0., 0., 0., 0.],
71
- target_stds=[0.05, 0.05, 0.1, 0.1]),
72
- reg_class_agnostic=True,
73
- loss_cls=dict(
74
- type='CrossEntropyLoss',
75
- use_sigmoid=False,
76
- loss_weight=1.0),
77
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
78
- loss_weight=1.0)),
79
- dict(
80
- type='Shared2FCBBoxHead',
81
- in_channels=256,
82
- fc_out_channels=1024,
83
- roi_feat_size=7,
84
- num_classes=80,
85
- bbox_coder=dict(
86
- type='DeltaXYWHBBoxCoder',
87
- target_means=[0., 0., 0., 0.],
88
- target_stds=[0.033, 0.033, 0.067, 0.067]),
89
- reg_class_agnostic=True,
90
- loss_cls=dict(
91
- type='CrossEntropyLoss',
92
- use_sigmoid=False,
93
- loss_weight=1.0),
94
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
95
- ],
96
- mask_roi_extractor=dict(
97
- type='SingleRoIExtractor',
98
- roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
99
- out_channels=256,
100
- featmap_strides=[4, 8, 16, 32]),
101
- mask_head=dict(
102
- type='FCNMaskHead',
103
- num_convs=4,
104
- in_channels=256,
105
- conv_out_channels=256,
106
- num_classes=80,
107
- loss_mask=dict(
108
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
109
- # model training and testing settings
110
- train_cfg=dict(
111
- rpn=dict(
112
- assigner=dict(
113
- type='MaxIoUAssigner',
114
- pos_iou_thr=0.7,
115
- neg_iou_thr=0.3,
116
- min_pos_iou=0.3,
117
- match_low_quality=True,
118
- ignore_iof_thr=-1),
119
- sampler=dict(
120
- type='RandomSampler',
121
- num=256,
122
- pos_fraction=0.5,
123
- neg_pos_ub=-1,
124
- add_gt_as_proposals=False),
125
- allowed_border=0,
126
- pos_weight=-1,
127
- debug=False),
128
- rpn_proposal=dict(
129
- nms_pre=2000,
130
- max_per_img=2000,
131
- nms=dict(type='nms', iou_threshold=0.7),
132
- min_bbox_size=0),
133
- rcnn=[
134
- dict(
135
- assigner=dict(
136
- type='MaxIoUAssigner',
137
- pos_iou_thr=0.5,
138
- neg_iou_thr=0.5,
139
- min_pos_iou=0.5,
140
- match_low_quality=False,
141
- ignore_iof_thr=-1),
142
- sampler=dict(
143
- type='RandomSampler',
144
- num=512,
145
- pos_fraction=0.25,
146
- neg_pos_ub=-1,
147
- add_gt_as_proposals=True),
148
- mask_size=28,
149
- pos_weight=-1,
150
- debug=False),
151
- dict(
152
- assigner=dict(
153
- type='MaxIoUAssigner',
154
- pos_iou_thr=0.6,
155
- neg_iou_thr=0.6,
156
- min_pos_iou=0.6,
157
- match_low_quality=False,
158
- ignore_iof_thr=-1),
159
- sampler=dict(
160
- type='RandomSampler',
161
- num=512,
162
- pos_fraction=0.25,
163
- neg_pos_ub=-1,
164
- add_gt_as_proposals=True),
165
- mask_size=28,
166
- pos_weight=-1,
167
- debug=False),
168
- dict(
169
- assigner=dict(
170
- type='MaxIoUAssigner',
171
- pos_iou_thr=0.7,
172
- neg_iou_thr=0.7,
173
- min_pos_iou=0.7,
174
- match_low_quality=False,
175
- ignore_iof_thr=-1),
176
- sampler=dict(
177
- type='RandomSampler',
178
- num=512,
179
- pos_fraction=0.25,
180
- neg_pos_ub=-1,
181
- add_gt_as_proposals=True),
182
- mask_size=28,
183
- pos_weight=-1,
184
- debug=False)
185
- ]),
186
- test_cfg=dict(
187
- rpn=dict(
188
- nms_pre=1000,
189
- max_per_img=1000,
190
- nms=dict(type='nms', iou_threshold=0.7),
191
- min_bbox_size=0),
192
- rcnn=dict(
193
- score_thr=0.05,
194
- nms=dict(type='nms', iou_threshold=0.5),
195
- max_per_img=100,
196
- mask_thr_binary=0.5)))
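This file is a `_base_` model definition rather than a complete training config; in MMDetection 2.x it would normally be pulled in through `_base_` inheritance and the model built from the merged config. A rough sketch of that usage, assuming an mmdetection checkout with this file at its usual path:

```python
from mmcv import Config
from mmdet.models import build_detector

# Config.fromfile resolves any _base_ chain and merges the nested dicts.
cfg = Config.fromfile('configs/_base_/models/cascade_mask_rcnn_r50_fpn.py')
model = build_detector(cfg.model)  # train_cfg/test_cfg are already embedded in cfg.model
print(type(model).__name__)       # CascadeRCNN
```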
spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- backbone=dict(
4
- norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py DELETED
@@ -1,13 +0,0 @@
1
- _base_ = './ga_rpn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://resnext101_64x4d',
4
- backbone=dict(
5
- type='ResNeXt',
6
- depth=101,
7
- groups=64,
8
- base_width=4,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- frozen_stages=1,
12
- norm_cfg=dict(type='BN', requires_grad=True),
13
- style='pytorch'))
spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://msra/hrnetv2_w18',
4
- backbone=dict(
5
- extra=dict(
6
- stage2=dict(num_channels=(18, 36)),
7
- stage3=dict(num_channels=(18, 36, 72)),
8
- stage4=dict(num_channels=(18, 36, 72, 144)))),
9
- neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,16 +0,0 @@
1
- _base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py'
2
- model = dict(
3
- type='MaskScoringRCNN',
4
- roi_head=dict(
5
- type='MaskScoringRoIHead',
6
- mask_iou_head=dict(
7
- type='MaskIoUHead',
8
- num_convs=4,
9
- num_fcs=2,
10
- roi_feat_size=14,
11
- in_channels=256,
12
- conv_out_channels=256,
13
- fc_out_channels=1024,
14
- num_classes=80)),
15
- # model training and testing settings
16
- train_cfg=dict(rcnn=dict(mask_thr_binary=0.5)))
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/lraspp_m-v3-d8.py DELETED
@@ -1,25 +0,0 @@
1
- # model settings
2
- norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
3
- model = dict(
4
- type='EncoderDecoder',
5
- backbone=dict(
6
- type='MobileNetV3',
7
- arch='large',
8
- out_indices=(1, 3, 16),
9
- norm_cfg=norm_cfg),
10
- decode_head=dict(
11
- type='LRASPPHead',
12
- in_channels=(16, 24, 960),
13
- in_index=(0, 1, 2),
14
- channels=128,
15
- input_transform='multiple_select',
16
- dropout_ratio=0.1,
17
- num_classes=19,
18
- norm_cfg=norm_cfg,
19
- act_cfg=dict(type='ReLU'),
20
- align_corners=False,
21
- loss_decode=dict(
22
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
23
- # model training and testing settings
24
- train_cfg=dict(),
25
- test_cfg=dict(mode='whole'))
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/README.md DELETED
@@ -1,193 +0,0 @@
1
- # CLIP
2
-
3
- [[Blog]](https://openai.com/blog/clip/) [[Paper]](https://arxiv.org/abs/2103.00020) [[Model Card]](model-card.md) [[Colab]](https://colab.research.google.com/github/openai/clip/blob/master/notebooks/Interacting_with_CLIP.ipynb)
4
-
5
- CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and 3. We found CLIP matches the performance of the original ResNet50 on ImageNet “zero-shot” without using any of the original 1.28M labeled examples, overcoming several major challenges in computer vision.
6
-
7
-
8
-
9
- ## Approach
10
-
11
- ![CLIP](CLIP.png)
12
-
13
-
14
-
15
- ## Usage
16
-
17
- First, [install PyTorch 1.7.1](https://pytorch.org/get-started/locally/) and torchvision, as well as small additional dependencies, and then install this repo as a Python package. On a CUDA GPU machine, the following will do the trick:
18
-
19
- ```bash
20
- $ conda install --yes -c pytorch pytorch=1.7.1 torchvision cudatoolkit=11.0
21
- $ pip install ftfy regex tqdm
22
- $ pip install git+https://github.com/openai/CLIP.git
23
- ```
24
-
25
- Replace `cudatoolkit=11.0` above with the appropriate CUDA version on your machine or `cpuonly` when installing on a machine without a GPU.
26
-
27
- ```python
28
- import torch
29
- import clip
30
- from PIL import Image
31
-
32
- device = "cuda" if torch.cuda.is_available() else "cpu"
33
- model, preprocess = clip.load("ViT-B/32", device=device)
34
-
35
- image = preprocess(Image.open("CLIP.png")).unsqueeze(0).to(device)
36
- text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)
37
-
38
- with torch.no_grad():
39
- image_features = model.encode_image(image)
40
- text_features = model.encode_text(text)
41
-
42
- logits_per_image, logits_per_text = model(image, text)
43
- probs = logits_per_image.softmax(dim=-1).cpu().numpy()
44
-
45
- print("Label probs:", probs) # prints: [[0.9927937 0.00421068 0.00299572]]
46
- ```
47
-
48
-
49
- ## API
50
-
51
- The CLIP module `clip` provides the following methods:
52
-
53
- #### `clip.available_models()`
54
-
55
- Returns the names of the available CLIP models.
56
-
57
- #### `clip.load(name, device=..., jit=False)`
58
-
59
- Returns the model and the TorchVision transform needed by the model, specified by the model name returned by `clip.available_models()`. It will download the model as necessary. The `name` argument can also be a path to a local checkpoint.
60
-
61
- The device to run the model can be optionally specified, and the default is to use the first CUDA device if there is any, otherwise the CPU. When `jit` is `False`, a non-JIT version of the model will be loaded.
62
-
63
- #### `clip.tokenize(text: Union[str, List[str]], context_length=77)`
64
-
65
- Returns a LongTensor containing tokenized sequences of given text input(s). This can be used as the input to the model.
66
-
67
- ---
68
-
69
- The model returned by `clip.load()` supports the following methods:
70
-
71
- #### `model.encode_image(image: Tensor)`
72
-
73
- Given a batch of images, returns the image features encoded by the vision portion of the CLIP model.
74
-
75
- #### `model.encode_text(text: Tensor)`
76
-
77
- Given a batch of text tokens, returns the text features encoded by the language portion of the CLIP model.
78
-
79
- #### `model(image: Tensor, text: Tensor)`
80
-
81
- Given a batch of images and a batch of text tokens, returns two Tensors, containing the logit scores corresponding to each image and text input. The values are cosine similarities between the corresponding image and text features, times 100.
82
-
83
-
84
-
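As a quick consistency check, the logits returned by `model(image, text)` can be reproduced from the separately encoded features. A minimal sketch, reusing `model`, `image`, and `text` from the usage example above (non-JIT model):

```python
with torch.no_grad():
    logits_per_image, _ = model(image, text)
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)

# model() normalizes both feature sets and scales their cosine similarity by
# logit_scale.exp(), which is about 100 for the released checkpoints.
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
manual_logits = model.logit_scale.exp() * image_features @ text_features.T
print(torch.allclose(logits_per_image, manual_logits, atol=1e-3))  # True, up to floating-point rounding
```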
85
- ## More Examples
86
-
87
- ### Zero-Shot Prediction
88
-
89
- The code below performs zero-shot prediction using CLIP, as shown in Appendix B in the paper. This example takes an image from the [CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), and predicts the most likely labels among the 100 textual labels from the dataset.
90
-
91
- ```python
92
- import os
93
- import clip
94
- import torch
95
- from torchvision.datasets import CIFAR100
96
-
97
- # Load the model
98
- device = "cuda" if torch.cuda.is_available() else "cpu"
99
- model, preprocess = clip.load('ViT-B/32', device)
100
-
101
- # Download the dataset
102
- cifar100 = CIFAR100(root=os.path.expanduser("~/.cache"), download=True, train=False)
103
-
104
- # Prepare the inputs
105
- image, class_id = cifar100[3637]
106
- image_input = preprocess(image).unsqueeze(0).to(device)
107
- text_inputs = torch.cat([clip.tokenize(f"a photo of a {c}") for c in cifar100.classes]).to(device)
108
-
109
- # Calculate features
110
- with torch.no_grad():
111
- image_features = model.encode_image(image_input)
112
- text_features = model.encode_text(text_inputs)
113
-
114
- # Pick the top 5 most similar labels for the image
115
- image_features /= image_features.norm(dim=-1, keepdim=True)
116
- text_features /= text_features.norm(dim=-1, keepdim=True)
117
- similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
118
- values, indices = similarity[0].topk(5)
119
-
120
- # Print the result
121
- print("\nTop predictions:\n")
122
- for value, index in zip(values, indices):
123
- print(f"{cifar100.classes[index]:>16s}: {100 * value.item():.2f}%")
124
- ```
125
-
126
- The output will look like the following (the exact numbers may be slightly different depending on the compute device):
127
-
128
- ```
129
- Top predictions:
130
-
131
- snake: 65.31%
132
- turtle: 12.29%
133
- sweet_pepper: 3.83%
134
- lizard: 1.88%
135
- crocodile: 1.75%
136
- ```
137
-
138
- Note that this example uses the `encode_image()` and `encode_text()` methods that return the encoded features of given inputs.
139
-
140
-
141
- ### Linear-probe evaluation
142
-
143
- The example below uses [scikit-learn](https://scikit-learn.org/) to perform logistic regression on image features.
144
-
145
- ```python
146
- import os
147
- import clip
148
- import torch
149
-
150
- import numpy as np
151
- from sklearn.linear_model import LogisticRegression
152
- from torch.utils.data import DataLoader
153
- from torchvision.datasets import CIFAR100
154
- from tqdm import tqdm
155
-
156
- # Load the model
157
- device = "cuda" if torch.cuda.is_available() else "cpu"
158
- model, preprocess = clip.load('ViT-B/32', device)
159
-
160
- # Load the dataset
161
- root = os.path.expanduser("~/.cache")
162
- train = CIFAR100(root, download=True, train=True, transform=preprocess)
163
- test = CIFAR100(root, download=True, train=False, transform=preprocess)
164
-
165
-
166
- def get_features(dataset):
167
- all_features = []
168
- all_labels = []
169
-
170
- with torch.no_grad():
171
- for images, labels in tqdm(DataLoader(dataset, batch_size=100)):
172
- features = model.encode_image(images.to(device))
173
-
174
- all_features.append(features)
175
- all_labels.append(labels)
176
-
177
- return torch.cat(all_features).cpu().numpy(), torch.cat(all_labels).cpu().numpy()
178
-
179
- # Calculate the image features
180
- train_features, train_labels = get_features(train)
181
- test_features, test_labels = get_features(test)
182
-
183
- # Perform logistic regression
184
- classifier = LogisticRegression(random_state=0, C=0.316, max_iter=1000, verbose=1)
185
- classifier.fit(train_features, train_labels)
186
-
187
- # Evaluate using the logistic regression classifier
188
- predictions = classifier.predict(test_features)
189
- accuracy = np.mean((test_labels == predictions).astype(float)) * 100.
190
- print(f"Accuracy = {accuracy:.3f}")
191
- ```
192
-
193
- Note that the `C` value should be determined via a hyperparameter sweep using a validation split.
spaces/Anonymous-sub/Rerender/ControlNet/cldm/ddim_hacked.py DELETED
@@ -1,317 +0,0 @@
1
- """SAMPLING ONLY."""
2
-
3
- import torch
4
- import numpy as np
5
- from tqdm import tqdm
6
-
7
- from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
8
-
9
-
10
- class DDIMSampler(object):
11
- def __init__(self, model, schedule="linear", **kwargs):
12
- super().__init__()
13
- self.model = model
14
- self.ddpm_num_timesteps = model.num_timesteps
15
- self.schedule = schedule
16
-
17
- def register_buffer(self, name, attr):
18
- if type(attr) == torch.Tensor:
19
- if attr.device != torch.device("cuda"):
20
- attr = attr.to(torch.device("cuda"))
21
- setattr(self, name, attr)
22
-
23
- def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
24
- self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
25
- num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
26
- alphas_cumprod = self.model.alphas_cumprod
27
- assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
28
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
29
-
30
- self.register_buffer('betas', to_torch(self.model.betas))
31
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
32
- self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
33
-
34
- # calculations for diffusion q(x_t | x_{t-1}) and others
35
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
36
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
37
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
38
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
39
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
40
-
41
- # ddim sampling parameters
42
- ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
43
- ddim_timesteps=self.ddim_timesteps,
44
- eta=ddim_eta,verbose=verbose)
45
- self.register_buffer('ddim_sigmas', ddim_sigmas)
46
- self.register_buffer('ddim_alphas', ddim_alphas)
47
- self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
48
- self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
49
- sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
50
- (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
51
- 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
52
- self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
53
-
54
- @torch.no_grad()
55
- def sample(self,
56
- S,
57
- batch_size,
58
- shape,
59
- conditioning=None,
60
- callback=None,
61
- normals_sequence=None,
62
- img_callback=None,
63
- quantize_x0=False,
64
- eta=0.,
65
- mask=None,
66
- x0=None,
67
- temperature=1.,
68
- noise_dropout=0.,
69
- score_corrector=None,
70
- corrector_kwargs=None,
71
- verbose=True,
72
- x_T=None,
73
- log_every_t=100,
74
- unconditional_guidance_scale=1.,
75
- unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
76
- dynamic_threshold=None,
77
- ucg_schedule=None,
78
- **kwargs
79
- ):
80
- if conditioning is not None:
81
- if isinstance(conditioning, dict):
82
- ctmp = conditioning[list(conditioning.keys())[0]]
83
- while isinstance(ctmp, list): ctmp = ctmp[0]
84
- cbs = ctmp.shape[0]
85
- if cbs != batch_size:
86
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
87
-
88
- elif isinstance(conditioning, list):
89
- for ctmp in conditioning:
90
- if ctmp.shape[0] != batch_size:
91
-                         print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")
92
-
93
- else:
94
- if conditioning.shape[0] != batch_size:
95
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
96
-
97
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
98
- # sampling
99
- C, H, W = shape
100
- size = (batch_size, C, H, W)
101
- print(f'Data shape for DDIM sampling is {size}, eta {eta}')
102
-
103
- samples, intermediates = self.ddim_sampling(conditioning, size,
104
- callback=callback,
105
- img_callback=img_callback,
106
- quantize_denoised=quantize_x0,
107
- mask=mask, x0=x0,
108
- ddim_use_original_steps=False,
109
- noise_dropout=noise_dropout,
110
- temperature=temperature,
111
- score_corrector=score_corrector,
112
- corrector_kwargs=corrector_kwargs,
113
- x_T=x_T,
114
- log_every_t=log_every_t,
115
- unconditional_guidance_scale=unconditional_guidance_scale,
116
- unconditional_conditioning=unconditional_conditioning,
117
- dynamic_threshold=dynamic_threshold,
118
- ucg_schedule=ucg_schedule
119
- )
120
- return samples, intermediates
121
-
122
- @torch.no_grad()
123
- def ddim_sampling(self, cond, shape,
124
- x_T=None, ddim_use_original_steps=False,
125
- callback=None, timesteps=None, quantize_denoised=False,
126
- mask=None, x0=None, img_callback=None, log_every_t=100,
127
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
128
- unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
129
- ucg_schedule=None):
130
- device = self.model.betas.device
131
- b = shape[0]
132
- if x_T is None:
133
- img = torch.randn(shape, device=device)
134
- else:
135
- img = x_T
136
-
137
- if timesteps is None:
138
- timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
139
- elif timesteps is not None and not ddim_use_original_steps:
140
- subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
141
- timesteps = self.ddim_timesteps[:subset_end]
142
-
143
- intermediates = {'x_inter': [img], 'pred_x0': [img]}
144
- time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
145
- total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
146
- print(f"Running DDIM Sampling with {total_steps} timesteps")
147
-
148
- iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
149
-
150
- for i, step in enumerate(iterator):
151
- index = total_steps - i - 1
152
- ts = torch.full((b,), step, device=device, dtype=torch.long)
153
-
154
- if mask is not None:
155
- assert x0 is not None
156
- img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
157
- img = img_orig * mask + (1. - mask) * img
158
-
159
- if ucg_schedule is not None:
160
- assert len(ucg_schedule) == len(time_range)
161
- unconditional_guidance_scale = ucg_schedule[i]
162
-
163
- outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
164
- quantize_denoised=quantize_denoised, temperature=temperature,
165
- noise_dropout=noise_dropout, score_corrector=score_corrector,
166
- corrector_kwargs=corrector_kwargs,
167
- unconditional_guidance_scale=unconditional_guidance_scale,
168
- unconditional_conditioning=unconditional_conditioning,
169
- dynamic_threshold=dynamic_threshold)
170
- img, pred_x0 = outs
171
- if callback: callback(i)
172
- if img_callback: img_callback(pred_x0, i)
173
-
174
- if index % log_every_t == 0 or index == total_steps - 1:
175
- intermediates['x_inter'].append(img)
176
- intermediates['pred_x0'].append(pred_x0)
177
-
178
- return img, intermediates
179
-
180
- @torch.no_grad()
181
- def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
182
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
183
- unconditional_guidance_scale=1., unconditional_conditioning=None,
184
- dynamic_threshold=None):
185
- b, *_, device = *x.shape, x.device
186
-
187
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
188
- model_output = self.model.apply_model(x, t, c)
189
- else:
190
- model_t = self.model.apply_model(x, t, c)
191
- model_uncond = self.model.apply_model(x, t, unconditional_conditioning)
192
- model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
193
-
194
- if self.model.parameterization == "v":
195
- e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
196
- else:
197
- e_t = model_output
198
-
199
- if score_corrector is not None:
200
- assert self.model.parameterization == "eps", 'not implemented'
201
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
202
-
203
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
204
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
205
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
206
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
207
- # select parameters corresponding to the currently considered timestep
208
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
209
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
210
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
211
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
212
-
213
- # current prediction for x_0
214
- if self.model.parameterization != "v":
215
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
216
- else:
217
- pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
218
-
219
- if quantize_denoised:
220
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
221
-
222
- if dynamic_threshold is not None:
223
- raise NotImplementedError()
224
-
225
- # direction pointing to x_t
226
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
227
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
228
- if noise_dropout > 0.:
229
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
230
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
231
- return x_prev, pred_x0
232
-
233
- @torch.no_grad()
234
- def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
235
- unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
236
- timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
237
- num_reference_steps = timesteps.shape[0]
238
-
239
- assert t_enc <= num_reference_steps
240
- num_steps = t_enc
241
-
242
- if use_original_steps:
243
- alphas_next = self.alphas_cumprod[:num_steps]
244
- alphas = self.alphas_cumprod_prev[:num_steps]
245
- else:
246
- alphas_next = self.ddim_alphas[:num_steps]
247
- alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
248
-
249
- x_next = x0
250
- intermediates = []
251
- inter_steps = []
252
- for i in tqdm(range(num_steps), desc='Encoding Image'):
253
- t = torch.full((x0.shape[0],), timesteps[i], device=self.model.device, dtype=torch.long)
254
- if unconditional_guidance_scale == 1.:
255
- noise_pred = self.model.apply_model(x_next, t, c)
256
- else:
257
- assert unconditional_conditioning is not None
258
- e_t_uncond, noise_pred = torch.chunk(
259
- self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
260
- torch.cat((unconditional_conditioning, c))), 2)
261
- noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)
262
-
263
- xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
264
- weighted_noise_pred = alphas_next[i].sqrt() * (
265
- (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
266
- x_next = xt_weighted + weighted_noise_pred
267
- if return_intermediates and i % (
268
- num_steps // return_intermediates) == 0 and i < num_steps - 1:
269
- intermediates.append(x_next)
270
- inter_steps.append(i)
271
- elif return_intermediates and i >= num_steps - 2:
272
- intermediates.append(x_next)
273
- inter_steps.append(i)
274
- if callback: callback(i)
275
-
276
- out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
277
- if return_intermediates:
278
- out.update({'intermediates': intermediates})
279
- return x_next, out
280
-
281
- @torch.no_grad()
282
- def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
283
- # fast, but does not allow for exact reconstruction
284
- # t serves as an index to gather the correct alphas
285
- if use_original_steps:
286
- sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
287
- sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
288
- else:
289
- sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
290
- sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
291
-
292
- if noise is None:
293
- noise = torch.randn_like(x0)
294
- return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
295
- extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
296
-
297
- @torch.no_grad()
298
- def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
299
- use_original_steps=False, callback=None):
300
-
301
- timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
302
- timesteps = timesteps[:t_start]
303
-
304
- time_range = np.flip(timesteps)
305
- total_steps = timesteps.shape[0]
306
- print(f"Running DDIM Sampling with {total_steps} timesteps")
307
-
308
- iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
309
- x_dec = x_latent
310
- for i, step in enumerate(iterator):
311
- index = total_steps - i - 1
312
- ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
313
- x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
314
- unconditional_guidance_scale=unconditional_guidance_scale,
315
- unconditional_conditioning=unconditional_conditioning)
316
- if callback: callback(i)
317
- return x_dec
 
spaces/Anonymous-sub/Rerender/ControlNet/gradio_pose2image.py DELETED
@@ -1,98 +0,0 @@
1
- from share import *
2
- import config
3
-
4
- import cv2
5
- import einops
6
- import gradio as gr
7
- import numpy as np
8
- import torch
9
- import random
10
-
11
- from pytorch_lightning import seed_everything
12
- from annotator.util import resize_image, HWC3
13
- from annotator.openpose import OpenposeDetector
14
- from cldm.model import create_model, load_state_dict
15
- from cldm.ddim_hacked import DDIMSampler
16
-
17
-
18
- apply_openpose = OpenposeDetector()
19
-
20
- model = create_model('./models/cldm_v15.yaml').cpu()
21
- model.load_state_dict(load_state_dict('./models/control_sd15_openpose.pth', location='cuda'))
22
- model = model.cuda()
23
- ddim_sampler = DDIMSampler(model)
24
-
25
-
26
- def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
27
- with torch.no_grad():
28
- input_image = HWC3(input_image)
29
- detected_map, _ = apply_openpose(resize_image(input_image, detect_resolution))
30
- detected_map = HWC3(detected_map)
31
- img = resize_image(input_image, image_resolution)
32
- H, W, C = img.shape
33
-
34
- detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_NEAREST)
35
-
36
- control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
37
- control = torch.stack([control for _ in range(num_samples)], dim=0)
38
- control = einops.rearrange(control, 'b h w c -> b c h w').clone()
39
-
40
- if seed == -1:
41
- seed = random.randint(0, 65535)
42
- seed_everything(seed)
43
-
44
- if config.save_memory:
45
- model.low_vram_shift(is_diffusing=False)
46
-
47
- cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
48
- un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
49
- shape = (4, H // 8, W // 8)
50
-
51
- if config.save_memory:
52
- model.low_vram_shift(is_diffusing=True)
53
-
54
- model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
55
- samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
56
- shape, cond, verbose=False, eta=eta,
57
- unconditional_guidance_scale=scale,
58
- unconditional_conditioning=un_cond)
59
-
60
- if config.save_memory:
61
- model.low_vram_shift(is_diffusing=False)
62
-
63
- x_samples = model.decode_first_stage(samples)
64
- x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
65
-
66
- results = [x_samples[i] for i in range(num_samples)]
67
- return [detected_map] + results
68
-
69
-
70
- block = gr.Blocks().queue()
71
- with block:
72
- with gr.Row():
73
- gr.Markdown("## Control Stable Diffusion with Human Pose")
74
- with gr.Row():
75
- with gr.Column():
76
- input_image = gr.Image(source='upload', type="numpy")
77
- prompt = gr.Textbox(label="Prompt")
78
- run_button = gr.Button(label="Run")
79
- with gr.Accordion("Advanced options", open=False):
80
- num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
81
- image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
82
- strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
83
- guess_mode = gr.Checkbox(label='Guess Mode', value=False)
84
- detect_resolution = gr.Slider(label="OpenPose Resolution", minimum=128, maximum=1024, value=512, step=1)
85
- ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
86
- scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
87
- seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
88
- eta = gr.Number(label="eta (DDIM)", value=0.0)
89
- a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
90
- n_prompt = gr.Textbox(label="Negative Prompt",
91
- value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
92
- with gr.Column():
93
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
94
- ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
95
- run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
96
-
97
-
98
- block.launch(server_name='0.0.0.0')
 
spaces/AnxiousNugget/janitor/Dockerfile DELETED
@@ -1,21 +0,0 @@
1
- FROM node:18-bullseye-slim
2
-
3
- RUN apt-get update && \
4
-
5
- apt-get install -y git
6
-
7
- RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
8
-
9
- WORKDIR /app
10
-
11
- RUN npm install
12
-
13
- COPY Dockerfile greeting.md* .env* ./
14
-
15
- RUN npm run build
16
-
17
- EXPOSE 7860
18
-
19
- ENV NODE_ENV=production
20
-
21
- CMD [ "npm", "start" ]
 
spaces/Apex-X/nono/roop/face_analyser.py DELETED
@@ -1,54 +0,0 @@
1
- import threading
2
- from typing import Any, Optional, List
3
- import insightface
4
- import numpy
5
-
6
- import roop.globals
7
- from roop.typing import Frame, Face
8
-
9
- FACE_ANALYSER = None
10
- THREAD_LOCK = threading.Lock()
11
-
12
-
13
- def get_face_analyser() -> Any:
14
- global FACE_ANALYSER
15
-
16
- with THREAD_LOCK:
17
- if FACE_ANALYSER is None:
18
- FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers)
19
- FACE_ANALYSER.prepare(ctx_id=0)
20
- return FACE_ANALYSER
21
-
22
-
23
- def clear_face_analyser() -> Any:
24
- global FACE_ANALYSER
25
-
26
- FACE_ANALYSER = None
27
-
28
-
29
- def get_one_face(frame: Frame, position: int = 0) -> Optional[Face]:
30
- many_faces = get_many_faces(frame)
31
- if many_faces:
32
- try:
33
- return many_faces[position]
34
- except IndexError:
35
- return many_faces[-1]
36
- return None
37
-
38
-
39
- def get_many_faces(frame: Frame) -> Optional[List[Face]]:
40
- try:
41
- return get_face_analyser().get(frame)
42
- except ValueError:
43
- return None
44
-
45
-
46
- def find_similar_face(frame: Frame, reference_face: Face) -> Optional[Face]:
47
- many_faces = get_many_faces(frame)
48
- if many_faces:
49
- for face in many_faces:
50
- if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
51
- distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding))
52
- if distance < roop.globals.similar_face_distance:
53
- return face
54
- return None
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/__init__.py DELETED
@@ -1 +0,0 @@
1
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/__init__.py DELETED
File without changes
spaces/Benson/text-generation/Examples/Barikad Crew Toup Pou Yo Mp3 Descargar.md DELETED
@@ -1,105 +0,0 @@
1
-
2
- <h1>Descargar la aplicación de YouTube: Cómo ver videos en cualquier dispositivo</h1>
3
- <p>YouTube es una de las plataformas para compartir videos más populares del mundo, con miles de millones de usuarios y horas de contenido. Ya sea que quieras ver videos musicales, transmisiones de juegos, tutoriales educativos o cualquier otra cosa, YouTube lo tiene todo. Pero, ¿cómo puedes disfrutar de YouTube en tu smartphone, tablet, ordenador o TV? La respuesta es simple: descarga la aplicación de YouTube. </p>
4
- <h2>barikad crew toup pou yo mp3 descargar</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://bltlly.com/2v6Lx9">https://bltlly.com/2v6Lx9</a></b></p><br /><br />
5
- <h2>¿Qué es YouTube? </h2>
6
- <p>YouTube es un sitio web y una aplicación que te permite subir, ver, compartir, comentar y gustar videos de varios géneros y categorías. También puedes suscribirte a los canales que te gustan, crear listas de reproducción de tus vídeos favoritos y unirte a la comunidad de YouTube. YouTube es de uso gratuito, pero también puede actualizar a YouTube Premium para la visualización sin anuncios, descargas sin conexión, reproducción de fondo y acceso a YouTube Music y YouTube Originals.</p>
7
- <h3>Características y beneficios de YouTube</h3>
8
- <p>Algunas de las características y beneficios de usar YouTube son:</p>
9
- <ul>
10
- <li>Puedes ver vídeos en alta calidad, hasta resolución 4K. </li>
11
- <li>Puedes descubrir nuevo contenido basado en tus intereses y preferencias. </li>
12
- <li>Puedes interactuar con otros usuarios y creadores a través de comentarios, likes y chats en vivo. </li>
13
- <li>Puedes crear tu propio canal y subir tus propios videos. </li>
14
- <li> Puede transmitir en vivo su juego, eventos o cualquier otra cosa. </li>
15
- <li>Puedes aprender nuevas habilidades, aficiones, idiomas o temas de expertos y educadores. </li>
16
- <li>Puedes disfrutar de videos musicales, podcasts, documentales, películas, programas y más. </li>
17
- </ul>
18
- <h3>aplicación de YouTube vs sitio web de YouTube</h3>
19
- <p>Si bien puedes acceder a YouTube desde cualquier navegador web en cualquier dispositivo, hay algunas ventajas de usar la aplicación de YouTube. Por ejemplo:</p>
20
- <ul>
21
- <li>La aplicación de YouTube está optimizada para dispositivos móviles, con una interfaz fácil de usar y fácil navegación. </li>
22
- <li>La aplicación de YouTube te permite descargar vídeos para verlos sin conexión (con YouTube Premium). </li>
23
-
24
- <li>La aplicación de YouTube le permite emitir vídeos a su televisor u otros dispositivos utilizando Chromecast o AirPlay.</li>
25
- <li>La aplicación de YouTube le da acceso a características adicionales como historias, cortos, modo de realidad virtual, modo oscuro, y más. </li>
26
- </ul>
27
- <h2>Cómo descargar la aplicación de YouTube para Android</h2>
28
- <p>Si tiene un dispositivo Android, como un teléfono inteligente o una tableta, puede descargar la aplicación de YouTube desde Google Play Store. Estos son los pasos:</p>
29
- <h3>Paso 1: Ir a Google Play Store</h3>
30
- <p>En tu dispositivo, abre la aplicación Google Play Store. Si no la tienes instalada, puedes descargarla desde <a href="( 1 )">here</a>. </p>
31
- <p></p>
32
- <h3>Paso 2: Buscar la aplicación de YouTube</h3>
33
- <p>En la barra de búsqueda en la parte superior de la pantalla, escriba "YouTube" y toque en el icono de la lupa <h3>Paso 3: Toque en el botón Instalar</h3>
34
- <p>De los resultados de búsqueda, toque en el icono de la aplicación de YouTube, que tiene un logotipo de botón de reproducción rojo. Luego, toca el botón verde Instalar para comenzar a descargar la aplicación. </p>
35
- <h3>Paso 4: Abra la aplicación e inicie sesión</h3>
36
- <p>Una vez instalada la aplicación, puede abrirla desde la pantalla de inicio o desde el cajón de aplicaciones. También puede pulsar en el botón Abrir de Google Play Store. Para acceder a todas las funciones de YouTube, debes iniciar sesión con tu cuenta de Google. Si no lo tienes, puedes crear uno gratis <a href=">here</a>. </p>
37
- <h2>Cómo descargar la aplicación de YouTube para iOS</h2>
38
- <p>Si tienes un dispositivo iOS, como un iPhone o un iPad, puedes descargar la aplicación de YouTube desde la App Store. Estos son los pasos:</p>
39
- <h3>Paso 1: Ir al App Store</h3>
40
- <p>En tu dispositivo, abre la aplicación App Store. Si no la tienes instalada, puedes descargarla desde <a href="">aquí</a>. </p>
41
- <h3>Paso 2: Buscar la aplicación de YouTube</h3>
42
- <p>En la barra de búsqueda en la parte inferior de la pantalla, escriba "YouTube" y toque en el botón de búsqueda azul. </p>
43
- <h3>Paso 3: Toque en el botón Get</h3>
44
-
45
- <h3>Paso 4: Abra la aplicación e inicie sesión</h3>
46
- <p>Una vez instalada la aplicación, puede abrirla desde la pantalla de inicio o la biblioteca de aplicaciones. También puede pulsar en el botón Abrir de la App Store. Para acceder a todas las funciones de YouTube, debes iniciar sesión con tu cuenta de Google. Si no lo tienes, puedes crear uno gratis <a href=">here</a>. </p>
47
- <h2>Cómo descargar la aplicación de YouTube para Windows 10</h2>
48
- <p>Si tiene un dispositivo Windows 10, como una computadora portátil o un escritorio, puede descargar la aplicación de YouTube desde la Microsoft Store. Estos son los pasos:</p>
49
- <h3>Paso 1: Ir a Microsoft Store</h3>
50
- <p>En tu dispositivo, abre la aplicación de Microsoft Store. Si no la tienes instalada, puedes descargarla desde <a href="">aquí</a>. </p>
51
- <h3>Paso 2: Buscar la aplicación de YouTube</h3>
52
- <p>En la barra de búsqueda en la parte superior derecha de la pantalla, escriba "YouTube" y presione Enter.</p>
53
- <h3>Paso 3: Haga clic en el botón Obtener</h3>
54
- <p>De los resultados de búsqueda, haga clic en el icono de la aplicación de YouTube, que tiene un logotipo de botón de reproducción rojo. A continuación, haga clic en el botón azul Get para comenzar a descargar la aplicación. </p>
55
- <h3>Paso 4: Abra la aplicación e inicie sesión</h3>
56
- <p>Una vez instalada la aplicación, puede abrirla desde el menú Inicio o la barra de tareas. También puede hacer clic en el botón Inicio desde la Tienda de Microsoft. Para acceder a todas las funciones de YouTube, debes iniciar sesión con tu cuenta de Google. Si no lo tienes, puedes crear uno gratis <a href=">here</a>. </p>
57
- <h2>Cómo descargar la aplicación de YouTube para otros dispositivos</h2>
58
- <p>Si tiene otros dispositivos compatibles con YouTube, como televisores inteligentes, dispositivos de transmisión o consolas de juegos, también puede descargar y ver YouTube en ellos. Aquí hay algunos ejemplos:</p>
59
- <h3>Televisores inteligentes y dispositivos de transmisión</h3>
60
- <p>Si tiene un televisor inteligente o un dispositivo de transmisión que se conecta a su televisor, como Roku, Chromecast, Fire TV Stick o Apple TV, puede descargar y ver YouTube en ellos. Para hacerlo, siga estos pasos:</p>
61
- <ul>
62
- <li>Conecta tu dispositivo a tu TV y enciéndelos. </li>
63
-
64
- <li>Busca YouTube y descárgalo. </li>
65
- <li>Abre YouTube e inicia sesión con tu cuenta de Google. </li>
66
- <li>También puede usar su teléfono inteligente o tableta como control remoto emparejándolo con su dispositivo utilizando un código o un código QR. </li>
67
- </ul>
68
- <h3>Consolas de juegos</h3>
69
- <p>Si tienes una consola de juegos que se conecta a tu televisor, como Xbox One, Xbox Series X/S, PlayStation 4, PlayStation 5 o Nintendo Switch, puedes descargarla y verla en YouTube. Para hacerlo, siga estos pasos:</p>
70
- <ul>
71
- <li>Conecta tu consola a tu TV y enciéndela. </li>
72
- <li>Vaya a la tienda de aplicaciones o de juegos de su consola.</li>
73
- <li>Busca YouTube y descárgalo. </li>
74
- <li>Abre YouTube e inicia sesión con tu cuenta de Google. </li>
75
- <li>También puede usar su teléfono inteligente o tableta como control remoto emparejándolo con su consola usando un código o un código QR. </li>
76
- </ul>
77
- <h2>Conclusión</h2>
78
- <p>Descargar la aplicación de YouTube es una gran manera de ver videos en cualquier dispositivo. Puedes disfrutar de vídeos de alta calidad, descubrir nuevos contenidos, interactuar con otros usuarios y creadores y acceder a funciones adicionales. Tanto si tienes un Android, iOS, Windows 10 o cualquier otro dispositivo compatible con YouTube, puedes descargar fácilmente la aplicación desde la tienda de aplicaciones correspondiente e iniciar sesión con tu cuenta de Google. También puede emitir videos a su televisor u otros dispositivos usando Chromecast o AirPlay, o usar su teléfono inteligente o tableta como control remoto. Con la aplicación de YouTube, puedes ver lo que te gusta, cuando quieras, donde quiera que estés. </p>
79
- <h2>Preguntas frecuentes</h2>
80
- <ul>
81
- <li><b>Q: ¿Cuánto cuesta la aplicación de YouTube? </b></li>
82
- <li>A: La aplicación de YouTube es gratuita para descargar y usar. Sin embargo, también puede actualizar a YouTube Premium por una tarifa mensual, que le da visualización sin anuncios, descargas sin conexión, reproducción de fondo y acceso a YouTube Music y YouTube Originals.</li>
83
- <li><b>Q: ¿Cómo puedo actualizar la aplicación de YouTube? </b></li>
84
-
85
- <li><b>Q: ¿Cómo puedo eliminar la aplicación de YouTube? </b></li>
86
- <li>A: Puedes eliminar la aplicación de YouTube desde tu dispositivo siguiendo estos pasos:</li>
87
- <ul>
88
- <li>En dispositivos Android, vaya a Configuración > Aplicaciones > YouTube > Desinstalar.</li>
89
- <li>En los dispositivos iOS, mantenga pulsado el icono de la aplicación de YouTube hasta que se mueva, luego toque el botón X y confirme. </li>
90
- <li>En dispositivos Windows 10, vaya a Inicio > Configuración > Aplicaciones > Aplicaciones y características > YouTube > Desinstalar.</li>
91
- <li>En otros dispositivos, siga las instrucciones del fabricante o manual del usuario de su dispositivo. </li>
92
- </ul>
93
- <li><b>Q: ¿Cómo puedo contactar con el soporte de YouTube? </b></li>
94
- <li>A: Puede ponerse en contacto con el soporte de YouTube visitando <a href="">esta página</a>, donde puede encontrar artículos de ayuda, foros de la comunidad, formularios de comentarios y opciones de chat o correo electrónico. </li>
95
- <li><b>Q: ¿Cómo puedo reportar un problema o un error en la aplicación de YouTube? </b></li>
96
- <li>A: Puede reportar un problema o un error en la aplicación de YouTube siguiendo estos pasos:</li>
97
- <ul>
98
- <li> Abra la aplicación de YouTube y toque en la imagen de perfil en la parte superior derecha de la pantalla. </li>
99
- <li>Toque en Configuración > Ayuda y comentarios > Enviar comentarios. </li>
100
- <li>Describa su problema y adjunte una captura de pantalla si es posible. </li>
101
- <li>Toque en Enviar para enviar su informe. </li>
102
- </ul>
103
- </ul></p> 64aa2da5cf<br />
104
- <br />
105
- <br />
 
spaces/Benson/text-generation/Examples/Bloons Td 6 32.4 Apk.md DELETED
@@ -1,87 +0,0 @@
1
- <br />
2
- <h1>FIFA TV APK: Todo lo que necesita saber</h1>
3
- <p>Si eres fanático del fútbol, probablemente no quieras perderte ninguno de los emocionantes partidos y eventos destacados de tus equipos y ligas favoritas. Pero, ¿qué pasa si no tienes acceso a una suscripción de televisión por cable o vía satélite, o si quieres ver los juegos en tu dispositivo móvil? Ahí es donde FIFA TV APK viene muy bien. En este artículo, te contaremos todo lo que necesitas saber sobre esta increíble aplicación, incluyendo qué es, cómo descargarla e instalarla, por qué deberías usarla y cuáles son algunas alternativas a ella. ¡Vamos a empezar! </p>
4
- <h2>¿Qué es FIFA TV APK? </h2>
5
- <h3>Una breve introducción a FIFA TV APK y sus características</h3>
6
- <p>FIFA TV APK es una aplicación no oficial que le permite ver partidos de fútbol en vivo y lo más destacado de varias ligas y torneos de todo el mundo, incluyendo la Copa Mundial de la FIFA, Liga de Campeones de la UEFA, Liga Premier Inglesa, Liga española, Bundesliga alemana, Serie A italiana, Ligue 1 francesa y más. También puedes acceder a las últimas noticias, clasificaciones, estadísticas y vídeos del sitio web y la aplicación oficial de la FIFA. </p>
7
- <h2>bloons td 6 32.4 apk</h2><br /><p><b><b>Download</b> &#10145; <a href="https://bltlly.com/2v6Les">https://bltlly.com/2v6Les</a></b></p><br /><br />
8
- <p>Algunas de las características de FIFA TV APK son:</p>
9
- <ul>
10
- <li> Es gratuito para descargar y usar, sin necesidad de registro o suscripción. </li>
11
- <li> Tiene una interfaz simple y fácil de usar, con fácil navegación y opciones de búsqueda. </li>
12
- <li>Ofrece transmisión de vídeo de alta calidad, con resolución ajustable y velocidad de reproducción. </li>
13
- <li>Soporta múltiples idiomas, incluyendo inglés, español, francés, alemán, portugués, árabe y más. </li>
14
- <li> Tiene un bajo consumo de batería y uso de datos, con una opción para descargar videos para ver sin conexión. </li>
15
- <li>Es compatible con la mayoría de los dispositivos Android, incluidos teléfonos inteligentes, tabletas, televisores inteligentes y cajas Android. </li>
16
- </ul>
17
- <h3> Cómo descargar e instalar FIFA TV APK en su dispositivo Android</h3>
18
- <p>Para descargar e instalar FIFA TV APK en su dispositivo Android, es necesario seguir estos sencillos pasos:</p>
19
- <ol>
20
-
21
- <li>Una vez completada la descarga, vaya a la configuración de su dispositivo y habilite la opción de instalar aplicaciones de fuentes desconocidas. Esto le permitirá instalar aplicaciones que no están disponibles en Google Play Store.</li>
22
- <li>Busque el archivo FIFA TV APK descargado en el administrador de archivos de su dispositivo y toque en él. Esto iniciará el proceso de instalación. </li>
23
- <li>Siga las instrucciones en pantalla y conceda los permisos necesarios a la aplicación. Esto completará el proceso de instalación. </li>
24
- <li>Ahora puedes abrir la aplicación desde el cajón de aplicaciones de tu dispositivo y disfrutar viendo partidos de fútbol en vivo y lo más destacado en tu dispositivo. </li>
25
- </ol>
26
- <h2>¿Por qué debe utilizar FIFA TV APK? </h2>
27
- <h3> Los beneficios de usar FIFA TV APK para ver partidos de fútbol en vivo y lo más destacado</h3>
28
- <p>Hay muchas razones por las que debe utilizar FIFA TV APK para ver partidos de fútbol en vivo y destaca en su dispositivo. Algunos de ellos son:</p>
29
- <ul>
30
- <li>Puedes ver partidos de fútbol en vivo de varias ligas y torneos de todo el mundo, incluyendo la Copa Mundial de la FIFA, la Liga de Campeones de la UEFA, la Liga Inglesa, la Liga Española, la Bundesliga Alemana, la Serie A italiana, la Ligue 1 francesa y más. </li>
31
- <li>Puedes acceder a las últimas noticias, clasificaciones, estadísticas y vídeos desde el sitio web y la aplicación oficial de la FIFA. </li>
32
- <li> Puede disfrutar de la transmisión de video de alta calidad, con resolución ajustable y velocidad de reproducción. </li>
33
- <li>Puedes elegir entre varios idiomas, incluyendo inglés, español, francés, alemán, portugués, árabe y más. </li>
34
- <li>Puede guardar su consumo de batería y datos, con una opción para descargar videos para verlos sin conexión. </li>
35
- <li> Puede usar la aplicación en cualquier dispositivo Android, incluidos teléfonos inteligentes, tabletas, televisores inteligentes y cajas Android. </li>
36
- </ul>
37
- <h3>Los inconvenientes y limitaciones de usar FIFA TV APK</h3>
38
- <p>Sin embargo, también hay algunos inconvenientes y limitaciones de usar FIFA TV APK que usted debe ser consciente de. Algunos de ellos son:</p>
39
- <ul>
40
-
41
- <li>La aplicación puede no ser legal y segura de usar en algunos países o regiones. Esto significa que puede enfrentar algunos problemas o riesgos legales si usa la aplicación sin el permiso o autorización adecuados. </li>
42
- <li>La aplicación puede no ser estable y confiable en todo momento. Esto significa que puede bloquearse o congelarse a veces, o puede no funcionar correctamente en algunos dispositivos o redes. </li>
43
- <li>Es posible que la aplicación no tenga todos los partidos y aspectos destacados que desea ver. Esto significa que puede que no cubra algunas ligas o torneos en los que estés interesado, o puede tener algunos retrasos o errores en el streaming. </li>
44
- <li>La aplicación puede tener algunos anuncios o ventanas emergentes que pueden molestar o distraer. Esto significa que puede tener que lidiar con algunos anuncios o ventanas emergentes no deseados o irrelevantes mientras usa la aplicación. </li>
45
- </ul>
46
- <h2>¿Cuáles son algunas alternativas a FIFA TV APK? </h2>
47
- <h3> Una tabla de comparación de FIFA TV APK y otras aplicaciones populares de streaming de fútbol</h3>
48
- <p>Si usted está buscando algunas alternativas a FIFA TV APK, es posible que desee echa un vistazo a algunas de estas aplicaciones de streaming de fútbol populares que están disponibles en la Google Play Store u otras fuentes. Aquí hay una tabla de comparación de FIFA TV APK y otras aplicaciones populares de streaming de fútbol:</p>
49
- <tabla>
50
- <tr><th>Nombre de la aplicación</th><th>Precio</th><th>Características</th><th><th>Pros</th><th>Contras</th></tr>
51
- <tr><td>FIFA TV APK</td><td>Gratis</td><td>Ver partidos de fútbol en vivo y lo más destacado de varias ligas y torneos de todo el mundo; acceder a las últimas noticias, clasificaciones, estadísticas y videos desde el sitio web oficial de la FIFA y la aplicación; disfrutar de la transmisión de vídeo de alta calidad, con resolución ajustable y velocidad de reproducción; elegir entre varios idiomas; ahorrar batería y consumo de datos; uso en cualquier dispositivo Android</td><td>Gratis; simple; fácil de usar; alta calidad; multilingüe; bajo uso de batería y datos; compatible con la mayoría de los dispositivos Android</td><td>No oficial; no autorizado; ilegal; inseguro; inestable; poco fiable; limitado; ads</td></tr>
52
-
53
- <tr><td>SofaScore - Resultados en vivo & Estadísticas</td><td>Gratis (con compras en la aplicación)</td><td>Ver partidos de fútbol en vivo y lo más destacado de varias ligas y torneos de todo el mundo; acceder a las últimas noticias, posiciones, estadísticas y videos de varias fuentes; disfrutar de streaming de vídeo de alta calidad; elegir entre varios idiomas; utilizar en cualquier dispositivo Android</td><td>Simple; fácil de usar; de alta calidad; multilingüe; compatible con la mayoría de los dispositivos Android</td><td>No oficial; anuncios; compras in-app</td></tr>
54
- <tr><td>DStv Now - Ver deportes en vivo y programas de televisión en línea</td><td>Pagado (con suscripción)</td><td>Ver partidos de fútbol en vivo y lo más destacado de varias ligas y torneos de todo el mundo; acceder a las últimas noticias, posiciones, estadísticas y videos de varias fuentes; disfrutar de streaming de vídeo de alta calidad; elegir entre varios idiomas; utilizar en cualquier dispositivo Android</td><td>Oficial; autorizado; legal; seguro; estable; confiable; de alta calidad; multilingüe; compatible con la mayoría de los dispositivos Android</td><td>Pagado; suscripción requerida; limitado a regiones DStv solamente; puede no funcionar en algunas redes o dispositivos</td></tr>
55
- </tabla>
56
- <h3>Una breve reseña de cada aplicación alternativa y sus pros y contras</h3>
57
- <p>Aquí hay una breve reseña de cada aplicación alternativa y sus pros y contras:</p>
58
- <ul>
59
-
60
- <li><b>SofaScore - Live Scores & Stats</b>: Esta es una aplicación no oficial que te permite ver partidos de fútbol en vivo y lo más destacado de varias ligas y torneos en todo el mundo, como la Liga de Campeones de la UEFA, la Liga Española, la Bundesliga alemana, Serie A italiana, Ligue 1 francesa y más. También puede acceder a las últimas noticias, clasificaciones, estadísticas y videos de varias fuentes. La aplicación es gratuita para descargar y usar, pero tiene algunos anuncios y compras en la aplicación. La aplicación es no oficial y no autorizada, por lo que puede violar algunos derechos de autor y marcas comerciales de los propietarios de contenido. La aplicación es simple y fácil de usar, con fácil navegación y opciones de búsqueda. La aplicación ofrece transmisión de vídeo de alta calidad, con resolución ajustable y velocidad de reproducción. La aplicación admite varios idiomas, incluyendo inglés, español, francés, alemán, portugués, árabe y más. La aplicación es compatible con la mayoría de los dispositivos Android, incluidos teléfonos inteligentes, tabletas, televisores inteligentes y cajas Android. </li>
61
-
62
- </ul>
63
- <h2>Conclusión</h2>
64
- <h3>Un resumen de los puntos principales y un llamado a la acción para los lectores</h3>
65
- <p>En conclusión, FIFA TV APK es una aplicación increíble que le permite ver partidos de fútbol en vivo y lo más destacado de varias ligas y torneos de todo el mundo en su dispositivo Android. Es gratuito para descargar y usar, sin necesidad de registro o suscripción. Tiene una interfaz simple y fácil de usar, con opciones de navegación y búsqueda fáciles. Ofrece transmisión de vídeo de alta calidad, con resolución ajustable y velocidad de reproducción. Soporta múltiples idiomas, incluyendo inglés, español, francés, alemán, portugués, árabe y más. Tiene un bajo consumo de batería y el uso de datos, con una opción para descargar vídeos para su visualización sin conexión. Es compatible con la mayoría de dispositivos Android, incluidos teléfonos inteligentes, tabletas, televisores inteligentes y cajas Android. </p>
66
- <p></p>
67
- <p>Sin embargo, FIFA TV APK no es oficial y autorizado por la FIFA o cualquier otra organización de fútbol. Esto significa que puede violar algunos derechos de autor y marcas comerciales de los propietarios del contenido. También puede no ser legal y seguro de usar en algunos países o regiones, donde puede enfrentar algunos problemas o riesgos legales si utiliza la aplicación sin el permiso o autorización adecuada. La aplicación también puede no ser estable y confiable en todo momento, ya que puede bloquearse o congelarse a veces, o puede no funcionar correctamente en algunos dispositivos o redes. La aplicación también puede no tener todos los partidos y puntos destacados que desea ver, ya que puede no cubrir algunas ligas o torneos que le interesan, o puede tener algunos retrasos o errores en la transmisión. La aplicación también puede tener algunos anuncios o ventanas emergentes que pueden molestarte o distraerte mientras usas la aplicación. </p>
68
-
69
- <p>Esperamos que este artículo le ha ayudado a aprender más sobre FIFA TV APK y sus características, beneficios, inconvenientes y alternativas. Si eres un fanático del fútbol, definitivamente deberías probar esta aplicación y ver por ti mismo lo increíble que es. Puede descargar FIFA TV APK desde [este enlace] y empezar a ver partidos de fútbol en vivo y lo más destacado en su dispositivo Android. Seguramente le encantará esta aplicación y disfrutar de ver sus equipos y ligas favoritas en su dispositivo. Feliz viendo! </p>
70
- <h2>Preguntas frecuentes</h2>
71
- <h3>Q1: ¿Es FIFA TV APK legal y seguro de usar? </h3>
72
- <p>A1: FIFA TV APK no es legal y seguro de usar en algunos países o regiones, donde puede violar algunos derechos de autor y marcas comerciales de los propietarios de contenidos. También puede enfrentar algunos problemas o riesgos legales si usa la aplicación sin el permiso o autorización adecuados. Por lo tanto, debes revisar las leyes y regulaciones de tu país o región antes de usar la aplicación, y siempre respetar los derechos e intereses de los propietarios de contenido. </p>
73
- <h3>Q2: ¿Cuántos datos consume FIFA TV APK? </h3>
74
- <p>A2: FIFA TV APK consume una cantidad moderada de datos, dependiendo de la resolución y la velocidad de reproducción de la transmisión de vídeo. Puede ajustar estos ajustes según su preferencia y condición de red. También puede descargar vídeos para verlos sin conexión para guardar el consumo de datos. </p>
75
- <h3>Q3: ¿Puedo ver la Copa Mundial de la FIFA 2022 en FIFA TV APK? </h3>
76
- <p>A3: Sí, puedes ver la Copa Mundial de la FIFA 2022 en FIFA TV APK, así como otros eventos de la FIFA, como la Copa Mundial Femenina de la FIFA, la Copa Mundial de Clubes de la FIFA, la Copa Mundial Sub-20 de la FIFA, y más. Sin embargo, debe tener en cuenta que la aplicación puede no tener todos los partidos y puntos destacados que desea ver, ya que puede no cubrir algunas regiones o idiomas que le interesan, o puede tener algunos retrasos o errores en la transmisión. </p>
77
- <h3>Q4: ¿Cuáles son los requisitos mínimos para ejecutar FIFA TV APK? </h3>
78
- <p>A4: Los requisitos mínimos para ejecutar FIFA TV APK son:</p>
79
- <ul>
80
-
81
- <li>Una buena conexión a Internet con al menos 3G velocidad</li>
82
- <li>Un dispositivo compatible con suficiente espacio de almacenamiento y RAM</li>
83
- </ul>
84
- <h3>Q5: ¿Cómo puedo contactar a los desarrolladores de FIFA TV APK? </h3>
85
- <p>A5: Puede ponerse en contacto con los desarrolladores de FIFA TV APK enviándoles un correo electrónico a [email protected]. También puede visitar su sitio web en [este enlace] para obtener más información y actualizaciones. </p> 64aa2da5cf<br />
86
- <br />
87
- <br />
 
spaces/BetterAPI/BetterChat/src/routes/conversation/+server.ts DELETED
@@ -1,57 +0,0 @@
1
- import type { RequestHandler } from "./$types";
2
- import { collections } from "$lib/server/database";
3
- import { ObjectId } from "mongodb";
4
- import { error, redirect } from "@sveltejs/kit";
5
- import { base } from "$app/paths";
6
- import { z } from "zod";
7
- import type { Message } from "$lib/types/Message";
8
-
9
- export const POST: RequestHandler = async (input) => {
10
- const body = await input.request.text();
11
-
12
- let title = "";
13
- let messages: Message[] = [];
14
- let fromShareId: string | undefined;
15
-
16
- if (body) {
17
- fromShareId = z.object({ fromShare: z.string().optional() }).parse(JSON.parse(body)).fromShare;
18
-
19
- if (fromShareId) {
20
- const conversation = await collections.sharedConversations.findOne({
21
- _id: fromShareId,
22
- });
23
-
24
- if (!conversation) {
25
- throw error(404, "Conversation not found");
26
- }
27
-
28
- title = conversation.title;
29
- messages = conversation.messages;
30
- }
31
- }
32
-
33
- const res = await collections.conversations.insertOne({
34
- _id: new ObjectId(),
35
- title:
36
- title ||
37
- "Untitled " +
38
- ((await collections.conversations.countDocuments({ sessionId: input.locals.sessionId })) +
39
- 1),
40
- messages,
41
- createdAt: new Date(),
42
- updatedAt: new Date(),
43
- sessionId: input.locals.sessionId,
44
- ...(fromShareId ? { meta: { fromShareId } } : {}),
45
- });
46
-
47
- return new Response(
48
- JSON.stringify({
49
- conversationId: res.insertedId.toString(),
50
- }),
51
- { headers: { "Content-Type": "application/json" } }
52
- );
53
- };
54
-
55
- export const GET: RequestHandler = async () => {
56
- throw redirect(302, base || "/");
57
- };
 
spaces/CVPR/CVPR2022_papers/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: CVPR2022 Papers
3
- emoji: 🔥
4
- colorFrom: red
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.36.1
8
- app_file: app.py
9
- pinned: true
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/LIVE/pybind11/tests/test_pytypes.py DELETED
@@ -1,392 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- from __future__ import division
3
- import pytest
4
- import sys
5
-
6
- import env # noqa: F401
7
-
8
- from pybind11_tests import pytypes as m
9
- from pybind11_tests import debug_enabled
10
-
11
-
12
- def test_int(doc):
13
- assert doc(m.get_int) == "get_int() -> int"
14
-
15
-
16
- def test_iterator(doc):
17
- assert doc(m.get_iterator) == "get_iterator() -> Iterator"
18
-
19
-
20
- def test_iterable(doc):
21
- assert doc(m.get_iterable) == "get_iterable() -> Iterable"
22
-
23
-
24
- def test_list(capture, doc):
25
- with capture:
26
- lst = m.get_list()
27
- assert lst == ["inserted-0", "overwritten", "inserted-2"]
28
-
29
- lst.append("value2")
30
- m.print_list(lst)
31
- assert capture.unordered == """
32
- Entry at position 0: value
33
- list item 0: inserted-0
34
- list item 1: overwritten
35
- list item 2: inserted-2
36
- list item 3: value2
37
- """
38
-
39
- assert doc(m.get_list) == "get_list() -> list"
40
- assert doc(m.print_list) == "print_list(arg0: list) -> None"
41
-
42
-
43
- def test_none(capture, doc):
44
- assert doc(m.get_none) == "get_none() -> None"
45
- assert doc(m.print_none) == "print_none(arg0: None) -> None"
46
-
47
-
48
- def test_set(capture, doc):
49
- s = m.get_set()
50
- assert s == {"key1", "key2", "key3"}
51
-
52
- with capture:
53
- s.add("key4")
54
- m.print_set(s)
55
- assert capture.unordered == """
56
- key: key1
57
- key: key2
58
- key: key3
59
- key: key4
60
- """
61
-
62
- assert not m.set_contains(set([]), 42)
63
- assert m.set_contains({42}, 42)
64
- assert m.set_contains({"foo"}, "foo")
65
-
66
- assert doc(m.get_list) == "get_list() -> list"
67
- assert doc(m.print_list) == "print_list(arg0: list) -> None"
68
-
69
-
70
- def test_dict(capture, doc):
71
- d = m.get_dict()
72
- assert d == {"key": "value"}
73
-
74
- with capture:
75
- d["key2"] = "value2"
76
- m.print_dict(d)
77
- assert capture.unordered == """
78
- key: key, value=value
79
- key: key2, value=value2
80
- """
81
-
82
- assert not m.dict_contains({}, 42)
83
- assert m.dict_contains({42: None}, 42)
84
- assert m.dict_contains({"foo": None}, "foo")
85
-
86
- assert doc(m.get_dict) == "get_dict() -> dict"
87
- assert doc(m.print_dict) == "print_dict(arg0: dict) -> None"
88
-
89
- assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3}
90
-
91
-
92
- def test_str(doc):
93
- assert m.str_from_string().encode().decode() == "baz"
94
- assert m.str_from_bytes().encode().decode() == "boo"
95
-
96
- assert doc(m.str_from_bytes) == "str_from_bytes() -> str"
97
-
98
- class A(object):
99
- def __str__(self):
100
- return "this is a str"
101
-
102
- def __repr__(self):
103
- return "this is a repr"
104
-
105
- assert m.str_from_object(A()) == "this is a str"
106
- assert m.repr_from_object(A()) == "this is a repr"
107
-
108
- s1, s2 = m.str_format()
109
- assert s1 == "1 + 2 = 3"
110
- assert s1 == s2
111
-
112
-
113
- def test_bytes(doc):
114
- assert m.bytes_from_string().decode() == "foo"
115
- assert m.bytes_from_str().decode() == "bar"
116
-
117
- assert doc(m.bytes_from_str) == "bytes_from_str() -> {}".format(
118
- "str" if env.PY2 else "bytes"
119
- )
120
-
121
-
122
- def test_capsule(capture):
123
- pytest.gc_collect()
124
- with capture:
125
- a = m.return_capsule_with_destructor()
126
- del a
127
- pytest.gc_collect()
128
- assert capture.unordered == """
129
- creating capsule
130
- destructing capsule
131
- """
132
-
133
- with capture:
134
- a = m.return_capsule_with_destructor_2()
135
- del a
136
- pytest.gc_collect()
137
- assert capture.unordered == """
138
- creating capsule
139
- destructing capsule: 1234
140
- """
141
-
142
- with capture:
143
- a = m.return_capsule_with_name_and_destructor()
144
- del a
145
- pytest.gc_collect()
146
- assert capture.unordered == """
147
- created capsule (1234, 'pointer type description')
148
- destructing capsule (1234, 'pointer type description')
149
- """
150
-
151
-
152
- def test_accessors():
153
- class SubTestObject:
154
- attr_obj = 1
155
- attr_char = 2
156
-
157
- class TestObject:
158
- basic_attr = 1
159
- begin_end = [1, 2, 3]
160
- d = {"operator[object]": 1, "operator[char *]": 2}
161
- sub = SubTestObject()
162
-
163
- def func(self, x, *args):
164
- return self.basic_attr + x + sum(args)
165
-
166
- d = m.accessor_api(TestObject())
167
- assert d["basic_attr"] == 1
168
- assert d["begin_end"] == [1, 2, 3]
169
- assert d["operator[object]"] == 1
170
- assert d["operator[char *]"] == 2
171
- assert d["attr(object)"] == 1
172
- assert d["attr(char *)"] == 2
173
- assert d["missing_attr_ptr"] == "raised"
174
- assert d["missing_attr_chain"] == "raised"
175
- assert d["is_none"] is False
176
- assert d["operator()"] == 2
177
- assert d["operator*"] == 7
178
- assert d["implicit_list"] == [1, 2, 3]
179
- assert all(x in TestObject.__dict__ for x in d["implicit_dict"])
180
-
181
- assert m.tuple_accessor(tuple()) == (0, 1, 2)
182
-
183
- d = m.accessor_assignment()
184
- assert d["get"] == 0
185
- assert d["deferred_get"] == 0
186
- assert d["set"] == 1
187
- assert d["deferred_set"] == 1
188
- assert d["var"] == 99
189
-
190
-
191
- def test_constructors():
192
- """C++ default and converting constructors are equivalent to type calls in Python"""
193
- types = [bytes, str, bool, int, float, tuple, list, dict, set]
194
- expected = {t.__name__: t() for t in types}
195
- if env.PY2:
196
- # Note that bytes.__name__ == 'str' in Python 2.
197
- # pybind11::str is unicode even under Python 2.
198
- expected["bytes"] = bytes()
199
- expected["str"] = unicode() # noqa: F821
200
- assert m.default_constructors() == expected
201
-
202
- data = {
203
- bytes: b'41', # Currently no supported or working conversions.
204
- str: 42,
205
- bool: "Not empty",
206
- int: "42",
207
- float: "+1e3",
208
- tuple: range(3),
209
- list: range(3),
210
- dict: [("two", 2), ("one", 1), ("three", 3)],
211
- set: [4, 4, 5, 6, 6, 6],
212
- memoryview: b'abc'
213
- }
214
- inputs = {k.__name__: v for k, v in data.items()}
215
- expected = {k.__name__: k(v) for k, v in data.items()}
216
- if env.PY2: # Similar to the above. See comments above.
217
- inputs["bytes"] = b'41'
218
- inputs["str"] = 42
219
- expected["bytes"] = b'41'
220
- expected["str"] = u"42"
221
-
222
- assert m.converting_constructors(inputs) == expected
223
- assert m.cast_functions(inputs) == expected
224
-
225
- # Converting constructors and cast functions should just reference rather
226
- # than copy when no conversion is needed:
227
- noconv1 = m.converting_constructors(expected)
228
- for k in noconv1:
229
- assert noconv1[k] is expected[k]
230
-
231
- noconv2 = m.cast_functions(expected)
232
- for k in noconv2:
233
- assert noconv2[k] is expected[k]
234
-
235
-
236
- def test_pybind11_str_raw_str():
237
- # specifically to exercise pybind11::str::raw_str
238
- cvt = m.convert_to_pybind11_str
239
- assert cvt(u"Str") == u"Str"
240
- assert cvt(b'Bytes') == u"Bytes" if env.PY2 else "b'Bytes'"
241
- assert cvt(None) == u"None"
242
- assert cvt(False) == u"False"
243
- assert cvt(True) == u"True"
244
- assert cvt(42) == u"42"
245
- assert cvt(2**65) == u"36893488147419103232"
246
- assert cvt(-1.50) == u"-1.5"
247
- assert cvt(()) == u"()"
248
- assert cvt((18,)) == u"(18,)"
249
- assert cvt([]) == u"[]"
250
- assert cvt([28]) == u"[28]"
251
- assert cvt({}) == u"{}"
252
- assert cvt({3: 4}) == u"{3: 4}"
253
- assert cvt(set()) == u"set([])" if env.PY2 else "set()"
254
- assert cvt({3, 3}) == u"set([3])" if env.PY2 else "{3}"
255
-
256
- valid_orig = u"DZ"
257
- valid_utf8 = valid_orig.encode("utf-8")
258
- valid_cvt = cvt(valid_utf8)
259
- assert type(valid_cvt) == bytes # Probably surprising.
260
- assert valid_cvt == b'\xc7\xb1'
261
-
262
- malformed_utf8 = b'\x80'
263
- malformed_cvt = cvt(malformed_utf8)
264
- assert type(malformed_cvt) == bytes # Probably surprising.
265
- assert malformed_cvt == b'\x80'
266
-
267
-
268
- def test_implicit_casting():
269
- """Tests implicit casting when assigning or appending to dicts and lists."""
270
- z = m.get_implicit_casting()
271
- assert z['d'] == {
272
- 'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',
273
- 'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',
274
- 'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44
275
- }
276
- assert z['l'] == [3, 6, 9, 12, 15]
277
-
278
-
279
- def test_print(capture):
280
- with capture:
281
- m.print_function()
282
- assert capture == """
283
- Hello, World!
284
- 1 2.0 three True -- multiple args
285
- *args-and-a-custom-separator
286
- no new line here -- next print
287
- flush
288
- py::print + str.format = this
289
- """
290
- assert capture.stderr == "this goes to stderr"
291
-
292
- with pytest.raises(RuntimeError) as excinfo:
293
- m.print_failure()
294
- assert str(excinfo.value) == "make_tuple(): unable to convert " + (
295
- "argument of type 'UnregisteredType' to Python object"
296
- if debug_enabled else
297
- "arguments to Python object (compile in debug mode for details)"
298
- )
299
-
300
-
301
- def test_hash():
302
- class Hashable(object):
303
- def __init__(self, value):
304
- self.value = value
305
-
306
- def __hash__(self):
307
- return self.value
308
-
309
- class Unhashable(object):
310
- __hash__ = None
311
-
312
- assert m.hash_function(Hashable(42)) == 42
313
- with pytest.raises(TypeError):
314
- m.hash_function(Unhashable())
315
-
316
-
317
- def test_number_protocol():
318
- for a, b in [(1, 1), (3, 5)]:
319
- li = [a == b, a != b, a < b, a <= b, a > b, a >= b, a + b,
320
- a - b, a * b, a / b, a | b, a & b, a ^ b, a >> b, a << b]
321
- assert m.test_number_protocol(a, b) == li
322
-
323
-
324
- def test_list_slicing():
325
- li = list(range(100))
326
- assert li[::2] == m.test_list_slicing(li)
327
-
328
-
329
- @pytest.mark.parametrize('method, args, fmt, expected_view', [
330
- (m.test_memoryview_object, (b'red',), 'B', b'red'),
331
- (m.test_memoryview_buffer_info, (b'green',), 'B', b'green'),
332
- (m.test_memoryview_from_buffer, (False,), 'h', [3, 1, 4, 1, 5]),
333
- (m.test_memoryview_from_buffer, (True,), 'H', [2, 7, 1, 8]),
334
- (m.test_memoryview_from_buffer_nativeformat, (), '@i', [4, 7, 5]),
335
- ])
336
- def test_memoryview(method, args, fmt, expected_view):
337
- view = method(*args)
338
- assert isinstance(view, memoryview)
339
- assert view.format == fmt
340
- if isinstance(expected_view, bytes) or not env.PY2:
341
- view_as_list = list(view)
342
- else:
343
- # Using max to pick non-zero byte (big-endian vs little-endian).
344
- view_as_list = [max([ord(c) for c in s]) for s in view]
345
- assert view_as_list == list(expected_view)
346
-
347
-
348
- @pytest.mark.xfail("env.PYPY", reason="getrefcount is not available")
349
- @pytest.mark.parametrize('method', [
350
- m.test_memoryview_object,
351
- m.test_memoryview_buffer_info,
352
- ])
353
- def test_memoryview_refcount(method):
354
- buf = b'\x0a\x0b\x0c\x0d'
355
- ref_before = sys.getrefcount(buf)
356
- view = method(buf)
357
- ref_after = sys.getrefcount(buf)
358
- assert ref_before < ref_after
359
- assert list(view) == list(buf)
360
-
361
-
362
- def test_memoryview_from_buffer_empty_shape():
363
- view = m.test_memoryview_from_buffer_empty_shape()
364
- assert isinstance(view, memoryview)
365
- assert view.format == 'B'
366
- if env.PY2:
367
- # Python 2 behavior is weird, but Python 3 (the future) is fine.
368
- # PyPy3 has <memoryview, while CPython 2 has <memory
369
- assert bytes(view).startswith(b'<memory')
370
- else:
371
- assert bytes(view) == b''
372
-
373
-
374
- def test_test_memoryview_from_buffer_invalid_strides():
375
- with pytest.raises(RuntimeError):
376
- m.test_memoryview_from_buffer_invalid_strides()
377
-
378
-
379
- def test_test_memoryview_from_buffer_nullptr():
380
- if env.PY2:
381
- m.test_memoryview_from_buffer_nullptr()
382
- else:
383
- with pytest.raises(ValueError):
384
- m.test_memoryview_from_buffer_nullptr()
385
-
386
-
387
- @pytest.mark.skipif("env.PY2")
388
- def test_memoryview_from_memory():
389
- view = m.test_memoryview_from_memory()
390
- assert isinstance(view, memoryview)
391
- assert view.format == 'B'
392
- assert bytes(view) == b'\xff\xe1\xab\x37'
 
spaces/CVPR/LIVE/thrust/thrust/mr/pool_options.h DELETED
@@ -1,127 +0,0 @@
- /*
-  * Copyright 2018 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- /*! \file pool_options.h
-  *  \brief \p pool_options is a type used by the pooling resource adaptors to fine-tune their behavior.
-  */
-
- #pragma once
-
- #include <cstddef>
-
- #include <thrust/detail/integer_math.h>
-
- #include <thrust/mr/detail/config.h>
-
- namespace thrust
- {
- namespace mr
- {
-
- /*! \addtogroup memory_management_classes Memory Management Classes
-  *  \ingroup memory_management
-  *  \{
-  */
-
- /*! A type used for configuring pooling resource adaptors, to fine-tune their behavior and parameters.
-  */
- struct pool_options
- {
-     /*! The minimal number of blocks, i.e. pieces of memory handed off to the user from a pool of a given size, in a single
-      *  chunk allocated from upstream.
-      */
-     std::size_t min_blocks_per_chunk;
-     /*! The minimal number of bytes in a single chunk allocated from upstream.
-      */
-     std::size_t min_bytes_per_chunk;
-     /*! The maximal number of blocks, i.e. pieces of memory handed off to the user from a pool of a given size, in a single
-      *  chunk allocated from upstream.
-      */
-     std::size_t max_blocks_per_chunk;
-     /*! The maximal number of bytes in a single chunk allocated from upstream.
-      */
-     std::size_t max_bytes_per_chunk;
-
-     /*! The size of blocks in the smallest pool covered by the pool resource. All allocation requests below this size will
-      *  be rounded up to this size.
-      */
-     std::size_t smallest_block_size;
-     /*! The size of blocks in the largest pool covered by the pool resource. All allocation requests above this size will
-      *  be considered oversized, allocated directly from upstream (and not from a pool), and cached only if \p cache_oversized
-      *  is true.
-      */
-     std::size_t largest_block_size;
-
-     /*! The alignment of all blocks in internal pools of the pool resource. All allocation requests above this alignment
-      *  will be considered oversized, allocated directly from upstream (and not from a pool), and cached only if
-      *  \p cache_oversized is true.
-      */
-     std::size_t alignment;
-
-     /*! Decides whether oversized and overaligned blocks are cached for later use, or immediately returned to the upstream
-      *  resource.
-      */
-     bool cache_oversized;
-
-     /*! The size factor at which a cached allocation is considered too ridiculously oversized to use to fulfill an allocation
-      *  request. For instance: the user requests an allocation of size 1024 bytes. A block of size 32 * 1024 bytes is
-      *  cached. If \p cached_size_cutoff_factor is 32 or less, this block will be considered too big for that allocation
-      *  request.
-      */
-     std::size_t cached_size_cutoff_factor;
-     /*! The alignment factor at which a cached allocation is considered too ridiculously overaligned to use to fulfill an
-      *  allocation request. For instance: the user requests an allocation aligned to 32 bytes. A block aligned to 1024 bytes
-      *  is cached. If \p cached_alignment_cutoff_factor is 32 or less, this block will be considered too overaligned for that
-      *  allocation request.
-      */
-     std::size_t cached_alignment_cutoff_factor;
-
-     /*! Checks if the options are self-consistent.
-      *
-      *  \returns true if the options are self-consistent, false otherwise.
-      */
-     bool validate() const
-     {
-         if (!detail::is_power_of_2(smallest_block_size)) return false;
-         if (!detail::is_power_of_2(largest_block_size)) return false;
-         if (!detail::is_power_of_2(alignment)) return false;
-
-         if (max_bytes_per_chunk == 0 || max_blocks_per_chunk == 0) return false;
-         if (smallest_block_size == 0 || largest_block_size == 0) return false;
-
-         if (min_blocks_per_chunk > max_blocks_per_chunk) return false;
-         if (min_bytes_per_chunk > max_bytes_per_chunk) return false;
-
-         if (smallest_block_size > largest_block_size) return false;
-
-         if (min_blocks_per_chunk * smallest_block_size > max_bytes_per_chunk) return false;
-         if (min_blocks_per_chunk * largest_block_size > max_bytes_per_chunk) return false;
-
-         if (max_blocks_per_chunk * largest_block_size < min_bytes_per_chunk) return false;
-         if (max_blocks_per_chunk * smallest_block_size < min_bytes_per_chunk) return false;
-
-         if (alignment > smallest_block_size) return false;
-
-         return true;
-     }
- };
-
- /*! \}
-  */
-
- } // end mr
- } // end thrust
-
 
 
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/iter_swap.h DELETED
@@ -1,23 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits iter_swap
- #include <thrust/system/cpp/detail/iter_swap.h>
-
 
 
spaces/CVPR/Text2Human/Text2Human/ui_util/config.py DELETED
@@ -1,25 +0,0 @@
- import argparse
- import logging
- import os
-
- import yaml
-
- logger = logging.getLogger()
-
- class Config(object):
-     def __init__(self, filename=None):
-         assert os.path.exists(filename), "ERROR: Config File doesn't exist."
-         try:
-             with open(filename, 'r') as f:
-                 self._cfg_dict = yaml.load(f)
-         # parent of IOError, OSError *and* WindowsError where available
-         except EnvironmentError:
-             logger.error('Please check the file with name of "%s"', filename)
-         logger.info(' APP CONFIG '.center(80, '-'))
-         logger.info(''.center(80, '-'))
-
-     def __getattr__(self, name):
-         value = self._cfg_dict[name]
-         if isinstance(value, dict):
-             value = DictAsMember(value)
-         return value
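
Note: the deleted Config class above wraps a YAML file for attribute-style access; DictAsMember is referenced but never defined in this file, so nested mappings would fail. A self-contained sketch of the same pattern, with a hypothetical file name and keys:

    import yaml

    class DictConfig:
        # minimal attribute-style wrapper over a YAML mapping (illustrative only)
        def __init__(self, data):
            self._data = data

        def __getattr__(self, name):
            value = self._data[name]
            # wrap nested mappings so cfg.section.key also resolves
            return DictConfig(value) if isinstance(value, dict) else value

    # 'demo.yaml' and the keys below are hypothetical
    with open('demo.yaml', 'r') as f:
        cfg = DictConfig(yaml.safe_load(f))
    print(cfg.model.batch_size)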
 
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/configurator.py DELETED
@@ -1,134 +0,0 @@
- """Configurator module."""
- import click
- from colorama import Back, Fore, Style
-
- from autogpt import utils
- from autogpt.config import Config
- from autogpt.logs import logger
- from autogpt.memory import get_supported_memory_backends
-
- CFG = Config()
-
-
- def create_config(
-     continuous: bool,
-     continuous_limit: int,
-     ai_settings_file: str,
-     skip_reprompt: bool,
-     speak: bool,
-     debug: bool,
-     gpt3only: bool,
-     gpt4only: bool,
-     memory_type: str,
-     browser_name: str,
-     allow_downloads: bool,
-     skip_news: bool,
- ) -> None:
-     """Updates the config object with the given arguments.
-
-     Args:
-         continuous (bool): Whether to run in continuous mode
-         continuous_limit (int): The number of times to run in continuous mode
-         ai_settings_file (str): The path to the ai_settings.yaml file
-         skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
-         speak (bool): Whether to enable speak mode
-         debug (bool): Whether to enable debug mode
-         gpt3only (bool): Whether to enable GPT3.5 only mode
-         gpt4only (bool): Whether to enable GPT4 only mode
-         memory_type (str): The type of memory backend to use
-         browser_name (str): The name of the browser to use when using selenium to scrape the web
-         allow_downloads (bool): Whether to allow Auto-GPT to download files natively
-         skip_news (bool): Whether to suppress the output of latest news on startup
-     """
-     CFG.set_debug_mode(False)
-     CFG.set_continuous_mode(False)
-     CFG.set_speak_mode(False)
-
-     if debug:
-         logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
-         CFG.set_debug_mode(True)
-
-     if continuous:
-         logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
-         logger.typewriter_log(
-             "WARNING: ",
-             Fore.RED,
-             "Continuous mode is not recommended. It is potentially dangerous and may"
-             " cause your AI to run forever or carry out actions you would not usually"
-             " authorise. Use at your own risk.",
-         )
-         CFG.set_continuous_mode(True)
-
-         if continuous_limit:
-             logger.typewriter_log(
-                 "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
-             )
-             CFG.set_continuous_limit(continuous_limit)
-
-     # Check if continuous limit is used without continuous mode
-     if continuous_limit and not continuous:
-         raise click.UsageError("--continuous-limit can only be used with --continuous")
-
-     if speak:
-         logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
-         CFG.set_speak_mode(True)
-
-     if gpt3only:
-         logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
-         CFG.set_smart_llm_model(CFG.fast_llm_model)
-
-     if gpt4only:
-         logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
-         CFG.set_fast_llm_model(CFG.smart_llm_model)
-
-     if memory_type:
-         supported_memory = get_supported_memory_backends()
-         chosen = memory_type
-         if chosen not in supported_memory:
-             logger.typewriter_log(
-                 "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
-                 Fore.RED,
-                 f"{supported_memory}",
-             )
-             logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
-         else:
-             CFG.memory_backend = chosen
-
-     if skip_reprompt:
-         logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
-         CFG.skip_reprompt = True
-
-     if ai_settings_file:
-         file = ai_settings_file
-
-         # Validate file
-         (validated, message) = utils.validate_yaml_file(file)
-         if not validated:
-             logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
-             logger.double_check()
-             exit(1)
-
-         logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
-         CFG.ai_settings_file = file
-         CFG.skip_reprompt = True
-
-     if allow_downloads:
-         logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
-         logger.typewriter_log(
-             "WARNING: ",
-             Fore.YELLOW,
-             f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
-             + "It is recommended that you monitor any files it downloads carefully.",
-         )
-         logger.typewriter_log(
-             "WARNING: ",
-             Fore.YELLOW,
-             f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
-         )
-         CFG.allow_downloads = True
-
-     if skip_news:
-         CFG.skip_news = True
-
-     if browser_name:
-         CFG.selenium_web_browser = browser_name
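
Note: a minimal sketch of how this deleted helper is typically driven from the CLI layer; every argument value below is hypothetical and would normally come from click options:

    # hypothetical invocation; in Auto-GPT these values come from click CLI flags
    create_config(
        continuous=True,
        continuous_limit=5,
        ai_settings_file="ai_settings.yaml",
        skip_reprompt=False,
        speak=False,
        debug=True,
        gpt3only=False,
        gpt4only=False,
        memory_type="local",
        browser_name="chrome",
        allow_downloads=False,
        skip_news=True,
    )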
 
 
spaces/ClassCat/Spleen-3D-segmentation-with-MONAI/app.py DELETED
@@ -1,162 +0,0 @@
-
- import torch, torchvision
- from monai.networks.nets import UNet
- from monai.networks.layers import Norm
- from monai.inferers import sliding_window_inference
- import PIL
- from torchvision.utils import save_image
- import numpy as np
-
- model = UNet(
-     spatial_dims=3,
-     in_channels=1,
-     out_channels=2,
-     channels=(16, 32, 64, 128, 256),
-     strides=(2, 2, 2, 2),
-     num_res_units=2,
-     norm=Norm.BATCH,
- )
-
- model.load_state_dict(torch.load("weights/model.pt", map_location=torch.device('cpu')))
-
- import gradio as gr
-
- def load_image0():
-     return load_image(0)
-
- def load_image1():
-     return load_image(1)
-
- def load_image2():
-     return load_image(2)
-
- def load_image3():
-     return load_image(3)
-
- def load_image4():
-     return load_image(4)
-
- def load_image5():
-     return load_image(5)
-
- def load_image6():
-     return load_image(6)
-
- def load_image7():
-     return load_image(7)
-
- def load_image8():
-     return load_image(8)
-
- def load_image(index):
-     return [index, f"thumbnails/val_image{index}.png", f"thumbnails_label/val_label{index}.png"]
-
- def predict(index):
-     val_data = torch.load(f"samples/val_data{index}.pt")
-
-     model.eval()
-     with torch.no_grad():
-         roi_size = (160, 160, 160)
-         sw_batch_size = 4
-         val_outputs = sliding_window_inference(val_data, roi_size, sw_batch_size, model)
-
-     meta_tsr = torch.argmax(val_outputs, dim=1)[0, :, :, 80]
-     pil_image = torchvision.transforms.functional.to_pil_image(meta_tsr.to(torch.float32))
-
-     return pil_image
-
-
- with gr.Blocks(title="Spleen 3D segmentation with MONAI - ClassCat",
-                css=".gradio-container {background:azure;}"
-               ) as demo:
-     sample_index = gr.State([])
-
-     gr.HTML("""<div style="font-family:'Times New Roman', 'Serif'; font-size:16pt; font-weight:bold; text-align:center; color:royalblue;">Spleen 3D segmentation with MONAI</div>""")
-
-     gr.HTML("""<h4 style="color:navy;">1. Select an example, which includes input images and label images, by clicking "Example x" button.</h4>""")
-
-     with gr.Row():
-         input_image = gr.Image(label="a piece of input image data", type="filepath")
-         label_image = gr.Image(label="label image", type="filepath")
-         output_image = gr.Image(label="predicted image", type="pil")
-
-
-     with gr.Row():
-         with gr.Column():
-             ex_btn0 = gr.Button("Example 1")
-             ex_btn0.style(full_width=False, css="width:20px;")
-             ex_image0 = gr.Image(value='thumbnails/val_image0.png', interactive=False, label='ex 1')
-             ex_image0.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn1 = gr.Button("Example 2")
-             ex_btn1.style(full_width=False, css="width:20px;")
-             ex_image1 = gr.Image(value='thumbnails/val_image1.png', interactive=False, label='ex 2')
-             ex_image1.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn2 = gr.Button("Example 3")
-             ex_btn2.style(full_width=False, css="width:20px;")
-             ex_image2 = gr.Image(value='thumbnails/val_image2.png', interactive=False, label='ex 3')
-             ex_image2.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn3 = gr.Button("Example 4")
-             ex_btn3.style(full_width=False, css="width:20px;")
-             ex_image3 = gr.Image(value='thumbnails/val_image3.png', interactive=False, label='ex 4')
-             ex_image3.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn4 = gr.Button("Example 5")
-             ex_btn4.style(full_width=False, css="width:20px;")
-             ex_image4 = gr.Image(value='thumbnails/val_image4.png', interactive=False, label='ex 5')
-             ex_image4.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn5 = gr.Button("Example 6")
-             ex_btn5.style(full_width=False, css="width:20px;")
-             ex_image5 = gr.Image(value='thumbnails/val_image5.png', interactive=False, label='ex 6')
-             ex_image5.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn6 = gr.Button("Example 7")
-             ex_btn6.style(full_width=False, css="width:20px;")
-             ex_image6 = gr.Image(value='thumbnails/val_image6.png', interactive=False, label='ex 7')
-             ex_image6.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn7 = gr.Button("Example 8")
-             ex_btn7.style(full_width=False, css="width:20px;")
-             ex_image7 = gr.Image(value='thumbnails/val_image7.png', interactive=False, label='ex 8')
-             ex_image7.style(width=128, height=128)
-
-         with gr.Column():
-             ex_btn8 = gr.Button("Example 9")
-             ex_btn8.style(full_width=False, css="width:20px;")
-             ex_image8 = gr.Image(value='thumbnails/val_image8.png', interactive=False, label='ex 9')
-             ex_image8.style(width=128, height=128)
-
-
-     ex_btn0.click(fn=load_image0, outputs=[sample_index, input_image, label_image])
-     ex_btn1.click(fn=load_image1, outputs=[sample_index, input_image, label_image])
-     ex_btn2.click(fn=load_image2, outputs=[sample_index, input_image, label_image])
-     ex_btn3.click(fn=load_image3, outputs=[sample_index, input_image, label_image])
-     ex_btn4.click(fn=load_image4, outputs=[sample_index, input_image, label_image])
-     ex_btn5.click(fn=load_image5, outputs=[sample_index, input_image, label_image])
-     ex_btn6.click(fn=load_image6, outputs=[sample_index, input_image, label_image])
-     ex_btn7.click(fn=load_image7, outputs=[sample_index, input_image, label_image])
-     ex_btn8.click(fn=load_image8, outputs=[sample_index, input_image, label_image])
-
-
-     gr.HTML("""<br/>""")
-     gr.HTML("""<h4 style="color:navy;">2. Then, click "Infer" button to predict a segmentation image. It will take about 15 seconds (on cpu)</h4>""")
-
-     send_btn = gr.Button("Infer")
-     send_btn.click(fn=predict, inputs=[sample_index], outputs=[output_image])
-
-
- #demo.queue()
- demo.launch(debug=True)
-
-
- ### EOF ###
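
Note: predict() above is the core of this deleted demo: it runs MONAI's sliding-window inference over a preloaded 3D volume and takes an argmax over the two output channels. A self-contained sketch of that pattern with a dummy predictor (the tensor shape and the predictor are made up for illustration; the real app uses the trained UNet):

    import torch
    from monai.inferers import sliding_window_inference

    # hypothetical single-sample batch: (batch, channel, D, H, W)
    val_data = torch.rand(1, 1, 192, 192, 112)

    # stand-in for the trained UNet: any callable mapping (B, 1, ...) -> (B, 2, ...)
    def dummy_predictor(x):
        return torch.cat([x, 1.0 - x], dim=1)

    with torch.no_grad():
        val_outputs = sliding_window_inference(val_data, roi_size=(160, 160, 160),
                                               sw_batch_size=4, predictor=dummy_predictor)
    seg = torch.argmax(val_outputs, dim=1)  # per-voxel labels: 0 = background, 1 = foreground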
 
 
spaces/CofAI/chat/g4f/Provider/Providers/Vercel.py DELETED
@@ -1,162 +0,0 @@
- import os
- import json
- import base64
- import execjs
- import queue
- import threading
-
- from curl_cffi import requests
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://play.vercel.ai'
- supports_stream = True
- needs_auth = False
-
- models = {
-     'claude-instant-v1': 'anthropic:claude-instant-v1',
-     'claude-v1': 'anthropic:claude-v1',
-     'alpaca-7b': 'replicate:replicate/alpaca-7b',
-     'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b',
-     'bloom': 'huggingface:bigscience/bloom',
-     'bloomz': 'huggingface:bigscience/bloomz',
-     'flan-t5-xxl': 'huggingface:google/flan-t5-xxl',
-     'flan-ul2': 'huggingface:google/flan-ul2',
-     'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b',
-     'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
-     'santacoder': 'huggingface:bigcode/santacoder',
-     'command-medium-nightly': 'cohere:command-medium-nightly',
-     'command-xlarge-nightly': 'cohere:command-xlarge-nightly',
-     'code-cushman-001': 'openai:code-cushman-001',
-     'code-davinci-002': 'openai:code-davinci-002',
-     'gpt-3.5-turbo': 'openai:gpt-3.5-turbo',
-     'text-ada-001': 'openai:text-ada-001',
-     'text-babbage-001': 'openai:text-babbage-001',
-     'text-curie-001': 'openai:text-curie-001',
-     'text-davinci-002': 'openai:text-davinci-002',
-     'text-davinci-003': 'openai:text-davinci-003'
- }
- model = models.keys()
-
- vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. 
For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': {
- 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': 
[50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
-
-
- # based on https://github.com/ading2210/vercel-llm-api // modified
- class Client:
-     def __init__(self):
-         self.session = requests.Session()
-         self.headers = {
-             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
-             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
-             'Accept-Encoding': 'gzip, deflate, br',
-             'Accept-Language': 'en-US,en;q=0.5',
-             'Te': 'trailers',
-             'Upgrade-Insecure-Requests': '1'
-         }
-         self.session.headers.update(self.headers)
-
-     def get_token(self):
-         b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text
-         data = json.loads(base64.b64decode(b64))
-
-         code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (
-             data['c'], data['a'])
-
-         token_string = json.dumps(separators=(',', ':'),
-                                   obj={'r': execjs.compile(code).call('token'), 't': data['t']})
-
-         return base64.b64encode(token_string.encode()).decode()
-
-     def get_default_params(self, model_id):
-         return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
-
-     def generate(self, model_id: str, prompt: str, params: dict = {}):
-         if not ':' in model_id:
-             model_id = models[model_id]
-
-         defaults = self.get_default_params(model_id)
-
-         payload = defaults | params | {
-             'prompt': prompt,
-             'model': model_id,
-         }
-
-         headers = self.headers | {
-             'Accept-Encoding': 'gzip, deflate, br',
-             'Custom-Encoding': self.get_token(),
-             'Host': 'sdk.vercel.ai',
-             'Origin': 'https://sdk.vercel.ai',
-             'Referrer': 'https://sdk.vercel.ai',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-origin',
-         }
-
-         chunks_queue = queue.Queue()
-         error = None
-         response = None
-
-         def callback(data):
-             chunks_queue.put(data.decode())
-
-         def request_thread():
-             nonlocal response, error
-             for _ in range(3):
-                 try:
-                     response = self.session.post('https://sdk.vercel.ai/api/generate',
-                                                  json=payload, headers=headers, content_callback=callback)
-                     response.raise_for_status()
-
-                 except Exception as e:
-                     if _ == 2:
-                         error = e
-
-                     else:
-                         continue
-
-         thread = threading.Thread(target=request_thread, daemon=True)
-         thread.start()
-
-         text = ''
-         index = 0
-         while True:
-             try:
-                 chunk = chunks_queue.get(block=True, timeout=0.1)
-
-             except queue.Empty:
-                 if error:
-                     raise error
-
-                 elif response:
-                     break
-
-                 else:
-                     continue
-
-             text += chunk
-             lines = text.split('\n')
-
-             if len(lines) - 1 > index:
-                 new = lines[index:-1]
-                 for word in new:
-                     yield json.loads(word)
-                 index = len(lines) - 1
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     yield 'Vercel is currently not working.'
-     return
-
-     conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
-
-     for message in messages:
-         conversation += '%s: %s\n' % (message['role'], message['content'])
-
-     conversation += 'assistant: '
-
-     completion = Client().generate(model, conversation)
-
-     for token in completion:
-         yield token
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
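
Note: a hypothetical sketch of how a g4f provider like this deleted one is driven; the message format follows the _create_completion signature above, and the model name and message content are made up:

    # hypothetical driver; _create_completion is a generator yielding text chunks
    messages = [
        {'role': 'user', 'content': 'Say hello in French.'},
    ]
    for chunk in _create_completion('gpt-3.5-turbo', messages, stream=True):
        print(chunk, end='', flush=True)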