parquet-converter committed on
Commit 781abbe · 1 Parent(s): 743d167

Update parquet files (step 33 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cleanfiles Downloader Exe.md +0 -27
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/El Inolvidable Simon Birch [DVDRIP][.Spanish.].por.GammaRay.avi.md +0 -110
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA 3 A Masterpiece or a Menace?.md +0 -12
  4. spaces/1gistliPinn/ChatGPT4/Examples/Civil 3D 2015 Keygen Xforce Rar Free Download !EXCLUSIVE!.md +0 -90
  5. spaces/1gistliPinn/ChatGPT4/Examples/Db Bot 1.3a Crack [PATCHED] Download.md +0 -6
  6. spaces/1gistliPinn/ChatGPT4/Examples/Download [BEST] Ta Ra Rum Pum Mp4 Download [BEST].md +0 -6
  7. spaces/1gistliPinn/ChatGPT4/Examples/Engineering Metrology And Measurements By Vijayaraghavan Pdf Free Download.md +0 -6
  8. spaces/1line/AutoGPT/tests/unit/test_commands.py +0 -22
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Blue Orchid Mod Apk and Experience a Gripping Story.md +0 -139
  10. spaces/1phancelerku/anime-remove-background/APK5-30 .md +0 -129
  11. spaces/1phancelerku/anime-remove-background/Bubble Shooter Enjoy the Original Bubble Pop Game on Your iOS Device.md +0 -154
  12. spaces/1phancelerku/anime-remove-background/Crafting and Building 1.18 APK A Free Game with Amazing Graphics and Multiplayer Mode.md +0 -121
  13. spaces/1phancelerku/anime-remove-background/Download MuksOS AI Launcher 2.0 Mod APK for Android - Latest Version with Voice Gesture and Text Control.md +0 -108
  14. spaces/1phancelerku/anime-remove-background/Experience GTA V Like Never Before with Online RP Launcher.md +0 -130
  15. spaces/A00001/bingothoo/src/lib/utils.ts +0 -158
  16. spaces/AIConsultant/MusicGen/audiocraft/utils/deadlock.py +0 -58
  17. spaces/AIZero2Hero4Health/5-ImageToLineDrawing-GR/app.py +0 -126
  18. spaces/Abubakari/Sales_Prediction/README.md +0 -12
  19. spaces/AchyuthGamer/OpenGPT/g4f/Provider/ChatgptX.py +0 -97
  20. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/oneself.py +0 -18
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/Factory.d.ts +0 -6
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch/Factory.js +0 -13
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/space/Factory.js +0 -14
  24. spaces/Aloento/9Nine-PITS/text/cleaners.py +0 -113
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/README.md +0 -3
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_ddim_inverse.py +0 -349
  27. spaces/Andy1621/uniformer_image_detection/configs/instaboost/README.md +0 -44
  28. spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py +0 -2
  29. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/update_windows.bat +0 -37
  30. spaces/Apex-X/ROOPOK/roop/core.py +0 -215
  31. spaces/Aristo/trafficsign/README.md +0 -13
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/dir_util.py +0 -243
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_path.py +0 -29
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/__init__.py +0 -0
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/typing_extensions.py +0 -2296
  36. spaces/Atualli/node-media-server/app.js +0 -18
  37. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py +0 -82
  38. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/mask_ops.py +0 -275
  39. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_model_e2e.py +0 -223
  40. spaces/Bart92/RVC_HF/julius/bands.py +0 -119
  41. spaces/Bart92/RVC_HF/train/losses.py +0 -59
  42. spaces/Benson/text-generation/Examples/Descargar Garena Drifters Velocidad.md +0 -73
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/lexer.py +0 -883
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/__init__.py +0 -24
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/build_meta.py +0 -511
  46. spaces/Bl1tzie/Jam/README.md +0 -10
  47. spaces/Boadiwaa/Recipes/openai/cli.py +0 -1018
  48. spaces/CVPR/LIVE/pybind11/tests/test_methods_and_attributes.cpp +0 -372
  49. spaces/CVPR/LIVE/thrust/thrust/detail/complex/math_private.h +0 -136
  50. spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/arithmetic_operators.h +0 -432
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cleanfiles Downloader Exe.md DELETED
@@ -1,27 +0,0 @@
1
-
2
- <h1>How to Use CleanFiles Downloader to Download Files from CleanFiles.net</h1>
3
- <p>CleanFiles Downloader is a software program that allows you to download files from CleanFiles.net, a file hosting service that requires you to complete a survey before accessing the download link. CleanFiles Downloader bypasses the survey and lets you download the file directly. Here is how to use CleanFiles Downloader to download files from CleanFiles.net:</p>
4
- <h2>Cleanfiles Downloader Exe</h2><br /><p><b><b>Download Zip</b> &#10038; <a href="https://byltly.com/2uKvR3">https://byltly.com/2uKvR3</a></b></p><br /><br />
5
- <ol>
6
- <li>Download CleanFiles Downloader from <a href="https://cleanfiles-downloader.software.informer.com/">https://cleanfiles-downloader.software.informer.com/</a>. This is the official website of the program and it is safe and virus-free[^1^]. You can also check other related programs such as µTorrent, Internet Download Manager, Creevity Mp3 Cover Downloader and MetaProducts Mass Downloader at the "download" section.</li>
7
- <li>Install CleanFiles Downloader on your computer. The installation process is simple and straightforward. Just follow the instructions on the screen and accept the terms and conditions. The name of the program executable file is CleanFiles Downloader v5.1.exe.</li>
8
- <li>Run CleanFiles Downloader on your computer. You will see a simple interface with a text box where you can enter the URL of the file you want to download from CleanFiles.net.</li>
9
- <li>Copy and paste the URL of the file you want to download from CleanFiles.net into the text box. For example, if you want to download a file called example.exe, the URL might look like this: https://cleanfiles.net/?id=1234567890</li>
10
- <li>Click on the "Download" button. CleanFiles Downloader will automatically bypass the survey and start downloading the file to your computer. You can see the progress of the download on the status bar.</li>
11
- <li>Wait for the download to finish. Once the download is complete, you can find the file in your default download folder or in the folder you specified during the installation. You can then open or run the file as you wish.</li>
12
- </ol>
13
- <p>CleanFiles Downloader is a useful tool for downloading files from CleanFiles.net without completing surveys. However, you should be careful about what files you download from CleanFiles.net, as some of them might contain viruses or malware. You should always scan your files with a reliable antivirus program before opening or running them. You should also respect the intellectual property rights of the file owners and only download files that you have permission to use.</p>
14
-
15
- <h2>How to Remove CleanFiles Downloader from Your Computer</h2>
16
- <p>If you no longer need CleanFiles Downloader or you want to uninstall it for any reason, you can easily remove it from your computer. Here is how to remove CleanFiles Downloader from your computer:</p>
17
- <ol>
18
- <li>Go to the Start menu and click on Control Panel.</li>
19
- <li>Click on Programs and Features or Add/Remove Programs, depending on your version of Windows.</li>
20
- <li>Find CleanFiles Downloader in the list of programs and click on it.</li>
21
- <li>Click on the Uninstall button and follow the instructions on the screen.</li>
22
- <li>Restart your computer if prompted.</li>
23
- </ol>
24
- <p>CleanFiles Downloader should be completely removed from your computer. You can also delete any files that you downloaded from CleanFiles.net using CleanFiles Downloader if you don't need them anymore. You should also scan your computer with a reliable antivirus program to make sure that there are no traces of viruses or malware left by CleanFiles Downloader or the files you downloaded from CleanFiles.net.</p>
25
- <p></p> cec2833e83<br />
26
- <br />
27
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/El Inolvidable Simon Birch [DVDRIP][.Spanish.].por.GammaRay.avi.md DELETED
@@ -1,110 +0,0 @@
1
-
2
- <h1>El Inolvidable Simon Birch: A Heartwarming Story of Faith and Friendship</h1>
3
- <p>Have you ever watched a movie that made you laugh, cry, and think at the same time? A movie that touched your heart and inspired your soul? A movie that showed you the beauty of life and the power of faith? If not, then you should definitely watch El Inolvidable Simon Birch, a 1998 American comedy-drama film based on the novel A Prayer for Owen Meany by John Irving. In this article, I will tell you what this movie is about, who are the main characters, what are the themes and messages, and why you should watch it.</p>
4
- <h2>El Inolvidable Simon Birch [DVDRIP][.Spanish.].por.GammaRay.avi</h2><br /><p><b><b>Download Zip</b> &#127383; <a href="https://byltly.com/2uKvIB">https://byltly.com/2uKvIB</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <h3>What is the movie about?</h3>
7
- <p>El Inolvidable Simon Birch is a movie about a boy named Simon Birch who was born with a rare condition that made him very small and weak. Despite his physical limitations, he has a strong spirit and a firm belief that God has a special plan for him. He lives in a small town in New Hampshire in the 1960s with his parents who don't care much about him. His only friend is Joe Wenteworth, a boy who was born out of wedlock and doesn't know who his father is. Together, they go through many adventures and challenges as they try to find their purpose in life.</p>
8
- <h3>Who are the main characters?</h3>
9
- <p>The main characters of the movie are:</p>
10
- <ul>
11
- <li>Simon Birch (played by Ian Michael Smith): The protagonist of the movie. He is a 12-year-old boy who suffers from Morquio syndrome, a rare genetic disorder that affects his growth and development. He is very smart, witty, and courageous. He believes that he is God's instrument and that he has a destiny to fulfill.</li>
12
- <li>Joe Wenteworth (played by Joseph Mazzello): The narrator and deuteragonist of the movie. He is Simon's best friend and confidant. He is an illegitimate child who lives with his single mother Rebecca. He is loyal, kind, and protective of Simon. He is also curious about his father's identity.</li>
13
- <li>Rebecca Wenteworth (played by Ashley Judd): Joe's mother and Simon's surrogate mother. She is a beautiful, loving, and independent woman who works as a librarian. She loves her son unconditionally and supports his friendship with Simon. She also has a secret affair with Ben Goodrich, the town's baseball coach.</li>
14
- <li>Ben Goodrich (played by Oliver Platt): Rebecca's lover and Joe's potential father. He is a friendly, funny, and caring man who works as a baseball coach at the local school. He has a good relationship with Joe and Simon and treats them like his own sons.</li>
15
- <li>Reverend Russell (played by David Strathairn): The town's minister and antagonist of the movie. He is a strict, stern, and hypocritical man who dislikes Simon for his unconventional views on religion. He tries to prevent Simon from participating in the church activities and often clashes with him.</li>
16
- </ul>
17
- <h3>Why is it called El Inolvidable Simon Birch?</h3>
18
- <p>The movie is called El Inolvidable Simon Birch because it is the Spanish title of the film. The original title was Simon Birch, but it was changed to El Inolvidable Simon Birch for the Spanish-speaking markets. The word "inolvidable" means "unforgettable" in Spanish, which reflects how Simon left a lasting impression on everyone who knew him.</p>
19
- <h2>Plot Summary</h2>
20
- <h3>Simon's birth and childhood</h3>
21
- <p>The movie begins with a flashback of Simon's birth in 1952. He was born prematurely and weighed less than two pounds. The doctors told his parents that he would not survive long, but he miraculously did. However, they also said that he would never grow beyond three feet tall and that he would have many health problems throughout his life.</p>
22
- <p>Simon grew up feeling different from everyone else. He was often bullied by other kids for his size and appearance. He also had trouble breathing and had to use an oxygen tank sometimes. His parents were ashamed of him and neglected him. They never celebrated his birthday or gave him any presents.</p>
23
- <p>The only person who cared for him was Rebecca Wenteworth, Joe's mother. She treated him like her own son and gave him love and attention. She also encouraged him to join the church choir and the Christmas pageant, where he met Joe.</p>
24
- <h3>Simon's friendship with Joe</h3>
25
- <p>Simon and Joe became best friends since they were both outsiders in their own way. They shared everything with each other and supported each other through thick and thin. They also had fun together by playing baseball, watching movies, reading comics, and exploring the town.</p>
26
- <p>One day, they decided to sneak into Rebecca's bedroom to look for clues about Joe's father. They found a locket with a picture of Rebecca and a man they didn't recognize. They also found a baseball signed by Mickey Mantle, which they assumed belonged to Joe's father.</p>
27
- <p>Descargar El Inolvidable Simon Birch DVDRIP en español por GammaRay<br />
28
- Ver online El Inolvidable Simon Birch película completa español DVDRIP GammaRay<br />
29
- El Inolvidable Simon Birch DVDRIP español torrent por GammaRay<br />
30
- El Inolvidable Simon Birch DVDRIP español mega por GammaRay<br />
31
- El Inolvidable Simon Birch DVDRIP español gratis por GammaRay<br />
32
- El Inolvidable Simon Birch DVDRIP español calidad por GammaRay<br />
33
- El Inolvidable Simon Birch DVDRIP español subtitulos por GammaRay<br />
34
- El Inolvidable Simon Birch DVDRIP español 1 link por GammaRay<br />
35
- El Inolvidable Simon Birch DVDRIP español full HD por GammaRay<br />
36
- El Inolvidable Simon Birch DVDRIP español sin cortes por GammaRay<br />
37
- El Inolvidable Simon Birch DVDRIP español descargar directa por GammaRay<br />
38
- El Inolvidable Simon Birch DVDRIP español ver pelicula online por GammaRay<br />
39
- El Inolvidable Simon Birch DVDRIP español descargar pelicula gratis por GammaRay<br />
40
- El Inolvidable Simon Birch DVDRIP español descargar pelicula torrent por GammaRay<br />
41
- El Inolvidable Simon Birch DVDRIP español descargar pelicula mega por GammaRay<br />
42
- El Inolvidable Simon Birch DVDRIP español ver pelicula completa por GammaRay<br />
43
- El Inolvidable Simon Birch DVDRIP español ver pelicula HD por GammaRay<br />
44
- El Inolvidable Simon Birch DVDRIP español ver pelicula sin cortes por GammaRay<br />
45
- El Inolvidable Simon Birch DVDRIP español ver pelicula subtitulada por GammaRay<br />
46
- El Inolvidable Simon Birch DVDRIP español ver pelicula 1 link por GammaRay<br />
47
- El Inolvidable Simon Birch película completa en español DVDRIP por GammaRay<br />
48
- El Inolvidable Simon Birch película en español DVDRIP descargar por GammaRay<br />
49
- El Inolvidable Simon Birch película en español DVDRIP online por GammaRay<br />
50
- El Inolvidable Simon Birch película en español DVDRIP torrent por GammaRay<br />
51
- El Inolvidable Simon Birch película en español DVDRIP mega por GammaRay<br />
52
- El Inolvidable Simon Birch película en español DVDRIP gratis por GammaRay<br />
53
- El Inolvidable Simon Birch película en español DVDRIP calidad por GammaRay<br />
54
- El Inolvidable Simon Birch película en español DVDRIP subtitulos por GammaRay<br />
55
- El Inolvidable Simon Birch película en español DVDRIP 1 link por GammaRay<br />
56
- El Inolvidable Simon Birch película en español DVDRIP full HD por GammaRay<br />
57
- El Inolvidable Simon Birch película en español DVDRIP sin cortes por GammaRay<br />
58
- El Inolvidable Simon Birch película en español DVDRIP descargar directa por GammaRay<br />
59
- El Inolvidable Simon Birch película en español DVDRIP ver online gratis por GammaRay<br />
60
- El Inolvidable Simon Birch película en español DVDRIP ver online HD por GammaRay<br />
61
- El Inolvidable Simon Birch película en español DVDRIP ver online sin cortes por GammaRay<br />
62
- El Inolvidable Simon Birch película en español DVDRIP ver online subtitulada por GammaRay<br />
63
- El Inolvidable Simon Birch película en español DVDRIP ver online 1 link por GammaRay<br />
64
- Descarga directa de la película El Inolvidable Simon Birch en español DVDRIP por GammaRay<br />
65
- Ver la película completa de El Inolvidable Simon Birch en español DVDRIP online gratis por GammaRay<br />
66
- Torrent de la película El Inolvidable Simon Birch en español DVDRIP descargar gratis por GammaRay<br />
67
- Mega de la película El Inolvidable Simon Birch en español DVDRIP descargar gratis por GammaRay<br />
68
- Película completa de El Inolvidable Simon Birch en español DVDRIP online HD por GammaRay<br />
69
- Película de El Inolvidable Simon Birch en español DVDRIP online sin cortes por GammaRay<br />
70
- Película de El Inolvidable Simon Birch en español DVDRIP online subtitulada por GammaRay<br />
71
- Película de El Inolvidable Simon Birch en español DVDRIP online 1 link por GammaRay</p>
72
- <p>They took the baseball with them to play catch at the lake. However, when Simon threw the ball to Joe, he missed it and hit Rebecca instead, who was on a boat with Ben Goodrich. The ball caused Rebecca to fall into the water and drown.</p>
73
- <p>Simon felt guilty for killing Rebecca and wondered if it was part of God's plan for him. Joe was devastated by losing his mother and blamed Simon for her death. He also learned that Ben Goodrich was his father after finding out that he had the same locket as Rebecca.</p>
74
- <h3>Simon's quest to find his destiny</h3>
75
- <p>After Rebecca's funeral, Joe moved in with Ben Goodrich while Simon stayed with his parents. They drifted apart for a while until Ben invited Simon to join them on a camping trip. There, they reconciled their friendship and decided to run away together to find Joe's real father.</p>
76
- <p>They boarded a bus that took them to another town where they met Miss Leavey (played by Jan Hooks), an old friend of Rebecca who ran an orphanage. She recognized Joe from Rebecca's pictures and offered to help them find Joe's father.</p>
77
- <p>She took them to a diner where she introduced them to Mr. Baines (played by Jim Carrey), an adult version of Joe who narrated the story from the beginning. He told them that he never found out who his father was but that he didn't care anymore because he had Ben as his father figure.</p>
78
- <p>He also told them that he became a successful writer because of Simon's influence on him. He said that Simon taught him how to see the world differently and how to appreciate life more.</p>
79
- <h3>Simon's heroic act and death</h3>
80
- <p>The next day, they went back to their hometown on another bus that was carrying some children from Miss Leavey's orphanage. On their way, they encountered an accident where a truck hit their bus and caused it to plunge into a frozen lake.</p>
81
- <p>Simon managed to escape from the bus through a window but saw that many children were still trapped inside. He decided to go back into the water to rescue them one by one using his oxygen tank as an air supply.</p>
82
- <p>He saved all the children except one girl named Marjorie (played by Sam Morton), who was too scared to leave her seatbelt. Simon tried to calm her down but ran out of air before he could free her.</p>
83
- <p>Joe saw what happened from outside and dived into the water to help them. He reached them just in time before they drowned but couldn't pull them out because they were too heavy.</p>
84
- <p>Luckily, Ben arrived at the scene with some firefighters who cut open the bus roof using chainsaws. They pulled out Joe, Simon, Marjorie out of the water along with other survivors.</p>
85
- <p>However, it was too late for Simon who died from hypothermia in Joe's arms. Before he died, he told Joe that he finally found his destiny: saving those children from drowning.</p>
86
- <h2>Themes and Messages</h2>
87
- <h3>The power of faith and belief</h3>
88
- <p>One of the main themes of the movie is the power of faith and belief. Simon is a character who has a strong faith in God and believes that he has a special mission in life. He doesn't let his physical condition or the negative opinions of others stop him from pursuing his dreams. He also inspires others to have faith and hope in themselves and in a higher purpose.</p>
89
- <p>For example, he convinces Joe to believe that his father is someone important and that he can find him someday. He also helps Marjorie overcome her fear of water by telling her that God loves her and that he will protect her. He also shows Reverend Russell that he is wrong about judging him and that he is a true believer.</p>
90
- <h3>The value of friendship and loyalty</h3>
91
- <p>Another theme of the movie is the value of friendship and loyalty. Simon and Joe are best friends who share a bond that transcends their differences and circumstances. They are always there for each other and support each other through good times and bad times. They also have fun together and enjoy each other's company.</p>
92
- <p>For example, they play baseball together even though Simon is not good at it. They also watch movies together and laugh at the funny scenes. They also run away together to find Joe's father and have an adventure. They also risk their lives for each other when they face danger.</p>
93
- <h3>The meaning of life and death</h3>
94
- <p>A third theme of the movie is the meaning of life and death. Simon is a character who has a different perspective on life and death than most people. He doesn't fear death because he believes that it is part of God's plan for him. He also thinks that life is a gift that should be cherished and lived fully.</p>
95
- <p>For example, he celebrates his birthday every day because he doesn't know when he will die. He also makes a list of things he wants to do before he dies, such as kissing a girl, seeing the ocean, and being a hero. He also sacrifices his life to save others because he thinks that it is his destiny.</p>
96
- <h2>Conclusion</h2>
97
- <h3>Why you should watch this movie</h3>
98
- <p>El Inolvidable Simon Birch is a movie that will make you laugh, cry, and think. It is a movie that will touch your heart and inspire your soul. It is a movie that will show you the beauty of life and the power of faith.</p>
99
- <p>You should watch this movie because it will teach you some valuable lessons about friendship, loyalty, courage, belief, purpose, and destiny. You should watch this movie because it will make you appreciate what you have and what you can do. You should watch this movie because it will make you remember Simon Birch, an unforgettable boy who changed the lives of many people.</p>
100
- <h3>FAQs</h3>
101
- <ol>
102
- <li>Q: Is El Inolvidable Simon Birch based on a true story?<br>A: No, El Inolvidable Simon Birch is not based on a true story. It is based on a novel called A Prayer for Owen Meany by John Irving. However, some aspects of the movie are inspired by real events or people, such as the bus accident or the actor who played Simon.</li>
103
- <li>Q: Who played Simon Birch?<br>A: Simon Birch was played by Ian Michael Smith, a boy who was born with Morquio syndrome, the same condition as Simon's character. He was discovered by the director Mark Steven Johnson after seeing his picture in an article about children with rare diseases. He was 11 years old when he made his debut in the movie.</li>
104
- <li>Q: What happened to Ian Michael Smith after the movie?<br>A: Ian Michael Smith continued his acting career after the movie. He appeared in several TV shows and movies, such as The Secret Agent Club (1996), The Final Season (2007), and The Lurking Man (2017). He also graduated from MIT with a degree in computer science and became a software engineer.</li>
105
- <li>Q: Why did John Irving dislike the movie?<br>A: John Irving, the author of the novel A Prayer for Owen Meany, disliked the movie adaptation because he felt that it changed too many things from his original story. He didn't like how the characters' names were changed, how the setting was moved from New England to New Hampshire, how some scenes were added or deleted, and how some themes were altered or omitted. He also didn't like how the movie used his title without his permission.</li>
106
- <li>Q: Where can I watch El Inolvidable Simon Birch?<br>A: You can watch El Inolvidable Simon Birch on various streaming platforms, such as Amazon Prime Video, YouTube, Google Play Movies & TV, iTunes, Vudu, or Hulu. You can also buy or rent it on DVD or Blu-ray.</li>
107
- </ol>
108
- </p> 0a6ba089eb<br />
109
- <br />
110
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA 3 A Masterpiece or a Menace?.md DELETED
@@ -1,12 +0,0 @@
1
- <br />
2
- <h1>Is GTA 3 Worth It?</h1>
3
- <p>Grand Theft Auto III, or GTA 3, is a 2001 action-adventure game developed by DMA Design and published by Rockstar Games. It is the third main entry in the Grand Theft Auto series, and the fifth instalment overall. It is set in a fictional city called Liberty City, loosely based on New York City, and follows the story of Claude, a silent protagonist who seeks revenge after being betrayed by his girlfriend during a robbery.</p>
4
- <p>GTA 3 is widely considered as one of the most influential and groundbreaking games of its time, as it was the first game in the series to feature a fully 3D open world that players can explore freely. The game offers a variety of missions, activities, vehicles, weapons, and characters to interact with, as well as a darkly comic storyline and a stellar voice acting. The game also features a stunning soundtrack that includes licensed music from various genres and radio stations.</p>
5
- <h2>is gta 3 worth it</h2><br /><p><b><b>DOWNLOAD</b> &#9733;&#9733;&#9733; <a href="https://byltly.com/2uKA8T">https://byltly.com/2uKA8T</a></b></p><br /><br />
6
- <p>GTA 3 has received critical acclaim from critics and gamers alike, and has won several awards, including Game of the Year from various publications. It has also sold over 14.5 million copies worldwide, making it one of the best-selling games of all time. The game has been ported to many different platforms, including Windows, Xbox, Mac OS X, Android, iOS, and Fire OS. The game also received an enhanced version for its tenth anniversary in 2011, and another one for its twentieth anniversary in 2021.</p>
7
- <p>So, is GTA 3 worth it? The answer depends on what you are looking for in a game. If you are looking for a classic game that defined the open world genre and offers a lot of fun and freedom, then GTA 3 is definitely worth it. However, if you are looking for a game that has modern graphics, gameplay mechanics, and features, then you might find GTA 3 outdated and clunky compared to newer games in the series or genre. Ultimately, GTA 3 is a game that deserves respect and appreciation for its legacy and impact on gaming history.</p><p>Here are some more paragraphs for the article:</p>
8
- <p>GTA 3 is not only a game, but also a cultural phenomenon that has influenced many other games, movies, music, and art. The game has been referenced and parodied in various media, such as The Simpsons, Family Guy, South Park, Robot Chicken, and The Office. The game has also inspired many real-life events and controversies, such as lawsuits, crimes, protests, and bans. For example, in 2003, a teenager named Devin Moore killed three people and stole a police car in Alabama, and claimed that he was influenced by GTA 3. He was later sentenced to death.</p>
9
- <p>GTA 3 is also a game that has sparked many debates and discussions about the role of violence, sex, morality, and ethics in video games. The game has been criticized by many groups and individuals for its depiction of violence, especially towards women, minorities, and law enforcement. The game has also been accused of promoting crime, drug use, racism, sexism, and misogyny. Some critics have argued that GTA 3 is a satire and a critique of American society and culture, while others have argued that it is a glorification and a celebration of it.</p>
10
- <p>GTA 3 is a game that has left a lasting impression on the gaming industry and the gaming community. It is a game that has challenged the boundaries of what video games can do and be. It is a game that has given players a sense of freedom and empowerment that few games can match. It is a game that has made history and changed the world.</p> ddb901b051<br />
11
- <br />
12
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Civil 3D 2015 Keygen Xforce Rar Free Download !EXCLUSIVE!.md DELETED
@@ -1,90 +0,0 @@
1
-
2
- <h1>Civil 3D 2015 Keygen Xforce Rar Free Download - A Guide to Activate Autodesk Civil 3D 2015 and Other Products</h1>
3
- <p>Autodesk Civil 3D 2015 is a powerful software that allows civil engineers and designers to create, analyze, and document civil engineering projects. It offers features such as dynamic modeling, geospatial analysis, stormwater management, site grading, and more. However, to use Autodesk Civil 3D 2015 and other Autodesk products of the 2015 version, you need to have a valid product key that can activate the software and unlock all its features and options.</p>
4
- <h2>Civil 3D 2015 keygen xforce rar free download</h2><br /><p><b><b>Download</b> ->->->-> <a href="https://imgfil.com/2uy0yW">https://imgfil.com/2uy0yW</a></b></p><br /><br />
5
- <p>One way to get a product key for Autodesk Civil 3D 2015 and other products is to purchase it from the official website or an authorized dealer. However, this can be expensive and not affordable for everyone. Another way to get a product key for Autodesk Civil 3D 2015 and other products is to use the Civil 3D 2015 keygen xforce rar free download. This is a file that contains a program called X-Force 2015 that can generate product keys for all Autodesk products of the 2015 version, including Civil 3D 2015. In this article, we will explain what is the Civil 3D 2015 keygen xforce rar free download, how to use it, and what are the benefits and risks of using it.</p>
6
- <h2>What is the Civil 3D 2015 Keygen Xforce Rar Free Download?</h2>
7
- <p>The Civil 3D 2015 keygen xforce rar free download is a file that contains a program called X-Force 2015. X-Force 2015 is a jailbreak software that can generate product keys for all Autodesk products of the 2015 version, such as Civil 3D 2015, AutoCAD 2015, Revit 2015, etc. The product key is required when you install an Autodesk product as a point product or from a product suite. It allows you to activate the product and use all its features and options without any limitations or restrictions.</p>
8
- <p>The Civil 3D 2015 keygen xforce rar free download is available on various websites that provide cracks, patches, mods, and tools for different software and games. You can download it for free from these websites and use it to activate your Autodesk products of the 2015 version.</p>
9
- <h2>How to Use the Civil 3D 2015 Keygen Xforce Rar Free Download?</h2>
10
- <p>To use the Civil 3D 2015 keygen xforce rar free download, you need to follow these steps:</p>
11
- <ol>
12
- <li>Download the Civil 3D 2015 keygen xforce rar free download from a reliable source.</li>
13
- <li>Extract the rar file using a program like WinRAR or 7-Zip.</li>
14
- <li>Run the X-Force 2015 program as administrator.</li>
15
- <li>Select your Autodesk product from the list and click on Generate.</li>
16
- <li>Copy the generated product key and paste it in the installation window of your Autodesk product.</li>
17
- <li>Click on Next and follow the instructions to complete the installation.</li>
18
- <li>Restart your Autodesk product and enjoy its full features and options.</li>
19
- </ol>
20
- <h2>What are the Benefits and Risks of Using the Civil 3D 2015 Keygen Xforce Rar Free Download?</h2>
21
- <p>The Civil 3D 2015 keygen xforce rar free download has some benefits and risks for users who want to activate their Autodesk products of the 2015 version. Some of these benefits and risks are:</p>
22
- <h3>Benefits</h3>
23
- <ul>
24
- <li>You can activate any Autodesk product of the 2015 version, such as Civil 3D 2015, without paying any fees or charges.</li>
25
- <li>You can use all the features and options of your Autodesk product without any limitations or restrictions.</li>
26
- <li>You can use various trainers, cheat codes, mods, and tools that can modify or enhance your Autodesk product's graphics, gameplay, sound, interface, etc.</li>
27
- <li>You can use the map editor and create your own custom maps for your Autodesk product.</li>
28
- </ul>
29
- <h3>Risks</h3>
30
- <ul>
31
- <li>You may violate the terms and conditions of Autodesk and face legal consequences or penalties.</li>
32
- <li>You may encounter compatibility, stability, performance, or security issues with your Autodesk product or your PC.</li>
33
- <li>You may not be able to access some features or options in your Autodesk product that require online activation or verification.</li>
34
- <li>You may expose your PC to viruses, malware, or fake files that can harm your PC or your data.</li>
35
- </ul>
36
- <h2>Conclusion</h2>
37
- <p>The Civil 3D 2015 keygen xforce rar free download is a file that can help you activate your Autodesk products of the 2015 version, such as Civil 3D 2015. It can generate product keys for all Autodesk products of the 2015 version and allow you to use them without any limitations or restrictions. However, it also has some risks and challenges that you should be aware of and prepared for. The Civil 3D 2015 keygen xforce rar free download is not a perfect solution for activating your Autodesk products of the</p>
38
- <p></p>
39
- </p>
40
- <p>If you are interested in using the Civil 3D 2015 keygen xforce rar free download, you can download it from the links below. However, we recommend that you use it at your own risk and discretion. We are not responsible for any damages or losses that may occur from using the Civil 3D 2015 keygen xforce rar free download.</p>
41
- <h2>Download Links for Civil 3D 2015 Keygen Xforce Rar Free Download</h2>
42
- <p>Here are some of the websites that offer the Civil 3D 2015 keygen xforce rar free download:</p>
43
- <ul>
44
- <li><a href="https://iggtech.com/download-x-force-2015-1/">https://iggtech.com/download-x-force-2015-1/</a></li>
45
- <li><a href="https://azdly.com/x-force-2015-download/">https://azdly.com/x-force-2015-download/</a></li>
46
- <li><a href="https://civilmdc.com/2020/03/10/x-force-keygenerator-autodesk-products-2015-all/">https://civilmdc.com/2020/03/10/x-force-keygenerator-autodesk-products-2015-all/</a></li>
47
- <li><a href="https://www.scribd.com/document/395646989/X-Force-Keygen-for-All-Autodesk-Products-2015-Civil-Engineering-Community">https://www.scribd.com/document/395646989/X-Force-Keygen-for-All-Autodesk-Products-2015-Civil-Engineering-Community</a></li>
48
- <li><a href="https://trello.com/c/ZA2bHA7V/169-civil-3d-2015-hot-keygen-xforce-rar-free-download">https://trello.com/c/ZA2bHA7V/169-civil-3d-2015-hot-keygen-xforce-rar-free-download</a></li>
49
- </ul>
50
- <h2>Final Words</h2>
51
- <p>We hope that this article has helped you understand what is the Civil 3D 2015 keygen xforce rar free download, how to use it, and what are the benefits and risks of using it. If you have any questions or comments, please feel free to leave them below. Thank you for reading and have a great day!</p>
52
- </p>
53
- <p>If you are interested in using the Civil 3D 2015 keygen xforce rar free download, you can download it from the links below. However, we recommend that you use it at your own risk and discretion. We are not responsible for any damages or losses that may occur from using the Civil 3D 2015 keygen xforce rar free download.</p>
54
- <h2>Download Links for Civil 3D 2015 Keygen Xforce Rar Free Download</h2>
55
- <p>Here are some of the websites that offer the Civil 3D 2015 keygen xforce rar free download:</p>
56
- <ul>
57
- <li><a href="https://iggtech.com/download-x-force-2015-1/">https://iggtech.com/download-x-force-2015-1/</a></li>
58
- <li><a href="https://azdly.com/x-force-2015-download/">https://azdly.com/x-force-2015-download/</a></li>
59
- <li><a href="https://civilmdc.com/2020/03/10/x-force-keygenerator-autodesk-products-2015-all/">https://civilmdc.com/2020/03/10/x-force-keygenerator-autodesk-products-2015-all/</a></li>
60
- <li><a href="https://www.scribd.com/document/395646989/X-Force-Keygen-for-All-Autodesk-Products-2015-Civil-Engineering-Community">https://www.scribd.com/document/395646989/X-Force-Keygen-for-All-Autodesk-Products-2015-Civil-Engineering-Community</a></li>
61
- <li><a href="https://trello.com/c/ZA2bHA7V/169-civil-3d-2015-hot-keygen-xforce-rar-free-download">https://trello.com/c/ZA2bHA7V/169-civil-3d-2015-hot-keygen-xforce-rar-free-download</a></li>
62
- </ul>
63
- <h2>Final Words</h2>
64
- <p>We hope that this article has helped you understand what is the Civil 3D 2015 keygen xforce rar free download, how to use it, and what are the benefits and risks of using it. If you have any questions or comments, please feel free to leave them below. Thank you for reading and have a great day!</p>
65
- <h2>How to Use Autodesk Civil 3D 2015 After Activation</h2>
66
- <p>After you have activated your Autodesk Civil 3D 2015 using the Civil 3D 2015 keygen xforce rar free download, you can start using the software and enjoy its features and options. Here are some of the things you can do with Autodesk Civil 3D 2015:</p>
67
- <ul>
68
- <li>You can create, edit, and manage civil engineering projects using dynamic modeling, geospatial analysis, stormwater management, site grading, and more.</li>
69
- <li>You can collaborate with other civil engineers and designers using data sharing, design review, and project management tools.</li>
70
- <li>You can generate documentation, reports, and presentations for your civil engineering projects using annotation, layout, and visualization tools.</li>
71
- <li>You can customize your Autodesk Civil 3D 2015 using various add-ons, plug-ins, extensions, and libraries that can enhance your workflow and productivity.</li>
72
- </ul>
73
- <h2>Tips and Tricks for Using Autodesk Civil 3D 2015</h2>
74
- <p>To make the most out of your Autodesk Civil 3D 2015, here are some tips and tricks that can help you improve your skills and efficiency:</p>
75
- <ul>
76
- <li>Use keyboard shortcuts to access commands and tools faster and easier.</li>
77
- <li>Use templates and styles to create consistent and standardized civil engineering projects.</li>
78
- <li>Use data shortcuts to link data between different drawings and projects.</li>
79
- <li>Use labels and tables to display dynamic information about your civil engineering objects.</li>
80
- <li>Use data extraction to export data from your civil engineering projects to other formats and applications.</li>
81
- </ul>
82
- <h2>Conclusion</h2>
83
- <p>In this article, we have discussed what is the Civil 3D 2015 keygen xforce rar free download, how to use it, what are the benefits and risks of using it, how to use Autodesk Civil 3D 2015 after activation, and some tips and tricks for using Autodesk Civil 3D 2015. We hope that this article has been informative and helpful for you. If you have any feedback or suggestions, please let us know in the comments section below. Thank you for reading and have a wonderful day!</p>
84
- <h2>Conclusion</h2>
85
- <p>In this article, we have discussed what is the Civil 3D 2015 keygen xforce rar free download, how to use it, what are the benefits and risks of using it, how to use Autodesk Civil 3D 2015 after activation, and some tips and tricks for using Autodesk Civil 3D 2015. We hope that this article has been informative and helpful for you. If you have any feedback or suggestions, please let us know in the comments section below.</p>
86
- <p>If you are interested in using the Civil 3D 2015 keygen xforce rar free download, you can download it from the links we have provided in this article. However, we recommend that you use it at your own risk and discretion. We are not responsible for any damages or losses that may occur from using the Civil 3D 2015 keygen xforce rar free download.</p>
87
- <p>If you want to learn more about Autodesk Civil 3D 2015 and other Autodesk products of the 2015 version, you can visit the official website or check out some of the online tutorials and courses that are available on various platforms. You can also join some of the online communities and forums that are dedicated to Autodesk Civil 3D 2015 and other Autodesk products of the 2015 version. You can share your experiences, ask questions, get answers, and learn from other civil engineers and designers who use Autodesk Civil 3D 2015 and other Autodesk products of the 2015 version.</p>
88
- <p>Thank you for reading and have a wonderful day!</p> 3cee63e6c2<br />
89
- <br />
90
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Db Bot 1.3a Crack [PATCHED] Download.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Db Bot 1.3a Crack Download</h2><br /><p><b><b>DOWNLOAD</b> &#10022;&#10022;&#10022; <a href="https://imgfil.com/2uy0s5">https://imgfil.com/2uy0s5</a></b></p><br /><br />
2
- <br />
3
- 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download [BEST] Ta Ra Rum Pum Mp4 Download [BEST].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Download Ta Ra Rum Pum Mp4 Download</h2><br /><p><b><b>Download</b> &#9675;&#9675;&#9675; <a href="https://imgfil.com/2uxWZr">https://imgfil.com/2uxWZr</a></b></p><br /><br />
2
- <br />
3
- 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Engineering Metrology And Measurements By Vijayaraghavan Pdf Free Download.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Engineering Metrology And Measurements By Vijayaraghavan Pdf Free Download</h2><br /><p><b><b>DOWNLOAD</b> &#10004;&#10004;&#10004; <a href="https://imgfil.com/2uy05T">https://imgfil.com/2uy05T</a></b></p><br /><br />
2
- <br />
3
- April 25th, 2018 - Engineering Metrology and Measurements pdf Download as ... and measurement vijayaraghavan pdf FREE PDF DOWNLOAD NOW Source 2 ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1line/AutoGPT/tests/unit/test_commands.py DELETED
@@ -1,22 +0,0 @@
1
- """Unit tests for the commands module"""
2
- from unittest.mock import MagicMock, patch
3
-
4
- import pytest
5
-
6
- import autogpt.agent.agent_manager as agent_manager
7
- from autogpt.app import execute_command, list_agents, start_agent
8
-
9
-
10
- @pytest.mark.integration_test
11
- def test_make_agent() -> None:
12
- """Test the make_agent command"""
13
- with patch("openai.ChatCompletion.create") as mock:
14
- obj = MagicMock()
15
- obj.response.choices[0].messages[0].content = "Test message"
16
- mock.return_value = obj
17
- start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
18
- agents = list_agents()
19
- assert "List of agents:\n0: chat" == agents
20
- start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
21
- agents = list_agents()
22
- assert "List of agents:\n0: chat\n1: write" == agents
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Blue Orchid Mod Apk and Experience a Gripping Story.md DELETED
@@ -1,139 +0,0 @@
1
-
2
- <h1>Blue Orchid Mod APK: A Guide for Interactive Story Lovers</h1>
3
- <p>If you are a fan of interactive stories, you might have heard of Blue Orchid, a game that lets you create your own character and live your own adventure. But did you know that there is a modded version of the game that gives you unlimited gems, coins, and choices? In this article, we will tell you everything you need to know about Blue Orchid Mod APK, including what it is, why you should download it, how to play it, and what are its pros and cons. Let's get started!</p>
4
- <h2>What is Blue Orchid?</h2>
5
- <h3>A brief introduction to the game</h3>
6
- <p>Blue Orchid is an interactive story game developed by Elia Games. It is available for Android devices and can be downloaded from Google Play Store. The game is set in a fictional city called Blue Orchid, where you can choose from different genres of stories, such as romance, drama, mystery, fantasy, and more. You can customize your character's appearance, name, personality, and preferences. You can also interact with other characters, make decisions that affect the outcome of the story, and enjoy various mini-games and activities.</p>
7
- <h2>blue orchid mod apk</h2><br /><p><b><b>Download Zip</b> &#187;&#187;&#187; <a href="https://urlin.us/2uSSob">https://urlin.us/2uSSob</a></b></p><br /><br />
8
- <h3>The main features of the game</h3>
9
- <p>Some of the features that make Blue Orchid stand out from other interactive story games are:</p>
10
- <ul>
11
- <li>It has high-quality graphics and sound effects that create an immersive atmosphere.</li>
12
- <li>It has a diverse and inclusive cast of characters that represent different backgrounds, cultures, orientations, and identities.</li>
13
- <li>It has multiple storylines and endings that depend on your choices and actions.</li>
14
- <li>It has a user-friendly interface and easy-to-use controls that make the game accessible and enjoyable.</li>
15
- <li>It has regular updates and new content that keep the game fresh and exciting.</li>
16
- </ul>
17
- <h2>Why download Blue Orchid Mod APK?</h2>
18
- <h3>The benefits of using the modded version</h3>
19
- <p>While Blue Orchid is a free-to-play game, it also has some in-app purchases that require real money. For example, you need gems to unlock premium choices and outfits, coins to buy gifts and items, and tickets to access new chapters. These resources are limited and can run out quickly if you play frequently. This can limit your options and enjoyment of the game.</p>
20
- <p>That's why some players prefer to use Blue Orchid Mod APK, which is a modified version of the game that gives you unlimited gems, coins, and tickets. With this modded version, you can enjoy the following benefits:</p>
21
- <ul>
22
- <li>You can make any choice you want without worrying about the cost or consequences.</li>
23
- <li>You can dress up your character in any outfit you like without spending any money.</li>
24
- <li>You can play any chapter you want without waiting for tickets to refill.</li>
25
- <li>You can explore all the stories and genres without missing any content.</li>
26
- <li>You can have more fun and freedom in the game without any restrictions or limitations.</li>
27
- </ul>
28
- <h3>How to download and install Blue Orchid Mod APK</h3>
29
- <p>If you want to try Blue Orchid Mod APK, you need to follow these steps:</p>
30
- <ol>
31
- <li>Uninstall the original version of Blue Orchid from your device if you have it installed.</li>
32
- <li>Download Blue Orchid Mod APK from a reliable source such as [PlayMods](^1^).</li>
33
- <li>Enable unknown sources on your device settings to allow the installation of third-party apps.</li>
34
- <li>Locate the downloaded file on your device storage and tap on it to start the installation process.</li>
35
- <li>Follow the instructions on the screen to complete the installation.</ <li>Launch the game and enjoy the modded features.</li>
36
- </ol>
37
- <p>Note: You may need to grant some permissions to the app to run properly. Also, make sure to download the modded version from a trusted source to avoid any malware or viruses.</p>
38
- <h2>How to play Blue Orchid: Interactive Story</h2>
39
- <h3>The basic gameplay mechanics</h3>
40
- <p>Playing Blue Orchid is simple and intuitive. Here are the basic steps you need to follow:</p>
41
- <ol>
42
- <li>Choose a story genre that interests you from the main menu. You can browse through different categories such as romance, drama, mystery, fantasy, and more.</li>
43
- <li>Create your character by selecting their gender, appearance, name, and personality. You can also change their outfit and accessories later in the game.</li>
44
- <li>Start the story and read the dialogue and narration. You can tap on the screen to proceed or swipe left or right to go back or forward.</li>
45
- <li>Make choices that affect the plot and your relationships with other characters. Some choices are free, while others require gems or coins. You can also use tickets to unlock new chapters.</li>
46
- <li>Enjoy the mini-games and activities that are part of the story. For example, you can play match-3 puzzles, trivia quizzes, dress-up games, and more.</li>
47
- <li>Earn rewards such as gems, coins, tickets, and items by completing achievements, watching ads, or spinning the wheel.</li>
48
- </ol>
49
- <h3>The tips and tricks for a better experience</h3>
50
- <p>If you want to have more fun and success in Blue Orchid, here are some tips and tricks you can use:</p>
51
- <p>blue orchid interactive story mod apk<br />
52
- blue orchid mod apk unlimited diamonds<br />
53
- blue orchid mod apk latest version<br />
54
- blue orchid mod apk download for android<br />
55
- blue orchid mod apk free shopping<br />
56
- blue orchid mod apk 1.0.1<br />
57
- blue orchid mod apk choices<br />
58
- blue orchid mod apk offline<br />
59
- blue orchid mod apk no ads<br />
60
- blue orchid mod apk unlocked everything<br />
61
- blue orchid mod apk android 1<br />
62
- blue orchid mod apk revdl<br />
63
- blue orchid mod apk happymod<br />
64
- blue orchid mod apk rexdl<br />
65
- blue orchid mod apk apkpure<br />
66
- blue orchid mod apk 2023<br />
67
- blue orchid mod apk update<br />
68
- blue orchid mod apk premium<br />
69
- blue orchid mod apk vip<br />
70
- blue orchid mod apk pro<br />
71
- blue orchid mod apk full version<br />
72
- blue orchid mod apk hack<br />
73
- blue orchid mod apk cheat<br />
74
- blue orchid mod apk cracked<br />
75
- blue orchid mod apk unlimited money<br />
76
- blue orchid romance game mod apk<br />
77
- blue orchid love story mod apk<br />
78
- blue orchid dating sim mod apk<br />
79
- blue orchid visual novel mod apk<br />
80
- blue orchid otome game mod apk<br />
81
- download game blue orchid mod apk<br />
82
- download aplikasi blue orchid mod apk<br />
83
- cara download blue orchid mod apk<br />
84
- link download blue orchid mod apk<br />
85
- how to install blue orchid mod apk<br />
86
- how to play blue orchid mod apk<br />
87
- how to get blue orchid mod apk<br />
88
- how to update blue orchid mod apk<br />
89
- how to hack blue orchid mod apk<br />
90
- how to cheat in blue orchid mod apk<br />
91
- is there a blue orchid mod apk<br />
92
- where can i find blue orchid mod apk<br />
93
- where to download blue orchid mod apk<br />
94
- what is the best site for downloading the latest version of the Blue Orchids Mod Apk?</p>
95
- <ul>
96
- <li>Pay attention to the hints and clues that are given in the story. They can help you make better choices and solve mysteries.</li>
97
- <li>Explore different options and outcomes by replaying the chapters or stories. You can also use the modded version to access all the choices without spending any resources.</li>
98
- <li>Interact with different characters and build your relationships with them. You can also romance them if you want. You can use gifts and items to increase your affection level with them.</li>
99
- <li>Check out the shop and the wardrobe for new outfits and accessories. You can also use the modded version to get unlimited coins and gems to buy anything you want.</li>
100
- <li>Follow the official social media accounts of Blue Orchid for news, updates, sneak peeks, and giveaways. You can also join the community of other players and share your opinions and feedback.</li>
101
- </ul>
102
- <h2>The pros and cons of Blue Orchid Mod APK</h2>
103
- <h3>The advantages of the modded version</h3>
104
- <p>Using Blue Orchid Mod APK has some advantages that make it appealing for many players. Some of them are:</p>
105
- <ul>
106
- <li>You can enjoy unlimited resources such as gems, coins, and tickets that allow you to access all the content and features of the game.</li>
107
- <li>You can have more control and flexibility over your choices and actions in the game without worrying about the cost or consequences.</li>
108
- <li>You can have more fun and satisfaction in the game without any restrictions or limitations.</li>
109
- <li>You can save your time and money by not having to wait for tickets to refill or spend real money on in-app purchases.</li>
110
- </ul>
111
- <h3>The disadvantages of the modded version</h3>
112
- <p>However, using Blue Orchid Mod APK also has some disadvantages that you should be aware of before downloading it. Some of them are:</p>
113
- <ul>
114
- <li>You may face some technical issues or errors while playing the game such as crashes, glitches, or bugs.</li>
115
- <li>You may lose your progress or data if you uninstall the game or switch devices.</li>
116
- <li>You may get banned or suspended from the game if you are detected by the developers or reported by other players.</li>
117
- <li>You may miss out on some of the original features or content of the game that are not included in the modded version.</li>
118
- </ul>
119
- <h2>Conclusion</h2>
120
- <h3>A summary of the main points</h3>
121
- <p>In conclusion, Blue Orchid is an interactive story game that lets you create your own character and live your own adventure in a fictional city. You can choose from different genres of stories, customize your character's appearance and personality, interact with other characters, make decisions that affect the outcome of the story, and enjoy various mini-games and activities. The game is free-to-play but also has some in-app purchases that require real money. If you want to have unlimited resources such as gems, coins, and tickets, you can download Blue Orchid Mod APK, which is a modified version of the game that gives you these benefits. However, you should also be aware of the potential risks and drawbacks of using this modded version such as technical issues, banned or suspended from the game, or missing out on some of the original features or content of the game.</p>
122
- <h3>A call to action for the readers</h3>
123
- <p>Now that you know everything about Blue Orchid Mod APK, you can decide whether you want to download it or not. If you do, make sure to follow the instructions we provided and enjoy the game with unlimited resources. If you don't, you can still play the original version of Blue Orchid and have a great time with the interactive stories. Either way, we hope you have fun and share your thoughts and experiences with us in the comments section below. Happy gaming!</p>
124
- <h2>FAQs</h2>
125
- <p>Here are some of the frequently asked questions about Blue Orchid Mod APK:</p>
126
- <ol>
127
- <li>Is Blue Orchid Mod APK safe to use?</li>
128
- <p>Blue Orchid Mod APK is generally safe to use as long as you download it from a reliable source such as [PlayMods]. However, you should always be careful when installing third-party apps on your device and scan them for any malware or viruses.</p>
129
- <li>How do I update Blue Orchid Mod APK?</li>
130
- <p>Blue Orchid Mod APK is usually updated automatically when the original version of the game is updated. However, if you encounter any problems or errors, you can check the source where you downloaded the modded version and see if there is a newer version available. You can also follow the official social media accounts of Blue Orchid for any news or updates.</p>
131
- <li>Can I play Blue Orchid Mod APK offline?</li>
132
- <p>No, you cannot play Blue Orchid Mod APK offline. You need an internet connection to access the game and its features. However, you can play some of the mini-games and activities offline once you have downloaded them.</p>
133
- <li>Can I transfer my progress from Blue Orchid to Blue Orchid Mod APK or vice versa?</li>
134
- <p>No, you cannot transfer your progress from Blue Orchid to Blue Orchid Mod APK or vice versa. The two versions of the game are not compatible and have different data and files. If you want to switch from one version to another, you will have to start from scratch.</p>
135
- <li>Can I play Blue Orchid Mod APK with my friends?</li>
136
- <p>Yes, you can play Blue Orchid Mod APK with your friends. You can connect your game account to your Facebook account and invite your friends to join you in the game. You can also chat with them, send them gifts, and compete with them in the leaderboards.</p>
137
- </ol>
 
spaces/1phancelerku/anime-remove-background/APK5-30 .md DELETED
@@ -1,129 +0,0 @@
1
-
2
- <h1>What is APK5-30 and Why You Need It</h1>
3
- <p>If you are looking for a reliable, efficient, and cost-effective axial fan for your cooling and ventilation needs, you might want to consider APK5-30. This is a product from Teral, a leading manufacturer of pumps and fans in Japan. In this article, we will explain what APK5-30 is, what are its features and benefits, how to use it, and how it compares with other axial fans in the market.</p>
4
- <h2>Introduction</h2>
5
- <p>Cooling and ventilation are essential for many industrial applications, such as machinery, equipment, exhaust, and air conditioning. However, not all fans are created equal. Some fans may not be able to deliver the required airflow and pressure, some may consume too much energy and generate too much noise, and some may not be durable or easy to install and maintain. That's why you need a fan that can meet your specific needs and expectations.</p>
6
- <h2>apk5-30</h2><br /><p><b><b>Download</b> &rarr;&rarr;&rarr; <a href="https://jinyurl.com/2uNPSM">https://jinyurl.com/2uNPSM</a></b></p><br /><br />
7
- <h3>What is APK5-30?</h3>
8
- <p>APK5-30 is a type of axial fan that uses an aluminum impeller and a belt drive system to create a high-efficiency airflow. It has a circular shape that can be directly mounted on a duct or suspended from a ceiling. It can handle air temperatures from 0 to 40 degrees Celsius and runs at a frequency of 50Hz or 60Hz depending on the region. It has a size of 300mm, an output of 0.4kW, a voltage of 200V, and a 4-pole (4P) motor.</p>
9
- <h3>What are the features and benefits of APK5-30?</h3>
10
- <p>APK5-30 has many features and benefits that make it a superior choice for cooling and ventilation purposes. Here are some of them:</p>
11
- <ul>
12
- <li>It uses a top-runner efficiency motor (IE3 equivalent) that reduces energy consumption and carbon emissions. (Except for 0.2 to 0.4kW models)</li>
13
- <li>It has a simple structure with few components, which makes it cheaper and easier to install and maintain than centrifugal fans.</li>
14
- <li>It has an internal support leg that acts as a static blade, which increases the static pressure and improves the performance.</li>
15
- <li>It has a wide range of models with different capacities, speeds, voltages, and frequencies to suit various applications.</li>
16
- <li>It has a low noise level and vibration level due to its smooth operation and balanced impeller.</li>
17
- </ul>
18
- <h2>How to use APK5-30 for your cooling and ventilation needs</h2>
19
- <p>Now that you know what APK5-30 is and what it can do for you, let's see how you can use it for your cooling and ventilation needs. Here are some tips on how to install, operate, and maintain APK5-30.</p>
20
- <h3>How to install APK5-30</h3>
21
- <p>To install APK5-30, you need to follow these steps:</p>
22
- <ol>
23
- <li>Select a suitable location for the fan that has enough space, ventilation, and accessibility.</li>
24
- <li>Prepare the duct or ceiling where the fan will be mounted or suspended.</li>
25
- <li>Connect the fan to the power supply according to the wiring diagram provided by the manufacturer.</li>
26
- <li>Secure the fan with bolts or nuts on the duct or ceiling.</li>
27
- <li>Check the rotation direction of the impeller by turning on the power briefly.</li>
28
- <li>If the rotation direction is incorrect, reverse the wiring connection.</li>
29
- </ol>
30
- <h3>How to operate APK5-30</h3>
31
- <p>To operate APK5-30, you need to follow these steps:</p> <ol>
32
- <li>Turn on the power switch and adjust the speed controller if needed.</li>
33
- <li>Monitor the fan operation and check for any abnormal sounds, vibrations, or smells.</li>
34
- <li>If the fan stops working or malfunctions, turn off the power immediately and contact the manufacturer or a qualified technician.</li>
35
- </ol>
36
- <h3>How to maintain APK5-30</h3>
37
- <p>To maintain APK5-30, you need to follow these steps:</p>
38
- <ol>
39
- <li>Turn off the power and disconnect the fan from the power supply before cleaning or inspecting.</li>
40
- <li>Clean the fan regularly with a soft cloth or a brush to remove any dust or dirt.</li>
41
- <li>Check the fan for any signs of wear, damage, or corrosion and replace any defective parts as soon as possible.</li>
42
- <li>Lubricate the bearings and belts periodically with the recommended oil or grease.</li>
43
- <li>Store the fan in a dry and cool place when not in use.</li>
44
- </ol>
45
- <h2>Comparison of APK5-30 with other axial fans</h2>
46
- <p>Now that you know how to use APK5-30, let's see how it compares with other axial fans in the market. Here are some aspects that you can use to evaluate different axial fans:</p>
47
- <h3>How APK5-30 differs from other axial fans</h3>
48
- <p>APK5-30 differs from other axial fans in several ways, such as:</p>
49
- <p>APK5-30 axial fan price<br />
50
- APK5-30 axial fan specifications<br />
51
- APK5-30 axial fan installation manual<br />
52
- APK5-30 axial fan performance curve<br />
53
- APK5-30 axial fan noise level<br />
54
- APK5-30 axial fan maintenance<br />
55
- APK5-30 axial fan replacement parts<br />
56
- APK5-30 axial fan reviews<br />
57
- APK5-30 axial fan dimensions<br />
58
- APK5-30 axial fan weight<br />
59
- APK5-30 axial fan power consumption<br />
60
- APK5-30 axial fan airflow rate<br />
61
- APK5-30 axial fan static pressure<br />
62
- APK5-30 axial fan speed<br />
63
- APK5-30 axial fan efficiency<br />
64
- APK5-30 axial fan vs centrifugal fan<br />
65
- APK5-30 axial fan applications<br />
66
- APK5-30 axial fan advantages and disadvantages<br />
67
- APK5-30 axial fan suppliers<br />
68
- APK5-30 axial fan distributors<br />
69
- APK5-30 axial fan online purchase<br />
70
- APK5-30 axial fan warranty<br />
71
- APK5-30 axial fan troubleshooting<br />
72
- APK5-30 axial fan vibration analysis<br />
73
- APK5-30 axial fan blade design<br />
74
- APK5-30 axial fan motor type<br />
75
- APK5-30 axial fan belt tension<br />
76
- APK5-30 axial fan bearing lubrication<br />
77
- APK5-30 axial fan impeller material<br />
78
- APK5-30 axial fan casing material</p>
79
- <ul>
80
- <li>It uses an aluminum impeller instead of a steel or plastic one, which makes it lighter and more resistant to corrosion.</li>
81
- <li>It uses a belt drive system instead of a direct drive system, which allows it to adjust the speed and torque more easily.</li>
82
- <li>It uses an internal support leg instead of an external one, which reduces the air resistance and increases the efficiency.</li>
83
- </ul>
84
- <h3>How APK5-30 performs better than other axial fans</h3>
85
- <p>APK5-30 performs better than other axial fans in several ways, such as:</p>
86
- <ul>
87
- <li>It has a higher airflow rate and pressure than other axial fans of the same size and power.</li>
88
- <li>It has a lower noise level and vibration level than other axial fans of the same size and power.</li>
89
- <li>It has a longer service life and lower maintenance cost than other axial fans of the same size and power.</li>
90
- </ul>
91
- <h3>How APK5-30 saves energy and costs than other axial fans</h3>
92
- <p>APK5-30 saves energy and costs than other axial fans in several ways, such as:</p>
93
- <ul>
94
- <li>It uses a top-runner efficiency motor (IE3 equivalent) that consumes less electricity and emits less carbon dioxide. (Except for 0.2 to 0.4kW models)</li>
95
- <li>It has a simple structure with few components, which reduces the initial purchase price and installation cost.</li>
96
- <li>It has a low operating cost due to its high efficiency and low maintenance requirements.</li>
97
- </ul>
98
- <h2>Conclusion</h2>
99
- <h3>Summary of the main points</h3>
100
- <p>In conclusion, APK5-30 is a type of axial fan that uses an aluminum impeller and a belt drive system to create a high-efficiency airflow. It has many features and benefits that make it a superior choice for cooling and ventilation purposes. It is easy to install, operate, and maintain, and it performs better than other axial fans in terms of airflow, pressure, noise, vibration, service life, and maintenance cost. It also saves energy and costs by using a top-runner efficiency motor (IE3 equivalent) that reduces electricity consumption and carbon emissions. (Except for 0.2 to 0.4kW models)</p>
101
- <h3>Call to action</h3>
102
- <p>If you are interested in purchasing APK5-30 or learning more about it, please visit our website or contact us today. We will be happy to assist you with any questions or inquiries you may have. Don't miss this opportunity to get your hands on this amazing product that will improve your cooling and ventilation needs.</p>
103
- <h2>Frequently Asked Questions</h2>
104
- <h4>What is the warranty period for APK5-30?</h4>
105
- <p>The warranty period for APK5-30 is one year from the date of purchase. If you encounter any problems with the product during this period, please contact us for repair or replacement.</p>
106
- <h4>What are the dimensions and weight of APK5-30?</h4>
107
- <p>The dimensions of APK5-30 are 300mm x 300mm x 300mm (L x W x H) and the weight is 9kg.</p>
108
- <h4>What are the applications of APK5-30?</h4>
109
- <p>APK5-30 can be used for various cooling and ventilation applications, such as:</p>
110
- <ul>
111
- <li>Machinery and equipment cooling</li>
112
- <li>Exhaust and smoke removal</li>
113
- <li>Air conditioning and dehumidification</li>
114
- <li>Greenhouse and farm ventilation</li>
115
- <li>Warehouse and factory ventilation</li>
116
- </ul>
117
- <h4>How can I order APK5-30 online?</h4>
118
- <p>You can order APK5-30 online by visiting our website and filling out the order form. You will need to provide your name, address, phone number, email, and payment method. We will confirm your order and ship the product to you as soon as possible.</p>
119
- <h4>What are the safety precautions for using APK5-30?</h4>
120
- <p>When using APK5-30, you should follow these safety precautions:</p>
121
- <ul>
122
- <li>Do not touch the fan or the impeller when it is running or hot.</li>
123
- <li>Do not insert any objects or fingers into the fan or the duct.</li>
124
- <li>Do not use the fan in wet, dusty, or flammable environments.</li>
125
- <li>Do not overload the fan or the power supply.</li>
126
- <li>Do not modify or repair the fan without authorization.</li>
127
- </ul>
 
spaces/1phancelerku/anime-remove-background/Bubble Shooter Enjoy the Original Bubble Pop Game on Your iOS Device.md DELETED
@@ -1,154 +0,0 @@
1
-
2
- <h1>Bubble Shooter for iPhone Free Download: How to Play the Classic and Addictive Game on Your iOS Device</h1>
3
- <p>If you are looking for a fun and relaxing game to play on your iPhone, you might want to try Bubble Shooter. Bubble Shooter is a classic and addictive game that has been around for decades and is still popular among millions of players worldwide. In this article, we will tell you everything you need to know about Bubble Shooter, including what it is, how to download it for free, and how to play it on your iOS device. Let's get started!</p>
4
- <h2>What is Bubble Shooter?</h2>
5
- <p>Bubble Shooter is a puzzle game that involves shooting bubbles of the same color to make them pop and clear the board. The game is simple to learn but challenging to master, as you need to aim carefully and plan your moves ahead. The game has many variations and versions, but the basic concept remains the same: match 3 or more bubbles of the same color to burst them and score points.</p>
6
- <h2>bubble shooter for iphone free download</h2><br /><p><b><b>Download</b> &#9989; <a href="https://jinyurl.com/2uNJP2">https://jinyurl.com/2uNJP2</a></b></p><br /><br />
7
- <h3>The history of Bubble Shooter</h3>
8
- <p>Bubble Shooter was originally developed by a company called Taito in 1994 as an arcade game called Puzzle Bobble. The game was a spin-off of the popular platformer game Bubble Bobble, which featured two cute dragons named Bub and Bob. Puzzle Bobble was later ported to various home consoles and computers, and became a huge hit worldwide. The game spawned several sequels and clones, and inspired many other bubble shooting games over the years.</p>
9
- <h3>The gameplay of Bubble Shooter</h3>
10
- <p>The gameplay of Bubble Shooter is very simple: you have a cannon at the bottom of the screen that shoots bubbles of different colors. You can aim the cannon by moving your finger or mouse cursor on the screen, and tap or click to fire a bubble. Your goal is to match 3 or more bubbles of the same color to make them pop and clear them from the board. If you clear all the bubbles, you win the level and move on to the next one. If the bubbles reach the bottom of the screen, you lose the game and have to start over.</p>
11
- <h3>The benefits of playing Bubble Shooter</h3>
12
- <p>Bubble Shooter is not only a fun and entertaining game, but also a beneficial one. Playing Bubble Shooter can help you improve your skills in various ways, such as:</p>
13
- <ul>
14
- <li>Enhancing your concentration and focus</li>
15
- <li>Boosting your memory and cognitive abilities</li>
16
- <li>Developing your hand-eye coordination and reaction speed</li>
17
- <li>Reducing your stress and anxiety levels</li>
18
- <li>Increasing your creativity and problem-solving skills</li>
19
- </ul>
20
- <p>Besides, playing Bubble Shooter can also make you happy and relaxed, as popping bubbles can release endorphins in your brain that make you feel good.</p>
21
- <h2>How to download Bubble Shooter for iPhone for free?</h2>
22
- <p>If you want to play Bubble Shooter on your iPhone, you have plenty of options to choose from. There are many free apps that offer different versions and variations of Bubble Shooter on the App Store. Here are some of the best ones that we recommend:</p>
23
- <h3>The best Bubble Shooter apps on the App Store</h3>
24
- <h4>Bubble Shooter - Pop Bubbles</h4>
25
- <p>This app is one of the most popular and highly rated Bubble Shooter games on the App Store. It offers a classic and addictive gameplay with thousands of fun levels, amazing graphics and sounds, and various challenges and rewards. You can also play with your friends and family online and compete for the highest score. The app is free to download and play, but it contains ads and in-app purchases. You can download it from here: [Bubble Shooter - Pop Bubbles].</p>
26
- <p>free bubble shooter games download for iphone<br />
27
- bubble shooter app for iphone free download<br />
28
- bubble shooter classic for iphone free download<br />
29
- bubble shooter puzzle for iphone free download<br />
30
- bubble shooter adventure for iphone free download<br />
31
- bubble shooter legend for iphone free download<br />
32
- bubble shooter deluxe for iphone free download<br />
33
- bubble shooter blast for iphone free download<br />
34
- bubble shooter pop for iphone free download<br />
35
- bubble shooter fun for iphone free download<br />
36
- bubble shooter saga for iphone free download<br />
37
- bubble shooter mania for iphone free download<br />
38
- bubble shooter frenzy for iphone free download<br />
39
- bubble shooter magic for iphone free download<br />
40
- bubble shooter galaxy for iphone free download<br />
41
- bubble shooter candy for iphone free download<br />
42
- bubble shooter fruit for iphone free download<br />
43
- bubble shooter animal for iphone free download<br />
44
- bubble shooter dragon for iphone free download<br />
45
- bubble shooter unicorn for iphone free download<br />
46
- bubble shooter rainbow for iphone free download<br />
47
- bubble shooter garden for iphone free download<br />
48
- bubble shooter farm for iphone free download<br />
49
- bubble shooter jungle for iphone free download<br />
50
- bubble shooter forest for iphone free download<br />
51
- bubble shooter ocean for iphone free download<br />
52
- bubble shooter beach for iphone free download<br />
53
- bubble shooter island for iphone free download<br />
54
- bubble shooter pirate for iphone free download<br />
55
- bubble shooter treasure for iphone free download<br />
56
- bubble shooter gold for iphone free download<br />
57
- bubble shooter diamond for iphone free download<br />
58
- bubble shooter jewel for iphone free download<br />
59
- bubble shooter crystal for iphone free download<br />
60
- bubble shooter star for iphone free download<br />
61
- bubble shooter space for iphone free download<br />
62
- bubble shooter planet for iphone free download<br />
63
- bubble shooter solar for iphone free download<br />
64
- bubble shooter lunar for iphone free download<br />
65
- bubble shooter halloween for iphone free download<br />
66
- bubble shooter christmas for iphone free download<br />
67
- bubble shooter winter for iphone free download<br />
68
- bubble shooter spring for iphone free download<br />
69
- bubble shooter summer for iphone free download<br />
70
- bubble shooter autumn for iphone free download<br />
71
- best bubble shooter game for iphone free download<br />
72
- new bubble shooter game for iphone free download<br />
73
- top bubble shooter game for iphone free download<br />
74
- cool bubble shooter game for iphone free download</p>
75
- <h4>Bubble Shooter - Addictive!</h4>
76
- <p>This app is another great option for Bubble Shooter fans. It features a smooth and easy gameplay with over 3000 exciting levels, stunning graphics and effects, and a relaxing soundtrack. You can also customize your bubble shooter with different skins and themes, and enjoy daily bonuses and gifts. The app is free to download and play, but it contains ads and in-app purchases. You can download it from here: [Bubble Shooter - Addictive!].</p>
77
- <h4>Bobble Shooter</h4>
78
- <p>This app is a unique and innovative take on the Bubble Shooter genre. It combines the classic bubble popping gameplay with a physics-based puzzle element. You have to shoot bobbles of different shapes and sizes to create clusters of the same color and make them explode. The game has hundreds of challenging levels, colorful graphics and animations, and a catchy music. The app is free to download and play, but it contains ads and in-app purchases. You can download it from here: [Bobble Shooter].</p>
79
- <h3>How to install and launch Bubble Shooter on your iPhone</h3>
80
- <p>Installing and launching Bubble Shooter on your iPhone is very easy. Just follow these simple steps:</p>
81
- <ol>
82
- <li>Open the App Store on your iPhone and search for the Bubble Shooter app that you want to download.</li>
83
- <li>Tap on the app icon and then tap on the Get button to start the download process.</li>
84
- <li>Wait for the app to finish downloading and then tap on the Open button to launch it.</li>
85
- <li>Alternatively, you can also find the app icon on your home screen and tap on it to launch it.</li>
86
- </ol>
87
- <h3>How to update and delete Bubble Shooter on your iPhone</h3>
88
- <p>Updating and deleting Bubble Shooter on your iPhone is also very simple. Just follow these simple steps:</p>
89
- <ol>
90
- <li>To update Bubble Shooter, open the App Store on your iPhone and tap on the Updates tab at the bottom.</li>
91
- <li>Find the Bubble Shooter app that you want to update and tap on the Update button next to it.</li>
92
- <li>Wait for the app to finish updating and then launch it as usual.</li>
93
- <li>To delete Bubble Shooter, press and hold the app icon on your home screen until it starts to wiggle.</li>
94
- <li>Tap on the X button on the top left corner of the app icon and then tap on Delete to confirm.</li>
95
- </ol>
96
- <h2>How to play Bubble Shooter on your iPhone?</h2>
97
- <p>Playing Bubble Shooter on your iPhone is very fun and easy. Here are some tips and tricks that will help you enjoy the game more:</p>
98
- <h3>The basic rules and tips of Bubble Shooter</h3>
99
- <p>The basic rules of Bubble Shooter are as follows:</p>
100
- <ul>
101
- <li>You have a limited number of bubbles to shoot in each level.</li>
102
- <li>You have to match 3 or more bubbles of the same color to pop them and clear them from the board.</li>
103
- <li>You can bounce the bubbles off the walls to reach difficult spots.</li>
104
- <li>You can see the next bubble that you are going to shoot at the bottom of the screen.</li>
105
- <li>You can swap the current bubble with the next one by tapping on it.</li>
106
- <li>You can use special bubbles that have different effects, such as bombs, rainbows, stars, etc.</li>
107
- <li>You can earn coins and gems by popping bubbles, completing levels, and achieving goals.</li>
108
- <li>You can use coins and gems to buy power-ups, boosters, lives, etc.</li>
109
- </ul>
110
- <p>Some tips that will help you improve your performance are:</p>
111
- <ul>
112
- <li>Aim carefully before you shoot a bubble.</li>
113
- <li>Try to pop as many bubbles as possible with one shot.</li>
114
- <li>Try to create chain reactions by popping bubbles that are connected to other bubbles of the same color.</li>
115
- <li>Try to clear the top rows of bubbles first, as they will drop all the bubbles below them when they pop.</li>
116
- <li>Try to avoid leaving isolated bubbles that are hard to reach or match.</li>
117
- <li>Use power-ups and boosters wisely, as they can help you clear difficult levels or get out of tricky situations.</li>
118
- </ul>
119
- <h3>The different game modes and levels of Bubble Shooter</h3>
120
- <p>Bubble Shooter offers a variety of game modes and levels that will keep you entertained for hours. Some of them are:</p>
121
- <ul>
122
- <li>Classic mode: This is the original and most popular mode of Bubble Shooter. It has hundreds of levels that range from easy to hard, and each level has a different layout and goal. You can play this mode offline or online, and you can also choose the difficulty level and the bubble design.</li>
123
- <li>Arcade mode: This is a fast-paced and exciting mode of Bubble Shooter. It has endless levels that get harder and harder as you progress, and each level has a time limit and a score target. You have to pop as many bubbles as you can before the time runs out, and you can also use power-ups and boosters to speed up your progress.</li>
124
- <li>Puzzle mode: This is a challenging and brain-teasing mode of Bubble Shooter. It has hundreds of levels that require logic and strategy to solve, and each level has a unique puzzle and goal. You have to pop all the bubbles using the least number of shots, and you can also use hints and skips to help you out.</li>
125
- <li>Adventure mode: This is a fun and adventurous mode of Bubble Shooter. It has hundreds of levels that are based on different themes and stories, such as pirates, fairies, dinosaurs, etc. You have to pop bubbles and collect items to complete the levels, and you can also encounter obstacles and enemies along the way.</li>
126
- </ul>
127
- <h3>The features and settings of Bubble Shooter</h3>
128
- <p>Bubble Shooter also has many features and settings that will enhance your gaming experience. Some of them are:</p>
129
- <ul>
130
- <li>You can connect your Facebook account to Bubble Shooter and share your progress, achievements, and scores with your friends.</li>
131
- <li>You can play with other players from around the world in the multiplayer mode and compete for the highest score.</li>
132
- <li>You can join or create a team with other players and chat, cooperate, and exchange gifts with them.</li>
133
- <li>You can participate in various events, tournaments, and challenges that offer special rewards and prizes.</li>
134
- <li>You can customize your bubble shooter with different skins, themes, backgrounds, sounds, etc.</li>
135
- <li>You can adjust the game settings according to your preferences, such as the volume, the language, the notifications, etc.</li>
136
- </ul>
137
- <h2>Conclusion</h2>
138
- <p>Bubble Shooter is a classic and addictive game that you can play on your iPhone for free. It offers a simple but challenging gameplay with thousands of fun levels, amazing graphics and sounds, and various game modes and features. It also helps you improve your skills, reduce your stress, and have fun with your friends. If you are looking for a game that will keep you entertained for hours, download Bubble Shooter today and enjoy popping bubbles!</p>
139
- <h2>FAQs</h2>
140
- <p>Here are some frequently asked questions about Bubble Shooter:</p>
141
- <ol>
142
- <li>How do I get more coins and gems in Bubble Shooter?</li>
143
- <p>You can get more coins and gems in Bubble Shooter by popping bubbles, completing levels, achieving goals, watching ads, spinning the wheel, opening chests, collecting daily bonuses, joining events, buying them with real money, etc.</p>
144
- <li>How do I use power-ups and boosters in Bubble Shooter?</li>
145
- <p>You can use power-ups and boosters in Bubble Shooter by tapping on them before or during the game. Power-ups are special bubbles that have different effects, such as bombs, rainbows, stars, etc. Boosters are items that help you in various ways, such as extra moves, fireballs, magnets, etc.</p>
146
- <li>How do I unlock new levels in Bubble Shooter?</li>
147
- <p>You can unlock new levels in Bubble Shooter by completing the previous levels or by paying coins or gems. You can also unlock new levels by joining events or teams that offer exclusive levels.</p>
148
- <li>How do I reset my progress in Bubble Shooter?</li>
149
- <p>You can reset your progress in Bubble Shooter by deleting the app from your iPhone and reinstalling it. However, this will also erase all your coins, gems, power-ups, boosters, lives, etc. If you want to keep them, you can connect your Facebook account to Bubble Shooter and sync your progress across different devices.</p>
150
- <li>How do I contact the support team of Bubble Shooter?</li>
151
- <p>You can contact the support team of Bubble Shooter by tapping on the settings icon on the main screen and then tapping on the help button. You can also email them at [email protected] or visit their website at www.bubbleshooter.com.</p>
152
- </ol>
 
spaces/1phancelerku/anime-remove-background/Crafting and Building 1.18 APK A Free Game with Amazing Graphics and Multiplayer Mode.md DELETED
@@ -1,121 +0,0 @@
1
- <br />
2
- <h1>Crafting and Building 1.18 APK: A Free Game for Creative Minds</h1>
3
- <p>Do you like building games? Do you want to create your own world with your own rules? If yes, then you should try <strong>crafting and building 1.18 apk</strong>, a new free game that lets you unleash your imagination and show your skills. Crafting and building 1.18 apk is a sandbox game that allows you to build anything you want, from houses and castles to farms and cities. You can also play with your friends online, explore their creations, and have fun together. Crafting and building 1.18 apk is a game for the whole family, suitable for kids, boys, girls, and adults.</p>
4
- <h2>Features of Crafting and Building 1.18 APK</h2>
5
- <p>Crafting and building 1.18 apk has many features that make it an enjoyable and addictive game. Here are some of them:</p>
6
- <h2>crafting and building 1.18 apk</h2><br /><p><b><b>Download File</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://jinyurl.com/2uNLwa">https://jinyurl.com/2uNLwa</a></b></p><br /><br />
7
- <ul>
8
- <li><strong>Easy to use interface:</strong> The game has a simple and user-friendly interface that lets you access all the tools and options easily. You can drag and drop blocks, rotate them, change their colors, and customize them as you wish.</li>
9
- <li><strong>Many block types:</strong> The game offers a variety of block types, from grass and wood to stone and metal. You can also find special blocks, such as furniture, animals, plants, and even vehicles.</li>
10
- <li><strong>Multiplayer mode:</strong> The game supports online multiplayer mode, where you can join or create a server and play with your friends or other players from around the world. You can chat with them, visit their worlds, help them build, or compete with them.</li>
11
- <li><strong>Creative mode:</strong> The game has a creative mode, where you have unlimited resources and no enemies or dangers. You can build whatever you want without any limitations or restrictions.</li>
12
- <li><strong>Survival mode:</strong> The game also has a survival mode, where you have to gather resources, craft items, fight enemies, and survive in a hostile environment. You can also tame animals, farm crops, mine ores, and explore dungeons.</li>
13
- </ul>
14
- <h2>Tips and Tricks for Crafting and Building 1.18 APK</h2>
15
- <p>If you want to master crafting and building 1.18 apk, here are some tips and tricks that can help you:</p>
16
- <ul>
17
- <li><strong>Use trapdoors as walls:</strong> A clever way to make a pen for animals or a fence for your garden is to use trapdoors as walls. Animals can climb into the pen but not out of it, and you can easily access it by opening the trapdoors.</li>
18
- <li><strong>Find diamonds under clay patches:</strong> A useful tip to find diamonds easily is to dig under clay patches in rivers. Diamonds are often found below clay patches that have a star shape.</li>
19
- <li><strong>Use torches to breathe underwater:</strong> A handy trick to breathe underwater is to place torches on the wall or floor near your head. The torches will create air bubbles that will replenish your oxygen.</li>
20
- <li><strong>Use beds as explosives:</strong> A fun way to blow up things is to use beds as explosives. Beds will explode when placed in the Nether or the End dimensions, creating a large blast radius.</li>
21
- <li><strong>Use pistons to move blocks:</strong> A smart way to move blocks around is to use pistons. Pistons can push or pull blocks up to 12 blocks away, allowing you to create doors, bridges, elevators, traps, and more.</li>
22
- </ul>
23
- <h2>Reviews of Crafting and Building 1.18 APK</h2>
24
- <p>Crafting and building 1.18 apk has received many positive reviews from users who have played the game. Here are some of them:</p>
25
- <table>
26
- <tr>
27
- <th>User</th>
28
- <th>Rating</th>
29
- <th>Comment</th>
30
- </tr>
31
- <tr>
32
- <td>Amy</td>
33
- <td>5 stars</td>
34
- <td>I love this game! It's so fun and creative. I can build anything I want and play with my friends online. It's like Minecraft but better.</td>
35
- </tr>
36
- <tr>
37
- <td>Jack</td>
38
- <td>4 stars</td>
39
- <td>This game is awesome, but it has some bugs and glitches. Sometimes the game crashes or freezes, and sometimes the blocks disappear or change color. Please fix these issues.</td>
40
- </tr>
41
- <tr>
42
- <td>Lisa</td>
43
- <td>3 stars</td>
44
- <td>This game is good, but it needs more content and features. I wish there were more block types, more animals, more items, more modes, and more customization options. It gets boring after a while.</td>
45
- </tr>
46
- <tr>
47
- <td>Tom</td>
48
- <td>2 stars</td>
49
- <td>This game is okay, but it's too similar to other games. It's like a copy of Minecraft or Roblox. It doesn't have anything original or unique. It's just another building game.</td>
50
- </tr>
51
- <tr>
52
- <td>Anna</td>
53
- <td>1 star</td>
54
- <td>This game is terrible. It's full of ads and pop-ups that ruin the gameplay. It's also very laggy and slow. It takes forever to load and connect to the servers. It's a waste of time and space.</td>
55
- </tr>
56
- </table>
57
- <h2>Conclusion: Download Crafting and Building 1.18 APK Now!</h2>
58
- <p>Crafting and building 1.18 apk is a free game that lets you create your own world with your own rules. You can build anything you want, from houses and castles to farms and cities. You can also play with your friends online, explore their creations, and have fun together. Crafting and building 1.18 apk is a game for the whole family, suitable for kids, boys, girls, and adults.</p>
59
- <p>crafting and building 1.18 apk download free<br />
60
- crafting and building 1.18 apk mod unlimited money<br />
61
- crafting and building 1.18 apk latest version<br />
62
- crafting and building 1.18 apk for android<br />
63
- crafting and building 1.18 apk offline<br />
64
- crafting and building 1.18 apk no ads<br />
65
- crafting and building 1.18 apk update<br />
66
- crafting and building 1.18 apk hack<br />
67
- crafting and building 1.18 apk full version<br />
68
- crafting and building 1.18 apk premium<br />
69
- crafting and building 1.18 apk gameplay<br />
70
- crafting and building 1.18 apk review<br />
71
- crafting and building 1.18 apk features<br />
72
- crafting and building 1.18 apk tips and tricks<br />
73
- crafting and building 1.18 apk cheats<br />
74
- crafting and building 1.18 apk guide<br />
75
- crafting and building 1.18 apk tutorial<br />
76
- crafting and building 1.18 apk best settings<br />
77
- crafting and building 1.18 apk how to play<br />
78
- crafting and building 1.18 apk requirements<br />
79
- crafting and building 1.18 apk size<br />
80
- crafting and building 1.18 apk screenshots<br />
81
- crafting and building 1.18 apk video<br />
82
- crafting and building 1.18 apk online multiplayer<br />
83
- crafting and building 1.18 apk new features<br />
84
- crafting and building 1.18 apk bugs fixes<br />
85
- crafting and building 1.18 apk installation<br />
86
- crafting and building 1.18 apk alternatives<br />
87
- crafting and building 1.18 apk similar games<br />
88
- crafting and building 1.18 apk comparison<br />
89
- crafting and building 1.18 apk pros and cons<br />
90
- crafting and building 1.18 apk ratings<br />
91
- crafting and building 1.18 apk feedbacks<br />
92
- crafting and building 1.18 apk comments<br />
93
- crafting and building 1.18 apk questions and answers<br />
94
- crafting and building 1.18 apk support<br />
95
- crafting and building 1.18 apk developer contact<br />
96
- crafting and building 1.18 apk official website<br />
97
- crafting and building 1.18 apk social media links<br />
98
- crafting and building 1.18 apk news and updates<br />
99
- crafting and building 1.18 apk release date<br />
100
- crafting and building 1.18 apk changelog<br />
101
- crafting and building 1.18 apk download link<br />
102
- crafting and building 1.18 apk mirror link<br />
103
- crafting and building 1.18 apk direct link<br />
104
- crafting and building 1.18 apk file information<br />
105
- crafting and building 1.18 apk virus scan report<br />
106
- crafting and building 1.18 apk safe to download</p>
107
- <p>If you are looking for a game that will challenge your creativity and imagination, then you should download crafting and building 1.18 apk now! You will not regret it!</p>
108
- <h2>FAQs: Frequently Asked Questions About Crafting and Building 1.18 APK</h2>
109
- <p>Here are some of the most common questions and answers about crafting and building 1.18 apk:</p>
110
- <h3>Q: How can I download crafting and building 1.18 apk?</h3>
111
- <p>A: You can download crafting and building 1.18 apk from the Google Play Store or from other websites that offer apk files. However, be careful when downloading from unknown sources, as they may contain viruses or malware.</p>
112
- <h3>Q: How can I update crafting and building 1.18 apk?</h3>
113
- <p>A: You can update crafting and building 1.18 apk from the Google Play Store or from the app itself. The app will notify you when there is a new version available and ask you to update it.</p>
114
- <h3>Q: How can I play crafting and building 1.18 apk offline?</h3>
115
- <p>A: You can play crafting and building 1.18 apk offline by choosing the single-player mode or the creative mode. You will not be able to access the multiplayer mode or the survival mode without an internet connection.</p>
116
- <h3>Q: How can I play crafting and building 1.18 apk with my friends?</h3>
117
- <p>A: You can play crafting and building 1.18 apk with your friends by choosing the multiplayer mode or the survival mode. You will need an internet connection and a valid account to join or create a server.</p>
118
- <h3>Q: How can I contact the developers of crafting and building 1.18 apk?</h3>
119
- <p>A: You can contact the developers of crafting and building 1.18 apk by sending them an email at [email protected] or by leaving them feedback on the Google Play Store or on their social media pages.</p>
 
spaces/1phancelerku/anime-remove-background/Download MuksOS AI Launcher 2.0 Mod APK for Android - Latest Version with Voice Gesture and Text Control.md DELETED
@@ -1,108 +0,0 @@
1
-
2
- <h1>MuksOS AI Launcher 2.0: A Smart and Interactive Android Launcher</h1>
3
- <p>If you are looking for a new and innovative way to interact with your phone, you might want to check out MuksOS AI Launcher 2.0. This is a unique android launcher that combines the features of an app launcher, a virtual assistant, and an AI tool for your DIY automation projects. In this article, we will tell you what MuksOS AI Launcher 2.0 is, what are its features, how to download it, and answer some frequently asked questions.</p>
4
- <h2>muksos ai launcher 2.0 mod apk download</h2><br /><p><b><b>Download File</b> &#127775; <a href="https://jinyurl.com/2uNMrY">https://jinyurl.com/2uNMrY</a></b></p><br /><br />
5
- <h2>What is MuksOS AI Launcher 2.0?</h2>
6
- <p>MuksOS AI Launcher 2.0 is an android app developed by Dr. Mukesh Bangar, a computer engineer and researcher in artificial intelligence. It is designed to make your phone smarter and more responsive by using voice, gestures, or text commands. You can use MuksOS AI Launcher 2.0 to open apps, make calls, search the web, set alarms, reminders, and more. You can also use it as a virtual assistant that can assist you anytime, anywhere with its cool and unique features like JARVIS has in Iron Man movie. And if you are into DIY automation projects, you can use MuksOS AI Launcher 2.0 as an easy AI tool to create amazing things using object recognition and smart connect features.</p>
7
- <h3>Features of MuksOS AI Launcher 2.0</h3>
8
- <p>MuksOS AI Launcher 2.0 has many features that make it stand out from other android launchers. Here are some of them:</p>
9
- <h4>Teachable</h4>
10
- <p>MuksOS AI Launcher 2.0 is not just a passive launcher that does what you say. It is also a teachable launcher that learns from you and adapts to your preferences. You can teach it voice commands, object recognition, and actions that suit your needs.</p>
11
- <h4>Fast and smooth</h4>
12
- <p>MuksOS AI Launcher 2.0 is designed to be fast and smooth, so you can get more done in less time. It has voice access that makes it faster than any other launcher and saves time. You can also use gestures or text commands if you prefer.</p>
13
- <h4>Multiple voice options</h4>
14
- <p>MuksOS AI Launcher 2.0 has six different voice options that you can choose from, depending on your mood and preference. You can switch between male and female voices, as well as different accents and languages.</p>
15
- <h4>100 % privacy</h4>
16
- <p>MuksOS AI Launcher 2.0 respects your privacy and does not store your personal data on cloud servers. All your data is stored locally on your device and encrypted for security.</p>
17
- <p>muksos ai launcher 2.0 apk free download<br />
18
- muksos ai launcher 2.0 latest version<br />
19
- muksos ai launcher 2.0 android app<br />
20
- muksos ai launcher 2.0 for pc<br />
21
- muksos ai launcher 2.0 features<br />
22
- muksos ai launcher 2.0 review<br />
23
- muksos ai launcher 2.0 offline mode<br />
24
- muksos ai launcher 2.0 voice access<br />
25
- muksos ai launcher 2.0 smart connect<br />
26
- muksos ai launcher 2.0 vision ability<br />
27
- muksos ai launcher 2.0 write on home screen<br />
28
- muksos ai launcher 2.0 speech reminders and alarm<br />
29
- muksos ai launcher 2.0 dark and light theme<br />
30
- muksos ai launcher 2.0 hide apps<br />
31
- muksos ai launcher 2.0 power saver<br />
32
- muksos ai launcher 2.0 teachable commands<br />
33
- muksos ai launcher 2.0 object recognition<br />
34
- muksos ai launcher 2.0 diy automation tool<br />
35
- muksos ai launcher 2.0 virtual assistant<br />
36
- muksos ai launcher 2.0 neon glow icons theme<br />
37
- muksos ai launcher 2.0 apkcombo download<br />
38
- muksos ai launcher 2.0 appbrain download<br />
39
- muksos ai launcher 2.0 gameloop download<br />
40
- muksos ai launcher 2.0 apk size and version<br />
41
- muksos ai launcher 2.0 content rating and developer<br />
42
- muksos ai launcher 2.0 install and update<br />
43
- muksos ai launcher 2.0 google play id and category<br />
44
- muksos ai launcher 2.0 interact with phone in natural way<br />
45
- muksos ai launcher 2.0 open apps and contacts with voice or text or gestures<br />
46
- muksos ai launcher 2.0 web search wikipedia or google or youtube with voice or text or gestures<br />
47
- muksos ai launcher 2.0 create amazing AI projects with smart connect feature<br />
48
- muksos ai launcher 2.0 train your mobile for object recognition and actions with vision ability feature<br />
49
- muksos ai launcher 2.0 works without internet with offline mode feature<br />
50
- muksos ai launcher 2.0 change theme in a single tap with dark and light theme feature<br />
51
- muksos ai launcher 2.0 hide unwanted and distracting bloatware with hide apps feature<br />
52
- muksos ai launcher 2.0 save phone battery and optimize battery usage with power saver feature<br />
53
- muksos ai launcher 2.0 teach voice commands, object recognition and actions with teachable feature<br />
54
- muksos ai launcher 2.0 get direct access to your favorite app from home screen with favorite apps feature<br />
55
- muksos ai launcher 2.0 write on home screen to open apps, make a call or web search with write on home screen feature <br />
56
- muksos ai launcher 2.0 quickly access all your apps, contacts, web searches, reminders, alarm etc with voice access feature</p>
57
- <h4>User friendly</h4>
58
- <p>MuksOS AI Launcher 2.0 is user friendly and easy to use. You don't need to scroll pages to find contacts, apps, alarms, reminders, etc. You can access them directly from the home screen with simple commands.</p>
59
- <h4>Power saver</h4>
60
- <p>MuksOS AI Launcher 2.0 saves your phone battery and optimizes battery usage by using minimal resources and background processes.</p>
61
- <h4>Esthetic theme</h4>
62
- <p>MuksOS AI Launcher 2.0 comes with a cool neon glow icons theme that's sure to stand out on your device. You can also customize the theme according to your liking by changing the colors, icons, fonts, and wallpapers.</p>
63
- <h4>Dark and Light theme</h4>
64
- <p>MuksOS AI Launcher 2.0 supports both dark and light themes that you can switch between depending on the time of the day or your preference. The dark theme is ideal for night time or low-light conditions, while the light theme is suitable for daytime or bright conditions.</p>
65
- <h4>Works offline</h4>
66
- <p>MuksOS AI Launcher 2.0 works offline as well as online, so you don't need to worry about internet connectivity or data usage. You can use most of the features without any internet connection, such as opening apps, making calls, setting alarms, reminders, etc.</p>
67
- <h4>Favorite apps</h4>
68
- <p>MuksOS AI Launcher 2.0 lets you add your favorite apps to the home screen for quick and easy access. You can also create folders and categories to organize your apps according to your needs.</p>
69
- <h4>Hide apps</h4>
70
- <p>MuksOS AI Launcher 2.0 allows you to hide apps that you don't want others to see or access. You can use a password or a fingerprint to lock and unlock the hidden apps.</p>
71
- <h3>Premium Features of MuksOS AI Launcher 2.0</h3>
72
- <p>MuksOS AI Launcher 2.0 also has some premium features that you can unlock by purchasing the mod apk version of the app. These features include:</p>
73
- <h4>Write on Home Screen</h4>
74
- <p>This feature lets you write anything on your home screen using your finger or a stylus. You can use this feature to take notes, draw sketches, make lists, etc.</p>
75
- <h4>Voice Access</h4>
76
- <p>This feature lets you control your phone with your voice without touching it. You can use voice commands to open apps, make calls, search the web, play music, etc.</p>
77
- <h4>Speech reminders and Speech alarm</h4>
78
- <p>This feature lets you set reminders and alarms with your voice. You can also choose what you want to hear when the reminder or alarm goes off, such as a song, a quote, a joke, etc.</p>
79
- <h4>Smart connect</h4>
80
- <p>This feature lets you connect your phone with other devices using Bluetooth or Wi-Fi. You can use this feature to transfer files, share photos, play games, etc.</p>
81
- <h4>Vision ability</h4>
82
- <p>This feature lets you use your phone's camera as an AI tool for object recognition and detection. You can use this feature to identify objects, faces, colors, text, etc.</p>
83
- <h3>How to download MuksOS AI Launcher 2.0 mod apk?</h3>
84
- <p>If you want to download MuksOS AI Launcher 2.0 mod apk and enjoy its premium features for free, you can follow these steps:</p>
85
- <ol>
86
- <li>Go to the official website of MuksOS AI Launcher 2.0 and click on the download button.</li>
87
- <li>Allow unknown sources in your device settings to install the app from outside the Google Play Store.</li>
88
- <li>Locate the downloaded file in your file manager and tap on it to install it.</li>
89
- <li>Launch the app and grant it the necessary permissions to access your device features.</li>
90
- <li>Enjoy using MuksOS AI Launcher 2.0 mod apk with all its features unlocked.</li>
91
- </ol>
92
- <h2>Conclusion</h2>
93
- <p>MuksOS AI Launcher 2.0 is a smart and interactive android launcher that offers you a new and innovative way to interact with your phone. It has many features that make it stand out from other android launchers, such as teachable, fast and smooth, multiple voice options, 100 % privacy, user friendly, power saver, esthetic theme, dark and light theme, works offline, favorite apps, hide apps, etc. It also has some premium features that you can unlock by downloading the mod apk version of the app, such as write on home screen, voice access, speech reminders and speech alarm, smart connect, vision ability, etc. If you are looking for a smart and interactive android launcher that combines the features of an app launcher, a virtual assistant, and an AI tool for your DIY automation projects, then MuksOS AI Launcher 2.0 is the perfect choice for you.</p>
94
- <h3>FAQs</h3>
95
- <ul>
96
- <li><b>Q: Is MuksOS AI Launcher 2.0 safe to use?</b></li>
97
- <li>A: Yes, MuksOS AI Launcher 2.0 is safe to use as it does not store your personal data on cloud servers and encrypts it locally on your device.</li>
98
- <li><b>Q: How much does MuksOS AI Launcher 2.0 cost?</b></li>
99
- <li>A: MuksOS AI Launcher 2.0 is free to download and use, but it has some premium features that require a one-time payment of $4.99 to unlock.</li>
100
- <li><b>Q: What are the minimum requirements to run MuksOS AI Launcher 2.0?</b></li>
101
- <li>A: MuksOS AI Launcher 2.0 requires Android 5.0 or higher and at least 1 GB of RAM to run smoothly.</li>
102
- <li><b>Q: How can I contact the developer of MuksOS AI Launcher 2.0?</b></li>
103
- <li>A: You can contact the developer of MuksOS AI Launcher 2.0 by sending an email to [email protected] or by visiting the official website of MuksOS AI Launcher 2.0.</li>
104
- <li><b>Q: How can I support the development of MuksOS AI Launcher 2.0?</b></li>
105
- <li>A: You can support the development of MuksOS AI Launcher 2.0 by rating and reviewing the app on the Google Play Store, sharing it with your friends and family, and providing feedback and suggestions to the developer.</li>
106
- </ul>
 
spaces/1phancelerku/anime-remove-background/Experience GTA V Like Never Before with Online RP Launcher.md DELETED
@@ -1,130 +0,0 @@
1
- <br />
2
- <h1>What is an online rp launcher and why you need one</h1>
3
- <p>If you are a fan of Grand Theft Auto (GTA) Online, you might have heard of online rp launchers. These are software applications that allow you to play GTA Online on customized dedicated servers, with different game modes, maps, vehicles, weapons, and more. Online rp launchers are also known as multiplayer modifications or frameworks, and they enable you to create or join your own GTA Online community.</p>
4
- <p>Online rp launchers work by modifying the game files of GTA V, but without affecting your original installation or your access to GTA Online. This means that you can switch between GTA Online and online rp launchers without getting banned by Rockstar. Online rp launchers also use Rockstar's network code with improvements, so you can enjoy the best synchronization and performance possible.</p>
5
- <h2>online rp launcher</h2><br /><p><b><b>Download Zip</b> &#9989; <a href="https://jinyurl.com/2uNMx6">https://jinyurl.com/2uNMx6</a></b></p><br /><br />
6
- <p>Online rp launchers are not only fun and exciting, but also creative and innovative. You can make anything you wish with online rp launchers, such as roleplay, drifting, racing, deathmatch, or something completely original. You can also use different programming languages to create your own scripts and resources for your server. Online rp launchers give you total control over your GTA Online experience.</p>
7
- <h2>How to choose the best online rp launcher for your needs</h2>
8
- <p>There are many online rp launchers available for GTA Online, but not all of them are created equal. Some online rp launchers may have more features, compatibility, or popularity than others. Here are some factors to consider when choosing the best online rp launcher for your needs:</p>
9
- <h3>Features</h3>
10
- <p>The features of an online rp launcher determine what you can do with it. Some online rp launchers may have more options for customization, streaming, AI, scripting, or hosting than others. For example, some online rp launchers may allow you to use custom cars, maps, weapons, and more dynamically, while others may require you to download them manually. Some online rp launchers may also have more support for different programming languages or tools than others.</p>
11
- <h3>Compatibility</h3>
12
- <p>The compatibility of an online rp launcher determines how well it works with your system and your game version. Some online rp launchers may have higher or lower system requirements than others. For example, some online rp launchers may require Windows 10 or a certain CPU or GPU to run smoothly. Some online rp launchers may also be more compatible with the latest updates or patches of GTA V than others.</p>
13
- <h3>Popularity</h3>
14
- <p>The popularity of an online rp launcher determines how many players and servers are using it. Some online rp launchers may have more active and diverse communities than others. For example, some online rp launchers may have more players or servers in your region or language than others. Some online rp launchers may also have more famous or reputable servers or streamers than others.</p>
15
- <h2>FiveM - the GTA V multiplayer modification you have dreamt of</h2>
16
- <p>One of the most popular and well-known online rp launchers is FiveM. FiveM is a modification for GTA V that enables you to play multiplayer on customized dedicated servers powered by Cfx.re. FiveM has been around since 2014 and has over 178k players playing right now.</p>
17
- <p>FiveM has many features that make it stand out from other online rp launchers. Some of these features are:</p>
66
- <ul>
67
- <li>Streaming: FiveM allows servers to use custom cars, maps, weapons, and more without requiring the players to download them manually. This means that you can join any server and enjoy its custom content instantly.</li>
68
- <li>AI: FiveM allows servers to use custom AI scripts and scenarios, such as traffic, pedestrians, animals, and more. This means that you can have a more realistic and immersive experience in GTA Online.</li>
69
- <li>Scripting: FiveM allows servers to use different programming languages and frameworks, such as Lua, C#, JavaScript, and more. This means that you can create or join servers with different game modes, features, and mechanics.</li>
70
- <li>Hosting: FiveM allows anyone to host their own server with their own rules and settings. This means that you can create or join your own GTA Online community with your friends or other players.</li>
71
- </ul>
72
- <p>FiveM is compatible with Windows 7 or higher and the latest version of GTA V. FiveM also has a large and active community of players, servers, developers, and streamers. You can find more information about FiveM on their website or their Discord.</p>
73
- <h2>RAGE Multiplayer - fun, free and easy</h2>
74
- <p>Another popular and well-known online rp launcher is RAGE Multiplayer. RAGE Multiplayer is a modification for GTA V that enables you to play multiplayer on customized dedicated servers powered by RAGE Technology Group. RAGE Multiplayer has been around since 2017 and has over 15k players playing right now.</p>
75
- <p>RAGE Multiplayer has many features that make it stand out from other online rp launchers. Some of these features are:</p>
76
- <ul>
77
- <li>Free: RAGE Multiplayer is completely free to use and does not require any registration or activation. This means that you can download and play RAGE Multiplayer without any hassle or cost.</li>
78
- <li>Easy: RAGE Multiplayer is easy to install and use. You just need to download the launcher, select your GTA V folder, and start playing. You can also easily switch between GTA Online and RAGE Multiplayer without any problems.</li>
79
- <li>Fast: RAGE Multiplayer is fast and optimized for performance and synchronization. You can enjoy smooth gameplay and low latency on any server.</li>
80
- <li>Flexible: RAGE Multiplayer allows servers to use different programming languages and frameworks, such as C#, JavaScript, TypeScript, Node.js, and more. This means that you can create or join servers with different game modes, features, and mechanics.</li>
81
- </ul>
82
- <p>RAGE Multiplayer is compatible with Windows 7 or higher and the latest version of GTA V. RAGE Multiplayer also has a large and active community of players, servers, developers, and streamers. You can find more information about RAGE Multiplayer on their website or their Discord.</p>
83
- <h2>How to play on GTA RP servers</h2>
84
- <p>GTA RP servers are one of the most popular types of online rp launchers. GTA RP stands for Grand Theft Auto Roleplay, which is a game mode where you create a character and live a virtual life in the GTA world. You can interact with other players, follow the laws, get a job, join a gang, or do whatever you want.</p>
85
- <p>GTA RP servers are usually hosted by online rp launchers such as FiveM or RAGE Multiplayer. To play on GTA RP servers, you need to have GTA V installed on your PC and an online rp launcher of your choice. You also need to find a GTA RP server that suits your preferences and style. Some GTA RP servers may have different rules, themes, whitelists, applications, or requirements than others.</p>
86
- <p>To join a GTA RP server, you need to follow these steps:</p>
87
- <ol>
88
- <li>Launch your online rp launcher and select the server browser.</li>
89
- <li>Search for a GTA RP server that you like and click on it.</li>
90
- <li>Read the server's description, rules, website, Discord, or any other information provided by the server owner.</li>
91
- <li>If the server requires an application or a whitelist, follow the instructions given by the server owner to apply or register.</li>
92
- <li>If the server does not require an application or a whitelist, or if you have been accepted or whitelisted, click on connect to join the server.</li>
93
- <li>Create your character and start roleplaying.</li>
94
- </ol>
95
- <p>GTA RP servers are fun and immersive ways to enjoy GTA Online with other players. You can make friends, enemies, allies, rivals, lovers, or anything else you can imagine. You can also explore different aspects of the GTA world that you may not have seen before. GTA RP servers are like living in your own GTA movie or TV show.</p>
- <h2>Tips and tricks for online rp launcher users</h2>
96
- <p>Online rp launchers are great ways to enhance your GTA Online experience, but they also come with some challenges and risks. Here are some tips and tricks for online rp launcher users to make the most out of their online rp launcher adventures:</p>
97
- <h3>Backup your game files</h3>
98
- <p>Before installing or using any online rp launcher, it is always a good idea to backup your game files. This way, you can restore your original GTA V installation in case something goes wrong or you want to play GTA Online again. You can backup your game files by copying the GTA V folder to another location on your PC or using a backup software.</p>
99
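If you prefer to script the backup instead of copying the folder by hand, here is a minimal sketch in Python; the install and destination paths are only examples and will differ on your system.

```python
# Hypothetical one-off backup script; adjust both paths for your own install.
import shutil

source = r"C:\Program Files\Rockstar Games\Grand Theft Auto V"  # example install path
backup = r"D:\Backups\GTA_V_original"                           # example destination

shutil.copytree(source, backup)  # copies the entire game folder
print("Backup finished:", backup)
```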
- <h3>Follow the server rules</h3>
100
- <p>When playing on any online rp launcher server, you should always follow the server rules and respect the other players. This is especially important for GTA RP servers, where you are expected to roleplay realistically and follow the server's theme and lore. Breaking the server rules or disrupting the roleplay can result in a kick, a ban, or a report from the server owner or the admins.</p>
101
- <h3>Update your online rp launcher regularly</h3>
102
- <p>Online rp launchers are constantly being updated and improved by their developers and communities. To enjoy the latest features, fixes, and enhancements, you should always update your online rp launcher regularly. You can check for updates on the online rp launcher's website, Discord, or launcher. You should also update your GTA V game whenever a new patch or update is released by Rockstar.</p>
103
- <h3>Use a VPN</h3>
104
- <p>Using a VPN (virtual private network) can help you protect your privacy and security when playing on online rp launcher servers. A VPN can hide your IP address and encrypt your data, making it harder for hackers, trackers, or malicious players to access your information or harm your PC. A VPN can also help you bypass geo-restrictions or firewalls that may prevent you from accessing certain online rp launcher servers.</p>
105
- <h3>Have fun</h3>
106
- <p>The most important tip for online rp launcher users is to have fun. Online rp launchers are meant to provide you with endless possibilities and opportunities to enjoy GTA Online in new and creative ways. You can explore different worlds, meet new people, create your own stories, or just have a blast. Online rp launchers are all about having fun.</p>
107
- <h1>Conclusion</h1>
108
- <p>Online rp launchers are software applications that allow you to play GTA Online on customized dedicated servers with different game modes, maps, vehicles, weapons, and more. Online rp launchers are also known as multiplayer modifications or frameworks, and they enable you to create or join your own GTA Online community.</p>
109
- <p>There are many online rp launchers available for GTA Online, but some of the most popular and well-known ones are FiveM and RAGE Multiplayer. These online rp launchers have many features, compatibility, and popularity that make them stand out from other online rp launchers.</p>
110
- <p>GTA RP servers are one of the most popular types of online rp launchers. GTA RP stands for Grand Theft Auto Roleplay, which is a game mode where you create a character and live a virtual life in the GTA world. You can interact with other players, follow the laws, get a job, join a gang, or do whatever you want.</p>
111
- <p>To play on GTA RP servers, you need to have GTA V installed on your PC and an online rp launcher of your choice. You also need to find a GTA RP server that suits your preferences and style. Some GTA RP servers may have different rules, themes, whitelists, applications, or requirements than others.</p>
112
- <p>To make the most out of your online rp launcher experience, you should follow some tips and tricks such as backing up your game files, following the server rules, updating your online rp launcher regularly, using a VPN, and having fun.</p>
113
- <p>If you are looking for a new way to enjoy GTA Online with more freedom, creativity, and fun, you should definitely try online rp launchers. They will change the way you play GTA Online forever.</p>
114
- <h2>Frequently Asked Questions</h2>
115
- <ul>
116
- <li><strong>What is an online rp launcher?</strong></li>
117
- <li>An online rp launcher is a software application that allows you to play GTA Online on customized dedicated servers with different game modes, maps, vehicles, weapons, and more.</li>
118
- <li><strong>How do I install an online rp launcher?</strong></li>
119
- <li>To install an online rp launcher, you need to download the launcher from its website or Discord and select your GTA V folder. You also need to have GTA V installed on your PC.</li>
120
- <li><strong>Can I play GTA Online with an online rp launcher?</strong></li>
121
- <li>You can still play GTA Online with an online rp launcher, but you need to switch back to your original GTA V installation. Online rp launchers do not affect your GTA Online access or progress.</li>
122
- <li><strong>What are some of the best online rp launchers?</strong></li>
123
- <li>Some of the best online rp launchers are FiveM and RAGE Multiplayer. These online rp launchers have many features, compatibility, and popularity that make them stand out from other online rp launchers.</li>
124
- <li><strong>What are GTA RP servers?</strong></li>
125
- <li>GTA RP servers are online rp launcher servers that use a game mode called Grand Theft Auto Roleplay, where you create a character and live a virtual life in the GTA world. You can interact with other players, follow the laws, get a job, join a gang, or do whatever you want.</li>
126
- <li><strong>How do I join a GTA RP server?</strong></li>
127
- <li>To join a GTA RP server, you need to launch your online rp launcher and select the server browser. Then, you need to search for a GTA RP server that you like and click on it. You may also need to apply or register for some GTA RP servers that have whitelists or applications.</li>
128
- </ul>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/lib/utils.ts DELETED
@@ -1,158 +0,0 @@
1
- import { clsx, type ClassValue } from 'clsx'
2
- import { customAlphabet } from 'nanoid'
3
- import { twMerge } from 'tailwind-merge'
4
-
5
- export function cn(...inputs: ClassValue[]) {
6
- return twMerge(clsx(inputs))
7
- }
8
-
9
- export const nanoid = customAlphabet(
10
- '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
11
- 7
12
- ) // 7-character random string
13
-
14
- export function createChunkDecoder() {
15
- const decoder = new TextDecoder()
16
- return function (chunk: Uint8Array | undefined): string {
17
- if (!chunk) return ''
18
- return decoder.decode(chunk, { stream: true })
19
- }
20
- }
21
-
22
- export function random (start: number, end: number) {
23
- return start + Math.ceil(Math.random() * (end - start))
24
- }
25
-
26
- export function randomIP() {
27
- return `11.${random(104, 107)}.${random(1, 255)}.${random(1, 255)}`
28
- }
29
-
30
- export const defaultUID = Math.random().toString(36).slice(2)
31
-
32
- export function parseHeadersFromCurl(content: string) {
33
- const re = /-H '([^:]+):\s*([^']+)/mg
34
- const headers: HeadersInit = {}
35
- content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // convert a cmd-style curl command into a bash-style one
36
- content.replace(re, (_: string, key: string, value: string) => {
37
- headers[key] = value
38
- return ''
39
- })
40
-
41
- return headers
42
- }
43
-
44
- export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2']
45
- export function encodeHeadersToCookie(content: string) {
46
- const base64Content = btoa(content)
47
- const contentChunks = base64Content.match(/.{1,4000}/g) || []
48
- return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`)
49
- }
50
-
51
- export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) {
52
- let base64Content = ''
53
- ChunkKeys.forEach((key) => {
54
- base64Content += (cookies[key] || '')
55
- })
56
- try {
57
- return atob(base64Content)
58
- } catch(e) {
59
- return ''
60
- }
61
- }
62
-
63
- export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) {
64
- return parseHeadersFromCurl(extraCurlFromCookie(cookies))
65
- }
66
-
67
- export function formatDate(input: string | number | Date): string {
68
- const date = new Date(input)
69
- return date.toLocaleDateString('en-US', {
70
- month: 'long',
71
- day: 'numeric',
72
- year: 'numeric'
73
- })
74
- }
75
-
76
- export function parseCookie(cookie: string, cookieName: string) {
77
- const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie
78
- return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? cookie.trim() : ''
79
- }
80
-
81
- export function setCookie(key: string, value: string) {
82
- const maxAge = 86400 * 30
83
- document.cookie = `${key}=${value || ''}; Path=/; Max-Age=${maxAge}; SameSite=None; Secure`
84
- }
85
-
86
- export function getCookie(cookieName: string) {
87
- const re = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`)
88
- return re.test(document.cookie) ? RegExp.$1 : ''
89
- }
90
-
91
- export function parseCookies(cookie: string, cookieNames: string[]) {
92
- const cookies: { [key: string]: string } = {}
93
- cookieNames.forEach(cookieName => {
94
- cookies[cookieName] = parseCookie(cookie, cookieName)
95
- })
96
- return cookies
97
- }
98
-
99
- export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0'
100
- export const DEFAULT_IP = process.env.BING_IP || randomIP()
101
-
102
- export function parseUA(ua?: string, default_ua = DEFAULT_UA) {
103
- return / EDGE?/i.test(decodeURIComponent(ua || '')) ? decodeURIComponent(ua!.trim()) : default_ua
104
- }
105
-
106
- export function createHeaders(cookies: Partial<{ [key: string]: string }>, defaultHeaders?: Partial<{ [key: string]: string }>, type?: string) {
107
- let {
108
- BING_COOKIE = process.env.BING_COOKIE,
109
- BING_UA = process.env.BING_UA,
110
- BING_IP = process.env.BING_IP,
111
- BING_HEADER = process.env.BING_HEADER,
112
- IMAGE_ONLY = process.env.IMAGE_ONLY ?? '1',
113
- } = cookies
114
-
115
- if (BING_HEADER) {
116
- const headers = extraHeadersFromCookie({
117
- BING_HEADER,
118
- ...cookies,
119
- }) || {}
120
- if (/^(1|true|yes)$/.test(String(IMAGE_ONLY)) && type !== 'image') {
121
- // only keep the real cookie for image (drawing) requests
122
- headers.cookie = `_U=${defaultUID}`
123
- }
124
- if (headers['user-agent']) {
125
- return headers
126
- }
127
- }
128
-
129
- const ua = parseUA(BING_UA)
130
-
131
- if (!BING_COOKIE) {
132
- BING_COOKIE = defaultHeaders?.IMAGE_BING_COOKIE || defaultUID // on Hugging Face it currently works without a real cookie
133
- }
134
-
135
- const parsedCookie = parseCookie(BING_COOKIE, '_U')
136
- if (!parsedCookie) {
137
- throw new Error('Invalid Cookie')
138
- }
139
- return {
140
- 'x-forwarded-for': BING_IP || DEFAULT_IP,
141
- 'Accept-Encoding': 'gzip, deflate, br',
142
- 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
143
- 'User-Agent': ua!,
144
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
145
- cookie: `_U=${parsedCookie}` || '',
146
- }
147
- }
148
-
149
- export class WatchDog {
150
- private tid = 0
151
- watch(fn: Function, timeout = 2000) {
152
- clearTimeout(this.tid)
153
- this.tid = setTimeout(fn, timeout + Math.random() * 1000)
154
- }
155
- reset() {
156
- clearTimeout(this.tid)
157
- }
158
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/audiocraft/utils/deadlock.py DELETED
@@ -1,58 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import logging
8
- import os
9
- from queue import Queue, Empty
10
- import signal
11
- import sys
12
- import threading
13
- import traceback
14
-
15
- logger = logging.getLogger(__name__)
16
-
17
-
18
- class DeadlockDetect:
19
- def __init__(self, use: bool = False, timeout: float = 120.):
20
- self.use = use
21
- self.timeout = timeout
22
- self._queue: Queue = Queue()
23
-
24
- def update(self, stage: str):
25
- if self.use:
26
- self._queue.put(stage)
27
-
28
- def __enter__(self):
29
- if self.use:
30
- self._thread = threading.Thread(target=self._detector_thread)
31
- self._thread.start()
32
-
33
- def __exit__(self, exc_type, exc_val, exc_tb):
34
- if self.use:
35
- self._queue.put(None)
36
- self._thread.join()
37
-
38
- def _detector_thread(self):
39
- logger.debug("Deadlock detector started")
40
- last_stage = "init"
41
- while True:
42
- try:
43
- stage = self._queue.get(timeout=self.timeout)
44
- except Empty:
45
- break
46
- if stage is None:
47
- logger.debug("Exiting deadlock detector thread")
48
- return
49
- else:
50
- last_stage = stage
51
- logger.error("Deadlock detector timed out, last stage was %s", last_stage)
52
- for th in threading.enumerate():
53
- print(th, file=sys.stderr)
54
- traceback.print_stack(sys._current_frames()[th.ident])
55
- print(file=sys.stderr)
56
- sys.stdout.flush()
57
- sys.stderr.flush()
58
- os.kill(os.getpid(), signal.SIGKILL)
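The `DeadlockDetect` helper above ships without a usage example; a minimal sketch of how it is meant to wrap a loop (the stage names and sleeps are only illustrative) could look like this:

```python
# Hypothetical usage of DeadlockDetect; stage names and the sleeps are only illustrative.
import time
from audiocraft.utils.deadlock import DeadlockDetect

detector = DeadlockDetect(use=True, timeout=120.)  # kill the process if no update arrives in time
with detector:                                     # starts the watchdog thread
    for step in range(100):
        detector.update("data_loading")            # report progress before each blocking stage
        time.sleep(0.1)                            # stand-in for loading a batch
        detector.update("train_step")
        time.sleep(0.1)                            # stand-in for the actual training step
# leaving the with-block stops the watchdog cleanly
```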
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIZero2Hero4Health/5-ImageToLineDrawing-GR/app.py DELETED
@@ -1,126 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn as nn
4
- import gradio as gr
5
- from PIL import Image
6
- import torchvision.transforms as transforms
7
-
8
- norm_layer = nn.InstanceNorm2d
9
-
10
- class ResidualBlock(nn.Module):
11
- def __init__(self, in_features):
12
- super(ResidualBlock, self).__init__()
13
-
14
- conv_block = [ nn.ReflectionPad2d(1),
15
- nn.Conv2d(in_features, in_features, 3),
16
- norm_layer(in_features),
17
- nn.ReLU(inplace=True),
18
- nn.ReflectionPad2d(1),
19
- nn.Conv2d(in_features, in_features, 3),
20
- norm_layer(in_features)
21
- ]
22
-
23
- self.conv_block = nn.Sequential(*conv_block)
24
-
25
- def forward(self, x):
26
- return x + self.conv_block(x)
27
-
28
-
29
- class Generator(nn.Module):
30
- def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True):
31
- super(Generator, self).__init__()
32
-
33
- # Initial convolution block
34
- model0 = [ nn.ReflectionPad2d(3),
35
- nn.Conv2d(input_nc, 64, 7),
36
- norm_layer(64),
37
- nn.ReLU(inplace=True) ]
38
- self.model0 = nn.Sequential(*model0)
39
-
40
- # Downsampling
41
- model1 = []
42
- in_features = 64
43
- out_features = in_features*2
44
- for _ in range(2):
45
- model1 += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
46
- norm_layer(out_features),
47
- nn.ReLU(inplace=True) ]
48
- in_features = out_features
49
- out_features = in_features*2
50
- self.model1 = nn.Sequential(*model1)
51
-
52
- model2 = []
53
- # Residual blocks
54
- for _ in range(n_residual_blocks):
55
- model2 += [ResidualBlock(in_features)]
56
- self.model2 = nn.Sequential(*model2)
57
-
58
- # Upsampling
59
- model3 = []
60
- out_features = in_features//2
61
- for _ in range(2):
62
- model3 += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
63
- norm_layer(out_features),
64
- nn.ReLU(inplace=True) ]
65
- in_features = out_features
66
- out_features = in_features//2
67
- self.model3 = nn.Sequential(*model3)
68
-
69
- # Output layer
70
- model4 = [ nn.ReflectionPad2d(3),
71
- nn.Conv2d(64, output_nc, 7)]
72
- if sigmoid:
73
- model4 += [nn.Sigmoid()]
74
-
75
- self.model4 = nn.Sequential(*model4)
76
-
77
- def forward(self, x, cond=None):
78
- out = self.model0(x)
79
- out = self.model1(out)
80
- out = self.model2(out)
81
- out = self.model3(out)
82
- out = self.model4(out)
83
-
84
- return out
85
-
86
- model1 = Generator(3, 1, 3)
87
- model1.load_state_dict(torch.load('model.pth', map_location=torch.device('cpu')))
88
- model1.eval()
89
-
90
- model2 = Generator(3, 1, 3)
91
- model2.load_state_dict(torch.load('model2.pth', map_location=torch.device('cpu')))
92
- model2.eval()
93
-
94
- def predict(input_img, ver):
95
- input_img = Image.open(input_img)
96
- transform = transforms.Compose([transforms.Resize(256, Image.BICUBIC), transforms.ToTensor()])
97
- input_img = transform(input_img)
98
- input_img = torch.unsqueeze(input_img, 0)
99
-
100
- drawing = 0
101
- with torch.no_grad():
102
- if ver == 'Simple Lines':
103
- drawing = model2(input_img)[0].detach()
104
- else:
105
- drawing = model1(input_img)[0].detach()
106
-
107
- drawing = transforms.ToPILImage()(drawing)
108
- return drawing
109
-
110
- title="Image to Line Drawings - Complex and Simple Portraits and Landscapes"
111
- examples=[
112
- ['01.jpeg', 'Simple Lines'], ['02.jpeg', 'Simple Lines'], ['03.jpeg', 'Simple Lines'],
113
- ['07.jpeg', 'Complex Lines'], ['08.jpeg', 'Complex Lines'], ['09.jpeg', 'Complex Lines'],
114
- ['10.jpeg', 'Simple Lines'], ['11.jpeg', 'Simple Lines'], ['12.jpeg', 'Simple Lines'],
115
- ['01.jpeg', 'Complex Lines'], ['02.jpeg', 'Complex Lines'], ['03.jpeg', 'Complex Lines'],
116
- ['04.jpeg', 'Simple Lines'], ['05.jpeg', 'Simple Lines'], ['06.jpeg', 'Simple Lines'],
117
- ['07.jpeg', 'Simple Lines'], ['08.jpeg', 'Simple Lines'], ['09.jpeg', 'Simple Lines'],
118
- ['04.jpeg', 'Complex Lines'], ['05.jpeg', 'Complex Lines'], ['06.jpeg', 'Complex Lines'],
119
- ['10.jpeg', 'Complex Lines'], ['11.jpeg', 'Complex Lines'], ['12.jpeg', 'Complex Lines']
120
- ]
121
-
122
- iface = gr.Interface(predict, [gr.inputs.Image(type='filepath'),
123
- gr.inputs.Radio(['Complex Lines','Simple Lines'], type="value", default='Simple Lines', label='version')],
124
- gr.outputs.Image(type="pil"), title=title,examples=examples)
125
-
126
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Abubakari/Sales_Prediction/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Sales Prediction
3
- emoji: 💻
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: streamlit
7
- sdk_version: 1.17.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/ChatgptX.py DELETED
@@ -1,97 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import re
4
- import json
5
-
6
- from aiohttp import ClientSession
7
- from ..typing import AsyncResult, Messages
8
- from .base_provider import AsyncGeneratorProvider
9
- from .helper import format_prompt
10
-
11
-
12
- class ChatgptX(AsyncGeneratorProvider):
13
- url = "https://chatgptx.de"
14
- supports_gpt_35_turbo = True
15
- working = True
16
-
17
- @classmethod
18
- async def create_async_generator(
19
- cls,
20
- model: str,
21
- messages: Messages,
22
- **kwargs
23
- ) -> AsyncResult:
24
- headers = {
25
- 'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
26
- 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
27
- 'sec-ch-ua-mobile': '?0',
28
- 'sec-ch-ua-platform': 'Linux',
29
- 'sec-fetch-dest': 'empty',
30
- 'sec-fetch-mode': 'cors',
31
- 'sec-fetch-site': 'same-origin',
32
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
33
- }
34
- async with ClientSession(headers=headers) as session:
35
- async with session.get(f"{cls.url}/") as response:
36
- response = await response.text()
37
- result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
38
- if result:
39
- csrf_token = result.group(1)
40
- result = re.search(r"openconversions\('(.*?)'\)", response)
41
- if result:
42
- chat_id = result.group(1)
43
- result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', response)
44
- if result:
45
- user_id = result.group(1)
46
-
47
- if not csrf_token or not chat_id or not user_id:
48
- raise RuntimeError("Missing csrf_token, chat_id or user_id")
49
-
50
- data = {
51
- '_token': csrf_token,
52
- 'user_id': user_id,
53
- 'chats_id': chat_id,
54
- 'prompt': format_prompt(messages),
55
- 'current_model': "gpt3"
56
- }
57
- headers = {
58
- 'authority': 'chatgptx.de',
59
- 'accept': 'application/json, text/javascript, */*; q=0.01',
60
- 'origin': cls.url,
61
- 'referer': f'{cls.url}/',
62
- 'x-csrf-token': csrf_token,
63
- 'x-requested-with': 'XMLHttpRequest'
64
- }
65
- async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
66
- response.raise_for_status()
67
- chat = await response.json()
68
- if "response" not in chat or not chat["response"]:
69
- raise RuntimeError(f'Response: {chat}')
70
- headers = {
71
- 'authority': 'chatgptx.de',
72
- 'accept': 'text/event-stream',
73
- 'referer': f'{cls.url}/',
74
- 'x-csrf-token': csrf_token,
75
- 'x-requested-with': 'XMLHttpRequest'
76
- }
77
- data = {
78
- "user_id": user_id,
79
- "chats_id": chat_id,
80
- "prompt": format_prompt(messages),
81
- "current_model": "gpt3",
82
- "conversions_id": chat["conversions_id"],
83
- "ass_conversions_id": chat["ass_conversions_id"],
84
- }
85
- async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers) as response:
86
- response.raise_for_status()
87
- async for line in response.content:
88
- if line.startswith(b"data: "):
89
- row = line[6:-1]
90
- if row == b"[DONE]":
91
- break
92
- try:
93
- content = json.loads(row)["choices"][0]["delta"].get("content")
94
- except:
95
- raise RuntimeError(f"Broken line: {line.decode()}")
96
- if content:
97
- yield content
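For orientation only, a hedged sketch of how this provider could be driven; the import path follows the file location shown above, and the message format is an assumption based on g4f conventions rather than something taken from the repository:

```python
# Hypothetical driver for the ChatgptX provider defined above.
import asyncio
from g4f.Provider.ChatgptX import ChatgptX

async def main():
    messages = [{"role": "user", "content": "Say hello"}]  # assumed Messages format
    async for chunk in ChatgptX.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```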
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/visibility/oneself.py DELETED
@@ -1,18 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from typing import TYPE_CHECKING, Any
4
-
5
- from . import visibility_registry as VisibilityRegistry
6
- from .base import BaseVisibility
7
-
8
- if TYPE_CHECKING:
9
- from agentverse.environments import BaseEnvironment
10
-
11
-
12
- @VisibilityRegistry.register("oneself")
13
- class OneselfVisibility(BaseVisibility):
14
- """Only the agent itself can see the message"""
15
-
16
- def update_visible_agents(self, environment: BaseEnvironment):
17
- for agent in environment.agents:
18
- agent.set_receiver(set({agent.name}))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/Factory.d.ts DELETED
@@ -1,6 +0,0 @@
1
- import Maker from './Maker';
2
-
3
- export default function (
4
- styles?: Object | string,
5
- customBuilders?: Maker.BuildersType
6
- ): Maker;
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import NinePatch from './NinePatch.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('ninePatch', function (x, y, width, height, key, columns, rows, config) {
6
- var gameObject = new NinePatch(this.scene, x, y, width, height, key, columns, rows, config);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.NinePatch', NinePatch);
12
-
13
- export default NinePatch;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/space/Factory.js DELETED
@@ -1,14 +0,0 @@
1
- import Space from './Space.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('space', function () {
6
- var gameObject = new Space(this.scene);
7
- // Don't add Zone into scene
8
- // this.scene.add.existing(gameObject);
9
- return gameObject;
10
- });
11
-
12
- SetValue(window, 'RexPlugins.UI.Space', Space);
13
-
14
- export default Space;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Aloento/9Nine-PITS/text/cleaners.py DELETED
@@ -1,113 +0,0 @@
1
- import re
2
-
3
- from text import cleaned_text_to_sequence
4
- from text.english import english_to_ipa
5
- from text.japanese import japanese_to_ipa
6
- from text.mandarin import chinese_to_ipa, pinyin_to_ipa
7
- from text.symbols import symbols
8
-
9
-
10
- def str_replace(data):
11
- zh_tab = [";", ":", "\"", "'"]
12
- eng_tab = [".", ",", ' ', " "]
13
-
14
- for index in range(len(zh_tab)):
15
- if zh_tab[index] in data:
16
- data = data.replace(zh_tab[index], eng_tab[index])
17
-
18
- return data
19
-
20
-
21
- def clean_text(text):
22
- cleaned_text, lang_seq = cje_cleaner(text)
23
- cleaned_text = str_replace(cleaned_text)
24
- cleaned_text, lang_seq = remove_invalid_text(cleaned_text, lang_seq)
25
-
26
- return cleaned_text, lang_seq
27
-
28
-
29
- def text_to_sequence(text):
30
- cleaned_text, lang_seq = clean_text(text)
31
- return cleaned_text_to_sequence(cleaned_text), lang_seq
32
-
33
-
34
- lang_map = {
35
- "ZH": 0,
36
- "JA": 1,
37
- "EN": 3,
38
- "P": 0,
39
- "other": 5
40
- }
41
-
42
-
43
- def cje_cleaner(text: str):
44
- text = str_replace(text).replace("\"", '')
45
-
46
- # find all text blocks enclosed in [JA], [ZH], [EN], [P]
47
- original_text = text
48
- blocks = re.finditer(r'\[(JA|ZH|EN|P)\](.*?)\[\1\]', text)
49
-
50
- cleaned_text = ""
51
- lang_seq = []
52
- last_end = 0
53
-
54
- for block in blocks:
55
- start, end = block.span()
56
-
57
- # insert text not enclosed in any blocks
58
- ipa = original_text[last_end:start]
59
- lang_seq += [lang_map["other"] for i in ipa]
60
-
61
- cleaned_text += ipa
62
- last_end = end
63
- language = block.group(1)
64
- text = block.group(2)
65
-
66
- if language == 'P':
67
- ipa = pinyin_to_ipa(text)
68
- lang_seq += [lang_map[language] for i in ipa]
69
- cleaned_text += ipa
70
-
71
- if language == 'JA':
72
- ipa = japanese_to_ipa(text)
73
- lang_seq += [lang_map[language] for i in ipa]
74
- cleaned_text += ipa
75
-
76
- elif language == 'ZH':
77
- ipa = chinese_to_ipa(text)
78
- lang_seq += [lang_map[language] for i in ipa]
79
- cleaned_text += ipa
80
-
81
- elif language == 'EN':
82
- ipa = english_to_ipa(text)
83
- lang_seq += [lang_map[language] for i in ipa]
84
- cleaned_text += ipa
85
-
86
- ipa = original_text[last_end:]
87
-
88
- lang_seq += [lang_map["other"] for i in ipa]
89
- cleaned_text += ipa
90
-
91
- assert len(cleaned_text) == len(lang_seq)
92
- return cleaned_text, lang_seq
93
-
94
-
95
- def remove_invalid_text(cleaned_text, lang_seq):
96
- new_cleaned_text = ''
97
- new_lang_seq = []
98
-
99
- for symbol, la in zip(cleaned_text, lang_seq):
100
- if symbol not in symbols:
101
- print("Invalid Symbol:", symbol)
102
- print("In: ", cleaned_text)
103
- continue
104
-
105
- if la == lang_map["other"]:
106
- print("Invalid Lang:", symbol)
107
- print("In: ", cleaned_text)
108
- continue
109
-
110
- new_cleaned_text += symbol
111
- new_lang_seq.append(la)
112
-
113
- return new_cleaned_text, new_lang_seq
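A small usage sketch of the cleaners above; the `[EN]…[EN]` / `[JA]…[JA]` tag syntax comes from the regex in `cje_cleaner`, and the sentences themselves are just examples:

```python
# Hypothetical usage of the text cleaners defined above.
from text.cleaners import clean_text, text_to_sequence

text = "[EN]Hello there.[EN][JA]こんにちは。[JA]"
cleaned, lang_seq = clean_text(text)          # IPA string and per-symbol language ids
sequence, lang_seq = text_to_sequence(text)   # integer symbol ids for the model
print(cleaned)
print(sequence[:10], lang_seq[:10])
```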
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/README.md DELETED
@@ -1,3 +0,0 @@
1
- # Models
2
-
3
- For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models/overview).
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_ddim_inverse.py DELETED
@@ -1,349 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
- # and https://github.com/hojonathanho/diffusion
17
- import math
18
- from dataclasses import dataclass
19
- from typing import List, Optional, Tuple, Union
20
-
21
- import numpy as np
22
- import torch
23
-
24
- from diffusers.configuration_utils import ConfigMixin, register_to_config
25
- from diffusers.schedulers.scheduling_utils import SchedulerMixin
26
- from diffusers.utils import BaseOutput, deprecate
27
-
28
-
29
- @dataclass
30
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
31
- class DDIMSchedulerOutput(BaseOutput):
32
- """
33
- Output class for the scheduler's step function output.
34
-
35
- Args:
36
- prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
37
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
38
- denoising loop.
39
- pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
40
- The predicted denoised sample (x_{0}) based on the model output from the current timestep.
41
- `pred_original_sample` can be used to preview progress or for guidance.
42
- """
43
-
44
- prev_sample: torch.FloatTensor
45
- pred_original_sample: Optional[torch.FloatTensor] = None
46
-
47
-
48
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
49
- def betas_for_alpha_bar(
50
- num_diffusion_timesteps,
51
- max_beta=0.999,
52
- alpha_transform_type="cosine",
53
- ):
54
- """
55
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
56
- (1-beta) over time from t = [0,1].
57
-
58
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
59
- to that part of the diffusion process.
60
-
61
-
62
- Args:
63
- num_diffusion_timesteps (`int`): the number of betas to produce.
64
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
65
- prevent singularities.
66
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
67
- Choose from `cosine` or `exp`
68
-
69
- Returns:
70
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
71
- """
72
- if alpha_transform_type == "cosine":
73
-
74
- def alpha_bar_fn(t):
75
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
76
-
77
- elif alpha_transform_type == "exp":
78
-
79
- def alpha_bar_fn(t):
80
- return math.exp(t * -12.0)
81
-
82
- else:
83
- raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
84
-
85
- betas = []
86
- for i in range(num_diffusion_timesteps):
87
- t1 = i / num_diffusion_timesteps
88
- t2 = (i + 1) / num_diffusion_timesteps
89
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
90
- return torch.tensor(betas, dtype=torch.float32)
91
-
92
-
93
- # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
94
- def rescale_zero_terminal_snr(betas):
95
- """
96
- Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
97
-
98
-
99
- Args:
100
- betas (`torch.FloatTensor`):
101
- the betas that the scheduler is being initialized with.
102
-
103
- Returns:
104
- `torch.FloatTensor`: rescaled betas with zero terminal SNR
105
- """
106
- # Convert betas to alphas_bar_sqrt
107
- alphas = 1.0 - betas
108
- alphas_cumprod = torch.cumprod(alphas, dim=0)
109
- alphas_bar_sqrt = alphas_cumprod.sqrt()
110
-
111
- # Store old values.
112
- alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
113
- alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
114
-
115
- # Shift so the last timestep is zero.
116
- alphas_bar_sqrt -= alphas_bar_sqrt_T
117
-
118
- # Scale so the first timestep is back to the old value.
119
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
120
-
121
- # Convert alphas_bar_sqrt to betas
122
- alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
123
- alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
124
- alphas = torch.cat([alphas_bar[0:1], alphas])
125
- betas = 1 - alphas
126
-
127
- return betas
128
-
129
-
130
- class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
131
- """
132
- DDIMInverseScheduler is the reverse scheduler of [`DDIMScheduler`].
133
-
134
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
135
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
136
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
137
- [`~SchedulerMixin.from_pretrained`] functions.
138
-
139
- For more details, see the original paper: https://arxiv.org/abs/2010.02502
140
-
141
- Args:
142
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
143
- beta_start (`float`): the starting `beta` value of inference.
144
- beta_end (`float`): the final `beta` value.
145
- beta_schedule (`str`):
146
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
147
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
148
- trained_betas (`np.ndarray`, optional):
149
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
150
- clip_sample (`bool`, default `True`):
151
- option to clip predicted sample for numerical stability.
152
- clip_sample_range (`float`, default `1.0`):
153
- the maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
154
- set_alpha_to_zero (`bool`, default `True`):
155
- each diffusion step uses the value of alphas product at that step and at the previous one. For the final
156
- step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `0`,
157
- otherwise it uses the value of alpha at step `num_train_timesteps - 1`.
158
- steps_offset (`int`, default `0`):
159
- an offset added to the inference steps. You can use a combination of `offset=1` and
160
- `set_alpha_to_zero=False`, to make the last step use step `num_train_timesteps - 1` for the previous alpha
161
- product.
162
- prediction_type (`str`, default `epsilon`, optional):
163
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
164
- process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
165
- https://imagen.research.google/video/paper.pdf)
166
- timestep_spacing (`str`, default `"leading"`):
167
- The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
168
- Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
169
- rescale_betas_zero_snr (`bool`, default `False`):
170
- whether to rescale the betas to have zero terminal SNR (proposed by https://arxiv.org/pdf/2305.08891.pdf).
171
- This can enable the model to generate very bright and dark samples instead of limiting it to samples with
172
- medium brightness. Loosely related to
173
- [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
174
- """
175
-
176
- order = 1
177
- ignore_for_config = ["kwargs"]
178
- _deprecated_kwargs = ["set_alpha_to_zero"]
179
-
180
- @register_to_config
181
- def __init__(
182
- self,
183
- num_train_timesteps: int = 1000,
184
- beta_start: float = 0.0001,
185
- beta_end: float = 0.02,
186
- beta_schedule: str = "linear",
187
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
188
- clip_sample: bool = True,
189
- set_alpha_to_one: bool = True,
190
- steps_offset: int = 0,
191
- prediction_type: str = "epsilon",
192
- clip_sample_range: float = 1.0,
193
- timestep_spacing: str = "leading",
194
- rescale_betas_zero_snr: bool = False,
195
- **kwargs,
196
- ):
197
- if kwargs.get("set_alpha_to_zero", None) is not None:
198
- deprecation_message = (
199
- "The `set_alpha_to_zero` argument is deprecated. Please use `set_alpha_to_one` instead."
200
- )
201
- deprecate("set_alpha_to_zero", "1.0.0", deprecation_message, standard_warn=False)
202
- set_alpha_to_one = kwargs["set_alpha_to_zero"]
203
- if trained_betas is not None:
204
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
205
- elif beta_schedule == "linear":
206
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
207
- elif beta_schedule == "scaled_linear":
208
- # this schedule is very specific to the latent diffusion model.
209
- self.betas = (
210
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
211
- )
212
- elif beta_schedule == "squaredcos_cap_v2":
213
- # Glide cosine schedule
214
- self.betas = betas_for_alpha_bar(num_train_timesteps)
215
- else:
216
- raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
217
-
218
- # Rescale for zero SNR
219
- if rescale_betas_zero_snr:
220
- self.betas = rescale_zero_terminal_snr(self.betas)
221
-
222
- self.alphas = 1.0 - self.betas
223
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
224
-
225
- # At every step in inverted ddim, we are looking into the next alphas_cumprod
226
- # For the initial step, there is no current alphas_cumprod, and the index is out of bounds
227
- # `set_alpha_to_one` decides whether we set this parameter simply to one
228
- # in this case, self.step() just output the predicted noise
229
- # or whether we use the initial alpha used in training the diffusion model.
230
- self.initial_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
231
-
232
- # standard deviation of the initial noise distribution
233
- self.init_noise_sigma = 1.0
234
-
235
- # setable values
236
- self.num_inference_steps = None
237
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
238
-
239
- # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input
240
- def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
241
- """
242
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
243
- current timestep.
244
-
245
- Args:
246
- sample (`torch.FloatTensor`): input sample
247
- timestep (`int`, optional): current timestep
248
-
249
- Returns:
250
- `torch.FloatTensor`: scaled input sample
251
- """
252
- return sample
253
-
254
- def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
255
- """
256
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
257
-
258
- Args:
259
- num_inference_steps (`int`):
260
- the number of diffusion steps used when generating samples with a pre-trained model.
261
- """
262
-
263
- if num_inference_steps > self.config.num_train_timesteps:
264
- raise ValueError(
265
- f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
266
- f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
267
- f" maximal {self.config.num_train_timesteps} timesteps."
268
- )
269
-
270
- self.num_inference_steps = num_inference_steps
271
-
272
- # "leading" and "trailing" corresponds to annotation of Table 1. of https://arxiv.org/abs/2305.08891
273
- if self.config.timestep_spacing == "leading":
274
- step_ratio = self.config.num_train_timesteps // self.num_inference_steps
275
- # creates integer timesteps by multiplying by ratio
276
- # casting to int to avoid issues when num_inference_step is power of 3
277
- timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
278
- timesteps += self.config.steps_offset
279
- elif self.config.timestep_spacing == "trailing":
280
- step_ratio = self.config.num_train_timesteps / self.num_inference_steps
281
- # creates integer timesteps by multiplying by ratio
282
- # casting to int to avoid issues when num_inference_step is power of 3
283
- timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)[::-1]).astype(np.int64)
284
- timesteps -= 1
285
- else:
286
- raise ValueError(
287
- f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'."
288
- )
289
-
290
- # Roll timesteps array by one to reflect reversed origin and destination semantics for each step
291
- timesteps = np.roll(timesteps, 1)
292
- timesteps[0] = int(timesteps[1] - step_ratio)
293
- self.timesteps = torch.from_numpy(timesteps).to(device)
294
-
295
- def step(
296
- self,
297
- model_output: torch.FloatTensor,
298
- timestep: int,
299
- sample: torch.FloatTensor,
300
- eta: float = 0.0,
301
- use_clipped_model_output: bool = False,
302
- variance_noise: Optional[torch.FloatTensor] = None,
303
- return_dict: bool = True,
304
- ) -> Union[DDIMSchedulerOutput, Tuple]:
305
- # 1. get previous step value (=t+1)
306
- prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
307
-
308
- # 2. compute alphas, betas
309
- # change original implementation to exactly match noise levels for analogous forward process
310
- alpha_prod_t = self.alphas_cumprod[timestep] if timestep >= 0 else self.initial_alpha_cumprod
311
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep]
312
-
313
- beta_prod_t = 1 - alpha_prod_t
314
-
315
- # 3. compute predicted original sample from predicted noise also called
316
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
317
- if self.config.prediction_type == "epsilon":
318
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
319
- pred_epsilon = model_output
320
- elif self.config.prediction_type == "sample":
321
- pred_original_sample = model_output
322
- pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
323
- elif self.config.prediction_type == "v_prediction":
324
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
325
- pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
326
- else:
327
- raise ValueError(
328
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
329
- " `v_prediction`"
330
- )
331
-
332
- # 4. Clip or threshold "predicted x_0"
333
- if self.config.clip_sample:
334
- pred_original_sample = pred_original_sample.clamp(
335
- -self.config.clip_sample_range, self.config.clip_sample_range
336
- )
337
-
338
- # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
339
- pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * pred_epsilon
340
-
341
- # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
342
- prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
343
-
344
- if not return_dict:
345
- return (prev_sample, pred_original_sample)
346
- return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
347
-
348
- def __len__(self):
349
- return self.config.num_train_timesteps
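For reference, a minimal sketch of driving the inverse scheduler above; the noise prediction is a zero-tensor stand-in for what would normally be a UNet call, and the latent shape is only an example:

```python
# Hypothetical DDIM-inversion loop; the noise prediction is a stand-in for a real UNet call.
import torch
from diffusers import DDIMInverseScheduler

scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

latents = torch.randn(1, 4, 64, 64)            # e.g. VAE-encoded image latents
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(latents)     # placeholder for unet(latents, t).sample
    latents = scheduler.step(noise_pred, t, latents).prev_sample
```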
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/instaboost/README.md DELETED
@@ -1,44 +0,0 @@
1
- # InstaBoost for MMDetection
2
-
3
- [ALGORITHM]
4
-
5
- Configs in this directory are the implementation of the ICCV 2019 paper "InstaBoost: Boosting Instance Segmentation Via Probability Map Guided Copy-Pasting" and are provided by the authors of the paper. InstaBoost is a data augmentation method for object detection and instance segmentation. The paper has been released on [`arXiv`](https://arxiv.org/abs/1908.07801).
6
-
7
- ```latex
8
- @inproceedings{fang2019instaboost,
9
- title={Instaboost: Boosting instance segmentation via probability map guided copy-pasting},
10
- author={Fang, Hao-Shu and Sun, Jianhua and Wang, Runzhong and Gou, Minghao and Li, Yong-Lu and Lu, Cewu},
11
- booktitle={Proceedings of the IEEE International Conference on Computer Vision},
12
- pages={682--691},
13
- year={2019}
14
- }
15
- ```
16
-
17
- ## Usage
18
-
19
- ### Requirements
20
-
21
- You need to install `instaboostfast` before using it.
22
-
23
- ```shell
24
- pip install instaboostfast
25
- ```
26
-
27
- The code and more details can be found [here](https://github.com/GothicAi/Instaboost).
28
-
29
- ### Integration with MMDetection
30
-
31
- InstaBoost has already been integrated into the data pipeline, so all you need to do is add or change **InstaBoost** configurations after **LoadImageFromFile**. We have provided examples like [this](mask_rcnn_r50_fpn_instaboost_4x#L121). You can refer to [`InstaBoostConfig`](https://github.com/GothicAi/InstaBoost-pypi#instaboostconfig) for more details.
32
-
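As an illustration of where the transform sits in a training pipeline, a sketch is shown below; the parameter values mirror the released configs but are included here only as an example, not as prescribed settings:

```python
# Hypothetical excerpt of a train_pipeline with InstaBoost right after LoadImageFromFile.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),
        dx=15,
        dy=15,
        theta=(-1, 1),
        color_prob=0.5,
        hflag=False,
        aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # ... remaining transforms (Resize, RandomFlip, Normalize, Pad, Collect, ...)
]
```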
33
- ## Results and Models
34
-
35
- - All models were trained on `coco_2017_train` and tested on `coco_2017_val` for convenience of evaluation and comparison. In the paper, the results are obtained from `test-dev`.
36
- - To balance accuracy and training time when using InstaBoost, models released on this page are all trained for 48 epochs. Other training and testing configs strictly follow the original framework.
37
- - For results and models in MMDetection V1.x, please refer to [Instaboost](https://github.com/GothicAi/Instaboost).
38
-
39
- | Network | Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
40
- | :-------------: | :--------: | :-----: | :------: | :------------: | :------:| :-----: | :------: | :-----------------: |
41
- | Mask R-CNN | R-50-FPN | 4x | 4.4 | 17.5 | 40.6 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223635.log.json) |
42
- | Mask R-CNN | R-101-FPN | 4x | 6.4 | | 42.5 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738.log.json) |
43
- | Mask R-CNN | X-101-64x4d-FPN | 4x | 10.7 | | 44.7 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947.log.json) |
44
- | Cascade R-CNN | R-101-FPN | 4x | 6.0 | 12.0 | 43.7 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223646.log.json) |
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './ann_r50-d8_512x512_160k_ade20k.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/update_windows.bat DELETED
@@ -1,37 +0,0 @@
1
- @echo off
2
-
3
- cd /D "%~dp0"
4
-
5
- set PATH=%PATH%;%SystemRoot%\system32
6
-
7
- echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end
8
-
9
- @rem fix failed install when installing to a separate drive
10
- set TMP=%cd%\installer_files
11
- set TEMP=%cd%\installer_files
12
-
13
- @rem deactivate existing conda envs as needed to avoid conflicts
14
- (call conda deactivate && call conda deactivate && call conda deactivate) 2>nul
15
-
16
- @rem config
17
- set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
18
- set INSTALL_ENV_DIR=%cd%\installer_files\env
19
-
20
- @rem environment isolation
21
- set PYTHONNOUSERSITE=1
22
- set PYTHONPATH=
23
- set PYTHONHOME=
24
- set "CUDA_PATH=%INSTALL_ENV_DIR%"
25
- set "CUDA_HOME=%CUDA_PATH%"
26
-
27
- @rem activate installer env
28
- call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
29
-
30
- @rem update installer env
31
- call python one_click.py --update && (
32
- echo.
33
- echo Done!
34
- )
35
-
36
- :end
37
- pause
 
spaces/Apex-X/ROOPOK/roop/core.py DELETED
@@ -1,215 +0,0 @@
1
- import os
2
- import sys
3
- # single thread doubles cuda performance - needs to be set before torch import
4
- if any(arg.startswith('--execution-provider') for arg in sys.argv):
5
- os.environ['OMP_NUM_THREADS'] = '1'
6
- # reduce tensorflow log level
7
- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
8
- import warnings
9
- from typing import List
10
- import platform
11
- import signal
12
- import shutil
13
- import argparse
14
- import torch
15
- import onnxruntime
16
- import tensorflow
17
-
18
- import roop.globals
19
- import roop.metadata
20
- import roop.ui as ui
21
- from roop.predicter import predict_image, predict_video
22
- from roop.processors.frame.core import get_frame_processors_modules
23
- from roop.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path
24
-
25
- if 'ROCMExecutionProvider' in roop.globals.execution_providers:
26
- del torch
27
-
28
- warnings.filterwarnings('ignore', category=FutureWarning, module='insightface')
29
- warnings.filterwarnings('ignore', category=UserWarning, module='torchvision')
30
-
31
-
32
- def parse_args() -> None:
33
- signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
34
- program = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100))
35
- program.add_argument('-s', '--source', help='select an source image', dest='source_path')
36
- program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
37
- program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
38
- program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+')
39
- program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
40
- program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
41
- program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
42
- program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
43
- program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
44
- program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=18, choices=range(52), metavar='[0-51]')
45
- program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
46
- program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
47
- program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
48
- program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')
49
-
50
- args = program.parse_args()
51
-
52
- roop.globals.source_path = args.source_path
53
- roop.globals.target_path = args.target_path
54
- roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path)
55
- roop.globals.frame_processors = args.frame_processor
56
- roop.globals.headless = args.source_path or args.target_path or args.output_path
57
- roop.globals.keep_fps = args.keep_fps
58
- roop.globals.keep_audio = args.keep_audio
59
- roop.globals.keep_frames = args.keep_frames
60
- roop.globals.many_faces = args.many_faces
61
- roop.globals.video_encoder = args.video_encoder
62
- roop.globals.video_quality = args.video_quality
63
- roop.globals.max_memory = args.max_memory
64
- roop.globals.execution_providers = decode_execution_providers(args.execution_provider)
65
- roop.globals.execution_threads = args.execution_threads
66
-
67
-
68
- def encode_execution_providers(execution_providers: List[str]) -> List[str]:
69
- return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
70
-
71
-
72
- def decode_execution_providers(execution_providers: List[str]) -> List[str]:
73
- return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
74
- if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
75
-
76
-
77
- def suggest_max_memory() -> int:
78
- if platform.system().lower() == 'darwin':
79
- return 4
80
- return 16
81
-
82
-
83
- def suggest_execution_providers() -> List[str]:
84
- return encode_execution_providers(onnxruntime.get_available_providers())
85
-
86
-
87
- def suggest_execution_threads() -> int:
88
- if 'DmlExecutionProvider' in roop.globals.execution_providers:
89
- return 1
90
- if 'ROCMExecutionProvider' in roop.globals.execution_providers:
91
- return 1
92
- return 8
93
-
94
-
95
- def limit_resources() -> None:
96
- # prevent tensorflow memory leak
97
- gpus = tensorflow.config.experimental.list_physical_devices('GPU')
98
- for gpu in gpus:
99
- tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
100
- tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)
101
- ])
102
- # limit memory usage
103
- if roop.globals.max_memory:
104
- memory = roop.globals.max_memory * 1024 ** 3
105
- if platform.system().lower() == 'darwin':
106
- memory = roop.globals.max_memory * 1024 ** 6
107
- if platform.system().lower() == 'windows':
108
- import ctypes
109
- kernel32 = ctypes.windll.kernel32
110
- kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
111
- else:
112
- import resource
113
- resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
114
-
115
-
116
- def release_resources() -> None:
117
- if 'CUDAExecutionProvider' in roop.globals.execution_providers:
118
- torch.cuda.empty_cache()
119
-
120
-
121
- def pre_check() -> bool:
122
- if sys.version_info < (3, 9):
123
- update_status('Python version is not supported - please upgrade to 3.9 or higher.')
124
- return False
125
- if not shutil.which('ffmpeg'):
126
- update_status('ffmpeg is not installed.')
127
- return False
128
- return True
129
-
130
-
131
- def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
132
- print(f'[{scope}] {message}')
133
- if not roop.globals.headless:
134
- ui.update_status(message)
135
-
136
-
137
- def start() -> None:
138
- for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
139
- if not frame_processor.pre_start():
140
- return
141
- # process image to image
142
- if has_image_extension(roop.globals.target_path):
143
- if predict_image(roop.globals.target_path):
144
- destroy()
145
- shutil.copy2(roop.globals.target_path, roop.globals.output_path)
146
- for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
147
- update_status('Progressing...', frame_processor.NAME)
148
- frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path)
149
- frame_processor.post_process()
150
- release_resources()
151
- if is_image(roop.globals.target_path):
152
- update_status('Processing to image succeed!')
153
- else:
154
- update_status('Processing to image failed!')
155
- return
156
- # process image to videos
157
- if predict_video(roop.globals.target_path):
158
- destroy()
159
- update_status('Creating temp resources...')
160
- create_temp(roop.globals.target_path)
161
- update_status('Extracting frames...')
162
- extract_frames(roop.globals.target_path)
163
- temp_frame_paths = get_temp_frame_paths(roop.globals.target_path)
164
- for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
165
- update_status('Progressing...', frame_processor.NAME)
166
- frame_processor.process_video(roop.globals.source_path, temp_frame_paths)
167
- frame_processor.post_process()
168
- release_resources()
169
- # handles fps
170
- if roop.globals.keep_fps:
171
- update_status('Detecting fps...')
172
- fps = detect_fps(roop.globals.target_path)
173
- update_status(f'Creating video with {fps} fps...')
174
- create_video(roop.globals.target_path, fps)
175
- else:
176
- update_status('Creating video with 30.0 fps...')
177
- create_video(roop.globals.target_path)
178
- # handle audio
179
- if roop.globals.keep_audio:
180
- if roop.globals.keep_fps:
181
- update_status('Restoring audio...')
182
- else:
183
- update_status('Restoring audio might cause issues as fps are not kept...')
184
- restore_audio(roop.globals.target_path, roop.globals.output_path)
185
- else:
186
- move_temp(roop.globals.target_path, roop.globals.output_path)
187
- # clean and validate
188
- clean_temp(roop.globals.target_path)
189
- if is_video(roop.globals.target_path):
190
- update_status('Processing to video succeed!')
191
- else:
192
- update_status('Processing to video failed!')
193
-
194
-
195
- def destroy() -> None:
196
- if roop.globals.target_path:
197
- clean_temp(roop.globals.target_path)
198
- quit()
199
-
200
-
201
- def run() -> None:
202
- parse_args()
203
- if not pre_check():
204
- return
205
- for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
206
- if not frame_processor.pre_check():
207
- return
208
- limit_resources()
209
- if roop.globals.headless:
210
- start()
211
- else:
212
- window = ui.init(start, destroy)
213
- window.mainloop()
214
-
215
-
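
As parse_args() above shows, passing -s/-t/-o makes roop.globals.headless truthy, so run() goes straight to start() instead of opening the UI window. A typical headless invocation of the deleted entry point would therefore look roughly like this (file names are placeholders, and the snippet assumes the roop package from this Space is importable):

```python
# Sketch: driving the deleted roop entry point headlessly (paths are placeholders).
import sys

sys.argv = [
    "run.py",
    "-s", "face.jpg",                 # source image
    "-t", "clip.mp4",                 # target video
    "-o", "out.mp4",                  # output file
    "--keep-fps",                     # re-encode at the detected fps instead of 30
    "--execution-provider", "cpu",
]

from roop import core                 # assumes the roop package from the deleted Space
core.run()                            # parse_args -> pre_check -> limit_resources -> start()
```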
 
spaces/Aristo/trafficsign/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Trafficsign
3
- emoji: 🏃
4
- colorFrom: red
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 2.9.4
8
- app_file: app.py
9
- pinned: false
10
- license: afl-3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/dir_util.py DELETED
@@ -1,243 +0,0 @@
1
- """distutils.dir_util
2
-
3
- Utility functions for manipulating directories and directory trees."""
4
-
5
- import os
6
- import errno
7
- from distutils.errors import DistutilsInternalError, DistutilsFileError
8
- from distutils import log
9
-
10
- # cache for by mkpath() -- in addition to cheapening redundant calls,
11
- # eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
12
- _path_created = {}
13
-
14
-
15
- def mkpath(name, mode=0o777, verbose=1, dry_run=0): # noqa: C901
16
- """Create a directory and any missing ancestor directories.
17
-
18
- If the directory already exists (or if 'name' is the empty string, which
19
- means the current directory, which of course exists), then do nothing.
20
- Raise DistutilsFileError if unable to create some directory along the way
21
- (eg. some sub-path exists, but is a file rather than a directory).
22
- If 'verbose' is true, print a one-line summary of each mkdir to stdout.
23
- Return the list of directories actually created.
24
-
25
- os.makedirs is not used because:
26
-
27
- a) It's new to Python 1.5.2, and
28
- b) it blows up if the directory already exists (in which case it should
29
- silently succeed).
30
- """
31
-
32
- global _path_created
33
-
34
- # Detect a common bug -- name is None
35
- if not isinstance(name, str):
36
- raise DistutilsInternalError(
37
- "mkpath: 'name' must be a string (got {!r})".format(name)
38
- )
39
-
40
- # XXX what's the better way to handle verbosity? print as we create
41
- # each directory in the path (the current behaviour), or only announce
42
- # the creation of the whole path? (quite easy to do the latter since
43
- # we're not using a recursive algorithm)
44
-
45
- name = os.path.normpath(name)
46
- created_dirs = []
47
- if os.path.isdir(name) or name == '':
48
- return created_dirs
49
- if _path_created.get(os.path.abspath(name)):
50
- return created_dirs
51
-
52
- (head, tail) = os.path.split(name)
53
- tails = [tail] # stack of lone dirs to create
54
-
55
- while head and tail and not os.path.isdir(head):
56
- (head, tail) = os.path.split(head)
57
- tails.insert(0, tail) # push next higher dir onto stack
58
-
59
- # now 'head' contains the deepest directory that already exists
60
- # (that is, the child of 'head' in 'name' is the highest directory
61
- # that does *not* exist)
62
- for d in tails:
63
- # print "head = %s, d = %s: " % (head, d),
64
- head = os.path.join(head, d)
65
- abs_head = os.path.abspath(head)
66
-
67
- if _path_created.get(abs_head):
68
- continue
69
-
70
- if verbose >= 1:
71
- log.info("creating %s", head)
72
-
73
- if not dry_run:
74
- try:
75
- os.mkdir(head, mode)
76
- except OSError as exc:
77
- if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
78
- raise DistutilsFileError(
79
- "could not create '{}': {}".format(head, exc.args[-1])
80
- )
81
- created_dirs.append(head)
82
-
83
- _path_created[abs_head] = 1
84
- return created_dirs
85
-
86
-
87
- def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
88
- """Create all the empty directories under 'base_dir' needed to put 'files'
89
- there.
90
-
91
- 'base_dir' is just the name of a directory which doesn't necessarily
92
- exist yet; 'files' is a list of filenames to be interpreted relative to
93
- 'base_dir'. 'base_dir' + the directory portion of every file in 'files'
94
- will be created if it doesn't already exist. 'mode', 'verbose' and
95
- 'dry_run' flags are as for 'mkpath()'.
96
- """
97
- # First get the list of directories to create
98
- need_dir = set()
99
- for file in files:
100
- need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
101
-
102
- # Now create them
103
- for dir in sorted(need_dir):
104
- mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
105
-
106
-
107
- def copy_tree( # noqa: C901
108
- src,
109
- dst,
110
- preserve_mode=1,
111
- preserve_times=1,
112
- preserve_symlinks=0,
113
- update=0,
114
- verbose=1,
115
- dry_run=0,
116
- ):
117
- """Copy an entire directory tree 'src' to a new location 'dst'.
118
-
119
- Both 'src' and 'dst' must be directory names. If 'src' is not a
120
- directory, raise DistutilsFileError. If 'dst' does not exist, it is
121
- created with 'mkpath()'. The end result of the copy is that every
122
- file in 'src' is copied to 'dst', and directories under 'src' are
123
- recursively copied to 'dst'. Return the list of files that were
124
- copied or might have been copied, using their output name. The
125
- return value is unaffected by 'update' or 'dry_run': it is simply
126
- the list of all files under 'src', with the names changed to be
127
- under 'dst'.
128
-
129
- 'preserve_mode' and 'preserve_times' are the same as for
130
- 'copy_file'; note that they only apply to regular files, not to
131
- directories. If 'preserve_symlinks' is true, symlinks will be
132
- copied as symlinks (on platforms that support them!); otherwise
133
- (the default), the destination of the symlink will be copied.
134
- 'update' and 'verbose' are the same as for 'copy_file'.
135
- """
136
- from distutils.file_util import copy_file
137
-
138
- if not dry_run and not os.path.isdir(src):
139
- raise DistutilsFileError("cannot copy tree '%s': not a directory" % src)
140
- try:
141
- names = os.listdir(src)
142
- except OSError as e:
143
- if dry_run:
144
- names = []
145
- else:
146
- raise DistutilsFileError(
147
- "error listing files in '{}': {}".format(src, e.strerror)
148
- )
149
-
150
- if not dry_run:
151
- mkpath(dst, verbose=verbose)
152
-
153
- outputs = []
154
-
155
- for n in names:
156
- src_name = os.path.join(src, n)
157
- dst_name = os.path.join(dst, n)
158
-
159
- if n.startswith('.nfs'):
160
- # skip NFS rename files
161
- continue
162
-
163
- if preserve_symlinks and os.path.islink(src_name):
164
- link_dest = os.readlink(src_name)
165
- if verbose >= 1:
166
- log.info("linking %s -> %s", dst_name, link_dest)
167
- if not dry_run:
168
- os.symlink(link_dest, dst_name)
169
- outputs.append(dst_name)
170
-
171
- elif os.path.isdir(src_name):
172
- outputs.extend(
173
- copy_tree(
174
- src_name,
175
- dst_name,
176
- preserve_mode,
177
- preserve_times,
178
- preserve_symlinks,
179
- update,
180
- verbose=verbose,
181
- dry_run=dry_run,
182
- )
183
- )
184
- else:
185
- copy_file(
186
- src_name,
187
- dst_name,
188
- preserve_mode,
189
- preserve_times,
190
- update,
191
- verbose=verbose,
192
- dry_run=dry_run,
193
- )
194
- outputs.append(dst_name)
195
-
196
- return outputs
197
-
198
-
199
- def _build_cmdtuple(path, cmdtuples):
200
- """Helper for remove_tree()."""
201
- for f in os.listdir(path):
202
- real_f = os.path.join(path, f)
203
- if os.path.isdir(real_f) and not os.path.islink(real_f):
204
- _build_cmdtuple(real_f, cmdtuples)
205
- else:
206
- cmdtuples.append((os.remove, real_f))
207
- cmdtuples.append((os.rmdir, path))
208
-
209
-
210
- def remove_tree(directory, verbose=1, dry_run=0):
211
- """Recursively remove an entire directory tree.
212
-
213
- Any errors are ignored (apart from being reported to stdout if 'verbose'
214
- is true).
215
- """
216
- global _path_created
217
-
218
- if verbose >= 1:
219
- log.info("removing '%s' (and everything under it)", directory)
220
- if dry_run:
221
- return
222
- cmdtuples = []
223
- _build_cmdtuple(directory, cmdtuples)
224
- for cmd in cmdtuples:
225
- try:
226
- cmd[0](cmd[1])
227
- # remove dir from cache if it's already there
228
- abspath = os.path.abspath(cmd[1])
229
- if abspath in _path_created:
230
- del _path_created[abspath]
231
- except OSError as exc:
232
- log.warn("error removing %s: %s", directory, exc)
233
-
234
-
235
- def ensure_relative(path):
236
- """Take the full path 'path', and make it a relative path.
237
-
238
- This is useful to make 'path' the second argument to os.path.join().
239
- """
240
- drive, path = os.path.splitdrive(path)
241
- if path[0:1] == os.sep:
242
- path = drive + path[1:]
243
- return path
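
The helpers above are a vendored copy of distutils.dir_util, so they can be exercised exactly like the stdlib originals. A minimal round trip, on an interpreter that still ships distutils (it was deprecated and then removed in Python 3.12):

```python
# Minimal exercise of the dir_util helpers shown above.
import os
import tempfile

from distutils.dir_util import mkpath, copy_tree, remove_tree

work = tempfile.mkdtemp()
src = os.path.join(work, "src", "pkg", "data")

mkpath(src)                                    # creates src/pkg/data and all parents
open(os.path.join(src, "a.txt"), "w").close()

copied = copy_tree(os.path.join(work, "src"), os.path.join(work, "dst"))
print(copied)                                  # list of files created under dst/

remove_tree(os.path.join(work, "dst"))         # recursive delete; errors are only logged
```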
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_path.py DELETED
@@ -1,29 +0,0 @@
1
- import os
2
- from typing import Union
3
-
4
- _Path = Union[str, os.PathLike]
5
-
6
-
7
- def ensure_directory(path):
8
- """Ensure that the parent directory of `path` exists"""
9
- dirname = os.path.dirname(path)
10
- os.makedirs(dirname, exist_ok=True)
11
-
12
-
13
- def same_path(p1: _Path, p2: _Path) -> bool:
14
- """Differs from os.path.samefile because it does not require paths to exist.
15
- Purely string based (no comparison between i-nodes).
16
- >>> same_path("a/b", "./a/b")
17
- True
18
- >>> same_path("a/b", "a/./b")
19
- True
20
- >>> same_path("a/b", "././a/b")
21
- True
22
- >>> same_path("a/b", "./a/b/c/..")
23
- True
24
- >>> same_path("a/b", "../a/b/c")
25
- False
26
- >>> same_path("a", "a/b")
27
- False
28
- """
29
- return os.path.normpath(p1) == os.path.normpath(p2)
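
same_path is purely string based (which is why its doctests need no real files), while ensure_directory only creates the missing parent directory. Restating both inline so the snippet runs without importing the private setuptools._path module:

```python
# Inline copies of the two helpers defined above, plus a quick sanity check.
import os

def ensure_directory(path):
    """Create the parent directory of `path` if it is missing."""
    os.makedirs(os.path.dirname(path), exist_ok=True)

def same_path(p1, p2):
    """String-only comparison after normalisation; the paths need not exist."""
    return os.path.normpath(p1) == os.path.normpath(p2)

ensure_directory("build/out/report.txt")        # creates ./build/out if needed
print(same_path("build/./out", "build/out"))    # True
print(same_path("build/out", "build/out/.."))   # False: right side normalises to "build"
```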
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/__init__.py DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/typing_extensions.py DELETED
@@ -1,2296 +0,0 @@
1
- import abc
2
- import collections
3
- import collections.abc
4
- import operator
5
- import sys
6
- import typing
7
-
8
- # After PEP 560, internal typing API was substantially reworked.
9
- # This is especially important for Protocol class which uses internal APIs
10
- # quite extensively.
11
- PEP_560 = sys.version_info[:3] >= (3, 7, 0)
12
-
13
- if PEP_560:
14
- GenericMeta = type
15
- else:
16
- # 3.6
17
- from typing import GenericMeta, _type_vars # noqa
18
-
19
- # The two functions below are copies of typing internal helpers.
20
- # They are needed by _ProtocolMeta
21
-
22
-
23
- def _no_slots_copy(dct):
24
- dict_copy = dict(dct)
25
- if '__slots__' in dict_copy:
26
- for slot in dict_copy['__slots__']:
27
- dict_copy.pop(slot, None)
28
- return dict_copy
29
-
30
-
31
- def _check_generic(cls, parameters):
32
- if not cls.__parameters__:
33
- raise TypeError(f"{cls} is not a generic class")
34
- alen = len(parameters)
35
- elen = len(cls.__parameters__)
36
- if alen != elen:
37
- raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
38
- f" actual {alen}, expected {elen}")
39
-
40
-
41
- # Please keep __all__ alphabetized within each category.
42
- __all__ = [
43
- # Super-special typing primitives.
44
- 'ClassVar',
45
- 'Concatenate',
46
- 'Final',
47
- 'ParamSpec',
48
- 'Self',
49
- 'Type',
50
-
51
- # ABCs (from collections.abc).
52
- 'Awaitable',
53
- 'AsyncIterator',
54
- 'AsyncIterable',
55
- 'Coroutine',
56
- 'AsyncGenerator',
57
- 'AsyncContextManager',
58
- 'ChainMap',
59
-
60
- # Concrete collection types.
61
- 'ContextManager',
62
- 'Counter',
63
- 'Deque',
64
- 'DefaultDict',
65
- 'OrderedDict',
66
- 'TypedDict',
67
-
68
- # Structural checks, a.k.a. protocols.
69
- 'SupportsIndex',
70
-
71
- # One-off things.
72
- 'Annotated',
73
- 'final',
74
- 'IntVar',
75
- 'Literal',
76
- 'NewType',
77
- 'overload',
78
- 'Protocol',
79
- 'runtime',
80
- 'runtime_checkable',
81
- 'Text',
82
- 'TypeAlias',
83
- 'TypeGuard',
84
- 'TYPE_CHECKING',
85
- ]
86
-
87
- if PEP_560:
88
- __all__.extend(["get_args", "get_origin", "get_type_hints"])
89
-
90
- # 3.6.2+
91
- if hasattr(typing, 'NoReturn'):
92
- NoReturn = typing.NoReturn
93
- # 3.6.0-3.6.1
94
- else:
95
- class _NoReturn(typing._FinalTypingBase, _root=True):
96
- """Special type indicating functions that never return.
97
- Example::
98
-
99
- from typing import NoReturn
100
-
101
- def stop() -> NoReturn:
102
- raise Exception('no way')
103
-
104
- This type is invalid in other positions, e.g., ``List[NoReturn]``
105
- will fail in static type checkers.
106
- """
107
- __slots__ = ()
108
-
109
- def __instancecheck__(self, obj):
110
- raise TypeError("NoReturn cannot be used with isinstance().")
111
-
112
- def __subclasscheck__(self, cls):
113
- raise TypeError("NoReturn cannot be used with issubclass().")
114
-
115
- NoReturn = _NoReturn(_root=True)
116
-
117
- # Some unconstrained type variables. These are used by the container types.
118
- # (These are not for export.)
119
- T = typing.TypeVar('T') # Any type.
120
- KT = typing.TypeVar('KT') # Key type.
121
- VT = typing.TypeVar('VT') # Value type.
122
- T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
123
- T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
124
-
125
- ClassVar = typing.ClassVar
126
-
127
- # On older versions of typing there is an internal class named "Final".
128
- # 3.8+
129
- if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
130
- Final = typing.Final
131
- # 3.7
132
- elif sys.version_info[:2] >= (3, 7):
133
- class _FinalForm(typing._SpecialForm, _root=True):
134
-
135
- def __repr__(self):
136
- return 'typing_extensions.' + self._name
137
-
138
- def __getitem__(self, parameters):
139
- item = typing._type_check(parameters,
140
- f'{self._name} accepts only single type')
141
- return typing._GenericAlias(self, (item,))
142
-
143
- Final = _FinalForm('Final',
144
- doc="""A special typing construct to indicate that a name
145
- cannot be re-assigned or overridden in a subclass.
146
- For example:
147
-
148
- MAX_SIZE: Final = 9000
149
- MAX_SIZE += 1 # Error reported by type checker
150
-
151
- class Connection:
152
- TIMEOUT: Final[int] = 10
153
- class FastConnector(Connection):
154
- TIMEOUT = 1 # Error reported by type checker
155
-
156
- There is no runtime checking of these properties.""")
157
- # 3.6
158
- else:
159
- class _Final(typing._FinalTypingBase, _root=True):
160
- """A special typing construct to indicate that a name
161
- cannot be re-assigned or overridden in a subclass.
162
- For example:
163
-
164
- MAX_SIZE: Final = 9000
165
- MAX_SIZE += 1 # Error reported by type checker
166
-
167
- class Connection:
168
- TIMEOUT: Final[int] = 10
169
- class FastConnector(Connection):
170
- TIMEOUT = 1 # Error reported by type checker
171
-
172
- There is no runtime checking of these properties.
173
- """
174
-
175
- __slots__ = ('__type__',)
176
-
177
- def __init__(self, tp=None, **kwds):
178
- self.__type__ = tp
179
-
180
- def __getitem__(self, item):
181
- cls = type(self)
182
- if self.__type__ is None:
183
- return cls(typing._type_check(item,
184
- f'{cls.__name__[1:]} accepts only single type.'),
185
- _root=True)
186
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
187
-
188
- def _eval_type(self, globalns, localns):
189
- new_tp = typing._eval_type(self.__type__, globalns, localns)
190
- if new_tp == self.__type__:
191
- return self
192
- return type(self)(new_tp, _root=True)
193
-
194
- def __repr__(self):
195
- r = super().__repr__()
196
- if self.__type__ is not None:
197
- r += f'[{typing._type_repr(self.__type__)}]'
198
- return r
199
-
200
- def __hash__(self):
201
- return hash((type(self).__name__, self.__type__))
202
-
203
- def __eq__(self, other):
204
- if not isinstance(other, _Final):
205
- return NotImplemented
206
- if self.__type__ is not None:
207
- return self.__type__ == other.__type__
208
- return self is other
209
-
210
- Final = _Final(_root=True)
211
-
212
-
213
- # 3.8+
214
- if hasattr(typing, 'final'):
215
- final = typing.final
216
- # 3.6-3.7
217
- else:
218
- def final(f):
219
- """This decorator can be used to indicate to type checkers that
220
- the decorated method cannot be overridden, and decorated class
221
- cannot be subclassed. For example:
222
-
223
- class Base:
224
- @final
225
- def done(self) -> None:
226
- ...
227
- class Sub(Base):
228
- def done(self) -> None: # Error reported by type checker
229
- ...
230
- @final
231
- class Leaf:
232
- ...
233
- class Other(Leaf): # Error reported by type checker
234
- ...
235
-
236
- There is no runtime checking of these properties.
237
- """
238
- return f
239
-
240
-
241
- def IntVar(name):
242
- return typing.TypeVar(name)
243
-
244
-
245
- # 3.8+:
246
- if hasattr(typing, 'Literal'):
247
- Literal = typing.Literal
248
- # 3.7:
249
- elif sys.version_info[:2] >= (3, 7):
250
- class _LiteralForm(typing._SpecialForm, _root=True):
251
-
252
- def __repr__(self):
253
- return 'typing_extensions.' + self._name
254
-
255
- def __getitem__(self, parameters):
256
- return typing._GenericAlias(self, parameters)
257
-
258
- Literal = _LiteralForm('Literal',
259
- doc="""A type that can be used to indicate to type checkers
260
- that the corresponding value has a value literally equivalent
261
- to the provided parameter. For example:
262
-
263
- var: Literal[4] = 4
264
-
265
- The type checker understands that 'var' is literally equal to
266
- the value 4 and no other value.
267
-
268
- Literal[...] cannot be subclassed. There is no runtime
269
- checking verifying that the parameter is actually a value
270
- instead of a type.""")
271
- # 3.6:
272
- else:
273
- class _Literal(typing._FinalTypingBase, _root=True):
274
- """A type that can be used to indicate to type checkers that the
275
- corresponding value has a value literally equivalent to the
276
- provided parameter. For example:
277
-
278
- var: Literal[4] = 4
279
-
280
- The type checker understands that 'var' is literally equal to the
281
- value 4 and no other value.
282
-
283
- Literal[...] cannot be subclassed. There is no runtime checking
284
- verifying that the parameter is actually a value instead of a type.
285
- """
286
-
287
- __slots__ = ('__values__',)
288
-
289
- def __init__(self, values=None, **kwds):
290
- self.__values__ = values
291
-
292
- def __getitem__(self, values):
293
- cls = type(self)
294
- if self.__values__ is None:
295
- if not isinstance(values, tuple):
296
- values = (values,)
297
- return cls(values, _root=True)
298
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
299
-
300
- def _eval_type(self, globalns, localns):
301
- return self
302
-
303
- def __repr__(self):
304
- r = super().__repr__()
305
- if self.__values__ is not None:
306
- r += f'[{", ".join(map(typing._type_repr, self.__values__))}]'
307
- return r
308
-
309
- def __hash__(self):
310
- return hash((type(self).__name__, self.__values__))
311
-
312
- def __eq__(self, other):
313
- if not isinstance(other, _Literal):
314
- return NotImplemented
315
- if self.__values__ is not None:
316
- return self.__values__ == other.__values__
317
- return self is other
318
-
319
- Literal = _Literal(_root=True)
320
-
321
-
322
- _overload_dummy = typing._overload_dummy # noqa
323
- overload = typing.overload
324
-
325
-
326
- # This is not a real generic class. Don't use outside annotations.
327
- Type = typing.Type
328
-
329
- # Various ABCs mimicking those in collections.abc.
330
- # A few are simply re-exported for completeness.
331
-
332
-
333
- class _ExtensionsGenericMeta(GenericMeta):
334
- def __subclasscheck__(self, subclass):
335
- """This mimics a more modern GenericMeta.__subclasscheck__() logic
336
- (that does not have problems with recursion) to work around interactions
337
- between collections, typing, and typing_extensions on older
338
- versions of Python, see https://github.com/python/typing/issues/501.
339
- """
340
- if self.__origin__ is not None:
341
- if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
342
- raise TypeError("Parameterized generics cannot be used with class "
343
- "or instance checks")
344
- return False
345
- if not self.__extra__:
346
- return super().__subclasscheck__(subclass)
347
- res = self.__extra__.__subclasshook__(subclass)
348
- if res is not NotImplemented:
349
- return res
350
- if self.__extra__ in subclass.__mro__:
351
- return True
352
- for scls in self.__extra__.__subclasses__():
353
- if isinstance(scls, GenericMeta):
354
- continue
355
- if issubclass(subclass, scls):
356
- return True
357
- return False
358
-
359
-
360
- Awaitable = typing.Awaitable
361
- Coroutine = typing.Coroutine
362
- AsyncIterable = typing.AsyncIterable
363
- AsyncIterator = typing.AsyncIterator
364
-
365
- # 3.6.1+
366
- if hasattr(typing, 'Deque'):
367
- Deque = typing.Deque
368
- # 3.6.0
369
- else:
370
- class Deque(collections.deque, typing.MutableSequence[T],
371
- metaclass=_ExtensionsGenericMeta,
372
- extra=collections.deque):
373
- __slots__ = ()
374
-
375
- def __new__(cls, *args, **kwds):
376
- if cls._gorg is Deque:
377
- return collections.deque(*args, **kwds)
378
- return typing._generic_new(collections.deque, cls, *args, **kwds)
379
-
380
- ContextManager = typing.ContextManager
381
- # 3.6.2+
382
- if hasattr(typing, 'AsyncContextManager'):
383
- AsyncContextManager = typing.AsyncContextManager
384
- # 3.6.0-3.6.1
385
- else:
386
- from _collections_abc import _check_methods as _check_methods_in_mro # noqa
387
-
388
- class AsyncContextManager(typing.Generic[T_co]):
389
- __slots__ = ()
390
-
391
- async def __aenter__(self):
392
- return self
393
-
394
- @abc.abstractmethod
395
- async def __aexit__(self, exc_type, exc_value, traceback):
396
- return None
397
-
398
- @classmethod
399
- def __subclasshook__(cls, C):
400
- if cls is AsyncContextManager:
401
- return _check_methods_in_mro(C, "__aenter__", "__aexit__")
402
- return NotImplemented
403
-
404
- DefaultDict = typing.DefaultDict
405
-
406
- # 3.7.2+
407
- if hasattr(typing, 'OrderedDict'):
408
- OrderedDict = typing.OrderedDict
409
- # 3.7.0-3.7.2
410
- elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
411
- OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
412
- # 3.6
413
- else:
414
- class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
415
- metaclass=_ExtensionsGenericMeta,
416
- extra=collections.OrderedDict):
417
-
418
- __slots__ = ()
419
-
420
- def __new__(cls, *args, **kwds):
421
- if cls._gorg is OrderedDict:
422
- return collections.OrderedDict(*args, **kwds)
423
- return typing._generic_new(collections.OrderedDict, cls, *args, **kwds)
424
-
425
- # 3.6.2+
426
- if hasattr(typing, 'Counter'):
427
- Counter = typing.Counter
428
- # 3.6.0-3.6.1
429
- else:
430
- class Counter(collections.Counter,
431
- typing.Dict[T, int],
432
- metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
433
-
434
- __slots__ = ()
435
-
436
- def __new__(cls, *args, **kwds):
437
- if cls._gorg is Counter:
438
- return collections.Counter(*args, **kwds)
439
- return typing._generic_new(collections.Counter, cls, *args, **kwds)
440
-
441
- # 3.6.1+
442
- if hasattr(typing, 'ChainMap'):
443
- ChainMap = typing.ChainMap
444
- elif hasattr(collections, 'ChainMap'):
445
- class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
446
- metaclass=_ExtensionsGenericMeta,
447
- extra=collections.ChainMap):
448
-
449
- __slots__ = ()
450
-
451
- def __new__(cls, *args, **kwds):
452
- if cls._gorg is ChainMap:
453
- return collections.ChainMap(*args, **kwds)
454
- return typing._generic_new(collections.ChainMap, cls, *args, **kwds)
455
-
456
- # 3.6.1+
457
- if hasattr(typing, 'AsyncGenerator'):
458
- AsyncGenerator = typing.AsyncGenerator
459
- # 3.6.0
460
- else:
461
- class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
462
- metaclass=_ExtensionsGenericMeta,
463
- extra=collections.abc.AsyncGenerator):
464
- __slots__ = ()
465
-
466
- NewType = typing.NewType
467
- Text = typing.Text
468
- TYPE_CHECKING = typing.TYPE_CHECKING
469
-
470
-
471
- def _gorg(cls):
472
- """This function exists for compatibility with old typing versions."""
473
- assert isinstance(cls, GenericMeta)
474
- if hasattr(cls, '_gorg'):
475
- return cls._gorg
476
- while cls.__origin__ is not None:
477
- cls = cls.__origin__
478
- return cls
479
-
480
-
481
- _PROTO_WHITELIST = ['Callable', 'Awaitable',
482
- 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
483
- 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
484
- 'ContextManager', 'AsyncContextManager']
485
-
486
-
487
- def _get_protocol_attrs(cls):
488
- attrs = set()
489
- for base in cls.__mro__[:-1]: # without object
490
- if base.__name__ in ('Protocol', 'Generic'):
491
- continue
492
- annotations = getattr(base, '__annotations__', {})
493
- for attr in list(base.__dict__.keys()) + list(annotations.keys()):
494
- if (not attr.startswith('_abc_') and attr not in (
495
- '__abstractmethods__', '__annotations__', '__weakref__',
496
- '_is_protocol', '_is_runtime_protocol', '__dict__',
497
- '__args__', '__slots__',
498
- '__next_in_mro__', '__parameters__', '__origin__',
499
- '__orig_bases__', '__extra__', '__tree_hash__',
500
- '__doc__', '__subclasshook__', '__init__', '__new__',
501
- '__module__', '_MutableMapping__marker', '_gorg')):
502
- attrs.add(attr)
503
- return attrs
504
-
505
-
506
- def _is_callable_members_only(cls):
507
- return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
508
-
509
-
510
- # 3.8+
511
- if hasattr(typing, 'Protocol'):
512
- Protocol = typing.Protocol
513
- # 3.7
514
- elif PEP_560:
515
- from typing import _collect_type_vars # noqa
516
-
517
- def _no_init(self, *args, **kwargs):
518
- if type(self)._is_protocol:
519
- raise TypeError('Protocols cannot be instantiated')
520
-
521
- class _ProtocolMeta(abc.ABCMeta):
522
- # This metaclass is a bit unfortunate and exists only because of the lack
523
- # of __instancehook__.
524
- def __instancecheck__(cls, instance):
525
- # We need this method for situations where attributes are
526
- # assigned in __init__.
527
- if ((not getattr(cls, '_is_protocol', False) or
528
- _is_callable_members_only(cls)) and
529
- issubclass(instance.__class__, cls)):
530
- return True
531
- if cls._is_protocol:
532
- if all(hasattr(instance, attr) and
533
- (not callable(getattr(cls, attr, None)) or
534
- getattr(instance, attr) is not None)
535
- for attr in _get_protocol_attrs(cls)):
536
- return True
537
- return super().__instancecheck__(instance)
538
-
539
- class Protocol(metaclass=_ProtocolMeta):
540
- # There is quite a lot of overlapping code with typing.Generic.
541
- # Unfortunately it is hard to avoid this while these live in two different
542
- # modules. The duplicated code will be removed when Protocol is moved to typing.
543
- """Base class for protocol classes. Protocol classes are defined as::
544
-
545
- class Proto(Protocol):
546
- def meth(self) -> int:
547
- ...
548
-
549
- Such classes are primarily used with static type checkers that recognize
550
- structural subtyping (static duck-typing), for example::
551
-
552
- class C:
553
- def meth(self) -> int:
554
- return 0
555
-
556
- def func(x: Proto) -> int:
557
- return x.meth()
558
-
559
- func(C()) # Passes static type check
560
-
561
- See PEP 544 for details. Protocol classes decorated with
562
- @typing_extensions.runtime act as simple-minded runtime protocol that checks
563
- only the presence of given attributes, ignoring their type signatures.
564
-
565
- Protocol classes can be generic, they are defined as::
566
-
567
- class GenProto(Protocol[T]):
568
- def meth(self) -> T:
569
- ...
570
- """
571
- __slots__ = ()
572
- _is_protocol = True
573
-
574
- def __new__(cls, *args, **kwds):
575
- if cls is Protocol:
576
- raise TypeError("Type Protocol cannot be instantiated; "
577
- "it can only be used as a base class")
578
- return super().__new__(cls)
579
-
580
- @typing._tp_cache
581
- def __class_getitem__(cls, params):
582
- if not isinstance(params, tuple):
583
- params = (params,)
584
- if not params and cls is not typing.Tuple:
585
- raise TypeError(
586
- f"Parameter list to {cls.__qualname__}[...] cannot be empty")
587
- msg = "Parameters to generic types must be types."
588
- params = tuple(typing._type_check(p, msg) for p in params) # noqa
589
- if cls is Protocol:
590
- # Generic can only be subscripted with unique type variables.
591
- if not all(isinstance(p, typing.TypeVar) for p in params):
592
- i = 0
593
- while isinstance(params[i], typing.TypeVar):
594
- i += 1
595
- raise TypeError(
596
- "Parameters to Protocol[...] must all be type variables."
597
- f" Parameter {i + 1} is {params[i]}")
598
- if len(set(params)) != len(params):
599
- raise TypeError(
600
- "Parameters to Protocol[...] must all be unique")
601
- else:
602
- # Subscripting a regular Generic subclass.
603
- _check_generic(cls, params)
604
- return typing._GenericAlias(cls, params)
605
-
606
- def __init_subclass__(cls, *args, **kwargs):
607
- tvars = []
608
- if '__orig_bases__' in cls.__dict__:
609
- error = typing.Generic in cls.__orig_bases__
610
- else:
611
- error = typing.Generic in cls.__bases__
612
- if error:
613
- raise TypeError("Cannot inherit from plain Generic")
614
- if '__orig_bases__' in cls.__dict__:
615
- tvars = _collect_type_vars(cls.__orig_bases__)
616
- # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
617
- # If found, tvars must be a subset of it.
618
- # If not found, tvars is it.
619
- # Also check for and reject plain Generic,
620
- # and reject multiple Generic[...] and/or Protocol[...].
621
- gvars = None
622
- for base in cls.__orig_bases__:
623
- if (isinstance(base, typing._GenericAlias) and
624
- base.__origin__ in (typing.Generic, Protocol)):
625
- # for error messages
626
- the_base = base.__origin__.__name__
627
- if gvars is not None:
628
- raise TypeError(
629
- "Cannot inherit from Generic[...]"
630
- " and/or Protocol[...] multiple types.")
631
- gvars = base.__parameters__
632
- if gvars is None:
633
- gvars = tvars
634
- else:
635
- tvarset = set(tvars)
636
- gvarset = set(gvars)
637
- if not tvarset <= gvarset:
638
- s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
639
- s_args = ', '.join(str(g) for g in gvars)
640
- raise TypeError(f"Some type variables ({s_vars}) are"
641
- f" not listed in {the_base}[{s_args}]")
642
- tvars = gvars
643
- cls.__parameters__ = tuple(tvars)
644
-
645
- # Determine if this is a protocol or a concrete subclass.
646
- if not cls.__dict__.get('_is_protocol', None):
647
- cls._is_protocol = any(b is Protocol for b in cls.__bases__)
648
-
649
- # Set (or override) the protocol subclass hook.
650
- def _proto_hook(other):
651
- if not cls.__dict__.get('_is_protocol', None):
652
- return NotImplemented
653
- if not getattr(cls, '_is_runtime_protocol', False):
654
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
655
- return NotImplemented
656
- raise TypeError("Instance and class checks can only be used with"
657
- " @runtime protocols")
658
- if not _is_callable_members_only(cls):
659
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
660
- return NotImplemented
661
- raise TypeError("Protocols with non-method members"
662
- " don't support issubclass()")
663
- if not isinstance(other, type):
664
- # Same error as for issubclass(1, int)
665
- raise TypeError('issubclass() arg 1 must be a class')
666
- for attr in _get_protocol_attrs(cls):
667
- for base in other.__mro__:
668
- if attr in base.__dict__:
669
- if base.__dict__[attr] is None:
670
- return NotImplemented
671
- break
672
- annotations = getattr(base, '__annotations__', {})
673
- if (isinstance(annotations, typing.Mapping) and
674
- attr in annotations and
675
- isinstance(other, _ProtocolMeta) and
676
- other._is_protocol):
677
- break
678
- else:
679
- return NotImplemented
680
- return True
681
- if '__subclasshook__' not in cls.__dict__:
682
- cls.__subclasshook__ = _proto_hook
683
-
684
- # We have nothing more to do for non-protocols.
685
- if not cls._is_protocol:
686
- return
687
-
688
- # Check consistency of bases.
689
- for base in cls.__bases__:
690
- if not (base in (object, typing.Generic) or
691
- base.__module__ == 'collections.abc' and
692
- base.__name__ in _PROTO_WHITELIST or
693
- isinstance(base, _ProtocolMeta) and base._is_protocol):
694
- raise TypeError('Protocols can only inherit from other'
695
- f' protocols, got {repr(base)}')
696
- cls.__init__ = _no_init
697
- # 3.6
698
- else:
699
- from typing import _next_in_mro, _type_check # noqa
700
-
701
- def _no_init(self, *args, **kwargs):
702
- if type(self)._is_protocol:
703
- raise TypeError('Protocols cannot be instantiated')
704
-
705
- class _ProtocolMeta(GenericMeta):
706
- """Internal metaclass for Protocol.
707
-
708
- This exists so Protocol classes can be generic without deriving
709
- from Generic.
710
- """
711
- def __new__(cls, name, bases, namespace,
712
- tvars=None, args=None, origin=None, extra=None, orig_bases=None):
713
- # This is just a version copied from GenericMeta.__new__ that
714
- # includes "Protocol" special treatment. (Comments removed for brevity.)
715
- assert extra is None # Protocols should not have extra
716
- if tvars is not None:
717
- assert origin is not None
718
- assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars
719
- else:
720
- tvars = _type_vars(bases)
721
- gvars = None
722
- for base in bases:
723
- if base is typing.Generic:
724
- raise TypeError("Cannot inherit from plain Generic")
725
- if (isinstance(base, GenericMeta) and
726
- base.__origin__ in (typing.Generic, Protocol)):
727
- if gvars is not None:
728
- raise TypeError(
729
- "Cannot inherit from Generic[...] or"
730
- " Protocol[...] multiple times.")
731
- gvars = base.__parameters__
732
- if gvars is None:
733
- gvars = tvars
734
- else:
735
- tvarset = set(tvars)
736
- gvarset = set(gvars)
737
- if not tvarset <= gvarset:
738
- s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
739
- s_args = ", ".join(str(g) for g in gvars)
740
- cls_name = "Generic" if any(b.__origin__ is typing.Generic
741
- for b in bases) else "Protocol"
742
- raise TypeError(f"Some type variables ({s_vars}) are"
743
- f" not listed in {cls_name}[{s_args}]")
744
- tvars = gvars
745
-
746
- initial_bases = bases
747
- if (extra is not None and type(extra) is abc.ABCMeta and
748
- extra not in bases):
749
- bases = (extra,) + bases
750
- bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
751
- for b in bases)
752
- if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases):
753
- bases = tuple(b for b in bases if b is not typing.Generic)
754
- namespace.update({'__origin__': origin, '__extra__': extra})
755
- self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
756
- _root=True)
757
- super(GenericMeta, self).__setattr__('_gorg',
758
- self if not origin else
759
- _gorg(origin))
760
- self.__parameters__ = tvars
761
- self.__args__ = tuple(... if a is typing._TypingEllipsis else
762
- () if a is typing._TypingEmpty else
763
- a for a in args) if args else None
764
- self.__next_in_mro__ = _next_in_mro(self)
765
- if orig_bases is None:
766
- self.__orig_bases__ = initial_bases
767
- elif origin is not None:
768
- self._abc_registry = origin._abc_registry
769
- self._abc_cache = origin._abc_cache
770
- if hasattr(self, '_subs_tree'):
771
- self.__tree_hash__ = (hash(self._subs_tree()) if origin else
772
- super(GenericMeta, self).__hash__())
773
- return self
774
-
775
- def __init__(cls, *args, **kwargs):
776
- super().__init__(*args, **kwargs)
777
- if not cls.__dict__.get('_is_protocol', None):
778
- cls._is_protocol = any(b is Protocol or
779
- isinstance(b, _ProtocolMeta) and
780
- b.__origin__ is Protocol
781
- for b in cls.__bases__)
782
- if cls._is_protocol:
783
- for base in cls.__mro__[1:]:
784
- if not (base in (object, typing.Generic) or
785
- base.__module__ == 'collections.abc' and
786
- base.__name__ in _PROTO_WHITELIST or
787
- isinstance(base, typing.TypingMeta) and base._is_protocol or
788
- isinstance(base, GenericMeta) and
789
- base.__origin__ is typing.Generic):
790
- raise TypeError(f'Protocols can only inherit from other'
791
- f' protocols, got {repr(base)}')
792
-
793
- cls.__init__ = _no_init
794
-
795
- def _proto_hook(other):
796
- if not cls.__dict__.get('_is_protocol', None):
797
- return NotImplemented
798
- if not isinstance(other, type):
799
- # Same error as for issubclass(1, int)
800
- raise TypeError('issubclass() arg 1 must be a class')
801
- for attr in _get_protocol_attrs(cls):
802
- for base in other.__mro__:
803
- if attr in base.__dict__:
804
- if base.__dict__[attr] is None:
805
- return NotImplemented
806
- break
807
- annotations = getattr(base, '__annotations__', {})
808
- if (isinstance(annotations, typing.Mapping) and
809
- attr in annotations and
810
- isinstance(other, _ProtocolMeta) and
811
- other._is_protocol):
812
- break
813
- else:
814
- return NotImplemented
815
- return True
816
- if '__subclasshook__' not in cls.__dict__:
817
- cls.__subclasshook__ = _proto_hook
818
-
819
- def __instancecheck__(self, instance):
820
- # We need this method for situations where attributes are
821
- # assigned in __init__.
822
- if ((not getattr(self, '_is_protocol', False) or
823
- _is_callable_members_only(self)) and
824
- issubclass(instance.__class__, self)):
825
- return True
826
- if self._is_protocol:
827
- if all(hasattr(instance, attr) and
828
- (not callable(getattr(self, attr, None)) or
829
- getattr(instance, attr) is not None)
830
- for attr in _get_protocol_attrs(self)):
831
- return True
832
- return super(GenericMeta, self).__instancecheck__(instance)
833
-
834
- def __subclasscheck__(self, cls):
835
- if self.__origin__ is not None:
836
- if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
837
- raise TypeError("Parameterized generics cannot be used with class "
838
- "or instance checks")
839
- return False
840
- if (self.__dict__.get('_is_protocol', None) and
841
- not self.__dict__.get('_is_runtime_protocol', None)):
842
- if sys._getframe(1).f_globals['__name__'] in ['abc',
843
- 'functools',
844
- 'typing']:
845
- return False
846
- raise TypeError("Instance and class checks can only be used with"
847
- " @runtime protocols")
848
- if (self.__dict__.get('_is_runtime_protocol', None) and
849
- not _is_callable_members_only(self)):
850
- if sys._getframe(1).f_globals['__name__'] in ['abc',
851
- 'functools',
852
- 'typing']:
853
- return super(GenericMeta, self).__subclasscheck__(cls)
854
- raise TypeError("Protocols with non-method members"
855
- " don't support issubclass()")
856
- return super(GenericMeta, self).__subclasscheck__(cls)
857
-
858
- @typing._tp_cache
859
- def __getitem__(self, params):
860
- # We also need to copy this from GenericMeta.__getitem__ to get
861
- # special treatment of "Protocol". (Comments removed for brevity.)
862
- if not isinstance(params, tuple):
863
- params = (params,)
864
- if not params and _gorg(self) is not typing.Tuple:
865
- raise TypeError(
866
- f"Parameter list to {self.__qualname__}[...] cannot be empty")
867
- msg = "Parameters to generic types must be types."
868
- params = tuple(_type_check(p, msg) for p in params)
869
- if self in (typing.Generic, Protocol):
870
- if not all(isinstance(p, typing.TypeVar) for p in params):
871
- raise TypeError(
872
- f"Parameters to {repr(self)}[...] must all be type variables")
873
- if len(set(params)) != len(params):
874
- raise TypeError(
875
- f"Parameters to {repr(self)}[...] must all be unique")
876
- tvars = params
877
- args = params
878
- elif self in (typing.Tuple, typing.Callable):
879
- tvars = _type_vars(params)
880
- args = params
881
- elif self.__origin__ in (typing.Generic, Protocol):
882
- raise TypeError(f"Cannot subscript already-subscripted {repr(self)}")
883
- else:
884
- _check_generic(self, params)
885
- tvars = _type_vars(params)
886
- args = params
887
-
888
- prepend = (self,) if self.__origin__ is None else ()
889
- return self.__class__(self.__name__,
890
- prepend + self.__bases__,
891
- _no_slots_copy(self.__dict__),
892
- tvars=tvars,
893
- args=args,
894
- origin=self,
895
- extra=self.__extra__,
896
- orig_bases=self.__orig_bases__)
897
-
898
- class Protocol(metaclass=_ProtocolMeta):
899
- """Base class for protocol classes. Protocol classes are defined as::
900
-
901
- class Proto(Protocol):
902
- def meth(self) -> int:
903
- ...
904
-
905
- Such classes are primarily used with static type checkers that recognize
906
- structural subtyping (static duck-typing), for example::
907
-
908
- class C:
909
- def meth(self) -> int:
910
- return 0
911
-
912
- def func(x: Proto) -> int:
913
- return x.meth()
914
-
915
- func(C()) # Passes static type check
916
-
917
- See PEP 544 for details. Protocol classes decorated with
918
- @typing_extensions.runtime act as simple-minded runtime protocol that checks
919
- only the presence of given attributes, ignoring their type signatures.
920
-
921
- Protocol classes can be generic, they are defined as::
922
-
923
- class GenProto(Protocol[T]):
924
- def meth(self) -> T:
925
- ...
926
- """
927
- __slots__ = ()
928
- _is_protocol = True
929
-
930
- def __new__(cls, *args, **kwds):
931
- if _gorg(cls) is Protocol:
932
- raise TypeError("Type Protocol cannot be instantiated; "
933
- "it can be used only as a base class")
934
- return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds)
935
-
936
-
937
- # 3.8+
938
- if hasattr(typing, 'runtime_checkable'):
939
- runtime_checkable = typing.runtime_checkable
940
- # 3.6-3.7
941
- else:
942
- def runtime_checkable(cls):
943
- """Mark a protocol class as a runtime protocol, so that it
944
- can be used with isinstance() and issubclass(). Raise TypeError
945
- if applied to a non-protocol class.
946
-
947
- This allows a simple-minded structural check very similar to the
948
- one-offs in collections.abc such as Hashable.
949
- """
950
- if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
951
- raise TypeError('@runtime_checkable can be only applied to protocol classes,'
952
- f' got {cls!r}')
953
- cls._is_runtime_protocol = True
954
- return cls
955
-
956
-
957
- # Exists for backwards compatibility.
958
- runtime = runtime_checkable
959
-
960
-
961
- # 3.8+
962
- if hasattr(typing, 'SupportsIndex'):
963
- SupportsIndex = typing.SupportsIndex
964
- # 3.6-3.7
965
- else:
966
- @runtime_checkable
967
- class SupportsIndex(Protocol):
968
- __slots__ = ()
969
-
970
- @abc.abstractmethod
971
- def __index__(self) -> int:
972
- pass
973
-
974
-
975
- if sys.version_info >= (3, 9, 2):
976
- # The standard library TypedDict in Python 3.8 does not store runtime information
977
- # about which (if any) keys are optional. See https://bugs.python.org/issue38834
978
- # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
979
- # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
980
- TypedDict = typing.TypedDict
981
- else:
982
- def _check_fails(cls, other):
983
- try:
984
- if sys._getframe(1).f_globals['__name__'] not in ['abc',
985
- 'functools',
986
- 'typing']:
987
- # Typed dicts are only for static structural subtyping.
988
- raise TypeError('TypedDict does not support instance and class checks')
989
- except (AttributeError, ValueError):
990
- pass
991
- return False
992
-
993
- def _dict_new(*args, **kwargs):
994
- if not args:
995
- raise TypeError('TypedDict.__new__(): not enough arguments')
996
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
997
- return dict(*args, **kwargs)
998
-
999
- _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
1000
-
1001
- def _typeddict_new(*args, total=True, **kwargs):
1002
- if not args:
1003
- raise TypeError('TypedDict.__new__(): not enough arguments')
1004
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
1005
- if args:
1006
- typename, args = args[0], args[1:] # allow the "_typename" keyword be passed
1007
- elif '_typename' in kwargs:
1008
- typename = kwargs.pop('_typename')
1009
- import warnings
1010
- warnings.warn("Passing '_typename' as keyword argument is deprecated",
1011
- DeprecationWarning, stacklevel=2)
1012
- else:
1013
- raise TypeError("TypedDict.__new__() missing 1 required positional "
1014
- "argument: '_typename'")
1015
- if args:
1016
- try:
1017
- fields, = args # allow the "_fields" keyword be passed
1018
- except ValueError:
1019
- raise TypeError('TypedDict.__new__() takes from 2 to 3 '
1020
- f'positional arguments but {len(args) + 2} '
1021
- 'were given')
1022
- elif '_fields' in kwargs and len(kwargs) == 1:
1023
- fields = kwargs.pop('_fields')
1024
- import warnings
1025
- warnings.warn("Passing '_fields' as keyword argument is deprecated",
1026
- DeprecationWarning, stacklevel=2)
1027
- else:
1028
- fields = None
1029
-
1030
- if fields is None:
1031
- fields = kwargs
1032
- elif kwargs:
1033
- raise TypeError("TypedDict takes either a dict or keyword arguments,"
1034
- " but not both")
1035
-
1036
- ns = {'__annotations__': dict(fields)}
1037
- try:
1038
- # Setting correct module is necessary to make typed dict classes pickleable.
1039
- ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
1040
- except (AttributeError, ValueError):
1041
- pass
1042
-
1043
- return _TypedDictMeta(typename, (), ns, total=total)
1044
-
1045
- _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
1046
- ' /, *, total=True, **kwargs)')
1047
-
1048
- class _TypedDictMeta(type):
1049
- def __init__(cls, name, bases, ns, total=True):
1050
- super().__init__(name, bases, ns)
1051
-
1052
- def __new__(cls, name, bases, ns, total=True):
1053
- # Create new typed dict class object.
1054
- # This method is called directly when TypedDict is subclassed,
1055
- # or via _typeddict_new when TypedDict is instantiated. This way
1056
- # TypedDict supports all three syntaxes described in its docstring.
1057
- # Subclasses and instances of TypedDict return actual dictionaries
1058
- # via _dict_new.
1059
- ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
1060
- tp_dict = super().__new__(cls, name, (dict,), ns)
1061
-
1062
- annotations = {}
1063
- own_annotations = ns.get('__annotations__', {})
1064
- own_annotation_keys = set(own_annotations.keys())
1065
- msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
1066
- own_annotations = {
1067
- n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
1068
- }
1069
- required_keys = set()
1070
- optional_keys = set()
1071
-
1072
- for base in bases:
1073
- annotations.update(base.__dict__.get('__annotations__', {}))
1074
- required_keys.update(base.__dict__.get('__required_keys__', ()))
1075
- optional_keys.update(base.__dict__.get('__optional_keys__', ()))
1076
-
1077
- annotations.update(own_annotations)
1078
- if total:
1079
- required_keys.update(own_annotation_keys)
1080
- else:
1081
- optional_keys.update(own_annotation_keys)
1082
-
1083
- tp_dict.__annotations__ = annotations
1084
- tp_dict.__required_keys__ = frozenset(required_keys)
1085
- tp_dict.__optional_keys__ = frozenset(optional_keys)
1086
- if not hasattr(tp_dict, '__total__'):
1087
- tp_dict.__total__ = total
1088
- return tp_dict
1089
-
1090
- __instancecheck__ = __subclasscheck__ = _check_fails
1091
-
1092
- TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
1093
- TypedDict.__module__ = __name__
1094
- TypedDict.__doc__ = \
1095
- """A simple typed name space. At runtime it is equivalent to a plain dict.
1096
-
1097
- TypedDict creates a dictionary type that expects all of its
1098
- instances to have a certain set of keys, with each key
1099
- associated with a value of a consistent type. This expectation
1100
- is not checked at runtime but is only enforced by type checkers.
1101
- Usage::
1102
-
1103
- class Point2D(TypedDict):
1104
- x: int
1105
- y: int
1106
- label: str
1107
-
1108
- a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
1109
- b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
1110
-
1111
- assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
1112
-
1113
- The type info can be accessed via the Point2D.__annotations__ dict, and
1114
- the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
1115
- TypedDict supports two additional equivalent forms::
1116
-
1117
- Point2D = TypedDict('Point2D', x=int, y=int, label=str)
1118
- Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
1119
-
1120
- The class syntax is only supported in Python 3.6+, while two other
1121
- syntax forms work for Python 2.7 and 3.2+
1122
- """
1123
-
1124
-
1125
- # Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
1126
- if hasattr(typing, 'Annotated'):
1127
- Annotated = typing.Annotated
1128
- get_type_hints = typing.get_type_hints
1129
- # Not exported and not a public API, but needed for get_origin() and get_args()
1130
- # to work.
1131
- _AnnotatedAlias = typing._AnnotatedAlias
1132
- # 3.7-3.8
1133
- elif PEP_560:
1134
- class _AnnotatedAlias(typing._GenericAlias, _root=True):
1135
- """Runtime representation of an annotated type.
1136
-
1137
- At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
1138
- with extra annotations. The alias behaves like a normal typing alias,
1139
- instantiating is the same as instantiating the underlying type, binding
1140
- it to types is also the same.
1141
- """
1142
- def __init__(self, origin, metadata):
1143
- if isinstance(origin, _AnnotatedAlias):
1144
- metadata = origin.__metadata__ + metadata
1145
- origin = origin.__origin__
1146
- super().__init__(origin, origin)
1147
- self.__metadata__ = metadata
1148
-
1149
- def copy_with(self, params):
1150
- assert len(params) == 1
1151
- new_type = params[0]
1152
- return _AnnotatedAlias(new_type, self.__metadata__)
1153
-
1154
- def __repr__(self):
1155
- return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
1156
- f"{', '.join(repr(a) for a in self.__metadata__)}]")
1157
-
1158
- def __reduce__(self):
1159
- return operator.getitem, (
1160
- Annotated, (self.__origin__,) + self.__metadata__
1161
- )
1162
-
1163
- def __eq__(self, other):
1164
- if not isinstance(other, _AnnotatedAlias):
1165
- return NotImplemented
1166
- if self.__origin__ != other.__origin__:
1167
- return False
1168
- return self.__metadata__ == other.__metadata__
1169
-
1170
- def __hash__(self):
1171
- return hash((self.__origin__, self.__metadata__))
1172
-
1173
- class Annotated:
1174
- """Add context specific metadata to a type.
1175
-
1176
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
1177
- hypothetical runtime_check module that this type is an unsigned int.
1178
- Every other consumer of this type can ignore this metadata and treat
1179
- this type as int.
1180
-
1181
- The first argument to Annotated must be a valid type (and will be in
1182
- the __origin__ field), the remaining arguments are kept as a tuple in
1183
- the __metadata__ field.
1184
-
1185
- Details:
1186
-
1187
- - It's an error to call `Annotated` with less than two arguments.
1188
- - Nested Annotated are flattened::
1189
-
1190
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
1191
-
1192
- - Instantiating an annotated type is equivalent to instantiating the
1193
- underlying type::
1194
-
1195
- Annotated[C, Ann1](5) == C(5)
1196
-
1197
- - Annotated can be used as a generic type alias::
1198
-
1199
- Optimized = Annotated[T, runtime.Optimize()]
1200
- Optimized[int] == Annotated[int, runtime.Optimize()]
1201
-
1202
- OptimizedList = Annotated[List[T], runtime.Optimize()]
1203
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
1204
- """
1205
-
1206
- __slots__ = ()
1207
-
1208
- def __new__(cls, *args, **kwargs):
1209
- raise TypeError("Type Annotated cannot be instantiated.")
1210
-
1211
- @typing._tp_cache
1212
- def __class_getitem__(cls, params):
1213
- if not isinstance(params, tuple) or len(params) < 2:
1214
- raise TypeError("Annotated[...] should be used "
1215
- "with at least two arguments (a type and an "
1216
- "annotation).")
1217
- msg = "Annotated[t, ...]: t must be a type."
1218
- origin = typing._type_check(params[0], msg)
1219
- metadata = tuple(params[1:])
1220
- return _AnnotatedAlias(origin, metadata)
1221
-
1222
- def __init_subclass__(cls, *args, **kwargs):
1223
- raise TypeError(
1224
- f"Cannot subclass {cls.__module__}.Annotated"
1225
- )
1226
-
1227
- def _strip_annotations(t):
1228
- """Strips the annotations from a given type.
1229
- """
1230
- if isinstance(t, _AnnotatedAlias):
1231
- return _strip_annotations(t.__origin__)
1232
- if isinstance(t, typing._GenericAlias):
1233
- stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
1234
- if stripped_args == t.__args__:
1235
- return t
1236
- res = t.copy_with(stripped_args)
1237
- res._special = t._special
1238
- return res
1239
- return t
1240
-
1241
- def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
1242
- """Return type hints for an object.
1243
-
1244
- This is often the same as obj.__annotations__, but it handles
1245
- forward references encoded as string literals, adds Optional[t] if a
1246
- default value equal to None is set and recursively replaces all
1247
- 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
1248
-
1249
- The argument may be a module, class, method, or function. The annotations
1250
- are returned as a dictionary. For classes, annotations include also
1251
- inherited members.
1252
-
1253
- TypeError is raised if the argument is not of a type that can contain
1254
- annotations, and an empty dictionary is returned if no annotations are
1255
- present.
1256
-
1257
- BEWARE -- the behavior of globalns and localns is counterintuitive
1258
- (unless you are familiar with how eval() and exec() work). The
1259
- search order is locals first, then globals.
1260
-
1261
- - If no dict arguments are passed, an attempt is made to use the
1262
- globals from obj (or the respective module's globals for classes),
1263
- and these are also used as the locals. If the object does not appear
1264
- to have globals, an empty dictionary is used.
1265
-
1266
- - If one dict argument is passed, it is used for both globals and
1267
- locals.
1268
-
1269
- - If two dict arguments are passed, they specify globals and
1270
- locals, respectively.
1271
- """
1272
- hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
1273
- if include_extras:
1274
- return hint
1275
- return {k: _strip_annotations(t) for k, t in hint.items()}
1276
- # 3.6
1277
- else:
1278
-
1279
- def _is_dunder(name):
1280
- """Returns True if name is a __dunder_variable_name__."""
1281
- return len(name) > 4 and name.startswith('__') and name.endswith('__')
1282
-
1283
- # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
1284
- checks, argument expansion etc. are done on the _subs_tree. As a result we
1285
- # can't provide a get_type_hints function that strips out annotations.
1286
-
1287
- class AnnotatedMeta(typing.GenericMeta):
1288
- """Metaclass for Annotated"""
1289
-
1290
- def __new__(cls, name, bases, namespace, **kwargs):
1291
- if any(b is not object for b in bases):
1292
- raise TypeError("Cannot subclass " + str(Annotated))
1293
- return super().__new__(cls, name, bases, namespace, **kwargs)
1294
-
1295
- @property
1296
- def __metadata__(self):
1297
- return self._subs_tree()[2]
1298
-
1299
- def _tree_repr(self, tree):
1300
- cls, origin, metadata = tree
1301
- if not isinstance(origin, tuple):
1302
- tp_repr = typing._type_repr(origin)
1303
- else:
1304
- tp_repr = origin[0]._tree_repr(origin)
1305
- metadata_reprs = ", ".join(repr(arg) for arg in metadata)
1306
- return f'{cls}[{tp_repr}, {metadata_reprs}]'
1307
-
1308
- def _subs_tree(self, tvars=None, args=None): # noqa
1309
- if self is Annotated:
1310
- return Annotated
1311
- res = super()._subs_tree(tvars=tvars, args=args)
1312
- # Flatten nested Annotated
1313
- if isinstance(res[1], tuple) and res[1][0] is Annotated:
1314
- sub_tp = res[1][1]
1315
- sub_annot = res[1][2]
1316
- return (Annotated, sub_tp, sub_annot + res[2])
1317
- return res
1318
-
1319
- def _get_cons(self):
1320
- """Return the class used to create instance of this type."""
1321
- if self.__origin__ is None:
1322
- raise TypeError("Cannot get the underlying type of a "
1323
- "non-specialized Annotated type.")
1324
- tree = self._subs_tree()
1325
- while isinstance(tree, tuple) and tree[0] is Annotated:
1326
- tree = tree[1]
1327
- if isinstance(tree, tuple):
1328
- return tree[0]
1329
- else:
1330
- return tree
1331
-
1332
- @typing._tp_cache
1333
- def __getitem__(self, params):
1334
- if not isinstance(params, tuple):
1335
- params = (params,)
1336
- if self.__origin__ is not None: # specializing an instantiated type
1337
- return super().__getitem__(params)
1338
- elif not isinstance(params, tuple) or len(params) < 2:
1339
- raise TypeError("Annotated[...] should be instantiated "
1340
- "with at least two arguments (a type and an "
1341
- "annotation).")
1342
- else:
1343
- msg = "Annotated[t, ...]: t must be a type."
1344
- tp = typing._type_check(params[0], msg)
1345
- metadata = tuple(params[1:])
1346
- return self.__class__(
1347
- self.__name__,
1348
- self.__bases__,
1349
- _no_slots_copy(self.__dict__),
1350
- tvars=_type_vars((tp,)),
1351
- # Metadata is a tuple so it won't be touched by _replace_args et al.
1352
- args=(tp, metadata),
1353
- origin=self,
1354
- )
1355
-
1356
- def __call__(self, *args, **kwargs):
1357
- cons = self._get_cons()
1358
- result = cons(*args, **kwargs)
1359
- try:
1360
- result.__orig_class__ = self
1361
- except AttributeError:
1362
- pass
1363
- return result
1364
-
1365
- def __getattr__(self, attr):
1366
- # For simplicity we just don't relay all dunder names
1367
- if self.__origin__ is not None and not _is_dunder(attr):
1368
- return getattr(self._get_cons(), attr)
1369
- raise AttributeError(attr)
1370
-
1371
- def __setattr__(self, attr, value):
1372
- if _is_dunder(attr) or attr.startswith('_abc_'):
1373
- super().__setattr__(attr, value)
1374
- elif self.__origin__ is None:
1375
- raise AttributeError(attr)
1376
- else:
1377
- setattr(self._get_cons(), attr, value)
1378
-
1379
- def __instancecheck__(self, obj):
1380
- raise TypeError("Annotated cannot be used with isinstance().")
1381
-
1382
- def __subclasscheck__(self, cls):
1383
- raise TypeError("Annotated cannot be used with issubclass().")
1384
-
1385
- class Annotated(metaclass=AnnotatedMeta):
1386
- """Add context specific metadata to a type.
1387
-
1388
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
1389
- hypothetical runtime_check module that this type is an unsigned int.
1390
- Every other consumer of this type can ignore this metadata and treat
1391
- this type as int.
1392
-
1393
- The first argument to Annotated must be a valid type, the remaining
1394
- arguments are kept as a tuple in the __metadata__ field.
1395
-
1396
- Details:
1397
-
1398
- - It's an error to call `Annotated` with less than two arguments.
1399
- - Nested Annotated are flattened::
1400
-
1401
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
1402
-
1403
- - Instantiating an annotated type is equivalent to instantiating the
1404
- underlying type::
1405
-
1406
- Annotated[C, Ann1](5) == C(5)
1407
-
1408
- - Annotated can be used as a generic type alias::
1409
-
1410
- Optimized = Annotated[T, runtime.Optimize()]
1411
- Optimized[int] == Annotated[int, runtime.Optimize()]
1412
-
1413
- OptimizedList = Annotated[List[T], runtime.Optimize()]
1414
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
1415
- """
1416
-
1417
- # Python 3.8 has get_origin() and get_args() but those implementations aren't
1418
- # Annotated-aware, so we can't use those. Python 3.9's versions don't support
1419
- # ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
1420
- if sys.version_info[:2] >= (3, 10):
1421
- get_origin = typing.get_origin
1422
- get_args = typing.get_args
1423
- # 3.7-3.9
1424
- elif PEP_560:
1425
- try:
1426
- # 3.9+
1427
- from typing import _BaseGenericAlias
1428
- except ImportError:
1429
- _BaseGenericAlias = typing._GenericAlias
1430
- try:
1431
- # 3.9+
1432
- from typing import GenericAlias
1433
- except ImportError:
1434
- GenericAlias = typing._GenericAlias
1435
-
1436
- def get_origin(tp):
1437
- """Get the unsubscripted version of a type.
1438
-
1439
- This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
1440
- and Annotated. Return None for unsupported types. Examples::
1441
-
1442
- get_origin(Literal[42]) is Literal
1443
- get_origin(int) is None
1444
- get_origin(ClassVar[int]) is ClassVar
1445
- get_origin(Generic) is Generic
1446
- get_origin(Generic[T]) is Generic
1447
- get_origin(Union[T, int]) is Union
1448
- get_origin(List[Tuple[T, T]][int]) == list
1449
- get_origin(P.args) is P
1450
- """
1451
- if isinstance(tp, _AnnotatedAlias):
1452
- return Annotated
1453
- if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias,
1454
- ParamSpecArgs, ParamSpecKwargs)):
1455
- return tp.__origin__
1456
- if tp is typing.Generic:
1457
- return typing.Generic
1458
- return None
1459
-
1460
- def get_args(tp):
1461
- """Get type arguments with all substitutions performed.
1462
-
1463
- For unions, basic simplifications used by Union constructor are performed.
1464
- Examples::
1465
- get_args(Dict[str, int]) == (str, int)
1466
- get_args(int) == ()
1467
- get_args(Union[int, Union[T, int], str][int]) == (int, str)
1468
- get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
1469
- get_args(Callable[[], T][int]) == ([], int)
1470
- """
1471
- if isinstance(tp, _AnnotatedAlias):
1472
- return (tp.__origin__,) + tp.__metadata__
1473
- if isinstance(tp, (typing._GenericAlias, GenericAlias)):
1474
- if getattr(tp, "_special", False):
1475
- return ()
1476
- res = tp.__args__
1477
- if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
1478
- res = (list(res[:-1]), res[-1])
1479
- return res
1480
- return ()
1481
-
1482
-
1483
- # 3.10+
1484
- if hasattr(typing, 'TypeAlias'):
1485
- TypeAlias = typing.TypeAlias
1486
- # 3.9
1487
- elif sys.version_info[:2] >= (3, 9):
1488
- class _TypeAliasForm(typing._SpecialForm, _root=True):
1489
- def __repr__(self):
1490
- return 'typing_extensions.' + self._name
1491
-
1492
- @_TypeAliasForm
1493
- def TypeAlias(self, parameters):
1494
- """Special marker indicating that an assignment should
1495
- be recognized as a proper type alias definition by type
1496
- checkers.
1497
-
1498
- For example::
1499
-
1500
- Predicate: TypeAlias = Callable[..., bool]
1501
-
1502
- It's invalid when used anywhere except as in the example above.
1503
- """
1504
- raise TypeError(f"{self} is not subscriptable")
1505
- # 3.7-3.8
1506
- elif sys.version_info[:2] >= (3, 7):
1507
- class _TypeAliasForm(typing._SpecialForm, _root=True):
1508
- def __repr__(self):
1509
- return 'typing_extensions.' + self._name
1510
-
1511
- TypeAlias = _TypeAliasForm('TypeAlias',
1512
- doc="""Special marker indicating that an assignment should
1513
- be recognized as a proper type alias definition by type
1514
- checkers.
1515
-
1516
- For example::
1517
-
1518
- Predicate: TypeAlias = Callable[..., bool]
1519
-
1520
- It's invalid when used anywhere except as in the example
1521
- above.""")
1522
- # 3.6
1523
- else:
1524
- class _TypeAliasMeta(typing.TypingMeta):
1525
- """Metaclass for TypeAlias"""
1526
-
1527
- def __repr__(self):
1528
- return 'typing_extensions.TypeAlias'
1529
-
1530
- class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
1531
- """Special marker indicating that an assignment should
1532
- be recognized as a proper type alias definition by type
1533
- checkers.
1534
-
1535
- For example::
1536
-
1537
- Predicate: TypeAlias = Callable[..., bool]
1538
-
1539
- It's invalid when used anywhere except as in the example above.
1540
- """
1541
- __slots__ = ()
1542
-
1543
- def __instancecheck__(self, obj):
1544
- raise TypeError("TypeAlias cannot be used with isinstance().")
1545
-
1546
- def __subclasscheck__(self, cls):
1547
- raise TypeError("TypeAlias cannot be used with issubclass().")
1548
-
1549
- def __repr__(self):
1550
- return 'typing_extensions.TypeAlias'
1551
-
1552
- TypeAlias = _TypeAliasBase(_root=True)
1553
-
1554
-
1555
- # Python 3.10+ has PEP 612
1556
- if hasattr(typing, 'ParamSpecArgs'):
1557
- ParamSpecArgs = typing.ParamSpecArgs
1558
- ParamSpecKwargs = typing.ParamSpecKwargs
1559
- # 3.6-3.9
1560
- else:
1561
- class _Immutable:
1562
- """Mixin to indicate that object should not be copied."""
1563
- __slots__ = ()
1564
-
1565
- def __copy__(self):
1566
- return self
1567
-
1568
- def __deepcopy__(self, memo):
1569
- return self
1570
-
1571
- class ParamSpecArgs(_Immutable):
1572
- """The args for a ParamSpec object.
1573
-
1574
- Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
1575
-
1576
- ParamSpecArgs objects have a reference back to their ParamSpec:
1577
-
1578
- P.args.__origin__ is P
1579
-
1580
- This type is meant for runtime introspection and has no special meaning to
1581
- static type checkers.
1582
- """
1583
- def __init__(self, origin):
1584
- self.__origin__ = origin
1585
-
1586
- def __repr__(self):
1587
- return f"{self.__origin__.__name__}.args"
1588
-
1589
- class ParamSpecKwargs(_Immutable):
1590
- """The kwargs for a ParamSpec object.
1591
-
1592
- Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
1593
-
1594
- ParamSpecKwargs objects have a reference back to their ParamSpec:
1595
-
1596
- P.kwargs.__origin__ is P
1597
-
1598
- This type is meant for runtime introspection and has no special meaning to
1599
- static type checkers.
1600
- """
1601
- def __init__(self, origin):
1602
- self.__origin__ = origin
1603
-
1604
- def __repr__(self):
1605
- return f"{self.__origin__.__name__}.kwargs"
1606
-
1607
- # 3.10+
1608
- if hasattr(typing, 'ParamSpec'):
1609
- ParamSpec = typing.ParamSpec
1610
- # 3.6-3.9
1611
- else:
1612
-
1613
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
1614
- class ParamSpec(list):
1615
- """Parameter specification variable.
1616
-
1617
- Usage::
1618
-
1619
- P = ParamSpec('P')
1620
-
1621
- Parameter specification variables exist primarily for the benefit of static
1622
- type checkers. They are used to forward the parameter types of one
1623
- callable to another callable, a pattern commonly found in higher order
1624
- functions and decorators. They are only valid when used in ``Concatenate``,
1625
- or as the first argument to ``Callable``. In Python 3.10 and higher,
1626
- they are also supported in user-defined Generics at runtime.
1627
- See class Generic for more information on generic types. An
1628
- example for annotating a decorator::
1629
-
1630
- T = TypeVar('T')
1631
- P = ParamSpec('P')
1632
-
1633
- def add_logging(f: Callable[P, T]) -> Callable[P, T]:
1634
- '''A type-safe decorator to add logging to a function.'''
1635
- def inner(*args: P.args, **kwargs: P.kwargs) -> T:
1636
- logging.info(f'{f.__name__} was called')
1637
- return f(*args, **kwargs)
1638
- return inner
1639
-
1640
- @add_logging
1641
- def add_two(x: float, y: float) -> float:
1642
- '''Add two numbers together.'''
1643
- return x + y
1644
-
1645
- Parameter specification variables defined with covariant=True or
1646
- contravariant=True can be used to declare covariant or contravariant
1647
- generic types. These keyword arguments are valid, but their actual semantics
1648
- are yet to be decided. See PEP 612 for details.
1649
-
1650
- Parameter specification variables can be introspected. e.g.:
1651
-
1652
- P.__name__ == 'P'
1653
- P.__bound__ == None
1654
- P.__covariant__ == False
1655
- P.__contravariant__ == False
1656
-
1657
- Note that only parameter specification variables defined in global scope can
1658
- be pickled.
1659
- """
1660
-
1661
- # Trick Generic __parameters__.
1662
- __class__ = typing.TypeVar
1663
-
1664
- @property
1665
- def args(self):
1666
- return ParamSpecArgs(self)
1667
-
1668
- @property
1669
- def kwargs(self):
1670
- return ParamSpecKwargs(self)
1671
-
1672
- def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
1673
- super().__init__([self])
1674
- self.__name__ = name
1675
- self.__covariant__ = bool(covariant)
1676
- self.__contravariant__ = bool(contravariant)
1677
- if bound:
1678
- self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
1679
- else:
1680
- self.__bound__ = None
1681
-
1682
- # for pickling:
1683
- try:
1684
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
1685
- except (AttributeError, ValueError):
1686
- def_mod = None
1687
- if def_mod != 'typing_extensions':
1688
- self.__module__ = def_mod
1689
-
1690
- def __repr__(self):
1691
- if self.__covariant__:
1692
- prefix = '+'
1693
- elif self.__contravariant__:
1694
- prefix = '-'
1695
- else:
1696
- prefix = '~'
1697
- return prefix + self.__name__
1698
-
1699
- def __hash__(self):
1700
- return object.__hash__(self)
1701
-
1702
- def __eq__(self, other):
1703
- return self is other
1704
-
1705
- def __reduce__(self):
1706
- return self.__name__
1707
-
1708
- # Hack to get typing._type_check to pass.
1709
- def __call__(self, *args, **kwargs):
1710
- pass
1711
-
1712
- if not PEP_560:
1713
- # Only needed in 3.6.
1714
- def _get_type_vars(self, tvars):
1715
- if self not in tvars:
1716
- tvars.append(self)
1717
-
1718
-
1719
- # 3.6-3.9
1720
- if not hasattr(typing, 'Concatenate'):
1721
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
1722
- class _ConcatenateGenericAlias(list):
1723
-
1724
- # Trick Generic into looking into this for __parameters__.
1725
- if PEP_560:
1726
- __class__ = typing._GenericAlias
1727
- else:
1728
- __class__ = typing._TypingBase
1729
-
1730
- # Flag in 3.8.
1731
- _special = False
1732
- # Attribute in 3.6 and earlier.
1733
- _gorg = typing.Generic
1734
-
1735
- def __init__(self, origin, args):
1736
- super().__init__(args)
1737
- self.__origin__ = origin
1738
- self.__args__ = args
1739
-
1740
- def __repr__(self):
1741
- _type_repr = typing._type_repr
1742
- return (f'{_type_repr(self.__origin__)}'
1743
- f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
1744
-
1745
- def __hash__(self):
1746
- return hash((self.__origin__, self.__args__))
1747
-
1748
- # Hack to get typing._type_check to pass in Generic.
1749
- def __call__(self, *args, **kwargs):
1750
- pass
1751
-
1752
- @property
1753
- def __parameters__(self):
1754
- return tuple(
1755
- tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
1756
- )
1757
-
1758
- if not PEP_560:
1759
- # Only required in 3.6.
1760
- def _get_type_vars(self, tvars):
1761
- if self.__origin__ and self.__parameters__:
1762
- typing._get_type_vars(self.__parameters__, tvars)
1763
-
1764
-
1765
- # 3.6-3.9
1766
- @typing._tp_cache
1767
- def _concatenate_getitem(self, parameters):
1768
- if parameters == ():
1769
- raise TypeError("Cannot take a Concatenate of no types.")
1770
- if not isinstance(parameters, tuple):
1771
- parameters = (parameters,)
1772
- if not isinstance(parameters[-1], ParamSpec):
1773
- raise TypeError("The last parameter to Concatenate should be a "
1774
- "ParamSpec variable.")
1775
- msg = "Concatenate[arg, ...]: each arg must be a type."
1776
- parameters = tuple(typing._type_check(p, msg) for p in parameters)
1777
- return _ConcatenateGenericAlias(self, parameters)
1778
-
1779
-
1780
- # 3.10+
1781
- if hasattr(typing, 'Concatenate'):
1782
- Concatenate = typing.Concatenate
1783
- _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
1784
- # 3.9
1785
- elif sys.version_info[:2] >= (3, 9):
1786
- @_TypeAliasForm
1787
- def Concatenate(self, parameters):
1788
- """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
1789
- higher order function which adds, removes or transforms parameters of a
1790
- callable.
1791
-
1792
- For example::
1793
-
1794
- Callable[Concatenate[int, P], int]
1795
-
1796
- See PEP 612 for detailed information.
1797
- """
1798
- return _concatenate_getitem(self, parameters)
1799
- # 3.7-8
1800
- elif sys.version_info[:2] >= (3, 7):
1801
- class _ConcatenateForm(typing._SpecialForm, _root=True):
1802
- def __repr__(self):
1803
- return 'typing_extensions.' + self._name
1804
-
1805
- def __getitem__(self, parameters):
1806
- return _concatenate_getitem(self, parameters)
1807
-
1808
- Concatenate = _ConcatenateForm(
1809
- 'Concatenate',
1810
- doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
1811
- higher order function which adds, removes or transforms parameters of a
1812
- callable.
1813
-
1814
- For example::
1815
-
1816
- Callable[Concatenate[int, P], int]
1817
-
1818
- See PEP 612 for detailed information.
1819
- """)
1820
- # 3.6
1821
- else:
1822
- class _ConcatenateAliasMeta(typing.TypingMeta):
1823
- """Metaclass for Concatenate."""
1824
-
1825
- def __repr__(self):
1826
- return 'typing_extensions.Concatenate'
1827
-
1828
- class _ConcatenateAliasBase(typing._FinalTypingBase,
1829
- metaclass=_ConcatenateAliasMeta,
1830
- _root=True):
1831
- """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
1832
- higher order function which adds, removes or transforms parameters of a
1833
- callable.
1834
-
1835
- For example::
1836
-
1837
- Callable[Concatenate[int, P], int]
1838
-
1839
- See PEP 612 for detailed information.
1840
- """
1841
- __slots__ = ()
1842
-
1843
- def __instancecheck__(self, obj):
1844
- raise TypeError("Concatenate cannot be used with isinstance().")
1845
-
1846
- def __subclasscheck__(self, cls):
1847
- raise TypeError("Concatenate cannot be used with issubclass().")
1848
-
1849
- def __repr__(self):
1850
- return 'typing_extensions.Concatenate'
1851
-
1852
- def __getitem__(self, parameters):
1853
- return _concatenate_getitem(self, parameters)
1854
-
1855
- Concatenate = _ConcatenateAliasBase(_root=True)
1856
-
1857
- # 3.10+
1858
- if hasattr(typing, 'TypeGuard'):
1859
- TypeGuard = typing.TypeGuard
1860
- # 3.9
1861
- elif sys.version_info[:2] >= (3, 9):
1862
- class _TypeGuardForm(typing._SpecialForm, _root=True):
1863
- def __repr__(self):
1864
- return 'typing_extensions.' + self._name
1865
-
1866
- @_TypeGuardForm
1867
- def TypeGuard(self, parameters):
1868
- """Special typing form used to annotate the return type of a user-defined
1869
- type guard function. ``TypeGuard`` only accepts a single type argument.
1870
- At runtime, functions marked this way should return a boolean.
1871
-
1872
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
1873
- type checkers to determine a more precise type of an expression within a
1874
- program's code flow. Usually type narrowing is done by analyzing
1875
- conditional code flow and applying the narrowing to a block of code. The
1876
- conditional expression here is sometimes referred to as a "type guard".
1877
-
1878
- Sometimes it would be convenient to use a user-defined boolean function
1879
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
1880
- return type to alert static type checkers to this intention.
1881
-
1882
- Using ``-> TypeGuard`` tells the static type checker that for a given
1883
- function:
1884
-
1885
- 1. The return value is a boolean.
1886
- 2. If the return value is ``True``, the type of its argument
1887
- is the type inside ``TypeGuard``.
1888
-
1889
- For example::
1890
-
1891
- def is_str(val: Union[str, float]):
1892
- # "isinstance" type guard
1893
- if isinstance(val, str):
1894
- # Type of ``val`` is narrowed to ``str``
1895
- ...
1896
- else:
1897
- # Else, type of ``val`` is narrowed to ``float``.
1898
- ...
1899
-
1900
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
1901
- form of ``TypeA`` (it can even be a wider form) and this may lead to
1902
- type-unsafe results. The main reason is to allow for things like
1903
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
1904
- a subtype of the former, since ``List`` is invariant. The responsibility of
1905
- writing type-safe type guards is left to the user.
1906
-
1907
- ``TypeGuard`` also works with type variables. For more information, see
1908
- PEP 647 (User-Defined Type Guards).
1909
- """
1910
- item = typing._type_check(parameters, f'{self} accepts only single type.')
1911
- return typing._GenericAlias(self, (item,))
1912
- # 3.7-3.8
1913
- elif sys.version_info[:2] >= (3, 7):
1914
- class _TypeGuardForm(typing._SpecialForm, _root=True):
1915
-
1916
- def __repr__(self):
1917
- return 'typing_extensions.' + self._name
1918
-
1919
- def __getitem__(self, parameters):
1920
- item = typing._type_check(parameters,
1921
- f'{self._name} accepts only a single type')
1922
- return typing._GenericAlias(self, (item,))
1923
-
1924
- TypeGuard = _TypeGuardForm(
1925
- 'TypeGuard',
1926
- doc="""Special typing form used to annotate the return type of a user-defined
1927
- type guard function. ``TypeGuard`` only accepts a single type argument.
1928
- At runtime, functions marked this way should return a boolean.
1929
-
1930
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
1931
- type checkers to determine a more precise type of an expression within a
1932
- program's code flow. Usually type narrowing is done by analyzing
1933
- conditional code flow and applying the narrowing to a block of code. The
1934
- conditional expression here is sometimes referred to as a "type guard".
1935
-
1936
- Sometimes it would be convenient to use a user-defined boolean function
1937
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
1938
- return type to alert static type checkers to this intention.
1939
-
1940
- Using ``-> TypeGuard`` tells the static type checker that for a given
1941
- function:
1942
-
1943
- 1. The return value is a boolean.
1944
- 2. If the return value is ``True``, the type of its argument
1945
- is the type inside ``TypeGuard``.
1946
-
1947
- For example::
1948
-
1949
- def is_str(val: Union[str, float]):
1950
- # "isinstance" type guard
1951
- if isinstance(val, str):
1952
- # Type of ``val`` is narrowed to ``str``
1953
- ...
1954
- else:
1955
- # Else, type of ``val`` is narrowed to ``float``.
1956
- ...
1957
-
1958
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
1959
- form of ``TypeA`` (it can even be a wider form) and this may lead to
1960
- type-unsafe results. The main reason is to allow for things like
1961
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
1962
- a subtype of the former, since ``List`` is invariant. The responsibility of
1963
- writing type-safe type guards is left to the user.
1964
-
1965
- ``TypeGuard`` also works with type variables. For more information, see
1966
- PEP 647 (User-Defined Type Guards).
1967
- """)
1968
- # 3.6
1969
- else:
1970
- class _TypeGuard(typing._FinalTypingBase, _root=True):
1971
- """Special typing form used to annotate the return type of a user-defined
1972
- type guard function. ``TypeGuard`` only accepts a single type argument.
1973
- At runtime, functions marked this way should return a boolean.
1974
-
1975
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
1976
- type checkers to determine a more precise type of an expression within a
1977
- program's code flow. Usually type narrowing is done by analyzing
1978
- conditional code flow and applying the narrowing to a block of code. The
1979
- conditional expression here is sometimes referred to as a "type guard".
1980
-
1981
- Sometimes it would be convenient to use a user-defined boolean function
1982
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
1983
- return type to alert static type checkers to this intention.
1984
-
1985
- Using ``-> TypeGuard`` tells the static type checker that for a given
1986
- function:
1987
-
1988
- 1. The return value is a boolean.
1989
- 2. If the return value is ``True``, the type of its argument
1990
- is the type inside ``TypeGuard``.
1991
-
1992
- For example::
1993
-
1994
- def is_str(val: Union[str, float]):
1995
- # "isinstance" type guard
1996
- if isinstance(val, str):
1997
- # Type of ``val`` is narrowed to ``str``
1998
- ...
1999
- else:
2000
- # Else, type of ``val`` is narrowed to ``float``.
2001
- ...
2002
-
2003
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
2004
- form of ``TypeA`` (it can even be a wider form) and this may lead to
2005
- type-unsafe results. The main reason is to allow for things like
2006
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
2007
- a subtype of the former, since ``List`` is invariant. The responsibility of
2008
- writing type-safe type guards is left to the user.
2009
-
2010
- ``TypeGuard`` also works with type variables. For more information, see
2011
- PEP 647 (User-Defined Type Guards).
2012
- """
2013
-
2014
- __slots__ = ('__type__',)
2015
-
2016
- def __init__(self, tp=None, **kwds):
2017
- self.__type__ = tp
2018
-
2019
- def __getitem__(self, item):
2020
- cls = type(self)
2021
- if self.__type__ is None:
2022
- return cls(typing._type_check(item,
2023
- f'{cls.__name__[1:]} accepts only a single type.'),
2024
- _root=True)
2025
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
2026
-
2027
- def _eval_type(self, globalns, localns):
2028
- new_tp = typing._eval_type(self.__type__, globalns, localns)
2029
- if new_tp == self.__type__:
2030
- return self
2031
- return type(self)(new_tp, _root=True)
2032
-
2033
- def __repr__(self):
2034
- r = super().__repr__()
2035
- if self.__type__ is not None:
2036
- r += f'[{typing._type_repr(self.__type__)}]'
2037
- return r
2038
-
2039
- def __hash__(self):
2040
- return hash((type(self).__name__, self.__type__))
2041
-
2042
- def __eq__(self, other):
2043
- if not isinstance(other, _TypeGuard):
2044
- return NotImplemented
2045
- if self.__type__ is not None:
2046
- return self.__type__ == other.__type__
2047
- return self is other
2048
-
2049
- TypeGuard = _TypeGuard(_root=True)
2050
-
2051
- if hasattr(typing, "Self"):
2052
- Self = typing.Self
2053
- elif sys.version_info[:2] >= (3, 7):
2054
- # Vendored from cpython typing._SpecialForm
2055
- class _SpecialForm(typing._Final, _root=True):
2056
- __slots__ = ('_name', '__doc__', '_getitem')
2057
-
2058
- def __init__(self, getitem):
2059
- self._getitem = getitem
2060
- self._name = getitem.__name__
2061
- self.__doc__ = getitem.__doc__
2062
-
2063
- def __getattr__(self, item):
2064
- if item in {'__name__', '__qualname__'}:
2065
- return self._name
2066
-
2067
- raise AttributeError(item)
2068
-
2069
- def __mro_entries__(self, bases):
2070
- raise TypeError(f"Cannot subclass {self!r}")
2071
-
2072
- def __repr__(self):
2073
- return f'typing_extensions.{self._name}'
2074
-
2075
- def __reduce__(self):
2076
- return self._name
2077
-
2078
- def __call__(self, *args, **kwds):
2079
- raise TypeError(f"Cannot instantiate {self!r}")
2080
-
2081
- def __or__(self, other):
2082
- return typing.Union[self, other]
2083
-
2084
- def __ror__(self, other):
2085
- return typing.Union[other, self]
2086
-
2087
- def __instancecheck__(self, obj):
2088
- raise TypeError(f"{self} cannot be used with isinstance()")
2089
-
2090
- def __subclasscheck__(self, cls):
2091
- raise TypeError(f"{self} cannot be used with issubclass()")
2092
-
2093
- @typing._tp_cache
2094
- def __getitem__(self, parameters):
2095
- return self._getitem(self, parameters)
2096
-
2097
- @_SpecialForm
2098
- def Self(self, params):
2099
- """Used to spell the type of "self" in classes.
2100
-
2101
- Example::
2102
-
2103
- from typing import Self
2104
-
2105
- class ReturnsSelf:
2106
- def parse(self, data: bytes) -> Self:
2107
- ...
2108
- return self
2109
-
2110
- """
2111
-
2112
- raise TypeError(f"{self} is not subscriptable")
2113
- else:
2114
- class _Self(typing._FinalTypingBase, _root=True):
2115
- """Used to spell the type of "self" in classes.
2116
-
2117
- Example::
2118
-
2119
- from typing import Self
2120
-
2121
- class ReturnsSelf:
2122
- def parse(self, data: bytes) -> Self:
2123
- ...
2124
- return self
2125
-
2126
- """
2127
-
2128
- __slots__ = ()
2129
-
2130
- def __instancecheck__(self, obj):
2131
- raise TypeError(f"{self} cannot be used with isinstance().")
2132
-
2133
- def __subclasscheck__(self, cls):
2134
- raise TypeError(f"{self} cannot be used with issubclass().")
2135
-
2136
- Self = _Self(_root=True)
2137
-
2138
-
2139
- if hasattr(typing, 'Required'):
2140
- Required = typing.Required
2141
- NotRequired = typing.NotRequired
2142
- elif sys.version_info[:2] >= (3, 9):
2143
- class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
2144
- def __repr__(self):
2145
- return 'typing_extensions.' + self._name
2146
-
2147
- @_ExtensionsSpecialForm
2148
- def Required(self, parameters):
2149
- """A special typing construct to mark a key of a total=False TypedDict
2150
- as required. For example:
2151
-
2152
- class Movie(TypedDict, total=False):
2153
- title: Required[str]
2154
- year: int
2155
-
2156
- m = Movie(
2157
- title='The Matrix', # typechecker error if key is omitted
2158
- year=1999,
2159
- )
2160
-
2161
- There is no runtime checking that a required key is actually provided
2162
- when instantiating a related TypedDict.
2163
- """
2164
- item = typing._type_check(parameters, f'{self._name} accepts only single type')
2165
- return typing._GenericAlias(self, (item,))
2166
-
2167
- @_ExtensionsSpecialForm
2168
- def NotRequired(self, parameters):
2169
- """A special typing construct to mark a key of a TypedDict as
2170
- potentially missing. For example:
2171
-
2172
- class Movie(TypedDict):
2173
- title: str
2174
- year: NotRequired[int]
2175
-
2176
- m = Movie(
2177
- title='The Matrix', # typechecker error if key is omitted
2178
- year=1999,
2179
- )
2180
- """
2181
- item = typing._type_check(parameters, f'{self._name} accepts only single type')
2182
- return typing._GenericAlias(self, (item,))
2183
-
2184
- elif sys.version_info[:2] >= (3, 7):
2185
- class _RequiredForm(typing._SpecialForm, _root=True):
2186
- def __repr__(self):
2187
- return 'typing_extensions.' + self._name
2188
-
2189
- def __getitem__(self, parameters):
2190
- item = typing._type_check(parameters,
2191
- '{} accepts only single type'.format(self._name))
2192
- return typing._GenericAlias(self, (item,))
2193
-
2194
- Required = _RequiredForm(
2195
- 'Required',
2196
- doc="""A special typing construct to mark a key of a total=False TypedDict
2197
- as required. For example:
2198
-
2199
- class Movie(TypedDict, total=False):
2200
- title: Required[str]
2201
- year: int
2202
-
2203
- m = Movie(
2204
- title='The Matrix', # typechecker error if key is omitted
2205
- year=1999,
2206
- )
2207
-
2208
- There is no runtime checking that a required key is actually provided
2209
- when instantiating a related TypedDict.
2210
- """)
2211
- NotRequired = _RequiredForm(
2212
- 'NotRequired',
2213
- doc="""A special typing construct to mark a key of a TypedDict as
2214
- potentially missing. For example:
2215
-
2216
- class Movie(TypedDict):
2217
- title: str
2218
- year: NotRequired[int]
2219
-
2220
- m = Movie(
2221
- title='The Matrix', # typechecker error if key is omitted
2222
- year=1999,
2223
- )
2224
- """)
2225
- else:
2226
- # NOTE: Modeled after _Final's implementation when _FinalTypingBase available
2227
- class _MaybeRequired(typing._FinalTypingBase, _root=True):
2228
- __slots__ = ('__type__',)
2229
-
2230
- def __init__(self, tp=None, **kwds):
2231
- self.__type__ = tp
2232
-
2233
- def __getitem__(self, item):
2234
- cls = type(self)
2235
- if self.__type__ is None:
2236
- return cls(typing._type_check(item,
2237
- '{} accepts only single type.'.format(cls.__name__[1:])),
2238
- _root=True)
2239
- raise TypeError('{} cannot be further subscripted'
2240
- .format(cls.__name__[1:]))
2241
-
2242
- def _eval_type(self, globalns, localns):
2243
- new_tp = typing._eval_type(self.__type__, globalns, localns)
2244
- if new_tp == self.__type__:
2245
- return self
2246
- return type(self)(new_tp, _root=True)
2247
-
2248
- def __repr__(self):
2249
- r = super().__repr__()
2250
- if self.__type__ is not None:
2251
- r += '[{}]'.format(typing._type_repr(self.__type__))
2252
- return r
2253
-
2254
- def __hash__(self):
2255
- return hash((type(self).__name__, self.__type__))
2256
-
2257
- def __eq__(self, other):
2258
- if not isinstance(other, type(self)):
2259
- return NotImplemented
2260
- if self.__type__ is not None:
2261
- return self.__type__ == other.__type__
2262
- return self is other
2263
-
2264
- class _Required(_MaybeRequired, _root=True):
2265
- """A special typing construct to mark a key of a total=False TypedDict
2266
- as required. For example:
2267
-
2268
- class Movie(TypedDict, total=False):
2269
- title: Required[str]
2270
- year: int
2271
-
2272
- m = Movie(
2273
- title='The Matrix', # typechecker error if key is omitted
2274
- year=1999,
2275
- )
2276
-
2277
- There is no runtime checking that a required key is actually provided
2278
- when instantiating a related TypedDict.
2279
- """
2280
-
2281
- class _NotRequired(_MaybeRequired, _root=True):
2282
- """A special typing construct to mark a key of a TypedDict as
2283
- potentially missing. For example:
2284
-
2285
- class Movie(TypedDict):
2286
- title: str
2287
- year: NotRequired[int]
2288
-
2289
- m = Movie(
2290
- title='The Matrix', # typechecker error if key is omitted
2291
- year=1999,
2292
- )
2293
- """
2294
-
2295
- Required = _Required(_root=True)
2296
- NotRequired = _NotRequired(_root=True)

spaces/Atualli/node-media-server/app.js DELETED
@@ -1,18 +0,0 @@
- const NodeMediaServer = require('node-media-server');
-
- const config = {
-   rtmp: {
-     port: 7861,
-     chunk_size: 60000,
-     gop_cache: true,
-     ping: 30,
-     ping_timeout: 60
-   },
-   http: {
-     port: 7860,
-     allow_origin: '*'
-   }
- };
-
- var nms = new NodeMediaServer(config)
- nms.run();

spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py DELETED
@@ -1,82 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import dataclasses
3
- import logging
4
- from collections import abc
5
- from typing import Any
6
-
7
- from detectron2.utils.registry import _convert_target_to_string, locate
8
-
9
- __all__ = ["dump_dataclass", "instantiate"]
10
-
11
-
12
- def dump_dataclass(obj: Any):
13
- """
14
- Dump a dataclass recursively into a dict that can be later instantiated.
15
-
16
- Args:
17
- obj: a dataclass object
18
-
19
- Returns:
20
- dict
21
- """
22
- assert dataclasses.is_dataclass(obj) and not isinstance(
23
- obj, type
24
- ), "dump_dataclass() requires an instance of a dataclass."
25
- ret = {"_target_": _convert_target_to_string(type(obj))}
26
- for f in dataclasses.fields(obj):
27
- v = getattr(obj, f.name)
28
- if dataclasses.is_dataclass(v):
29
- v = dump_dataclass(v)
30
- if isinstance(v, (list, tuple)):
31
- v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
32
- ret[f.name] = v
33
- return ret
34
-
35
-
36
- def instantiate(cfg):
37
- """
38
- Recursively instantiate objects defined in dictionaries by
39
- "_target_" and arguments.
40
-
41
- Args:
42
- cfg: a dict-like object with "_target_" that defines the caller, and
43
- other keys that define the arguments
44
-
45
- Returns:
46
- object instantiated by cfg
47
- """
48
- from omegaconf import ListConfig
49
-
50
- if isinstance(cfg, ListConfig):
51
- lst = [instantiate(x) for x in cfg]
52
- return ListConfig(lst, flags={"allow_objects": True})
53
- if isinstance(cfg, list):
54
- # Specialize for list, because many classes take
55
- # list[objects] as arguments, such as ResNet, DatasetMapper
56
- return [instantiate(x) for x in cfg]
57
-
58
- if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
59
- # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
60
- # but faster: https://github.com/facebookresearch/hydra/issues/1200
61
- cfg = {k: instantiate(v) for k, v in cfg.items()}
62
- cls = cfg.pop("_target_")
63
- cls = instantiate(cls)
64
-
65
- if isinstance(cls, str):
66
- cls_name = cls
67
- cls = locate(cls_name)
68
- assert cls is not None, cls_name
69
- else:
70
- try:
71
- cls_name = cls.__module__ + "." + cls.__qualname__
72
- except Exception:
73
- # target could be anything, so the above could fail
74
- cls_name = str(cls)
75
- assert callable(cls), f"_target_ {cls} does not define a callable object"
76
- try:
77
- return cls(**cfg)
78
- except TypeError:
79
- logger = logging.getLogger(__name__)
80
- logger.error(f"Error when instantiating {cls_name}!")
81
- raise
82
- return cfg # return as-is if we don't know what to do

spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/mask_ops.py DELETED
@@ -1,275 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import numpy as np
3
- from typing import Tuple
4
- import torch
5
- from PIL import Image
6
- from torch.nn import functional as F
7
-
8
- __all__ = ["paste_masks_in_image"]
9
-
10
-
11
- BYTES_PER_FLOAT = 4
12
- # TODO: This memory limit may be too much or too little. It would be better to
13
- # determine it based on available resources.
14
- GPU_MEM_LIMIT = 1024 ** 3 # 1 GB memory limit
15
-
16
-
17
- def _do_paste_mask(masks, boxes, img_h: int, img_w: int, skip_empty: bool = True):
18
- """
19
- Args:
20
- masks: N, 1, H, W
21
- boxes: N, 4
22
- img_h, img_w (int):
23
- skip_empty (bool): only paste masks within the region that
24
- tightly bound all boxes, and returns the results this region only.
25
- An important optimization for CPU.
26
-
27
- Returns:
28
- if skip_empty == False, a mask of shape (N, img_h, img_w)
29
- if skip_empty == True, a mask of shape (N, h', w'), and the slice
30
- object for the corresponding region.
31
- """
32
- # On GPU, paste all masks together (up to chunk size)
33
- # by using the entire image to sample the masks
34
- # Compared to pasting them one by one,
35
- # this has more operations but is faster on COCO-scale dataset.
36
- device = masks.device
37
-
38
- if skip_empty and not torch.jit.is_scripting():
39
- x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to(
40
- dtype=torch.int32
41
- )
42
- x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
43
- y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
44
- else:
45
- x0_int, y0_int = 0, 0
46
- x1_int, y1_int = img_w, img_h
47
- x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
48
-
49
- N = masks.shape[0]
50
-
51
- img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
52
- img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
53
- img_y = (img_y - y0) / (y1 - y0) * 2 - 1
54
- img_x = (img_x - x0) / (x1 - x0) * 2 - 1
55
- # img_x, img_y have shapes (N, w), (N, h)
56
-
57
- gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
58
- gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
59
- grid = torch.stack([gx, gy], dim=3)
60
-
61
- if not torch.jit.is_scripting():
62
- if not masks.dtype.is_floating_point:
63
- masks = masks.float()
64
- img_masks = F.grid_sample(masks, grid.to(masks.dtype), align_corners=False)
65
-
66
- if skip_empty and not torch.jit.is_scripting():
67
- return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
68
- else:
69
- return img_masks[:, 0], ()
70
-
71
-
72
- # Annotate boxes as Tensor (but not Boxes) in order to use scripting
73
- @torch.jit.script_if_tracing
74
- def paste_masks_in_image(
75
- masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5
76
- ):
77
- """
78
- Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image.
79
- The location, height, and width for pasting each mask is determined by their
80
- corresponding bounding boxes in boxes.
81
-
82
- Note:
83
- This is a complicated but more accurate implementation. In actual deployment, it is
84
- often enough to use a faster but less accurate implementation.
85
- See :func:`paste_mask_in_image_old` in this file for an alternative implementation.
86
-
87
- Args:
88
- masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of
89
- detected object instances in the image and Hmask, Wmask are the mask width and mask
90
- height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1].
91
- boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4).
92
- boxes[i] and masks[i] correspond to the same object instance.
93
- image_shape (tuple): height, width
94
- threshold (float): A threshold in [0, 1] for converting the (soft) masks to
95
- binary masks.
96
-
97
- Returns:
98
- img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
99
- number of detected object instances and Himage, Wimage are the image width
100
- and height. img_masks[i] is a binary mask for object instance i.
101
- """
102
-
103
- assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported"
104
- N = len(masks)
105
- if N == 0:
106
- return masks.new_empty((0,) + image_shape, dtype=torch.uint8)
107
- if not isinstance(boxes, torch.Tensor):
108
- boxes = boxes.tensor
109
- device = boxes.device
110
- assert len(boxes) == N, boxes.shape
111
-
112
- img_h, img_w = image_shape
113
-
114
- # The actual implementation split the input into chunks,
115
- # and paste them chunk by chunk.
116
- if device.type == "cpu" or torch.jit.is_scripting():
117
- # CPU is most efficient when they are pasted one by one with skip_empty=True
118
- # so that it performs minimal number of operations.
119
- num_chunks = N
120
- else:
121
- # GPU benefits from parallelism for larger chunks, but may have memory issues
122
- # int(img_h) because shape may be tensors in tracing
123
- num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
124
- assert (
125
- num_chunks <= N
126
- ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it"
127
- chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
128
-
129
- img_masks = torch.zeros(
130
- N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8
131
- )
132
- for inds in chunks:
133
- masks_chunk, spatial_inds = _do_paste_mask(
134
- masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu"
135
- )
136
-
137
- if threshold >= 0:
138
- masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
139
- else:
140
- # for visualization and debugging
141
- masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
142
-
143
- if torch.jit.is_scripting(): # Scripting does not use the optimized codepath
144
- img_masks[inds] = masks_chunk
145
- else:
146
- img_masks[(inds,) + spatial_inds] = masks_chunk
147
- return img_masks
148
-
149
-
150
- # Below is the original paste function (from Detectron1), which has
151
- # larger quantization error.
152
- # It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample.
153
-
154
-
155
- def paste_mask_in_image_old(mask, box, img_h, img_w, threshold):
156
- """
157
- Paste a single mask in an image.
158
- This is a per-box implementation of :func:`paste_masks_in_image`.
159
- This function has larger quantization error due to incorrect pixel
160
- modeling and is not used any more.
161
-
162
- Args:
163
- mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single
164
- object instance. Values are in [0, 1].
165
- box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners
166
- of the object instance.
167
- img_h, img_w (int): Image height and width.
168
- threshold (float): Mask binarization threshold in [0, 1].
169
-
170
- Returns:
171
- im_mask (Tensor):
172
- The resized and binarized object mask pasted into the original
173
- image plane (a tensor of shape (img_h, img_w)).
174
- """
175
- # Conversion from continuous box coordinates to discrete pixel coordinates
176
- # via truncation (cast to int32). This determines which pixels to paste the
177
- # mask onto.
178
- box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion
179
- # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to
180
- # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1
181
- # pixels (not x1 - x0 pixels).
182
- samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width
183
- samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height
184
-
185
- # Resample the mask from its original grid to the new samples_w x samples_h grid
186
- mask = Image.fromarray(mask.cpu().numpy())
187
- mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR)
188
- mask = np.array(mask, copy=False)
189
-
190
- if threshold >= 0:
191
- mask = np.array(mask > threshold, dtype=np.uint8)
192
- mask = torch.from_numpy(mask)
193
- else:
194
- # for visualization and debugging, we also
195
- # allow it to return an unmodified mask
196
- mask = torch.from_numpy(mask * 255).to(torch.uint8)
197
-
198
- im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8)
199
- x_0 = max(box[0], 0)
200
- x_1 = min(box[2] + 1, img_w)
201
- y_0 = max(box[1], 0)
202
- y_1 = min(box[3] + 1, img_h)
203
-
204
- im_mask[y_0:y_1, x_0:x_1] = mask[
205
- (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
206
- ]
207
- return im_mask
208
-
209
-
210
- # Our pixel modeling requires extrapolation for any continuous
211
- # coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks,
212
- # we would like this extrapolation to be an interpolation between boundary values and zero,
213
- # instead of using absolute zero or boundary values.
214
- # Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this:
215
- # masks, scale = pad_masks(masks[:, 0, :, :], 1)
216
- # boxes = scale_boxes(boxes.tensor, scale)
217
-
218
-
219
- def pad_masks(masks, padding):
220
- """
221
- Args:
222
- masks (tensor): A tensor of shape (B, M, M) representing B masks.
223
- padding (int): Number of cells to pad on all sides.
224
-
225
- Returns:
226
- The padded masks and the scale factor of the padding size / original size.
227
- """
228
- B = masks.shape[0]
229
- M = masks.shape[-1]
230
- pad2 = 2 * padding
231
- scale = float(M + pad2) / M
232
- padded_masks = masks.new_zeros((B, M + pad2, M + pad2))
233
- padded_masks[:, padding:-padding, padding:-padding] = masks
234
- return padded_masks, scale
235
-
236
-
237
- def scale_boxes(boxes, scale):
238
- """
239
- Args:
240
- boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4
241
- coords representing the corners x0, y0, x1, y1,
242
- scale (float): The box scaling factor.
243
-
244
- Returns:
245
- Scaled boxes.
246
- """
247
- w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5
248
- h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5
249
- x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
250
- y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
251
-
252
- w_half *= scale
253
- h_half *= scale
254
-
255
- scaled_boxes = torch.zeros_like(boxes)
256
- scaled_boxes[:, 0] = x_c - w_half
257
- scaled_boxes[:, 2] = x_c + w_half
258
- scaled_boxes[:, 1] = y_c - h_half
259
- scaled_boxes[:, 3] = y_c + h_half
260
- return scaled_boxes
261
-
262
-
263
- @torch.jit.script_if_tracing
264
- def _paste_masks_tensor_shape(
265
- masks: torch.Tensor,
266
- boxes: torch.Tensor,
267
- image_shape: Tuple[torch.Tensor, torch.Tensor],
268
- threshold: float = 0.5,
269
- ):
270
- """
271
- A wrapper of paste_masks_in_image where image_shape is Tensor.
272
- During tracing, shapes might be tensors instead of ints. The Tensor->int
273
- conversion should be scripted rather than traced.
274
- """
275
- return paste_masks_in_image(masks, boxes, (int(image_shape[0]), int(image_shape[1])), threshold)
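For orientation, a minimal usage sketch of `paste_masks_in_image` as defined above (not part of the deleted file); the shapes, threshold, and the commented-out import path are illustrative assumptions:

```python
# Hypothetical sketch: paste 28x28 soft mask predictions back into image space.
import torch
# from detectron2.layers.mask_ops import paste_masks_in_image  # assumed module path

N, S = 5, 28                                    # instances, mask resolution
img_h, img_w = 480, 640
masks = torch.rand(N, S, S)                     # (Bimg, Hmask, Wmask), values in [0, 1]
boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]]).repeat(N, 1)  # XYXY, (Bimg, 4)

img_masks = paste_masks_in_image(masks, boxes, (img_h, img_w), threshold=0.5)
assert img_masks.shape == (N, img_h, img_w)     # one full-image mask per instance
assert img_masks.dtype == torch.bool            # bool because threshold >= 0
```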
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_model_e2e.py DELETED
@@ -1,223 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
-
3
-
4
- import itertools
5
- import unittest
6
- from contextlib import contextmanager
7
- from copy import deepcopy
8
- import torch
9
-
10
- from detectron2.structures import BitMasks, Boxes, ImageList, Instances
11
- from detectron2.utils.events import EventStorage
12
- from detectron2.utils.testing import get_model_no_weights
13
-
14
-
15
- @contextmanager
16
- def typecheck_hook(model, *, in_dtype=None, out_dtype=None):
17
- """
18
- Check that the model is called with the given input/output dtype
19
- """
20
- if not isinstance(in_dtype, set):
21
- in_dtype = {in_dtype}
22
- if not isinstance(out_dtype, set):
23
- out_dtype = {out_dtype}
24
-
25
- def flatten(x):
26
- if isinstance(x, torch.Tensor):
27
- return [x]
28
- if isinstance(x, (list, tuple)):
29
- return list(itertools.chain(*[flatten(t) for t in x]))
30
- if isinstance(x, dict):
31
- return flatten(list(x.values()))
32
- return []
33
-
34
- def hook(module, input, output):
35
- if in_dtype is not None:
36
- dtypes = {x.dtype for x in flatten(input)}
37
- assert (
38
- dtypes == in_dtype
39
- ), f"Expected input dtype of {type(module)} is {in_dtype}. Got {dtypes} instead!"
40
-
41
- if out_dtype is not None:
42
- dtypes = {x.dtype for x in flatten(output)}
43
- assert (
44
- dtypes == out_dtype
45
- ), f"Expected output dtype of {type(module)} is {out_dtype}. Got {dtypes} instead!"
46
-
47
- with model.register_forward_hook(hook):
48
- yield
49
-
50
-
51
- def create_model_input(img, inst=None):
52
- if inst is not None:
53
- return {"image": img, "instances": inst}
54
- else:
55
- return {"image": img}
56
-
57
-
58
- def get_empty_instance(h, w):
59
- inst = Instances((h, w))
60
- inst.gt_boxes = Boxes(torch.rand(0, 4))
61
- inst.gt_classes = torch.tensor([]).to(dtype=torch.int64)
62
- inst.gt_masks = BitMasks(torch.rand(0, h, w))
63
- return inst
64
-
65
-
66
- def get_regular_bitmask_instances(h, w):
67
- inst = Instances((h, w))
68
- inst.gt_boxes = Boxes(torch.rand(3, 4))
69
- inst.gt_boxes.tensor[:, 2:] += inst.gt_boxes.tensor[:, :2]
70
- inst.gt_classes = torch.tensor([3, 4, 5]).to(dtype=torch.int64)
71
- inst.gt_masks = BitMasks((torch.rand(3, h, w) > 0.5))
72
- return inst
73
-
74
-
75
- class InstanceModelE2ETest:
76
- def setUp(self):
77
- torch.manual_seed(43)
78
- self.model = get_model_no_weights(self.CONFIG_PATH)
79
-
80
- def _test_eval(self, input_sizes):
81
- inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes]
82
- self.model.eval()
83
- self.model(inputs)
84
-
85
- def _test_train(self, input_sizes, instances):
86
- assert len(input_sizes) == len(instances)
87
- inputs = [
88
- create_model_input(torch.rand(3, s[0], s[1]), inst)
89
- for s, inst in zip(input_sizes, instances)
90
- ]
91
- self.model.train()
92
- with EventStorage():
93
- losses = self.model(inputs)
94
- sum(losses.values()).backward()
95
- del losses
96
-
97
- def _inf_tensor(self, *shape):
98
- return 1.0 / torch.zeros(*shape, device=self.model.device)
99
-
100
- def _nan_tensor(self, *shape):
101
- return torch.zeros(*shape, device=self.model.device).fill_(float("nan"))
102
-
103
- def test_empty_data(self):
104
- instances = [get_empty_instance(200, 250), get_empty_instance(200, 249)]
105
- self._test_eval([(200, 250), (200, 249)])
106
- self._test_train([(200, 250), (200, 249)], instances)
107
-
108
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
109
- def test_eval_tocpu(self):
110
- model = deepcopy(self.model).cpu()
111
- model.eval()
112
- input_sizes = [(200, 250), (200, 249)]
113
- inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes]
114
- model(inputs)
115
-
116
-
117
- class MaskRCNNE2ETest(InstanceModelE2ETest, unittest.TestCase):
118
- CONFIG_PATH = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
119
-
120
- def test_half_empty_data(self):
121
- instances = [get_empty_instance(200, 250), get_regular_bitmask_instances(200, 249)]
122
- self._test_train([(200, 250), (200, 249)], instances)
123
-
124
- # This test is flaky because in some environment the output features are zero due to relu
125
- # def test_rpn_inf_nan_data(self):
126
- # self.model.eval()
127
- # for tensor in [self._inf_tensor, self._nan_tensor]:
128
- # images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
129
- # features = {
130
- # "p2": tensor(1, 256, 256, 256),
131
- # "p3": tensor(1, 256, 128, 128),
132
- # "p4": tensor(1, 256, 64, 64),
133
- # "p5": tensor(1, 256, 32, 32),
134
- # "p6": tensor(1, 256, 16, 16),
135
- # }
136
- # props, _ = self.model.proposal_generator(images, features)
137
- # self.assertEqual(len(props[0]), 0)
138
-
139
- def test_roiheads_inf_nan_data(self):
140
- self.model.eval()
141
- for tensor in [self._inf_tensor, self._nan_tensor]:
142
- images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
143
- features = {
144
- "p2": tensor(1, 256, 256, 256),
145
- "p3": tensor(1, 256, 128, 128),
146
- "p4": tensor(1, 256, 64, 64),
147
- "p5": tensor(1, 256, 32, 32),
148
- "p6": tensor(1, 256, 16, 16),
149
- }
150
- props = [Instances((510, 510))]
151
- props[0].proposal_boxes = Boxes([[10, 10, 20, 20]]).to(device=self.model.device)
152
- props[0].objectness_logits = torch.tensor([1.0]).reshape(1, 1)
153
- det, _ = self.model.roi_heads(images, features, props)
154
- self.assertEqual(len(det[0]), 0)
155
-
156
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
157
- def test_autocast(self):
158
- from torch.cuda.amp import autocast
159
-
160
- inputs = [{"image": torch.rand(3, 100, 100)}]
161
- self.model.eval()
162
- with autocast(), typecheck_hook(
163
- self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16
164
- ), typecheck_hook(
165
- self.model.roi_heads.box_predictor, in_dtype=torch.float16, out_dtype=torch.float16
166
- ):
167
- out = self.model.inference(inputs, do_postprocess=False)[0]
168
- self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32)
169
- self.assertEqual(out.pred_masks.dtype, torch.float16)
170
- self.assertEqual(out.scores.dtype, torch.float32) # scores comes from softmax
171
-
172
-
173
- class RetinaNetE2ETest(InstanceModelE2ETest, unittest.TestCase):
174
- CONFIG_PATH = "COCO-Detection/retinanet_R_50_FPN_1x.yaml"
175
-
176
- def test_inf_nan_data(self):
177
- self.model.eval()
178
- self.model.score_threshold = -999999999
179
- for tensor in [self._inf_tensor, self._nan_tensor]:
180
- images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
181
- features = [
182
- tensor(1, 256, 128, 128),
183
- tensor(1, 256, 64, 64),
184
- tensor(1, 256, 32, 32),
185
- tensor(1, 256, 16, 16),
186
- tensor(1, 256, 8, 8),
187
- ]
188
- pred_logits, pred_anchor_deltas = self.model.head(features)
189
- pred_logits = [tensor(*x.shape) for x in pred_logits]
190
- pred_anchor_deltas = [tensor(*x.shape) for x in pred_anchor_deltas]
191
- det = self.model.forward_inference(images, features, [pred_logits, pred_anchor_deltas])
192
- # all predictions (if any) are infinite or nan
193
- if len(det[0]):
194
- self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0)
195
-
196
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
197
- def test_autocast(self):
198
- from torch.cuda.amp import autocast
199
-
200
- inputs = [{"image": torch.rand(3, 100, 100)}]
201
- self.model.eval()
202
- with autocast(), typecheck_hook(
203
- self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16
204
- ), typecheck_hook(self.model.head, in_dtype=torch.float16, out_dtype=torch.float16):
205
- out = self.model(inputs)[0]["instances"]
206
- self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32)
207
- self.assertEqual(out.scores.dtype, torch.float16)
208
-
209
-
210
- class SemSegE2ETest(unittest.TestCase):
211
- CONFIG_PATH = "Misc/semantic_R_50_FPN_1x.yaml"
212
-
213
- def setUp(self):
214
- torch.manual_seed(43)
215
- self.model = get_model_no_weights(self.CONFIG_PATH)
216
-
217
- def _test_eval(self, input_sizes):
218
- inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes]
219
- self.model.eval()
220
- self.model(inputs)
221
-
222
- def test_forward(self):
223
- self._test_eval([(200, 250), (200, 249)])
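The `typecheck_hook` helper above is self-contained and reusable outside these tests; a minimal sketch of the same pattern on a toy module (the module and dtypes are illustrative assumptions):

```python
# Hypothetical sketch: typecheck_hook (defined above) registers a forward hook
# that asserts the dtypes flowing into and out of a module.
import torch

toy = torch.nn.Linear(4, 2).half()
with typecheck_hook(toy, in_dtype=torch.float16, out_dtype=torch.float16):
    toy(torch.rand(3, 4, dtype=torch.float16))   # passes
    # toy(torch.rand(3, 4))                      # a float32 input would trip the assert
```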
 
spaces/Bart92/RVC_HF/julius/bands.py DELETED
@@ -1,119 +0,0 @@
1
- # File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
2
- # Author: adefossez, 2020
3
- """
4
- Decomposition of a signal over frequency bands in the waveform domain.
5
- """
6
- from typing import Optional, Sequence
7
- import torch
8
-
9
- from .core import mel_frequencies
10
- from .lowpass import LowPassFilters
11
- from .utils import simple_repr
12
-
13
-
14
- class SplitBands(torch.nn.Module):
15
- """
16
- Decomposes a signal over the given frequency bands in the waveform domain using
17
- a cascade of low pass filters as implemented by `julius.lowpass.LowPassFilters`.
18
- You can either specify explicitly the frequency cutoffs, or just the number of bands,
19
- in which case the frequency cutoffs will be spread out evenly in mel scale.
20
-
21
- Args:
22
- sample_rate (float): Sample rate of the input signal in Hz.
23
- n_bands (int or None): number of bands, when not giving them explicitly with `cutoffs`.
24
- In that case, the cutoff frequencies will be evenly spaced in mel-space.
25
- cutoffs (list[float] or None): list of frequency cutoffs in Hz.
26
- pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`,
27
- the output will have the same length as the input.
28
- zeros (float): Number of zero crossings to keep. See `LowPassFilters` for more information.
29
- fft (bool or None): See `LowPassFilters` for more info.
30
-
31
- ..note::
32
- The sum of all the bands will always be the input signal.
33
-
34
- ..warning::
35
- Unlike `julius.lowpass.LowPassFilters`, the cutoff frequencies must be provided in Hz along
36
- with the sample rate.
37
-
38
- Shape:
39
-
40
- - Input: `[*, T]`
41
- - Output: `[B, *, T']`, with `T'=T` if `pad` is True.
42
- If `n_bands` was provided, `B = n_bands` otherwise `B = len(cutoffs) + 1`
43
-
44
- >>> bands = SplitBands(sample_rate=128, n_bands=10)
45
- >>> x = torch.randn(6, 4, 1024)
46
- >>> list(bands(x).shape)
47
- [10, 6, 4, 1024]
48
- """
49
-
50
- def __init__(self, sample_rate: float, n_bands: Optional[int] = None,
51
- cutoffs: Optional[Sequence[float]] = None, pad: bool = True,
52
- zeros: float = 8, fft: Optional[bool] = None):
53
- super().__init__()
54
- if (cutoffs is None) + (n_bands is None) != 1:
55
- raise ValueError("You must provide either n_bands or cutoffs, but not both.")
56
-
57
- self.sample_rate = sample_rate
58
- self.n_bands = n_bands
59
- self._cutoffs = list(cutoffs) if cutoffs is not None else None
60
- self.pad = pad
61
- self.zeros = zeros
62
- self.fft = fft
63
-
64
- if cutoffs is None:
65
- if n_bands is None:
66
- raise ValueError("You must provide one of n_bands or cutoffs.")
67
- if not n_bands >= 1:
68
- raise ValueError(f"n_bands must be greater than one (got {n_bands})")
69
- cutoffs = mel_frequencies(n_bands + 1, 0, sample_rate / 2)[1:-1]
70
- else:
71
- if max(cutoffs) > 0.5 * sample_rate:
72
- raise ValueError("A cutoff above sample_rate/2 does not make sense.")
73
- if len(cutoffs) > 0:
74
- self.lowpass = LowPassFilters(
75
- [c / sample_rate for c in cutoffs], pad=pad, zeros=zeros, fft=fft)
76
- else:
77
- # Here I cannot make both TorchScript and MyPy happy.
78
- # I miss the good old times, before all this madness was created.
79
- self.lowpass = None # type: ignore
80
-
81
- def forward(self, input):
82
- if self.lowpass is None:
83
- return input[None]
84
- lows = self.lowpass(input)
85
- low = lows[0]
86
- bands = [low]
87
- for low_and_band in lows[1:]:
88
- # Get a bandpass filter by subtracting lowpasses
89
- band = low_and_band - low
90
- bands.append(band)
91
- low = low_and_band
92
- # Last band is whatever is left in the signal
93
- bands.append(input - low)
94
- return torch.stack(bands)
95
-
96
- @property
97
- def cutoffs(self):
98
- if self._cutoffs is not None:
99
- return self._cutoffs
100
- elif self.lowpass is not None:
101
- return [c * self.sample_rate for c in self.lowpass.cutoffs]
102
- else:
103
- return []
104
-
105
- def __repr__(self):
106
- return simple_repr(self, overrides={"cutoffs": self._cutoffs})
107
-
108
-
109
- def split_bands(signal: torch.Tensor, sample_rate: float, n_bands: Optional[int] = None,
110
- cutoffs: Optional[Sequence[float]] = None, pad: bool = True,
111
- zeros: float = 8, fft: Optional[bool] = None):
112
- """
113
- Functional version of `SplitBands`, refer to this class for more information.
114
-
115
- >>> x = torch.randn(6, 4, 1024)
116
- >>> list(split_bands(x, sample_rate=64, cutoffs=[12, 24]).shape)
117
- [3, 6, 4, 1024]
118
- """
119
- return SplitBands(sample_rate, n_bands, cutoffs, pad, zeros, fft).to(signal)(signal)
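As the `SplitBands` docstring notes, the returned bands always sum back to the input signal; a minimal sketch checking that property through the functional wrapper above (sample rate and shapes are illustrative assumptions):

```python
# Hypothetical sketch: the bands from split_bands (defined above) reconstruct
# the original signal when summed, up to float rounding.
import torch

x = torch.randn(2, 1, 4096)
bands = split_bands(x, sample_rate=16000, n_bands=8)   # shape [8, 2, 1, 4096]
assert bands.shape == (8, 2, 1, 4096)
assert torch.allclose(bands.sum(dim=0), x, atol=1e-5)
```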
 
spaces/Bart92/RVC_HF/train/losses.py DELETED
@@ -1,59 +0,0 @@
- import torch
- from torch.nn import functional as F
-
-
- def feature_loss(fmap_r, fmap_g):
-     loss = 0
-     for dr, dg in zip(fmap_r, fmap_g):
-         for rl, gl in zip(dr, dg):
-             rl = rl.float().detach()
-             gl = gl.float()
-             loss += torch.mean(torch.abs(rl - gl))
-
-     return loss * 2
-
-
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
-     loss = 0
-     r_losses = []
-     g_losses = []
-     for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
-         dr = dr.float()
-         dg = dg.float()
-         r_loss = torch.mean((1 - dr) ** 2)
-         g_loss = torch.mean(dg**2)
-         loss += r_loss + g_loss
-         r_losses.append(r_loss.item())
-         g_losses.append(g_loss.item())
-
-     return loss, r_losses, g_losses
-
-
- def generator_loss(disc_outputs):
-     loss = 0
-     gen_losses = []
-     for dg in disc_outputs:
-         dg = dg.float()
-         l = torch.mean((1 - dg) ** 2)
-         gen_losses.append(l)
-         loss += l
-
-     return loss, gen_losses
-
-
- def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
-     """
-     z_p, logs_q: [b, h, t_t]
-     m_p, logs_p: [b, h, t_t]
-     """
-     z_p = z_p.float()
-     logs_q = logs_q.float()
-     m_p = m_p.float()
-     logs_p = logs_p.float()
-     z_mask = z_mask.float()
-
-     kl = logs_p - logs_q - 0.5
-     kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
-     kl = torch.sum(kl * z_mask)
-     l = kl / torch.sum(z_mask)
-     return l
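Written out, the per-element term that `kl_loss` above accumulates (with `logs_q`, `logs_p` the log standard deviations and `m_p` the prior mean) is

$$
\mathrm{kl} \;=\; \log\sigma_p - \log\sigma_q - \tfrac{1}{2} + \frac{(z_p - \mu_p)^2}{2\sigma_p^{2}},
\qquad
\mathcal{L}_{\mathrm{KL}} \;=\; \frac{\sum \mathrm{kl}\cdot z_{\mathrm{mask}}}{\sum z_{\mathrm{mask}}},
$$

i.e. a KL-style penalty between the posterior (represented by the sample `z_p` with log-scale `logs_q`) and the prior `(m_p, logs_p)`, averaged over the masked time-feature positions.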
 
spaces/Benson/text-generation/Examples/Descargar Garena Drifters Velocidad.md DELETED
@@ -1,73 +0,0 @@
1
-
2
- <h1>Descargar Garena AOV Mod dinero ilimitado: Cómo obtener la mejor experiencia MOBA en su dispositivo móvil</h1>
3
- <p>Si eres un fan de los juegos multijugador online battle arena (MOBA), es posible que hayas oído hablar de Garena AOV, uno de los juegos más populares y emocionantes de este género. Pero ¿sabía usted que puede descargar Garena AOV mod dinero ilimitado y obtener acceso a características premium, contenido y recursos que mejorarán su experiencia de juego? En este artículo, te contaremos todo lo que necesitas saber sobre Garena AOV, por qué deberías descargar su versión mod y cómo hacerlo de forma segura y fácil. </p>
4
- <h2>¿Qué es Garena AOV? </h2>
5
- <p>Garena AOV es un nuevo juego 5v5 MOBA que fue desarrollado por Tencent Games y publicado por Garena. También es conocida como Arena del Valor o Reino del Valor en algunas regiones. El juego cuenta con gráficos ultra-HD, jugabilidad suave, héroes equilibrados y varios modos para adaptarse a diferentes preferencias y niveles de habilidad. Puedes elegir entre más de 100 héroes, cada uno con sus propias habilidades, roles y estilos. También puedes hacer equipo con tus amigos u otros jugadores en línea y competir en partidos clasificados, partidos casuales o eventos especiales. El juego es gratis para descargar y jugar, pero también ofrece compras en la aplicación para algunos artículos y servicios. </p>
6
- <h2>descargar garena drifters velocidad</h2><br /><p><b><b>Download Zip</b> &raquo;&raquo;&raquo; <a href="https://bltlly.com/2v6ICS">https://bltlly.com/2v6ICS</a></b></p><br /><br />
7
- <h3>Características de Garena AOV</h3>
8
- <p>Algunas de las características que hacen que Garena AOV se destaque de otros juegos de MOBA son:</p>
9
- <ul>
10
- <li>Una diversa lista de héroes, incluyendo personajes originales y con licencia de DC Comics, como Batman, Superman, Wonder Woman, Joker, Harley Quinn y más. </li>
11
- <li>Un sistema de juego justo y equilibrado que premia la habilidad y el trabajo en equipo, no la mecánica de pago a ganar. </li>
12
- <li>Una variedad de modos de juego, como Grand Battle (5v5), Valley Skirmish (3v3), Abyssal Clash (5v5 héroes al azar), Solo Battle (1v1), Hook Wars (5v5 con ganchos), Death Match (5v5 con respawns ilimitados), y más. </li>
13
-
14
- <li>Una plataforma social e interactiva que te permite chatear con tus amigos, unirte a gremios, ver transmisiones en vivo, compartir aspectos destacados y ganar recompensas. </li>
15
- </ul>
16
- <h3>Beneficios de jugar Garena AOV</h3>
17
- <p>Jugar a Garena AOV puede traerte muchos beneficios, como:</p>
18
- <ul>
19
- <li>Mejorar tu pensamiento estratégico, toma de decisiones, comunicación y habilidades de trabajo en equipo. </li>
20
- <li>Diviértete y diviértete con tus amigos u otros jugadores de todo el mundo. </li>
21
- <li>Aprender cosas nuevas sobre diferentes culturas, mitos, leyendas e historias a través de los héroes y sus antecedentes. </li>
22
- <li>Expresar su creatividad y personalidad a través de la personalización de sus héroes, pieles, emblemas, marcos, efectos, etc.</li>
23
- <li>Obtener recompensas y reconocimiento por tus logros y rendimiento en el juego. </li>
24
- </ul>
25
- <h2>¿Por qué descargar Garena AOV mod dinero ilimitado? </h2>
26
- <p>Como mencionamos anteriormente, Garena AOV es gratis para descargar y jugar, pero también tiene algunas compras en la aplicación que pueden mejorar su experiencia de juego. Por ejemplo, puedes comprar gemas, vales, monedas de oro, cofres de arcanos, cofres de héroes, cofres de piel, etc. Estos artículos pueden ayudarte a desbloquear nuevos héroes, pieles, conjuntos de arcanos, talentos, etc. Sin embargo, estos artículos no son baratos y pueden costar mucho dinero real. No todos pueden permitirse gastar tanto dinero en un juego, especialmente si tienen un presupuesto ajustado o tienen otras prioridades. Es por eso que algunas personas buscan maneras de obtener estos artículos de forma gratuita o a un costo más bajo. Una de las maneras de hacer eso es descargar Garena AOV mod ilimitado dinero. </p>
27
- <h3>Ventajas de usar Garena AOV mod unlimited money</h3>
28
- <p>Garena AOV mod unlimited money es una versión modificada del juego original que te da acceso a gemas ilimitadas, vales, monedas de oro y otros recursos. Con este mod, puedes:</p>
29
- <ul>
30
- <li>Desbloquea todos los héroes y skins que quieras, sin tener que esperar eventos, misiones o sorteos. </li>
31
-
32
- <li>Compre cualquier artículo de la tienda, como emblemas, marcos, efectos, etc., sin tener que preocuparse por quedarse sin joyas o cupones. </li>
33
- <li>Disfruta del juego sin anuncios ni interrupciones. </li>
34
- <li>Tener ventaja sobre tus oponentes en el juego, especialmente si están usando la versión normal. </li>
35
- </ul>
36
- <h3>Los riesgos de usar Garena AOV mod dinero ilimitado</h3>
37
- <p>Sin embargo, el uso de Garena AOV mod ilimitado dinero también viene con algunos riesgos y desventajas que usted debe ser consciente de antes de descargarlo. Algunos de ellos son:</p>
38
- <ul>
39
- <li>El mod puede no ser compatible con la última versión del juego o con su dispositivo. Esto puede causar fallos, fallos, errores o un rendimiento deficiente. </li>
40
- <li>El mod puede contener virus, malware, spyware u otros programas dañinos que pueden dañar su dispositivo o robar su información personal. </li>
41
- <li>El mod puede violar los términos y condiciones del juego y conseguir que se le prohibió jugar. Esto puede resultar en la pérdida de su cuenta, progreso y datos. </li>
42
- <li>El mod puede arruinar la diversión y el desafío del juego por lo que es demasiado fácil o aburrido. Puede perder interés en el juego o sentirse culpable por hacer trampa. </li>
43
- <li>El mod puede no funcionar como se anuncia o tener algunos costos ocultos o limitaciones. Usted puede terminar perdiendo su tiempo y recursos en algo que no entrega lo que esperaba. </li>
44
- </ul>
45
- <h2>Cómo descargar e instalar Garena AOV mod dinero ilimitado? </h2>
46
- <p>Si todavía desea descargar e instalar Garena AOV mod dinero ilimitado a pesar de los riesgos, es necesario seguir algunos pasos con cuidado y cautela. Estos son los pasos que debes seguir:</p>
47
- <h3>Paso 1: Encontrar una fuente confiable para el archivo apk mod</h3>
48
-
49
- <h3>Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo</h3>
50
- <p>Lo siguiente que debe hacer es habilitar fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo > seguridad > fuentes desconocidas > activar. También es posible que tenga que desactivar cualquier software antivirus o firewall que pueda bloquear o interferir con el proceso de instalación. </p>
51
- <p></p>
52
- <h3>Paso 3: Descargar e instalar el archivo apk mod</h3>
53
- <p>La tercera cosa que necesita hacer es descargar e instalar el archivo apk mod en su dispositivo. Para hacer esto, vaya a la página web donde encontró el mod y haga clic en el botón de descarga. Espere a que termine la descarga y luego localice el archivo en el almacenamiento del dispositivo. Toque en el archivo y siga las instrucciones en la pantalla para instalarlo. Es posible que necesite conceder algunos permisos o aceptar algunos términos y condiciones durante el proceso de instalación. </p>
54
- <h3>Paso 4: Iniciar el juego y disfrutar del dinero ilimitado</h3>
55
- <p>Lo último que tienes que hacer es lanzar el juego y disfrutar del dinero ilimitado. Para ello, abre el juego desde el cajón de la aplicación o la pantalla de inicio e inicia sesión con tu cuenta. Deberías ver que tienes gemas ilimitadas, vales, monedas de oro y otros recursos en tu cuenta. Ahora puedes usarlos para comprar lo que quieras de la tienda o desbloquear cualquier héroe o piel que te guste. Diviértete jugando Garena AOV con tus amigos u otros jugadores en línea! </p>
56
- <h2>Conclusión</h2>
57
-
58
- <h2>Preguntas frecuentes</h2>
59
- <p>Aquí hay algunas preguntas frecuentes sobre Garena AOV mod unlimited money:</p>
60
- <ol>
61
- <li><b>¿Es Garena AOV dinero ilimitado legal? </b></li>
62
- <p>No, Garena AOV mod dinero ilimitado no es legal. Es una versión modificada del juego original que viola los términos y condiciones del juego y sus desarrolladores. Usar este mod puede hacer que te prohíban jugar el juego o enfrentar acciones legales de las autoridades. </p>
63
- <li><b>¿Es seguro el dinero ilimitado Garena AOV mod? </b></li>
64
- <p>No necesariamente. Garena AOV mod dinero ilimitado puede contener virus, malware, spyware, u otros programas dañinos que pueden dañar su dispositivo o robar su información personal. Siempre debe escanear el archivo apk mod con un software antivirus de buena reputación antes de descargar e instalar. También debe hacer copias de seguridad de sus datos y utilizar una cuenta secundaria para jugar el juego con este mod. </p>
65
- <li><b>¿Garena AOV mod es dinero ilimitado gratis? </b></li>
66
- <p>Sí, Garena AOV mod dinero ilimitado es gratis para descargar y usar. Sin embargo, algunos sitios web pueden pedirle que complete encuestas, ofertas o tareas antes de darle el enlace de descarga. Usted debe evitar estos sitios web, ya que pueden ser estafas o intentos de phishing. También debe tener cuidado con los costos ocultos o limitaciones que pueden venir con este mod. </p>
67
- <li><b>¿Cómo puedo actualizar Garena AOV mod unlimited money? </b></li>
68
- <p>Puede actualizar Garena AOV mod dinero ilimitado siguiendo los mismos pasos que descargarlo e instalarlo. Sin embargo, siempre debes comprobar si el mod es compatible con la última versión del juego o con tu dispositivo antes de actualizarlo. También debe hacer una copia de seguridad de sus datos y desinstalar la versión anterior del mod antes de instalar el nuevo. </p>
69
- <li><b>¿Dónde puedo encontrar más información sobre Garena AOV? </b></li>
70
-
71
- </ol></p> 64aa2da5cf<br />
72
- <br />
73
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/lexer.py DELETED
@@ -1,883 +0,0 @@
1
- """
2
- pygments.lexer
3
- ~~~~~~~~~~~~~~
4
-
5
- Base lexer classes.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- import re
12
- import sys
13
- import time
14
-
15
- from pip._vendor.pygments.filter import apply_filters, Filter
16
- from pip._vendor.pygments.filters import get_filter_by_name
17
- from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
18
- from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
19
- make_analysator, Future, guess_decode
20
- from pip._vendor.pygments.regexopt import regex_opt
21
-
22
- __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
23
- 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
24
- 'default', 'words', 'line_re']
25
-
26
- line_re = re.compile('.*?\n')
27
-
28
- _encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
29
- (b'\xff\xfe\0\0', 'utf-32'),
30
- (b'\0\0\xfe\xff', 'utf-32be'),
31
- (b'\xff\xfe', 'utf-16'),
32
- (b'\xfe\xff', 'utf-16be')]
33
-
34
- _default_analyse = staticmethod(lambda x: 0.0)
35
-
36
-
37
- class LexerMeta(type):
38
- """
39
- This metaclass automagically converts ``analyse_text`` methods into
40
- static methods which always return float values.
41
- """
42
-
43
- def __new__(mcs, name, bases, d):
44
- if 'analyse_text' in d:
45
- d['analyse_text'] = make_analysator(d['analyse_text'])
46
- return type.__new__(mcs, name, bases, d)
47
-
48
-
49
- class Lexer(metaclass=LexerMeta):
50
- """
51
- Lexer for a specific language.
52
-
53
- Basic options recognized:
54
- ``stripnl``
55
- Strip leading and trailing newlines from the input (default: True).
56
- ``stripall``
57
- Strip all leading and trailing whitespace from the input
58
- (default: False).
59
- ``ensurenl``
60
- Make sure that the input ends with a newline (default: True). This
61
- is required for some lexers that consume input linewise.
62
-
63
- .. versionadded:: 1.3
64
-
65
- ``tabsize``
66
- If given and greater than 0, expand tabs in the input (default: 0).
67
- ``encoding``
68
- If given, must be an encoding name. This encoding will be used to
69
- convert the input string to Unicode, if it is not already a Unicode
70
- string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
71
- Latin1 detection. Can also be ``'chardet'`` to use the chardet
72
- library, if it is installed.
73
- ``inencoding``
74
- Overrides the ``encoding`` if given.
75
- """
76
-
77
- #: Name of the lexer
78
- name = None
79
-
80
- #: URL of the language specification/definition
81
- url = None
82
-
83
- #: Shortcuts for the lexer
84
- aliases = []
85
-
86
- #: File name globs
87
- filenames = []
88
-
89
- #: Secondary file name globs
90
- alias_filenames = []
91
-
92
- #: MIME types
93
- mimetypes = []
94
-
95
- #: Priority, should multiple lexers match and no content is provided
96
- priority = 0
97
-
98
- def __init__(self, **options):
99
- self.options = options
100
- self.stripnl = get_bool_opt(options, 'stripnl', True)
101
- self.stripall = get_bool_opt(options, 'stripall', False)
102
- self.ensurenl = get_bool_opt(options, 'ensurenl', True)
103
- self.tabsize = get_int_opt(options, 'tabsize', 0)
104
- self.encoding = options.get('encoding', 'guess')
105
- self.encoding = options.get('inencoding') or self.encoding
106
- self.filters = []
107
- for filter_ in get_list_opt(options, 'filters', ()):
108
- self.add_filter(filter_)
109
-
110
- def __repr__(self):
111
- if self.options:
112
- return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
113
- self.options)
114
- else:
115
- return '<pygments.lexers.%s>' % self.__class__.__name__
116
-
117
- def add_filter(self, filter_, **options):
118
- """
119
- Add a new stream filter to this lexer.
120
- """
121
- if not isinstance(filter_, Filter):
122
- filter_ = get_filter_by_name(filter_, **options)
123
- self.filters.append(filter_)
124
-
125
- def analyse_text(text):
126
- """
127
- Has to return a float between ``0`` and ``1`` that indicates
128
- if a lexer wants to highlight this text. Used by ``guess_lexer``.
129
- If this method returns ``0`` it won't highlight it in any case, if
130
- it returns ``1`` highlighting with this lexer is guaranteed.
131
-
132
- The `LexerMeta` metaclass automatically wraps this function so
133
- that it works like a static method (no ``self`` or ``cls``
134
- parameter) and the return value is automatically converted to
135
- `float`. If the return value is an object that is boolean `False`
136
- it's the same as if the return values was ``0.0``.
137
- """
138
-
139
- def get_tokens(self, text, unfiltered=False):
140
- """
141
- Return an iterable of (tokentype, value) pairs generated from
142
- `text`. If `unfiltered` is set to `True`, the filtering mechanism
143
- is bypassed even if filters are defined.
144
-
145
- Also preprocess the text, i.e. expand tabs and strip it if
146
- wanted and applies registered filters.
147
- """
148
- if not isinstance(text, str):
149
- if self.encoding == 'guess':
150
- text, _ = guess_decode(text)
151
- elif self.encoding == 'chardet':
152
- try:
153
- from pip._vendor import chardet
154
- except ImportError as e:
155
- raise ImportError('To enable chardet encoding guessing, '
156
- 'please install the chardet library '
157
- 'from http://chardet.feedparser.org/') from e
158
- # check for BOM first
159
- decoded = None
160
- for bom, encoding in _encoding_map:
161
- if text.startswith(bom):
162
- decoded = text[len(bom):].decode(encoding, 'replace')
163
- break
164
- # no BOM found, so use chardet
165
- if decoded is None:
166
- enc = chardet.detect(text[:1024]) # Guess using first 1KB
167
- decoded = text.decode(enc.get('encoding') or 'utf-8',
168
- 'replace')
169
- text = decoded
170
- else:
171
- text = text.decode(self.encoding)
172
- if text.startswith('\ufeff'):
173
- text = text[len('\ufeff'):]
174
- else:
175
- if text.startswith('\ufeff'):
176
- text = text[len('\ufeff'):]
177
-
178
- # text now *is* a unicode string
179
- text = text.replace('\r\n', '\n')
180
- text = text.replace('\r', '\n')
181
- if self.stripall:
182
- text = text.strip()
183
- elif self.stripnl:
184
- text = text.strip('\n')
185
- if self.tabsize > 0:
186
- text = text.expandtabs(self.tabsize)
187
- if self.ensurenl and not text.endswith('\n'):
188
- text += '\n'
189
-
190
- def streamer():
191
- for _, t, v in self.get_tokens_unprocessed(text):
192
- yield t, v
193
- stream = streamer()
194
- if not unfiltered:
195
- stream = apply_filters(stream, self.filters, self)
196
- return stream
197
-
198
- def get_tokens_unprocessed(self, text):
199
- """
200
- Return an iterable of (index, tokentype, value) pairs where "index"
201
- is the starting position of the token within the input text.
202
-
203
- In subclasses, implement this method as a generator to
204
- maximize effectiveness.
205
- """
206
- raise NotImplementedError
207
-
208
-
209
- class DelegatingLexer(Lexer):
210
- """
211
- This lexer takes two lexers as arguments: a root lexer and
212
- a language lexer. First everything is scanned using the language
213
- lexer, afterwards all ``Other`` tokens are lexed using the root
214
- lexer.
215
-
216
- The lexers from the ``template`` lexer package use this base lexer.
217
- """
218
-
219
- def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
220
- self.root_lexer = _root_lexer(**options)
221
- self.language_lexer = _language_lexer(**options)
222
- self.needle = _needle
223
- Lexer.__init__(self, **options)
224
-
225
- def get_tokens_unprocessed(self, text):
226
- buffered = ''
227
- insertions = []
228
- lng_buffer = []
229
- for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
230
- if t is self.needle:
231
- if lng_buffer:
232
- insertions.append((len(buffered), lng_buffer))
233
- lng_buffer = []
234
- buffered += v
235
- else:
236
- lng_buffer.append((i, t, v))
237
- if lng_buffer:
238
- insertions.append((len(buffered), lng_buffer))
239
- return do_insertions(insertions,
240
- self.root_lexer.get_tokens_unprocessed(buffered))
241
-
242
-
243
- # ------------------------------------------------------------------------------
244
- # RegexLexer and ExtendedRegexLexer
245
- #
246
-
247
-
248
- class include(str): # pylint: disable=invalid-name
249
- """
250
- Indicates that a state should include rules from another state.
251
- """
252
- pass
253
-
254
-
255
- class _inherit:
256
- """
257
- Indicates that a state should inherit from its superclass.
258
- """
259
- def __repr__(self):
260
- return 'inherit'
261
-
262
- inherit = _inherit() # pylint: disable=invalid-name
263
-
264
-
265
- class combined(tuple): # pylint: disable=invalid-name
266
- """
267
- Indicates a state combined from multiple states.
268
- """
269
-
270
- def __new__(cls, *args):
271
- return tuple.__new__(cls, args)
272
-
273
- def __init__(self, *args):
274
- # tuple.__init__ doesn't do anything
275
- pass
276
-
277
-
278
- class _PseudoMatch:
279
- """
280
- A pseudo match object constructed from a string.
281
- """
282
-
283
- def __init__(self, start, text):
284
- self._text = text
285
- self._start = start
286
-
287
- def start(self, arg=None):
288
- return self._start
289
-
290
- def end(self, arg=None):
291
- return self._start + len(self._text)
292
-
293
- def group(self, arg=None):
294
- if arg:
295
- raise IndexError('No such group')
296
- return self._text
297
-
298
- def groups(self):
299
- return (self._text,)
300
-
301
- def groupdict(self):
302
- return {}
303
-
304
-
305
- def bygroups(*args):
306
- """
307
- Callback that yields multiple actions for each group in the match.
308
- """
309
- def callback(lexer, match, ctx=None):
310
- for i, action in enumerate(args):
311
- if action is None:
312
- continue
313
- elif type(action) is _TokenType:
314
- data = match.group(i + 1)
315
- if data:
316
- yield match.start(i + 1), action, data
317
- else:
318
- data = match.group(i + 1)
319
- if data is not None:
320
- if ctx:
321
- ctx.pos = match.start(i + 1)
322
- for item in action(lexer,
323
- _PseudoMatch(match.start(i + 1), data), ctx):
324
- if item:
325
- yield item
326
- if ctx:
327
- ctx.pos = match.end()
328
- return callback
329
-
330
-
331
- class _This:
332
- """
333
- Special singleton used for indicating the caller class.
334
- Used by ``using``.
335
- """
336
-
337
- this = _This()
338
-
339
-
340
- def using(_other, **kwargs):
341
- """
342
- Callback that processes the match with a different lexer.
343
-
344
- The keyword arguments are forwarded to the lexer, except `state` which
345
- is handled separately.
346
-
347
- `state` specifies the state that the new lexer will start in, and can
348
- be an enumerable such as ('root', 'inline', 'string') or a simple
349
- string which is assumed to be on top of the root state.
350
-
351
- Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
352
- """
353
- gt_kwargs = {}
354
- if 'state' in kwargs:
355
- s = kwargs.pop('state')
356
- if isinstance(s, (list, tuple)):
357
- gt_kwargs['stack'] = s
358
- else:
359
- gt_kwargs['stack'] = ('root', s)
360
-
361
- if _other is this:
362
- def callback(lexer, match, ctx=None):
363
- # if keyword arguments are given the callback
364
- # function has to create a new lexer instance
365
- if kwargs:
366
- # XXX: cache that somehow
367
- kwargs.update(lexer.options)
368
- lx = lexer.__class__(**kwargs)
369
- else:
370
- lx = lexer
371
- s = match.start()
372
- for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
373
- yield i + s, t, v
374
- if ctx:
375
- ctx.pos = match.end()
376
- else:
377
- def callback(lexer, match, ctx=None):
378
- # XXX: cache that somehow
379
- kwargs.update(lexer.options)
380
- lx = _other(**kwargs)
381
-
382
- s = match.start()
383
- for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
384
- yield i + s, t, v
385
- if ctx:
386
- ctx.pos = match.end()
387
- return callback
388
-
389
-
390
- class default:
391
- """
392
- Indicates a state or state action (e.g. #pop) to apply.
393
- For example default('#pop') is equivalent to ('', Token, '#pop')
394
- Note that state tuples may be used as well.
395
-
396
- .. versionadded:: 2.0
397
- """
398
- def __init__(self, state):
399
- self.state = state
400
-
401
-
402
- class words(Future):
403
- """
404
- Indicates a list of literal words that is transformed into an optimized
405
- regex that matches any of the words.
406
-
407
- .. versionadded:: 2.0
408
- """
409
- def __init__(self, words, prefix='', suffix=''):
410
- self.words = words
411
- self.prefix = prefix
412
- self.suffix = suffix
413
-
414
- def get(self):
415
- return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
416
-
417
-
418
- class RegexLexerMeta(LexerMeta):
419
- """
420
- Metaclass for RegexLexer, creates the self._tokens attribute from
421
- self.tokens on the first instantiation.
422
- """
423
-
424
- def _process_regex(cls, regex, rflags, state):
425
- """Preprocess the regular expression component of a token definition."""
426
- if isinstance(regex, Future):
427
- regex = regex.get()
428
- return re.compile(regex, rflags).match
429
-
430
- def _process_token(cls, token):
431
- """Preprocess the token component of a token definition."""
432
- assert type(token) is _TokenType or callable(token), \
433
- 'token type must be simple type or callable, not %r' % (token,)
434
- return token
435
-
436
- def _process_new_state(cls, new_state, unprocessed, processed):
437
- """Preprocess the state transition action of a token definition."""
438
- if isinstance(new_state, str):
439
- # an existing state
440
- if new_state == '#pop':
441
- return -1
442
- elif new_state in unprocessed:
443
- return (new_state,)
444
- elif new_state == '#push':
445
- return new_state
446
- elif new_state[:5] == '#pop:':
447
- return -int(new_state[5:])
448
- else:
449
- assert False, 'unknown new state %r' % new_state
450
- elif isinstance(new_state, combined):
451
- # combine a new state from existing ones
452
- tmp_state = '_tmp_%d' % cls._tmpname
453
- cls._tmpname += 1
454
- itokens = []
455
- for istate in new_state:
456
- assert istate != new_state, 'circular state ref %r' % istate
457
- itokens.extend(cls._process_state(unprocessed,
458
- processed, istate))
459
- processed[tmp_state] = itokens
460
- return (tmp_state,)
461
- elif isinstance(new_state, tuple):
462
- # push more than one state
463
- for istate in new_state:
464
- assert (istate in unprocessed or
465
- istate in ('#pop', '#push')), \
466
- 'unknown new state ' + istate
467
- return new_state
468
- else:
469
- assert False, 'unknown new state def %r' % new_state
470
-
471
- def _process_state(cls, unprocessed, processed, state):
472
- """Preprocess a single state definition."""
473
- assert type(state) is str, "wrong state name %r" % state
474
- assert state[0] != '#', "invalid state name %r" % state
475
- if state in processed:
476
- return processed[state]
477
- tokens = processed[state] = []
478
- rflags = cls.flags
479
- for tdef in unprocessed[state]:
480
- if isinstance(tdef, include):
481
- # it's a state reference
482
- assert tdef != state, "circular state reference %r" % state
483
- tokens.extend(cls._process_state(unprocessed, processed,
484
- str(tdef)))
485
- continue
486
- if isinstance(tdef, _inherit):
487
- # should be processed already, but may not in the case of:
488
- # 1. the state has no counterpart in any parent
489
- # 2. the state includes more than one 'inherit'
490
- continue
491
- if isinstance(tdef, default):
492
- new_state = cls._process_new_state(tdef.state, unprocessed, processed)
493
- tokens.append((re.compile('').match, None, new_state))
494
- continue
495
-
496
- assert type(tdef) is tuple, "wrong rule def %r" % tdef
497
-
498
- try:
499
- rex = cls._process_regex(tdef[0], rflags, state)
500
- except Exception as err:
501
- raise ValueError("uncompilable regex %r in state %r of %r: %s" %
502
- (tdef[0], state, cls, err)) from err
503
-
504
- token = cls._process_token(tdef[1])
505
-
506
- if len(tdef) == 2:
507
- new_state = None
508
- else:
509
- new_state = cls._process_new_state(tdef[2],
510
- unprocessed, processed)
511
-
512
- tokens.append((rex, token, new_state))
513
- return tokens
514
-
515
- def process_tokendef(cls, name, tokendefs=None):
516
- """Preprocess a dictionary of token definitions."""
517
- processed = cls._all_tokens[name] = {}
518
- tokendefs = tokendefs or cls.tokens[name]
519
- for state in list(tokendefs):
520
- cls._process_state(tokendefs, processed, state)
521
- return processed
522
-
523
- def get_tokendefs(cls):
524
- """
525
- Merge tokens from superclasses in MRO order, returning a single tokendef
526
- dictionary.
527
-
528
- Any state that is not defined by a subclass will be inherited
529
- automatically. States that *are* defined by subclasses will, by
530
- default, override that state in the superclass. If a subclass wishes to
531
- inherit definitions from a superclass, it can use the special value
532
- "inherit", which will cause the superclass' state definition to be
533
- included at that point in the state.
534
- """
535
- tokens = {}
536
- inheritable = {}
537
- for c in cls.__mro__:
538
- toks = c.__dict__.get('tokens', {})
539
-
540
- for state, items in toks.items():
541
- curitems = tokens.get(state)
542
- if curitems is None:
543
- # N.b. because this is assigned by reference, sufficiently
544
- # deep hierarchies are processed incrementally (e.g. for
545
- # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
546
- # will not see any inherits in B).
547
- tokens[state] = items
548
- try:
549
- inherit_ndx = items.index(inherit)
550
- except ValueError:
551
- continue
552
- inheritable[state] = inherit_ndx
553
- continue
554
-
555
- inherit_ndx = inheritable.pop(state, None)
556
- if inherit_ndx is None:
557
- continue
558
-
559
- # Replace the "inherit" value with the items
560
- curitems[inherit_ndx:inherit_ndx+1] = items
561
- try:
562
- # N.b. this is the index in items (that is, the superclass
563
- # copy), so offset required when storing below.
564
- new_inh_ndx = items.index(inherit)
565
- except ValueError:
566
- pass
567
- else:
568
- inheritable[state] = inherit_ndx + new_inh_ndx
569
-
570
- return tokens
571
-
572
- def __call__(cls, *args, **kwds):
573
- """Instantiate cls after preprocessing its token definitions."""
574
- if '_tokens' not in cls.__dict__:
575
- cls._all_tokens = {}
576
- cls._tmpname = 0
577
- if hasattr(cls, 'token_variants') and cls.token_variants:
578
- # don't process yet
579
- pass
580
- else:
581
- cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
582
-
583
- return type.__call__(cls, *args, **kwds)
584
-
585
-
586
- class RegexLexer(Lexer, metaclass=RegexLexerMeta):
587
- """
588
- Base for simple stateful regular expression-based lexers.
589
- Simplifies the lexing process so that you need only
590
- provide a list of states and regular expressions.
591
- """
592
-
593
- #: Flags for compiling the regular expressions.
594
- #: Defaults to MULTILINE.
595
- flags = re.MULTILINE
596
-
597
- #: At all time there is a stack of states. Initially, the stack contains
598
- #: a single state 'root'. The top of the stack is called "the current state".
599
- #:
600
- #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
601
- #:
602
- #: ``new_state`` can be omitted to signify no state transition.
603
- #: If ``new_state`` is a string, it is pushed on the stack. This ensures
604
- #: the new current state is ``new_state``.
605
- #: If ``new_state`` is a tuple of strings, all of those strings are pushed
606
- #: on the stack and the current state will be the last element of the list.
607
- #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
608
- #: to signify a new, anonymous state combined from the rules of two
609
- #: or more existing ones.
610
- #: Furthermore, it can be '#pop' to signify going back one step in
611
- #: the state stack, or '#push' to push the current state on the stack
612
- #: again. Note that if you push while in a combined state, the combined
613
- #: state itself is pushed, and not only the state in which the rule is
614
- #: defined.
615
- #:
616
- #: The tuple can also be replaced with ``include('state')``, in which
617
- #: case the rules from the state named by the string are included in the
618
- #: current one.
619
- tokens = {}
620
-
621
- def get_tokens_unprocessed(self, text, stack=('root',)):
622
- """
623
- Split ``text`` into (tokentype, text) pairs.
624
-
625
- ``stack`` is the initial stack (default: ``['root']``)
626
- """
627
- pos = 0
628
- tokendefs = self._tokens
629
- statestack = list(stack)
630
- statetokens = tokendefs[statestack[-1]]
631
- while 1:
632
- for rexmatch, action, new_state in statetokens:
633
- m = rexmatch(text, pos)
634
- if m:
635
- if action is not None:
636
- if type(action) is _TokenType:
637
- yield pos, action, m.group()
638
- else:
639
- yield from action(self, m)
640
- pos = m.end()
641
- if new_state is not None:
642
- # state transition
643
- if isinstance(new_state, tuple):
644
- for state in new_state:
645
- if state == '#pop':
646
- if len(statestack) > 1:
647
- statestack.pop()
648
- elif state == '#push':
649
- statestack.append(statestack[-1])
650
- else:
651
- statestack.append(state)
652
- elif isinstance(new_state, int):
653
- # pop, but keep at least one state on the stack
654
- # (random code leading to unexpected pops should
655
- # not allow exceptions)
656
- if abs(new_state) >= len(statestack):
657
- del statestack[1:]
658
- else:
659
- del statestack[new_state:]
660
- elif new_state == '#push':
661
- statestack.append(statestack[-1])
662
- else:
663
- assert False, "wrong state def: %r" % new_state
664
- statetokens = tokendefs[statestack[-1]]
665
- break
666
- else:
667
- # We are here only if all state tokens have been considered
668
- # and there was not a match on any of them.
669
- try:
670
- if text[pos] == '\n':
671
- # at EOL, reset state to "root"
672
- statestack = ['root']
673
- statetokens = tokendefs['root']
674
- yield pos, Whitespace, '\n'
675
- pos += 1
676
- continue
677
- yield pos, Error, text[pos]
678
- pos += 1
679
- except IndexError:
680
- break
681
-
682
-
683
- class LexerContext:
684
- """
685
- A helper object that holds lexer position data.
686
- """
687
-
688
- def __init__(self, text, pos, stack=None, end=None):
689
- self.text = text
690
- self.pos = pos
691
- self.end = end or len(text) # end=0 not supported ;-)
692
- self.stack = stack or ['root']
693
-
694
- def __repr__(self):
695
- return 'LexerContext(%r, %r, %r)' % (
696
- self.text, self.pos, self.stack)
697
-
698
-
699
- class ExtendedRegexLexer(RegexLexer):
700
- """
701
- A RegexLexer that uses a context object to store its state.
702
- """
703
-
704
- def get_tokens_unprocessed(self, text=None, context=None):
705
- """
706
- Split ``text`` into (tokentype, text) pairs.
707
- If ``context`` is given, use this lexer context instead.
708
- """
709
- tokendefs = self._tokens
710
- if not context:
711
- ctx = LexerContext(text, 0)
712
- statetokens = tokendefs['root']
713
- else:
714
- ctx = context
715
- statetokens = tokendefs[ctx.stack[-1]]
716
- text = ctx.text
717
- while 1:
718
- for rexmatch, action, new_state in statetokens:
719
- m = rexmatch(text, ctx.pos, ctx.end)
720
- if m:
721
- if action is not None:
722
- if type(action) is _TokenType:
723
- yield ctx.pos, action, m.group()
724
- ctx.pos = m.end()
725
- else:
726
- yield from action(self, m, ctx)
727
- if not new_state:
728
- # altered the state stack?
729
- statetokens = tokendefs[ctx.stack[-1]]
730
- # CAUTION: callback must set ctx.pos!
731
- if new_state is not None:
732
- # state transition
733
- if isinstance(new_state, tuple):
734
- for state in new_state:
735
- if state == '#pop':
736
- if len(ctx.stack) > 1:
737
- ctx.stack.pop()
738
- elif state == '#push':
739
- ctx.stack.append(ctx.stack[-1])
740
- else:
741
- ctx.stack.append(state)
742
- elif isinstance(new_state, int):
743
- # see RegexLexer for why this check is made
744
- if abs(new_state) >= len(ctx.stack):
745
- del ctx.stack[1:]
746
- else:
747
- del ctx.stack[new_state:]
748
- elif new_state == '#push':
749
- ctx.stack.append(ctx.stack[-1])
750
- else:
751
- assert False, "wrong state def: %r" % new_state
752
- statetokens = tokendefs[ctx.stack[-1]]
753
- break
754
- else:
755
- try:
756
- if ctx.pos >= ctx.end:
757
- break
758
- if text[ctx.pos] == '\n':
759
- # at EOL, reset state to "root"
760
- ctx.stack = ['root']
761
- statetokens = tokendefs['root']
762
- yield ctx.pos, Text, '\n'
763
- ctx.pos += 1
764
- continue
765
- yield ctx.pos, Error, text[ctx.pos]
766
- ctx.pos += 1
767
- except IndexError:
768
- break
769
-
770
-
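The `LexerContext` / `ExtendedRegexLexer` pair above lets a caller resume lexing from an arbitrary offset instead of always starting at position 0. A minimal sketch of that usage follows (not part of the deleted file); it assumes Pygments is installed and uses `RubyLexer` purely as a readily available `ExtendedRegexLexer` subclass.

```python
# Sketch only: resume lexing from an explicit offset via LexerContext.
# RubyLexer is assumed here as one ExtendedRegexLexer subclass shipped
# with Pygments; any context-based lexer works the same way.
from pygments.lexer import LexerContext
from pygments.lexers.ruby import RubyLexer

code = 'x = 1\nputs x\n'
ctx = LexerContext(code, pos=6)   # start at the second line; stack defaults to ['root']

for pos, token, value in RubyLexer().get_tokens_unprocessed(context=ctx):
    print(pos, token, repr(value))
```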
771
- def do_insertions(insertions, tokens):
772
- """
773
- Helper for lexers which must combine the results of several
774
- sublexers.
775
-
776
- ``insertions`` is a list of ``(index, itokens)`` pairs.
777
- Each ``itokens`` iterable should be inserted at position
778
- ``index`` into the token stream given by the ``tokens``
779
- argument.
780
-
781
- The result is a combined token stream.
782
-
783
- TODO: clean up the code here.
784
- """
785
- insertions = iter(insertions)
786
- try:
787
- index, itokens = next(insertions)
788
- except StopIteration:
789
- # no insertions
790
- yield from tokens
791
- return
792
-
793
- realpos = None
794
- insleft = True
795
-
796
- # iterate over the token stream where we want to insert
797
- # the tokens from the insertion list.
798
- for i, t, v in tokens:
799
- # first iteration. store the position of first item
800
- if realpos is None:
801
- realpos = i
802
- oldi = 0
803
- while insleft and i + len(v) >= index:
804
- tmpval = v[oldi:index - i]
805
- if tmpval:
806
- yield realpos, t, tmpval
807
- realpos += len(tmpval)
808
- for it_index, it_token, it_value in itokens:
809
- yield realpos, it_token, it_value
810
- realpos += len(it_value)
811
- oldi = index - i
812
- try:
813
- index, itokens = next(insertions)
814
- except StopIteration:
815
- insleft = False
816
- break # not strictly necessary
817
- if oldi < len(v):
818
- yield realpos, t, v[oldi:]
819
- realpos += len(v) - oldi
820
-
821
- # leftover tokens
822
- while insleft:
823
- # no normal tokens, set realpos to zero
824
- realpos = realpos or 0
825
- for p, t, v in itokens:
826
- yield realpos, t, v
827
- realpos += len(v)
828
- try:
829
- index, itokens = next(insertions)
830
- except StopIteration:
831
- insleft = False
832
- break # not strictly necessary
833
-
834
-
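As a concrete illustration of the `(index, itokens)` contract described in the docstring above, here is a small hand-built example (a sketch, not part of the deleted file):

```python
# Sketch: splice a prompt token into a plain token stream at offset 4.
from pygments.lexer import do_insertions
from pygments.token import Generic, Text

base = [(0, Text, '0123456789')]                   # the "tokens" stream
insertions = [(4, [(0, Generic.Prompt, '>>> ')])]  # insert at index 4

for pos, token, value in do_insertions(insertions, iter(base)):
    print(pos, token, repr(value))
# -> (0, Text, '0123'), (4, Generic.Prompt, '>>> '), (8, Text, '456789')
```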
835
- class ProfilingRegexLexerMeta(RegexLexerMeta):
836
- """Metaclass for ProfilingRegexLexer, collects regex timing info."""
837
-
838
- def _process_regex(cls, regex, rflags, state):
839
- if isinstance(regex, words):
840
- rex = regex_opt(regex.words, prefix=regex.prefix,
841
- suffix=regex.suffix)
842
- else:
843
- rex = regex
844
- compiled = re.compile(rex, rflags)
845
-
846
- def match_func(text, pos, endpos=sys.maxsize):
847
- info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
848
- t0 = time.time()
849
- res = compiled.match(text, pos, endpos)
850
- t1 = time.time()
851
- info[0] += 1
852
- info[1] += t1 - t0
853
- return res
854
- return match_func
855
-
856
-
857
- class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
858
- """Drop-in replacement for RegexLexer that does profiling of its regexes."""
859
-
860
- _prof_data = []
861
- _prof_sort_index = 4 # defaults to time per call
862
-
863
- def get_tokens_unprocessed(self, text, stack=('root',)):
864
- # this needs to be a stack, since using(this) will produce nested calls
865
- self.__class__._prof_data.append({})
866
- yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
867
- rawdata = self.__class__._prof_data.pop()
868
- data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
869
- n, 1000 * t, 1000 * t / n)
870
- for ((s, r), (n, t)) in rawdata.items()),
871
- key=lambda x: x[self._prof_sort_index],
872
- reverse=True)
873
- sum_total = sum(x[3] for x in data)
874
-
875
- print()
876
- print('Profiling result for %s lexing %d chars in %.3f ms' %
877
- (self.__class__.__name__, len(text), sum_total))
878
- print('=' * 110)
879
- print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
880
- print('-' * 110)
881
- for d in data:
882
- print('%-20s %-65s %5d %8.4f %8.4f' % d)
883
- print('=' * 110)
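To make the state-machine loop in `get_tokens_unprocessed` above more tangible, the following toy lexer (a sketch, not part of the deleted file) defines two states and exercises the `'#push'`/`'#pop'`-style transitions handled by that loop:

```python
# Sketch: a two-state RegexLexer; '"' pushes the 'string' state and the
# closing '"' pops back to 'root' via the '#pop' transition handled above.
from pygments.lexer import RegexLexer
from pygments.token import Name, String, Whitespace

class TinyLexer(RegexLexer):
    name = 'Tiny'
    tokens = {
        'root': [
            (r'[a-z]+', Name),
            (r'\s+', Whitespace),
            (r'"', String, 'string'),
        ],
        'string': [
            (r'[^"]+', String),
            (r'"', String, '#pop'),
        ],
    }

for pos, token, value in TinyLexer().get_tokens_unprocessed('say "hi"'):
    print(pos, token, repr(value))
```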
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/__init__.py DELETED
@@ -1,24 +0,0 @@
1
- """distutils
2
-
3
- The main package for the Python Module Distribution Utilities. Normally
4
- used from a setup script as
5
-
6
- from distutils.core import setup
7
-
8
- setup (...)
9
- """
10
-
11
- import sys
12
- import importlib
13
-
14
- __version__ = sys.version[: sys.version.index(' ')]
15
-
16
-
17
- try:
18
- # Allow Debian and pkgsrc (only) to customize system
19
- # behavior. Ref pypa/distutils#2 and pypa/distutils#16.
20
- # This hook is deprecated and no other environments
21
- # should use it.
22
- importlib.import_module('_distutils_system_mod')
23
- except ImportError:
24
- pass
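The `__version__` line in the shim above simply mirrors the running interpreter's version; a quick sketch of what that slice evaluates to:

```python
# Sketch: sys.version starts with the interpreter version followed by a space,
# so slicing up to the first space yields e.g. '3.11.4'.
import sys

print(sys.version[: sys.version.index(' ')])
```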
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/build_meta.py DELETED
@@ -1,511 +0,0 @@
1
- """A PEP 517 interface to setuptools
2
-
3
- Previously, when a user or a command line tool (let's call it a "frontend")
4
- needed to make a request of setuptools to take a certain action, for
5
- example, generating a list of installation requirements, the frontend would
6
- would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
7
-
8
- PEP 517 defines a different method of interfacing with setuptools. Rather
9
- than calling "setup.py" directly, the frontend should:
10
-
11
- 1. Set the current directory to the directory with a setup.py file
12
- 2. Import this module into a safe python interpreter (one in which
13
- setuptools can potentially set global variables or crash hard).
14
- 3. Call one of the functions defined in PEP 517.
15
-
16
- What each function does is defined in PEP 517. However, here is a "casual"
17
- definition of the functions (this definition should not be relied on for
18
- bug reports or API stability):
19
-
20
- - `build_wheel`: build a wheel in the folder and return the basename
21
- - `get_requires_for_build_wheel`: get the `setup_requires` to build
22
- - `prepare_metadata_for_build_wheel`: get the `install_requires`
23
- - `build_sdist`: build an sdist in the folder and return the basename
24
- - `get_requires_for_build_sdist`: get the `setup_requires` to build
25
-
26
- Again, this is not a formal definition! Just a "taste" of the module.
27
- """
28
-
29
- import io
30
- import os
31
- import shlex
32
- import sys
33
- import tokenize
34
- import shutil
35
- import contextlib
36
- import tempfile
37
- import warnings
38
- from pathlib import Path
39
- from typing import Dict, Iterator, List, Optional, Union
40
-
41
- import setuptools
42
- import distutils
43
- from . import errors
44
- from ._path import same_path
45
- from ._reqs import parse_strings
46
- from ._deprecation_warning import SetuptoolsDeprecationWarning
47
- from distutils.util import strtobool
48
-
49
-
50
- __all__ = ['get_requires_for_build_sdist',
51
- 'get_requires_for_build_wheel',
52
- 'prepare_metadata_for_build_wheel',
53
- 'build_wheel',
54
- 'build_sdist',
55
- 'get_requires_for_build_editable',
56
- 'prepare_metadata_for_build_editable',
57
- 'build_editable',
58
- '__legacy__',
59
- 'SetupRequirementsError']
60
-
61
- SETUPTOOLS_ENABLE_FEATURES = os.getenv("SETUPTOOLS_ENABLE_FEATURES", "").lower()
62
- LEGACY_EDITABLE = "legacy-editable" in SETUPTOOLS_ENABLE_FEATURES.replace("_", "-")
63
-
64
-
65
- class SetupRequirementsError(BaseException):
66
- def __init__(self, specifiers):
67
- self.specifiers = specifiers
68
-
69
-
70
- class Distribution(setuptools.dist.Distribution):
71
- def fetch_build_eggs(self, specifiers):
72
- specifier_list = list(parse_strings(specifiers))
73
-
74
- raise SetupRequirementsError(specifier_list)
75
-
76
- @classmethod
77
- @contextlib.contextmanager
78
- def patch(cls):
79
- """
80
- Replace
81
- distutils.dist.Distribution with this class
82
- for the duration of this context.
83
- """
84
- orig = distutils.core.Distribution
85
- distutils.core.Distribution = cls
86
- try:
87
- yield
88
- finally:
89
- distutils.core.Distribution = orig
90
-
91
-
92
- @contextlib.contextmanager
93
- def no_install_setup_requires():
94
- """Temporarily disable installing setup_requires
95
-
96
- Under PEP 517, the backend reports build dependencies to the frontend,
97
- and the frontend is responsible for ensuring they're installed.
98
- So setuptools (acting as a backend) should not try to install them.
99
- """
100
- orig = setuptools._install_setup_requires
101
- setuptools._install_setup_requires = lambda attrs: None
102
- try:
103
- yield
104
- finally:
105
- setuptools._install_setup_requires = orig
106
-
107
-
108
- def _get_immediate_subdirectories(a_dir):
109
- return [name for name in os.listdir(a_dir)
110
- if os.path.isdir(os.path.join(a_dir, name))]
111
-
112
-
113
- def _file_with_extension(directory, extension):
114
- matching = (
115
- f for f in os.listdir(directory)
116
- if f.endswith(extension)
117
- )
118
- try:
119
- file, = matching
120
- except ValueError:
121
- raise ValueError(
122
- 'No distribution was found. Ensure that `setup.py` '
123
- 'is not empty and that it calls `setup()`.')
124
- return file
125
-
126
-
127
- def _open_setup_script(setup_script):
128
- if not os.path.exists(setup_script):
129
- # Supply a default setup.py
130
- return io.StringIO(u"from setuptools import setup; setup()")
131
-
132
- return getattr(tokenize, 'open', open)(setup_script)
133
-
134
-
135
- @contextlib.contextmanager
136
- def suppress_known_deprecation():
137
- with warnings.catch_warnings():
138
- warnings.filterwarnings('ignore', 'setup.py install is deprecated')
139
- yield
140
-
141
-
142
- _ConfigSettings = Optional[Dict[str, Union[str, List[str], None]]]
143
- """
144
- Currently the user can run::
145
-
146
- pip install -e . --config-settings key=value
147
- python -m build -C--key=value -C key=value
148
-
149
- - pip will pass both key and value as strings and overwriting repeated keys
150
- (pypa/pip#11059).
151
- - build will accumulate values associated with repeated keys in a list.
152
- It will also accept keys with no associated value.
153
- This means that an option passed by build can be ``str | list[str] | None``.
154
- - PEP 517 specifies that ``config_settings`` is an optional dict.
155
- """
156
-
157
-
158
- class _ConfigSettingsTranslator:
159
- """Translate ``config_settings`` into distutils-style command arguments.
160
- Only a limited number of options is currently supported.
161
- """
162
- # See pypa/setuptools#1928 pypa/setuptools#2491
163
-
164
- def _get_config(self, key: str, config_settings: _ConfigSettings) -> List[str]:
165
- """
166
- Get the value of a specific key in ``config_settings`` as a list of strings.
167
-
168
- >>> fn = _ConfigSettingsTranslator()._get_config
169
- >>> fn("--global-option", None)
170
- []
171
- >>> fn("--global-option", {})
172
- []
173
- >>> fn("--global-option", {'--global-option': 'foo'})
174
- ['foo']
175
- >>> fn("--global-option", {'--global-option': ['foo']})
176
- ['foo']
177
- >>> fn("--global-option", {'--global-option': 'foo'})
178
- ['foo']
179
- >>> fn("--global-option", {'--global-option': 'foo bar'})
180
- ['foo', 'bar']
181
- """
182
- cfg = config_settings or {}
183
- opts = cfg.get(key) or []
184
- return shlex.split(opts) if isinstance(opts, str) else opts
185
-
186
- def _valid_global_options(self):
187
- """Global options accepted by setuptools (e.g. quiet or verbose)."""
188
- options = (opt[:2] for opt in setuptools.dist.Distribution.global_options)
189
- return {flag for long_and_short in options for flag in long_and_short if flag}
190
-
191
- def _global_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
192
- """
193
- Let the user specify ``verbose`` or ``quiet`` + escape hatch via
194
- ``--global-option``.
195
- Note: ``-v``, ``-vv``, ``-vvv`` have similar effects in setuptools,
196
- so we just have to cover the basic scenario ``-v``.
197
-
198
- >>> fn = _ConfigSettingsTranslator()._global_args
199
- >>> list(fn(None))
200
- []
201
- >>> list(fn({"verbose": "False"}))
202
- ['-q']
203
- >>> list(fn({"verbose": "1"}))
204
- ['-v']
205
- >>> list(fn({"--verbose": None}))
206
- ['-v']
207
- >>> list(fn({"verbose": "true", "--global-option": "-q --no-user-cfg"}))
208
- ['-v', '-q', '--no-user-cfg']
209
- >>> list(fn({"--quiet": None}))
210
- ['-q']
211
- """
212
- cfg = config_settings or {}
213
- falsey = {"false", "no", "0", "off"}
214
- if "verbose" in cfg or "--verbose" in cfg:
215
- level = str(cfg.get("verbose") or cfg.get("--verbose") or "1")
216
- yield ("-q" if level.lower() in falsey else "-v")
217
- if "quiet" in cfg or "--quiet" in cfg:
218
- level = str(cfg.get("quiet") or cfg.get("--quiet") or "1")
219
- yield ("-v" if level.lower() in falsey else "-q")
220
-
221
- valid = self._valid_global_options()
222
- args = self._get_config("--global-option", config_settings)
223
- yield from (arg for arg in args if arg.strip("-") in valid)
224
-
225
- def __dist_info_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
226
- """
227
- The ``dist_info`` command accepts ``tag-date`` and ``tag-build``.
228
-
229
- .. warning::
230
- We cannot use this yet as it requires the ``sdist`` and ``bdist_wheel``
231
- commands run in ``build_sdist`` and ``build_wheel`` to re-use the egg-info
232
- directory created in ``prepare_metadata_for_build_wheel``.
233
-
234
- >>> fn = _ConfigSettingsTranslator()._ConfigSettingsTranslator__dist_info_args
235
- >>> list(fn(None))
236
- []
237
- >>> list(fn({"tag-date": "False"}))
238
- ['--no-date']
239
- >>> list(fn({"tag-date": None}))
240
- ['--no-date']
241
- >>> list(fn({"tag-date": "true", "tag-build": ".a"}))
242
- ['--tag-date', '--tag-build', '.a']
243
- """
244
- cfg = config_settings or {}
245
- if "tag-date" in cfg:
246
- val = strtobool(str(cfg["tag-date"] or "false"))
247
- yield ("--tag-date" if val else "--no-date")
248
- if "tag-build" in cfg:
249
- yield from ["--tag-build", str(cfg["tag-build"])]
250
-
251
- def _editable_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
252
- """
253
- The ``editable_wheel`` command accepts ``editable-mode=strict``.
254
-
255
- >>> fn = _ConfigSettingsTranslator()._editable_args
256
- >>> list(fn(None))
257
- []
258
- >>> list(fn({"editable-mode": "strict"}))
259
- ['--mode', 'strict']
260
- """
261
- cfg = config_settings or {}
262
- mode = cfg.get("editable-mode") or cfg.get("editable_mode")
263
- if not mode:
264
- return
265
- yield from ["--mode", str(mode)]
266
-
267
- def _arbitrary_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
268
- """
269
- Users may expect to pass arbitrary lists of arguments to a command
270
- via "--global-option" (example provided in PEP 517 of a "escape hatch").
271
-
272
- >>> fn = _ConfigSettingsTranslator()._arbitrary_args
273
- >>> list(fn(None))
274
- []
275
- >>> list(fn({}))
276
- []
277
- >>> list(fn({'--build-option': 'foo'}))
278
- ['foo']
279
- >>> list(fn({'--build-option': ['foo']}))
280
- ['foo']
281
- >>> list(fn({'--build-option': 'foo'}))
282
- ['foo']
283
- >>> list(fn({'--build-option': 'foo bar'}))
284
- ['foo', 'bar']
285
- >>> warnings.simplefilter('error', SetuptoolsDeprecationWarning)
286
- >>> list(fn({'--global-option': 'foo'})) # doctest: +IGNORE_EXCEPTION_DETAIL
287
- Traceback (most recent call last):
288
- SetuptoolsDeprecationWarning: ...arguments given via `--global-option`...
289
- """
290
- args = self._get_config("--global-option", config_settings)
291
- global_opts = self._valid_global_options()
292
- bad_args = []
293
-
294
- for arg in args:
295
- if arg.strip("-") not in global_opts:
296
- bad_args.append(arg)
297
- yield arg
298
-
299
- yield from self._get_config("--build-option", config_settings)
300
-
301
- if bad_args:
302
- msg = f"""
303
- The arguments {bad_args!r} were given via `--global-option`.
304
- Please use `--build-option` instead,
305
- `--global-option` is reserved to flags like `--verbose` or `--quiet`.
306
- """
307
- warnings.warn(msg, SetuptoolsDeprecationWarning)
308
-
309
-
310
- class _BuildMetaBackend(_ConfigSettingsTranslator):
311
- def _get_build_requires(self, config_settings, requirements):
312
- sys.argv = [
313
- *sys.argv[:1],
314
- *self._global_args(config_settings),
315
- "egg_info",
316
- *self._arbitrary_args(config_settings),
317
- ]
318
- try:
319
- with Distribution.patch():
320
- self.run_setup()
321
- except SetupRequirementsError as e:
322
- requirements += e.specifiers
323
-
324
- return requirements
325
-
326
- def run_setup(self, setup_script='setup.py'):
327
- # Note that we can reuse our build directory between calls
328
- # Correctness comes first, then optimization later
329
- __file__ = setup_script
330
- __name__ = '__main__'
331
-
332
- with _open_setup_script(__file__) as f:
333
- code = f.read().replace(r'\r\n', r'\n')
334
-
335
- exec(code, locals())
336
-
337
- def get_requires_for_build_wheel(self, config_settings=None):
338
- return self._get_build_requires(config_settings, requirements=['wheel'])
339
-
340
- def get_requires_for_build_sdist(self, config_settings=None):
341
- return self._get_build_requires(config_settings, requirements=[])
342
-
343
- def _bubble_up_info_directory(self, metadata_directory: str, suffix: str) -> str:
344
- """
345
- PEP 517 requires that the .dist-info directory be placed in the
346
- metadata_directory. To comply, we MUST copy the directory to the root.
347
-
348
- Returns the basename of the info directory, e.g. `proj-0.0.0.dist-info`.
349
- """
350
- info_dir = self._find_info_directory(metadata_directory, suffix)
351
- if not same_path(info_dir.parent, metadata_directory):
352
- shutil.move(str(info_dir), metadata_directory)
353
- # PEP 517 allows other files and dirs to exist in metadata_directory
354
- return info_dir.name
355
-
356
- def _find_info_directory(self, metadata_directory: str, suffix: str) -> Path:
357
- for parent, dirs, _ in os.walk(metadata_directory):
358
- candidates = [f for f in dirs if f.endswith(suffix)]
359
-
360
- if len(candidates) != 0 or len(dirs) != 1:
361
- assert len(candidates) == 1, f"Multiple {suffix} directories found"
362
- return Path(parent, candidates[0])
363
-
364
- msg = f"No {suffix} directory found in {metadata_directory}"
365
- raise errors.InternalError(msg)
366
-
367
- def prepare_metadata_for_build_wheel(self, metadata_directory,
368
- config_settings=None):
369
- sys.argv = [
370
- *sys.argv[:1],
371
- *self._global_args(config_settings),
372
- "dist_info",
373
- "--output-dir", metadata_directory,
374
- "--keep-egg-info",
375
- ]
376
- with no_install_setup_requires():
377
- self.run_setup()
378
-
379
- self._bubble_up_info_directory(metadata_directory, ".egg-info")
380
- return self._bubble_up_info_directory(metadata_directory, ".dist-info")
381
-
382
- def _build_with_temp_dir(self, setup_command, result_extension,
383
- result_directory, config_settings):
384
- result_directory = os.path.abspath(result_directory)
385
-
386
- # Build in a temporary directory, then copy to the target.
387
- os.makedirs(result_directory, exist_ok=True)
388
- with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
389
- sys.argv = [
390
- *sys.argv[:1],
391
- *self._global_args(config_settings),
392
- *setup_command,
393
- "--dist-dir", tmp_dist_dir,
394
- *self._arbitrary_args(config_settings),
395
- ]
396
- with no_install_setup_requires():
397
- self.run_setup()
398
-
399
- result_basename = _file_with_extension(
400
- tmp_dist_dir, result_extension)
401
- result_path = os.path.join(result_directory, result_basename)
402
- if os.path.exists(result_path):
403
- # os.rename will fail overwriting on non-Unix.
404
- os.remove(result_path)
405
- os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
406
-
407
- return result_basename
408
-
409
- def build_wheel(self, wheel_directory, config_settings=None,
410
- metadata_directory=None):
411
- with suppress_known_deprecation():
412
- return self._build_with_temp_dir(['bdist_wheel'], '.whl',
413
- wheel_directory, config_settings)
414
-
415
- def build_sdist(self, sdist_directory, config_settings=None):
416
- return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
417
- '.tar.gz', sdist_directory,
418
- config_settings)
419
-
420
- def _get_dist_info_dir(self, metadata_directory: Optional[str]) -> Optional[str]:
421
- if not metadata_directory:
422
- return None
423
- dist_info_candidates = list(Path(metadata_directory).glob("*.dist-info"))
424
- assert len(dist_info_candidates) <= 1
425
- return str(dist_info_candidates[0]) if dist_info_candidates else None
426
-
427
- if not LEGACY_EDITABLE:
428
-
429
- # PEP660 hooks:
430
- # build_editable
431
- # get_requires_for_build_editable
432
- # prepare_metadata_for_build_editable
433
- def build_editable(
434
- self, wheel_directory, config_settings=None, metadata_directory=None
435
- ):
436
- # XXX can or should we hide our editable_wheel command normally?
437
- info_dir = self._get_dist_info_dir(metadata_directory)
438
- opts = ["--dist-info-dir", info_dir] if info_dir else []
439
- cmd = ["editable_wheel", *opts, *self._editable_args(config_settings)]
440
- with suppress_known_deprecation():
441
- return self._build_with_temp_dir(
442
- cmd, ".whl", wheel_directory, config_settings
443
- )
444
-
445
- def get_requires_for_build_editable(self, config_settings=None):
446
- return self.get_requires_for_build_wheel(config_settings)
447
-
448
- def prepare_metadata_for_build_editable(self, metadata_directory,
449
- config_settings=None):
450
- return self.prepare_metadata_for_build_wheel(
451
- metadata_directory, config_settings
452
- )
453
-
454
-
455
- class _BuildMetaLegacyBackend(_BuildMetaBackend):
456
- """Compatibility backend for setuptools
457
-
458
- This is a version of setuptools.build_meta that endeavors
459
- to maintain backwards
460
- compatibility with pre-PEP 517 modes of invocation. It
461
- exists as a temporary
462
- bridge between the old packaging mechanism and the new
463
- packaging mechanism,
464
- and will eventually be removed.
465
- """
466
- def run_setup(self, setup_script='setup.py'):
467
- # In order to maintain compatibility with scripts assuming that
468
- # the setup.py script is in a directory on the PYTHONPATH, inject
469
- # '' into sys.path. (pypa/setuptools#1642)
470
- sys_path = list(sys.path) # Save the original path
471
-
472
- script_dir = os.path.dirname(os.path.abspath(setup_script))
473
- if script_dir not in sys.path:
474
- sys.path.insert(0, script_dir)
475
-
476
- # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to
477
- # get the directory of the source code. They expect it to refer to the
478
- # setup.py script.
479
- sys_argv_0 = sys.argv[0]
480
- sys.argv[0] = setup_script
481
-
482
- try:
483
- super(_BuildMetaLegacyBackend,
484
- self).run_setup(setup_script=setup_script)
485
- finally:
486
- # While PEP 517 frontends should be calling each hook in a fresh
487
- # subprocess according to the standard (and thus it should not be
488
- # strictly necessary to restore the old sys.path), we'll restore
489
- # the original path so that the path manipulation does not persist
490
- # within the hook after run_setup is called.
491
- sys.path[:] = sys_path
492
- sys.argv[0] = sys_argv_0
493
-
494
-
495
- # The primary backend
496
- _BACKEND = _BuildMetaBackend()
497
-
498
- get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
499
- get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
500
- prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
501
- build_wheel = _BACKEND.build_wheel
502
- build_sdist = _BACKEND.build_sdist
503
-
504
- if not LEGACY_EDITABLE:
505
- get_requires_for_build_editable = _BACKEND.get_requires_for_build_editable
506
- prepare_metadata_for_build_editable = _BACKEND.prepare_metadata_for_build_editable
507
- build_editable = _BACKEND.build_editable
508
-
509
-
510
- # The legacy backend
511
- __legacy__ = _BuildMetaLegacyBackend()
 
 
spaces/Bl1tzie/Jam/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: Jam
3
- emoji: 😻
4
- colorFrom: green
5
- colorTo: gray
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Boadiwaa/Recipes/openai/cli.py DELETED
@@ -1,1018 +0,0 @@
1
- import datetime
2
- import os
3
- import signal
4
- import sys
5
- import warnings
6
- from functools import partial
7
- from typing import Optional
8
-
9
- import requests
10
-
11
- import openai
12
- import openai.wandb_logger
13
- from openai.upload_progress import BufferReader
14
- from openai.validators import (
15
- apply_necessary_remediation,
16
- apply_validators,
17
- get_search_validators,
18
- get_validators,
19
- read_any_format,
20
- write_out_file,
21
- write_out_search_file,
22
- )
23
-
24
-
25
- class bcolors:
26
- HEADER = "\033[95m"
27
- OKBLUE = "\033[94m"
28
- OKGREEN = "\033[92m"
29
- WARNING = "\033[93m"
30
- FAIL = "\033[91m"
31
- ENDC = "\033[0m"
32
- BOLD = "\033[1m"
33
- UNDERLINE = "\033[4m"
34
-
35
-
36
- def organization_info(obj):
37
- organization = getattr(obj, "organization", None)
38
- if organization is not None:
39
- return "[organization={}] ".format(organization)
40
- else:
41
- return ""
42
-
43
-
44
- def display(obj):
45
- sys.stderr.write(organization_info(obj))
46
- sys.stderr.flush()
47
- print(obj)
48
-
49
-
50
- def display_error(e):
51
- extra = (
52
- " (HTTP status code: {})".format(e.http_status)
53
- if e.http_status is not None
54
- else ""
55
- )
56
- sys.stderr.write(
57
- "{}{}Error:{} {}{}\n".format(
58
- organization_info(e), bcolors.FAIL, bcolors.ENDC, e, extra
59
- )
60
- )
61
-
62
-
63
- class Engine:
64
- @classmethod
65
- def get(cls, args):
66
- engine = openai.Engine.retrieve(id=args.id)
67
- display(engine)
68
-
69
- @classmethod
70
- def update(cls, args):
71
- engine = openai.Engine.modify(args.id, replicas=args.replicas)
72
- display(engine)
73
-
74
- @classmethod
75
- def generate(cls, args):
76
- warnings.warn(
77
- "Engine.generate is deprecated, use Completion.create", DeprecationWarning
78
- )
79
- if args.completions and args.completions > 1 and args.stream:
80
- raise ValueError("Can't stream multiple completions with openai CLI")
81
-
82
- kwargs = {}
83
- if args.model is not None:
84
- kwargs["model"] = args.model
85
- resp = openai.Engine(id=args.id).generate(
86
- completions=args.completions,
87
- context=args.context,
88
- length=args.length,
89
- stream=args.stream,
90
- temperature=args.temperature,
91
- top_p=args.top_p,
92
- logprobs=args.logprobs,
93
- stop=args.stop,
94
- **kwargs,
95
- )
96
- if not args.stream:
97
- resp = [resp]
98
-
99
- for part in resp:
100
- completions = len(part["data"])
101
- for c_idx, c in enumerate(part["data"]):
102
- if completions > 1:
103
- sys.stdout.write("===== Completion {} =====\n".format(c_idx))
104
- sys.stdout.write("".join(c["text"]))
105
- if completions > 1:
106
- sys.stdout.write("\n")
107
- sys.stdout.flush()
108
-
109
- @classmethod
110
- def search(cls, args):
111
- params = {
112
- "query": args.query,
113
- "max_rerank": args.max_rerank,
114
- "return_metadata": args.return_metadata,
115
- }
116
- if args.documents:
117
- params["documents"] = args.documents
118
- if args.file:
119
- params["file"] = args.file
120
-
121
- if args.version:
122
- params["version"] = args.version
123
-
124
- resp = openai.Engine(id=args.id).search(**params)
125
- scores = [
126
- (search_result["score"], search_result["document"])
127
- for search_result in resp["data"]
128
- ]
129
- scores.sort(reverse=True)
130
- dataset = (
131
- args.documents if args.documents else [x["text"] for x in resp["data"]]
132
- )
133
- for score, document_idx in scores:
134
- print("=== score {:.3f} ===".format(score))
135
- print(dataset[document_idx])
136
- if (
137
- args.return_metadata
138
- and args.file
139
- and "metadata" in resp["data"][document_idx]
140
- ):
141
- print(f"METADATA: {resp['data'][document_idx]['metadata']}")
142
-
143
- @classmethod
144
- def list(cls, args):
145
- engines = openai.Engine.list()
146
- display(engines)
147
-
148
-
149
- class Completion:
150
- @classmethod
151
- def create(cls, args):
152
- if args.n is not None and args.n > 1 and args.stream:
153
- raise ValueError("Can't stream completions with n>1 with the current CLI")
154
-
155
- if args.engine and args.model:
156
- warnings.warn(
157
- "In most cases, you should not be specifying both engine and model."
158
- )
159
-
160
- resp = openai.Completion.create(
161
- engine=args.engine,
162
- model=args.model,
163
- n=args.n,
164
- max_tokens=args.max_tokens,
165
- logprobs=args.logprobs,
166
- prompt=args.prompt,
167
- stream=args.stream,
168
- temperature=args.temperature,
169
- top_p=args.top_p,
170
- stop=args.stop,
171
- echo=True,
172
- )
173
- if not args.stream:
174
- resp = [resp]
175
-
176
- for part in resp:
177
- choices = part["choices"]
178
- for c_idx, c in enumerate(sorted(choices, key=lambda s: s["index"])):
179
- if len(choices) > 1:
180
- sys.stdout.write("===== Completion {} =====\n".format(c_idx))
181
- sys.stdout.write(c["text"])
182
- if len(choices) > 1:
183
- sys.stdout.write("\n")
184
- sys.stdout.flush()
185
-
186
-
187
- class Model:
188
- @classmethod
189
- def get(cls, args):
190
- resp = openai.Model.retrieve(id=args.id)
191
- print(resp)
192
-
193
- @classmethod
194
- def delete(cls, args):
195
- model = openai.Model.delete(args.id)
196
- print(model)
197
-
198
- @classmethod
199
- def list(cls, args):
200
- models = openai.Model.list()
201
- print(models)
202
-
203
-
204
- class File:
205
- @classmethod
206
- def create(cls, args):
207
- with open(args.file, "rb") as file_reader:
208
- buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
209
- resp = openai.File.create(
210
- file=buffer_reader,
211
- purpose=args.purpose,
212
- model=args.model,
213
- user_provided_filename=args.file,
214
- )
215
- print(resp)
216
-
217
- @classmethod
218
- def get(cls, args):
219
- resp = openai.File.retrieve(id=args.id)
220
- print(resp)
221
-
222
- @classmethod
223
- def delete(cls, args):
224
- file = openai.File.delete(args.id)
225
- print(file)
226
-
227
- @classmethod
228
- def list(cls, args):
229
- file = openai.File.list()
230
- print(file)
231
-
232
-
233
- class Search:
234
- @classmethod
235
- def prepare_data(cls, args, purpose):
236
-
237
- sys.stdout.write("Analyzing...\n")
238
- fname = args.file
239
- auto_accept = args.quiet
240
-
241
- optional_fields = ["metadata"]
242
-
243
- if purpose == "classifications":
244
- required_fields = ["text", "label"]
245
- else:
246
- required_fields = ["text"]
247
-
248
- df, remediation = read_any_format(
249
- fname, fields=required_fields + optional_fields
250
- )
251
-
252
- if "metadata" not in df:
253
- df["metadata"] = None
254
-
255
- apply_necessary_remediation(None, remediation)
256
- validators = get_search_validators(required_fields, optional_fields)
257
-
258
- write_out_file_func = partial(
259
- write_out_search_file,
260
- purpose=purpose,
261
- fields=required_fields + optional_fields,
262
- )
263
-
264
- apply_validators(
265
- df, fname, remediation, validators, auto_accept, write_out_file_func
266
- )
267
-
268
- @classmethod
269
- def create(cls, args):
270
- resp = openai.Search.create(
271
- query=args.query,
272
- documents=args.documents,
273
- model=args.model,
274
- )
275
- print(resp)
276
-
277
-
278
- class FineTune:
279
- @classmethod
280
- def list(cls, args):
281
- resp = openai.FineTune.list()
282
- print(resp)
283
-
284
- @classmethod
285
- def _is_url(cls, file: str):
286
- return file.lower().startswith("http")
287
-
288
- @classmethod
289
- def _download_file_from_public_url(cls, url: str) -> Optional[bytes]:
290
- resp = requests.get(url)
291
- if resp.status_code == 200:
292
- return resp.content
293
- else:
294
- return None
295
-
296
- @classmethod
297
- def _maybe_upload_file(
298
- cls,
299
- file: Optional[str] = None,
300
- content: Optional[bytes] = None,
301
- user_provided_file: Optional[str] = None,
302
- check_if_file_exists: bool = True,
303
- ):
304
- # Exactly one of `file` or `content` must be provided
305
- if (file is None) == (content is None):
306
- raise ValueError("Exactly one of `file` or `content` must be provided")
307
-
308
- if content is None:
309
- assert file is not None
310
- with open(file, "rb") as f:
311
- content = f.read()
312
-
313
- if check_if_file_exists:
314
- bytes = len(content)
315
- matching_files = openai.File.find_matching_files(
316
- name=user_provided_file or f.name, bytes=bytes, purpose="fine-tune"
317
- )
318
- if len(matching_files) > 0:
319
- file_ids = [f["id"] for f in matching_files]
320
- sys.stdout.write(
321
- "Found potentially duplicated files with name '{name}', purpose 'fine-tune' and size {size} bytes\n".format(
322
- name=os.path.basename(matching_files[0]["filename"]),
323
- size=matching_files[0]["bytes"] if "bytes" in matching_files[0] else matching_files[0]["size"],
324
- )
325
- )
326
- sys.stdout.write("\n".join(file_ids))
327
- while True:
328
- sys.stdout.write(
329
- "\nEnter file ID to reuse an already uploaded file, or an empty string to upload this file anyway: "
330
- )
331
- inp = sys.stdin.readline().strip()
332
- if inp in file_ids:
333
- sys.stdout.write(
334
- "Reusing already uploaded file: {id}\n".format(id=inp)
335
- )
336
- return inp
337
- elif inp == "":
338
- break
339
- else:
340
- sys.stdout.write(
341
- "File id '{id}' is not among the IDs of the potentially duplicated files\n".format(
342
- id=inp
343
- )
344
- )
345
-
346
- buffer_reader = BufferReader(content, desc="Upload progress")
347
- resp = openai.File.create(
348
- file=buffer_reader,
349
- purpose="fine-tune",
350
- user_provided_filename=user_provided_file or file,
351
- )
352
- sys.stdout.write(
353
- "Uploaded file from {file}: {id}\n".format(
354
- file=user_provided_file or file, id=resp["id"]
355
- )
356
- )
357
- return resp["id"]
358
-
359
- @classmethod
360
- def _get_or_upload(cls, file, check_if_file_exists=True):
361
- try:
362
- # 1. If it's a valid file, use it
363
- openai.File.retrieve(file)
364
- return file
365
- except openai.error.InvalidRequestError:
366
- pass
367
- if os.path.isfile(file):
368
- # 2. If it's a file on the filesystem, upload it
369
- return cls._maybe_upload_file(
370
- file=file, check_if_file_exists=check_if_file_exists
371
- )
372
- if cls._is_url(file):
373
- # 3. If it's a URL, download it temporarily
374
- content = cls._download_file_from_public_url(file)
375
- if content is not None:
376
- return cls._maybe_upload_file(
377
- content=content,
378
- check_if_file_exists=check_if_file_exists,
379
- user_provided_file=file,
380
- )
381
- return file
382
-
383
- @classmethod
384
- def create(cls, args):
385
- create_args = {
386
- "training_file": cls._get_or_upload(
387
- args.training_file, args.check_if_files_exist
388
- ),
389
- }
390
- if args.validation_file:
391
- create_args["validation_file"] = cls._get_or_upload(
392
- args.validation_file, args.check_if_files_exist
393
- )
394
-
395
- for hparam in (
396
- "model",
397
- "suffix",
398
- "n_epochs",
399
- "batch_size",
400
- "learning_rate_multiplier",
401
- "prompt_loss_weight",
402
- "compute_classification_metrics",
403
- "classification_n_classes",
404
- "classification_positive_class",
405
- "classification_betas",
406
- ):
407
- attr = getattr(args, hparam)
408
- if attr is not None:
409
- create_args[hparam] = attr
410
-
411
- resp = openai.FineTune.create(**create_args)
412
-
413
- if args.no_follow:
414
- print(resp)
415
- return
416
-
417
- sys.stdout.write(
418
- "Created fine-tune: {job_id}\n"
419
- "Streaming events until fine-tuning is complete...\n\n"
420
- "(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\n".format(
421
- job_id=resp["id"]
422
- )
423
- )
424
- cls._stream_events(resp["id"])
425
-
426
- @classmethod
427
- def get(cls, args):
428
- resp = openai.FineTune.retrieve(id=args.id)
429
- print(resp)
430
-
431
- @classmethod
432
- def results(cls, args):
433
- fine_tune = openai.FineTune.retrieve(id=args.id)
434
- if "result_files" not in fine_tune or len(fine_tune["result_files"]) == 0:
435
- raise openai.error.InvalidRequestError(
436
- f"No results file available for fine-tune {args.id}", "id"
437
- )
438
- result_file = openai.FineTune.retrieve(id=args.id)["result_files"][0]
439
- resp = openai.File.download(id=result_file["id"])
440
- print(resp.decode("utf-8"))
441
-
442
- @classmethod
443
- def events(cls, args):
444
- if args.stream:
445
- raise openai.error.OpenAIError(
446
- message=(
447
- "The --stream parameter is deprecated, use fine_tunes.follow "
448
- "instead:\n\n"
449
- " openai api fine_tunes.follow -i {id}\n".format(id=args.id)
450
- ),
451
- )
452
-
453
- resp = openai.FineTune.list_events(id=args.id) # type: ignore
454
- print(resp)
455
-
456
- @classmethod
457
- def follow(cls, args):
458
- cls._stream_events(args.id)
459
-
460
- @classmethod
461
- def _stream_events(cls, job_id):
462
- def signal_handler(sig, frame):
463
- status = openai.FineTune.retrieve(job_id).status
464
- sys.stdout.write(
465
- "\nStream interrupted. Job is still {status}.\n"
466
- "To resume the stream, run:\n\n"
467
- " openai api fine_tunes.follow -i {job_id}\n\n"
468
- "To cancel your job, run:\n\n"
469
- " openai api fine_tunes.cancel -i {job_id}\n\n".format(
470
- status=status, job_id=job_id
471
- )
472
- )
473
- sys.exit(0)
474
-
475
- signal.signal(signal.SIGINT, signal_handler)
476
-
477
- events = openai.FineTune.stream_events(job_id)
478
- # TODO(rachel): Add a nifty spinner here.
479
- try:
480
- for event in events:
481
- sys.stdout.write(
482
- "[%s] %s"
483
- % (
484
- datetime.datetime.fromtimestamp(event["created_at"]),
485
- event["message"],
486
- )
487
- )
488
- sys.stdout.write("\n")
489
- sys.stdout.flush()
490
- except Exception:
491
- sys.stdout.write(
492
- "\nStream interrupted (client disconnected).\n"
493
- "To resume the stream, run:\n\n"
494
- " openai api fine_tunes.follow -i {job_id}\n\n".format(job_id=job_id)
495
- )
496
- return
497
-
498
- resp = openai.FineTune.retrieve(id=job_id)
499
- status = resp["status"]
500
- if status == "succeeded":
501
- sys.stdout.write("\nJob complete! Status: succeeded 🎉")
502
- sys.stdout.write(
503
- "\nTry out your fine-tuned model:\n\n"
504
- "openai api completions.create -m {model} -p <YOUR_PROMPT>".format(
505
- model=resp["fine_tuned_model"]
506
- )
507
- )
508
- elif status == "failed":
509
- sys.stdout.write(
510
- "\nJob failed. Please contact [email protected] if you need assistance."
511
- )
512
- sys.stdout.write("\n")
513
-
514
- @classmethod
515
- def cancel(cls, args):
516
- resp = openai.FineTune.cancel(id=args.id)
517
- print(resp)
518
-
519
- @classmethod
520
- def prepare_data(cls, args):
521
-
522
- sys.stdout.write("Analyzing...\n")
523
- fname = args.file
524
- auto_accept = args.quiet
525
- df, remediation = read_any_format(fname)
526
- apply_necessary_remediation(None, remediation)
527
-
528
- validators = get_validators()
529
-
530
- apply_validators(
531
- df,
532
- fname,
533
- remediation,
534
- validators,
535
- auto_accept,
536
- write_out_file_func=write_out_file,
537
- )
538
-
539
-
540
- class WandbLogger:
541
- @classmethod
542
- def sync(cls, args):
543
- resp = openai.wandb_logger.WandbLogger.sync(
544
- id=args.id,
545
- n_fine_tunes=args.n_fine_tunes,
546
- project=args.project,
547
- entity=args.entity,
548
- force=args.force,
549
- )
550
- print(resp)
551
-
552
-
553
- def tools_register(parser):
554
- subparsers = parser.add_subparsers(
555
- title="Tools", help="Convenience client side tools"
556
- )
557
-
558
- def help(args):
559
- parser.print_help()
560
-
561
- parser.set_defaults(func=help)
562
-
563
- sub = subparsers.add_parser("fine_tunes.prepare_data")
564
- sub.add_argument(
565
- "-f",
566
- "--file",
567
- required=True,
568
- help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing prompt-completion examples to be analyzed."
569
- "This should be the local file path.",
570
- )
571
- sub.add_argument(
572
- "-q",
573
- "--quiet",
574
- required=False,
575
- action="store_true",
576
- help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
577
- )
578
- sub.set_defaults(func=FineTune.prepare_data)
579
-
580
- sub = subparsers.add_parser("search.prepare_data")
581
- sub.add_argument(
582
- "-f",
583
- "--file",
584
- required=True,
585
- help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing text examples to be analyzed."
586
- "This should be the local file path.",
587
- )
588
- sub.add_argument(
589
- "-q",
590
- "--quiet",
591
- required=False,
592
- action="store_true",
593
- help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
594
- )
595
- sub.set_defaults(func=partial(Search.prepare_data, purpose="search"))
596
-
597
- sub = subparsers.add_parser("classifications.prepare_data")
598
- sub.add_argument(
599
- "-f",
600
- "--file",
601
- required=True,
602
- help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing text-label examples to be analyzed."
603
- "This should be the local file path.",
604
- )
605
- sub.add_argument(
606
- "-q",
607
- "--quiet",
608
- required=False,
609
- action="store_true",
610
- help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
611
- )
612
- sub.set_defaults(func=partial(Search.prepare_data, purpose="classifications"))
613
-
614
- sub = subparsers.add_parser("answers.prepare_data")
615
- sub.add_argument(
616
- "-f",
617
- "--file",
618
- required=True,
619
- help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing text examples to be analyzed."
620
- "This should be the local file path.",
621
- )
622
- sub.add_argument(
623
- "-q",
624
- "--quiet",
625
- required=False,
626
- action="store_true",
627
- help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
628
- )
629
- sub.set_defaults(func=partial(Search.prepare_data, purpose="answer"))
630
-
631
-
632
- def api_register(parser):
633
- # Engine management
634
- subparsers = parser.add_subparsers(help="All API subcommands")
635
-
636
- def help(args):
637
- parser.print_help()
638
-
639
- parser.set_defaults(func=help)
640
-
641
- sub = subparsers.add_parser("engines.list")
642
- sub.set_defaults(func=Engine.list)
643
-
644
- sub = subparsers.add_parser("engines.get")
645
- sub.add_argument("-i", "--id", required=True)
646
- sub.set_defaults(func=Engine.get)
647
-
648
- sub = subparsers.add_parser("engines.update")
649
- sub.add_argument("-i", "--id", required=True)
650
- sub.add_argument("-r", "--replicas", type=int)
651
- sub.set_defaults(func=Engine.update)
652
-
653
- sub = subparsers.add_parser("engines.generate")
654
- sub.add_argument("-i", "--id", required=True)
655
- sub.add_argument(
656
- "--stream", help="Stream tokens as they're ready.", action="store_true"
657
- )
658
- sub.add_argument("-c", "--context", help="An optional context to generate from")
659
- sub.add_argument("-l", "--length", help="How many tokens to generate", type=int)
660
- sub.add_argument(
661
- "-t",
662
- "--temperature",
663
- help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
664
-
665
- Mutually exclusive with `top_p`.""",
666
- type=float,
667
- )
668
- sub.add_argument(
669
- "-p",
670
- "--top_p",
671
- help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
672
-
673
- Mutually exclusive with `temperature`.""",
674
- type=float,
675
- )
676
- sub.add_argument(
677
- "-n",
678
- "--completions",
679
- help="How many parallel completions to run on this context",
680
- type=int,
681
- )
682
- sub.add_argument(
683
- "--logprobs",
684
- help="Include the log probabilites on the `logprobs` most likely tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is supplied, the API will always return the logprob of the generated token, so there may be up to `logprobs+1` elements in the response.",
685
- type=int,
686
- )
687
- sub.add_argument(
688
- "--stop", help="A stop sequence at which to stop generating tokens."
689
- )
690
- sub.add_argument(
691
- "-m",
692
- "--model",
693
- required=False,
694
- help="A model (most commonly a model ID) to generate from. Defaults to the engine's default model.",
695
- )
696
- sub.set_defaults(func=Engine.generate)
697
-
698
- sub = subparsers.add_parser("engines.search")
699
- sub.add_argument("-i", "--id", required=True)
700
- sub.add_argument(
701
- "-d",
702
- "--documents",
703
- action="append",
704
- help="List of documents to search over. Only one of `documents` or `file` may be supplied.",
705
- required=False,
706
- )
707
- sub.add_argument(
708
- "-f",
709
- "--file",
710
- help="A file id to search over. Only one of `documents` or `file` may be supplied.",
711
- required=False,
712
- )
713
- sub.add_argument(
714
- "--max_rerank",
715
- help="The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set.",
716
- type=int,
717
- default=200,
718
- )
719
- sub.add_argument(
720
- "--return_metadata",
721
- help="A special boolean flag for showing metadata. If set `true`, each document entry in the returned json will contain a 'metadata' field. Default to be `false`. This flag only takes effect when `file` is set.",
722
- type=bool,
723
- default=False,
724
- )
725
- sub.add_argument(
726
- "--version",
727
- help="The version of the search routing to use",
728
- )
729
-
730
- sub.add_argument("-q", "--query", required=True, help="Search query")
731
- sub.set_defaults(func=Engine.search)
732
-
733
- # Completions
734
- sub = subparsers.add_parser("completions.create")
735
- sub.add_argument(
736
- "-e",
737
- "--engine",
738
- help="The engine to use. See https://beta.openai.com/docs/engines for more about what engines are available.",
739
- )
740
- sub.add_argument(
741
- "-m",
742
- "--model",
743
- help="The model to use. At most one of `engine` or `model` should be specified.",
744
- )
745
- sub.add_argument(
746
- "--stream", help="Stream tokens as they're ready.", action="store_true"
747
- )
748
- sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
749
- sub.add_argument(
750
- "-M", "--max-tokens", help="The maximum number of tokens to generate", type=int
751
- )
752
- sub.add_argument(
753
- "-t",
754
- "--temperature",
755
- help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
756
-
757
- Mutually exclusive with `top_p`.""",
758
- type=float,
759
- )
760
- sub.add_argument(
761
- "-P",
762
- "--top_p",
763
- help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
764
-
765
- Mutually exclusive with `temperature`.""",
766
- type=float,
767
- )
768
- sub.add_argument(
769
- "-n",
770
- "--n",
771
- help="How many sub-completions to generate for each prompt.",
772
- type=int,
773
- )
774
- sub.add_argument(
775
- "--logprobs",
776
- help="Include the log probabilites on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
777
- type=int,
778
- )
779
- sub.add_argument(
780
- "--stop", help="A stop sequence at which to stop generating tokens."
781
- )
782
- sub.set_defaults(func=Completion.create)
783
-
784
- # Models
785
- sub = subparsers.add_parser("models.list")
786
- sub.set_defaults(func=Model.list)
787
-
788
- sub = subparsers.add_parser("models.get")
789
- sub.add_argument("-i", "--id", required=True, help="The model ID")
790
- sub.set_defaults(func=Model.get)
791
-
792
- sub = subparsers.add_parser("models.delete")
793
- sub.add_argument("-i", "--id", required=True, help="The model ID")
794
- sub.set_defaults(func=Model.delete)
795
-
796
- # Files
797
- sub = subparsers.add_parser("files.create")
798
-
799
- sub.add_argument(
800
- "-f",
801
- "--file",
802
- required=True,
803
- help="File to upload",
804
- )
805
- sub.add_argument(
806
- "-p",
807
- "--purpose",
808
- help="Why are you uploading this file? (see https://beta.openai.com/docs/api-reference/ for purposes)",
809
- required=True,
810
- )
811
- sub.add_argument(
812
- "-m",
813
- "--model",
814
- help="Model for search indexing (e.g. 'ada'). Only meaningful if --purpose is 'search'.",
815
- )
816
- sub.set_defaults(func=File.create)
817
-
818
- sub = subparsers.add_parser("files.get")
819
- sub.add_argument("-i", "--id", required=True, help="The files ID")
820
- sub.set_defaults(func=File.get)
821
-
822
- sub = subparsers.add_parser("files.delete")
823
- sub.add_argument("-i", "--id", required=True, help="The files ID")
824
- sub.set_defaults(func=File.delete)
825
-
826
- sub = subparsers.add_parser("files.list")
827
- sub.set_defaults(func=File.list)
828
-
829
- # Search
830
- sub = subparsers.add_parser("search.create")
831
-
832
- sub.add_argument(
833
- "-d",
834
- "--documents",
835
- help="Documents to search over",
836
- type=str,
837
- nargs="+",
838
- )
839
- sub.add_argument(
840
- "-q",
841
- "--query",
842
- required=True,
843
- help="Search query",
844
- )
845
- sub.add_argument(
846
- "-m",
847
- "--model",
848
- help="The model to search with",
849
- )
850
- sub.set_defaults(func=Search.create)
851
-
852
- # Finetune
853
- sub = subparsers.add_parser("fine_tunes.list")
854
- sub.set_defaults(func=FineTune.list)
855
-
856
- sub = subparsers.add_parser("fine_tunes.create")
857
- sub.add_argument(
858
- "-t",
859
- "--training_file",
860
- required=True,
861
- help="JSONL file containing prompt-completion examples for training. This can "
862
- "be the ID of a file uploaded through the OpenAI API (e.g. file-abcde12345), "
863
- 'a local file path, or a URL that starts with "http".',
864
- )
865
- sub.add_argument(
866
- "-v",
867
- "--validation_file",
868
- help="JSONL file containing prompt-completion examples for validation. This can "
869
- "be the ID of a file uploaded through the OpenAI API (e.g. file-abcde12345), "
870
- 'a local file path, or a URL that starts with "http".',
871
- )
872
- sub.add_argument(
873
- "--no_check_if_files_exist",
874
- dest="check_if_files_exist",
875
- action="store_false",
876
- help="If this argument is set and training_file or validation_file are file paths, immediately upload them. If this argument is not set, check if they may be duplicates of already uploaded files before uploading, based on file name and file size.",
877
- )
878
- sub.add_argument(
879
- "-m",
880
- "--model",
881
- help="The model to start fine-tuning from",
882
- )
883
- sub.add_argument(
884
- "--suffix",
885
- help="If set, this argument can be used to customize the generated fine-tuned model name."
886
- "All punctuation and whitespace in `suffix` will be replaced with a "
887
- "single dash, and the string will be lower cased. The max "
888
- "length of `suffix` is 40 chars. "
889
- "The generated name will match the form `{base_model}:ft-{org-title}:{suffix}-{timestamp}`. "
890
- 'For example, `openai api fine_tunes.create -t test.jsonl -m ada --suffix "custom model name"` '
891
- "could generate a model with the name "
892
- "ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
893
- )
894
- sub.add_argument(
895
- "--no_follow",
896
- action="store_true",
897
- help="If set, returns immediately after creating the job. Otherwise, streams events and waits for the job to complete.",
898
- )
899
- sub.add_argument(
900
- "--n_epochs",
901
- type=int,
902
- help="The number of epochs to train the model for. An epoch refers to one "
903
- "full cycle through the training dataset.",
904
- )
905
- sub.add_argument(
906
- "--batch_size",
907
- type=int,
908
- help="The batch size to use for training. The batch size is the number of "
909
- "training examples used to train a single forward and backward pass.",
910
- )
911
- sub.add_argument(
912
- "--learning_rate_multiplier",
913
- type=float,
914
- help="The learning rate multiplier to use for training. The fine-tuning "
915
- "learning rate is determined by the original learning rate used for "
916
- "pretraining multiplied by this value.",
917
- )
918
- sub.add_argument(
919
- "--prompt_loss_weight",
920
- type=float,
921
- help="The weight to use for the prompt loss. The optimum value here depends "
922
- "depends on your use case. This determines how much the model prioritizes "
923
- "learning from prompt tokens vs learning from completion tokens.",
924
- )
925
- sub.add_argument(
926
- "--compute_classification_metrics",
927
- action="store_true",
928
- help="If set, we calculate classification-specific metrics such as accuracy "
929
- "and F-1 score using the validation set at the end of every epoch.",
930
- )
931
- sub.set_defaults(compute_classification_metrics=None)
932
- sub.add_argument(
933
- "--classification_n_classes",
934
- type=int,
935
- help="The number of classes in a classification task. This parameter is "
936
- "required for multiclass classification.",
937
- )
938
- sub.add_argument(
939
- "--classification_positive_class",
940
- help="The positive class in binary classification. This parameter is needed "
941
- "to generate precision, recall and F-1 metrics when doing binary "
942
- "classification.",
943
- )
944
- sub.add_argument(
945
- "--classification_betas",
946
- type=float,
947
- nargs="+",
948
- help="If this is provided, we calculate F-beta scores at the specified beta "
949
- "values. The F-beta score is a generalization of F-1 score. This is only "
950
- "used for binary classification.",
951
- )
952
- sub.set_defaults(func=FineTune.create)
953
-
954
- sub = subparsers.add_parser("fine_tunes.get")
955
- sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
956
- sub.set_defaults(func=FineTune.get)
957
-
958
- sub = subparsers.add_parser("fine_tunes.results")
959
- sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
960
- sub.set_defaults(func=FineTune.results)
961
-
962
- sub = subparsers.add_parser("fine_tunes.events")
963
- sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
964
-
965
- # TODO(rachel): Remove this in 1.0
966
- sub.add_argument(
967
- "-s",
968
- "--stream",
969
- action="store_true",
970
- help="[DEPRECATED] If set, events will be streamed until the job is done. Otherwise, "
971
- "displays the event history to date.",
972
- )
973
- sub.set_defaults(func=FineTune.events)
974
-
975
- sub = subparsers.add_parser("fine_tunes.follow")
976
- sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
977
- sub.set_defaults(func=FineTune.follow)
978
-
979
- sub = subparsers.add_parser("fine_tunes.cancel")
980
- sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
981
- sub.set_defaults(func=FineTune.cancel)
982
-
983
-
984
- def wandb_register(parser):
985
- subparsers = parser.add_subparsers(
986
- title="wandb", help="Logging with Weights & Biases"
987
- )
988
-
989
- def help(args):
990
- parser.print_help()
991
-
992
- parser.set_defaults(func=help)
993
-
994
- sub = subparsers.add_parser("sync")
995
- sub.add_argument("-i", "--id", help="The id of the fine-tune job (optional)")
996
- sub.add_argument(
997
- "-n",
998
- "--n_fine_tunes",
999
- type=int,
1000
- default=None,
1001
- help="Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced.",
1002
- )
1003
- sub.add_argument(
1004
- "--project",
1005
- default="GPT-3",
1006
- help="""Name of the project where you're sending runs. By default, it is "GPT-3".""",
1007
- )
1008
- sub.add_argument(
1009
- "--entity",
1010
- help="Username or team name where you're sending runs. By default, your default entity is used, which is usually your username.",
1011
- )
1012
- sub.add_argument(
1013
- "--force",
1014
- action="store_true",
1015
- help="Forces logging and overwrite existing wandb run of the same fine-tune.",
1016
- )
1017
- sub.set_defaults(force=False)
1018
- sub.set_defaults(func=WandbLogger.sync)
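The subcommand registrations above all follow the same argparse pattern: each add_parser() call is paired with set_defaults(func=...), and the CLI entry point later dispatches on args.func. Below is a minimal, self-contained sketch of that pattern; the handler names and the "ft-123" id are illustrative and are not taken from the deleted CLI module.

```python
# Sketch of the add_parser()/set_defaults(func=...) dispatch pattern used above.
import argparse

def fine_tunes_list(args):
    print("would list fine-tune jobs")

def fine_tunes_get(args):
    print(f"would fetch fine-tune {args.id}")

parser = argparse.ArgumentParser(prog="demo")
subparsers = parser.add_subparsers()

sub = subparsers.add_parser("fine_tunes.list")
sub.set_defaults(func=fine_tunes_list)

sub = subparsers.add_parser("fine_tunes.get")
sub.add_argument("-i", "--id", required=True)
sub.set_defaults(func=fine_tunes_get)

args = parser.parse_args(["fine_tunes.get", "-i", "ft-123"])
args.func(args)  # dispatch to the handler chosen via set_defaults
```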
 
spaces/CVPR/LIVE/pybind11/tests/test_methods_and_attributes.cpp DELETED
@@ -1,372 +0,0 @@
1
- /*
2
-     tests/test_methods_and_attributes.cpp -- constructors, destructors, attribute access,
3
- __str__, argument and return value conventions
4
-
5
- Copyright (c) 2016 Wenzel Jakob <[email protected]>
6
-
7
- All rights reserved. Use of this source code is governed by a
8
- BSD-style license that can be found in the LICENSE file.
9
- */
10
-
11
- #include "pybind11_tests.h"
12
- #include "constructor_stats.h"
13
-
14
- #if !defined(PYBIND11_OVERLOAD_CAST)
15
- template <typename... Args>
16
- using overload_cast_ = pybind11::detail::overload_cast_impl<Args...>;
17
- #endif
18
-
19
- class ExampleMandA {
20
- public:
21
- ExampleMandA() { print_default_created(this); }
22
- ExampleMandA(int value) : value(value) { print_created(this, value); }
23
- ExampleMandA(const ExampleMandA &e) : value(e.value) { print_copy_created(this); }
24
- ExampleMandA(std::string&&) {}
25
- ExampleMandA(ExampleMandA &&e) : value(e.value) { print_move_created(this); }
26
- ~ExampleMandA() { print_destroyed(this); }
27
-
28
- std::string toString() {
29
- return "ExampleMandA[value=" + std::to_string(value) + "]";
30
- }
31
-
32
- void operator=(const ExampleMandA &e) { print_copy_assigned(this); value = e.value; }
33
- void operator=(ExampleMandA &&e) { print_move_assigned(this); value = e.value; }
34
-
35
- void add1(ExampleMandA other) { value += other.value; } // passing by value
36
- void add2(ExampleMandA &other) { value += other.value; } // passing by reference
37
- void add3(const ExampleMandA &other) { value += other.value; } // passing by const reference
38
- void add4(ExampleMandA *other) { value += other->value; } // passing by pointer
39
- void add5(const ExampleMandA *other) { value += other->value; } // passing by const pointer
40
-
41
- void add6(int other) { value += other; } // passing by value
42
- void add7(int &other) { value += other; } // passing by reference
43
- void add8(const int &other) { value += other; } // passing by const reference
44
- void add9(int *other) { value += *other; } // passing by pointer
45
- void add10(const int *other) { value += *other; } // passing by const pointer
46
-
47
- void consume_str(std::string&&) {}
48
-
49
- ExampleMandA self1() { return *this; } // return by value
50
- ExampleMandA &self2() { return *this; } // return by reference
51
- const ExampleMandA &self3() { return *this; } // return by const reference
52
- ExampleMandA *self4() { return this; } // return by pointer
53
- const ExampleMandA *self5() { return this; } // return by const pointer
54
-
55
- int internal1() { return value; } // return by value
56
- int &internal2() { return value; } // return by reference
57
- const int &internal3() { return value; } // return by const reference
58
- int *internal4() { return &value; } // return by pointer
59
- const int *internal5() { return &value; } // return by const pointer
60
-
61
- py::str overloaded() { return "()"; }
62
- py::str overloaded(int) { return "(int)"; }
63
- py::str overloaded(int, float) { return "(int, float)"; }
64
- py::str overloaded(float, int) { return "(float, int)"; }
65
- py::str overloaded(int, int) { return "(int, int)"; }
66
- py::str overloaded(float, float) { return "(float, float)"; }
67
- py::str overloaded(int) const { return "(int) const"; }
68
- py::str overloaded(int, float) const { return "(int, float) const"; }
69
- py::str overloaded(float, int) const { return "(float, int) const"; }
70
- py::str overloaded(int, int) const { return "(int, int) const"; }
71
- py::str overloaded(float, float) const { return "(float, float) const"; }
72
-
73
- static py::str overloaded(float) { return "static float"; }
74
-
75
- int value = 0;
76
- };
77
-
78
- struct TestProperties {
79
- int value = 1;
80
- static int static_value;
81
-
82
- int get() const { return value; }
83
- void set(int v) { value = v; }
84
-
85
- static int static_get() { return static_value; }
86
- static void static_set(int v) { static_value = v; }
87
- };
88
- int TestProperties::static_value = 1;
89
-
90
- struct TestPropertiesOverride : TestProperties {
91
- int value = 99;
92
- static int static_value;
93
- };
94
- int TestPropertiesOverride::static_value = 99;
95
-
96
- struct TestPropRVP {
97
- UserType v1{1};
98
- UserType v2{1};
99
- static UserType sv1;
100
- static UserType sv2;
101
-
102
- const UserType &get1() const { return v1; }
103
- const UserType &get2() const { return v2; }
104
- UserType get_rvalue() const { return v2; }
105
- void set1(int v) { v1.set(v); }
106
- void set2(int v) { v2.set(v); }
107
- };
108
- UserType TestPropRVP::sv1(1);
109
- UserType TestPropRVP::sv2(1);
110
-
111
- // Test None-allowed py::arg argument policy
112
- class NoneTester { public: int answer = 42; };
113
- int none1(const NoneTester &obj) { return obj.answer; }
114
- int none2(NoneTester *obj) { return obj ? obj->answer : -1; }
115
- int none3(std::shared_ptr<NoneTester> &obj) { return obj ? obj->answer : -1; }
116
- int none4(std::shared_ptr<NoneTester> *obj) { return obj && *obj ? (*obj)->answer : -1; }
117
- int none5(std::shared_ptr<NoneTester> obj) { return obj ? obj->answer : -1; }
118
-
119
- struct StrIssue {
120
- int val = -1;
121
-
122
- StrIssue() = default;
123
- StrIssue(int i) : val{i} {}
124
- };
125
-
126
- // Issues #854, #910: incompatible function args when member function/pointer is in unregistered base class
127
- class UnregisteredBase {
128
- public:
129
- void do_nothing() const {}
130
- void increase_value() { rw_value++; ro_value += 0.25; }
131
- void set_int(int v) { rw_value = v; }
132
- int get_int() const { return rw_value; }
133
- double get_double() const { return ro_value; }
134
- int rw_value = 42;
135
- double ro_value = 1.25;
136
- };
137
- class RegisteredDerived : public UnregisteredBase {
138
- public:
139
- using UnregisteredBase::UnregisteredBase;
140
- double sum() const { return rw_value + ro_value; }
141
- };
142
-
143
- // Test explicit lvalue ref-qualification
144
- struct RefQualified {
145
- int value = 0;
146
-
147
- void refQualified(int other) & { value += other; }
148
- int constRefQualified(int other) const & { return value + other; }
149
- };
150
-
151
- TEST_SUBMODULE(methods_and_attributes, m) {
152
- // test_methods_and_attributes
153
- py::class_<ExampleMandA> emna(m, "ExampleMandA");
154
- emna.def(py::init<>())
155
- .def(py::init<int>())
156
- .def(py::init<std::string&&>())
157
- .def(py::init<const ExampleMandA&>())
158
- .def("add1", &ExampleMandA::add1)
159
- .def("add2", &ExampleMandA::add2)
160
- .def("add3", &ExampleMandA::add3)
161
- .def("add4", &ExampleMandA::add4)
162
- .def("add5", &ExampleMandA::add5)
163
- .def("add6", &ExampleMandA::add6)
164
- .def("add7", &ExampleMandA::add7)
165
- .def("add8", &ExampleMandA::add8)
166
- .def("add9", &ExampleMandA::add9)
167
- .def("add10", &ExampleMandA::add10)
168
- .def("consume_str", &ExampleMandA::consume_str)
169
- .def("self1", &ExampleMandA::self1)
170
- .def("self2", &ExampleMandA::self2)
171
- .def("self3", &ExampleMandA::self3)
172
- .def("self4", &ExampleMandA::self4)
173
- .def("self5", &ExampleMandA::self5)
174
- .def("internal1", &ExampleMandA::internal1)
175
- .def("internal2", &ExampleMandA::internal2)
176
- .def("internal3", &ExampleMandA::internal3)
177
- .def("internal4", &ExampleMandA::internal4)
178
- .def("internal5", &ExampleMandA::internal5)
179
- #if defined(PYBIND11_OVERLOAD_CAST)
180
- .def("overloaded", py::overload_cast<>(&ExampleMandA::overloaded))
181
- .def("overloaded", py::overload_cast<int>(&ExampleMandA::overloaded))
182
- .def("overloaded", py::overload_cast<int, float>(&ExampleMandA::overloaded))
183
- .def("overloaded", py::overload_cast<float, int>(&ExampleMandA::overloaded))
184
- .def("overloaded", py::overload_cast<int, int>(&ExampleMandA::overloaded))
185
- .def("overloaded", py::overload_cast<float, float>(&ExampleMandA::overloaded))
186
- .def("overloaded_float", py::overload_cast<float, float>(&ExampleMandA::overloaded))
187
- .def("overloaded_const", py::overload_cast<int >(&ExampleMandA::overloaded, py::const_))
188
- .def("overloaded_const", py::overload_cast<int, float>(&ExampleMandA::overloaded, py::const_))
189
- .def("overloaded_const", py::overload_cast<float, int>(&ExampleMandA::overloaded, py::const_))
190
- .def("overloaded_const", py::overload_cast<int, int>(&ExampleMandA::overloaded, py::const_))
191
- .def("overloaded_const", py::overload_cast<float, float>(&ExampleMandA::overloaded, py::const_))
192
- #else
193
- // Use both the traditional static_cast method and the C++11 compatible overload_cast_
194
- .def("overloaded", overload_cast_<>()(&ExampleMandA::overloaded))
195
- .def("overloaded", overload_cast_<int>()(&ExampleMandA::overloaded))
196
- .def("overloaded", overload_cast_<int, float>()(&ExampleMandA::overloaded))
197
- .def("overloaded", static_cast<py::str (ExampleMandA::*)(float, int)>(&ExampleMandA::overloaded))
198
- .def("overloaded", static_cast<py::str (ExampleMandA::*)(int, int)>(&ExampleMandA::overloaded))
199
- .def("overloaded", static_cast<py::str (ExampleMandA::*)(float, float)>(&ExampleMandA::overloaded))
200
- .def("overloaded_float", overload_cast_<float, float>()(&ExampleMandA::overloaded))
201
- .def("overloaded_const", overload_cast_<int >()(&ExampleMandA::overloaded, py::const_))
202
- .def("overloaded_const", overload_cast_<int, float>()(&ExampleMandA::overloaded, py::const_))
203
- .def("overloaded_const", static_cast<py::str (ExampleMandA::*)(float, int) const>(&ExampleMandA::overloaded))
204
- .def("overloaded_const", static_cast<py::str (ExampleMandA::*)(int, int) const>(&ExampleMandA::overloaded))
205
- .def("overloaded_const", static_cast<py::str (ExampleMandA::*)(float, float) const>(&ExampleMandA::overloaded))
206
- #endif
207
- // test_no_mixed_overloads
208
- // Raise error if trying to mix static/non-static overloads on the same name:
209
- .def_static("add_mixed_overloads1", []() {
210
- auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA"));
211
- emna.def ("overload_mixed1", static_cast<py::str (ExampleMandA::*)(int, int)>(&ExampleMandA::overloaded))
212
- .def_static("overload_mixed1", static_cast<py::str ( *)(float )>(&ExampleMandA::overloaded));
213
- })
214
- .def_static("add_mixed_overloads2", []() {
215
- auto emna = py::reinterpret_borrow<py::class_<ExampleMandA>>(py::module::import("pybind11_tests.methods_and_attributes").attr("ExampleMandA"));
216
- emna.def_static("overload_mixed2", static_cast<py::str ( *)(float )>(&ExampleMandA::overloaded))
217
- .def ("overload_mixed2", static_cast<py::str (ExampleMandA::*)(int, int)>(&ExampleMandA::overloaded));
218
- })
219
- .def("__str__", &ExampleMandA::toString)
220
- .def_readwrite("value", &ExampleMandA::value);
221
-
222
- // test_copy_method
223
- // Issue #443: can't call copied methods in Python 3
224
- emna.attr("add2b") = emna.attr("add2");
225
-
226
- // test_properties, test_static_properties, test_static_cls
227
- py::class_<TestProperties>(m, "TestProperties")
228
- .def(py::init<>())
229
- .def_readonly("def_readonly", &TestProperties::value)
230
- .def_readwrite("def_readwrite", &TestProperties::value)
231
- .def_property("def_writeonly", nullptr,
232
- [](TestProperties& s,int v) { s.value = v; } )
233
- .def_property("def_property_writeonly", nullptr, &TestProperties::set)
234
- .def_property_readonly("def_property_readonly", &TestProperties::get)
235
- .def_property("def_property", &TestProperties::get, &TestProperties::set)
236
- .def_property("def_property_impossible", nullptr, nullptr)
237
- .def_readonly_static("def_readonly_static", &TestProperties::static_value)
238
- .def_readwrite_static("def_readwrite_static", &TestProperties::static_value)
239
- .def_property_static("def_writeonly_static", nullptr,
240
- [](py::object, int v) { TestProperties::static_value = v; })
241
- .def_property_readonly_static("def_property_readonly_static",
242
- [](py::object) { return TestProperties::static_get(); })
243
- .def_property_static("def_property_writeonly_static", nullptr,
244
- [](py::object, int v) { return TestProperties::static_set(v); })
245
- .def_property_static("def_property_static",
246
- [](py::object) { return TestProperties::static_get(); },
247
- [](py::object, int v) { TestProperties::static_set(v); })
248
- .def_property_static("static_cls",
249
- [](py::object cls) { return cls; },
250
- [](py::object cls, py::function f) { f(cls); });
251
-
252
- py::class_<TestPropertiesOverride, TestProperties>(m, "TestPropertiesOverride")
253
- .def(py::init<>())
254
- .def_readonly("def_readonly", &TestPropertiesOverride::value)
255
- .def_readonly_static("def_readonly_static", &TestPropertiesOverride::static_value);
256
-
257
- auto static_get1 = [](py::object) -> const UserType & { return TestPropRVP::sv1; };
258
- auto static_get2 = [](py::object) -> const UserType & { return TestPropRVP::sv2; };
259
- auto static_set1 = [](py::object, int v) { TestPropRVP::sv1.set(v); };
260
- auto static_set2 = [](py::object, int v) { TestPropRVP::sv2.set(v); };
261
- auto rvp_copy = py::return_value_policy::copy;
262
-
263
- // test_property_return_value_policies
264
- py::class_<TestPropRVP>(m, "TestPropRVP")
265
- .def(py::init<>())
266
- .def_property_readonly("ro_ref", &TestPropRVP::get1)
267
- .def_property_readonly("ro_copy", &TestPropRVP::get2, rvp_copy)
268
- .def_property_readonly("ro_func", py::cpp_function(&TestPropRVP::get2, rvp_copy))
269
- .def_property("rw_ref", &TestPropRVP::get1, &TestPropRVP::set1)
270
- .def_property("rw_copy", &TestPropRVP::get2, &TestPropRVP::set2, rvp_copy)
271
- .def_property("rw_func", py::cpp_function(&TestPropRVP::get2, rvp_copy), &TestPropRVP::set2)
272
- .def_property_readonly_static("static_ro_ref", static_get1)
273
- .def_property_readonly_static("static_ro_copy", static_get2, rvp_copy)
274
- .def_property_readonly_static("static_ro_func", py::cpp_function(static_get2, rvp_copy))
275
- .def_property_static("static_rw_ref", static_get1, static_set1)
276
- .def_property_static("static_rw_copy", static_get2, static_set2, rvp_copy)
277
- .def_property_static("static_rw_func", py::cpp_function(static_get2, rvp_copy), static_set2)
278
- // test_property_rvalue_policy
279
- .def_property_readonly("rvalue", &TestPropRVP::get_rvalue)
280
- .def_property_readonly_static("static_rvalue", [](py::object) { return UserType(1); });
281
-
282
- // test_metaclass_override
283
- struct MetaclassOverride { };
284
- py::class_<MetaclassOverride>(m, "MetaclassOverride", py::metaclass((PyObject *) &PyType_Type))
285
- .def_property_readonly_static("readonly", [](py::object) { return 1; });
286
-
287
- #if !defined(PYPY_VERSION)
288
- // test_dynamic_attributes
289
- class DynamicClass {
290
- public:
291
- DynamicClass() { print_default_created(this); }
292
- DynamicClass(const DynamicClass&) = delete;
293
- ~DynamicClass() { print_destroyed(this); }
294
- };
295
- py::class_<DynamicClass>(m, "DynamicClass", py::dynamic_attr())
296
- .def(py::init());
297
-
298
- class CppDerivedDynamicClass : public DynamicClass { };
299
- py::class_<CppDerivedDynamicClass, DynamicClass>(m, "CppDerivedDynamicClass")
300
- .def(py::init());
301
- #endif
302
-
303
- // test_bad_arg_default
304
- // Issue/PR #648: bad arg default debugging output
305
- #if !defined(NDEBUG)
306
- m.attr("debug_enabled") = true;
307
- #else
308
- m.attr("debug_enabled") = false;
309
- #endif
310
- m.def("bad_arg_def_named", []{
311
- auto m = py::module::import("pybind11_tests");
312
- m.def("should_fail", [](int, UnregisteredType) {}, py::arg(), py::arg("a") = UnregisteredType());
313
- });
314
- m.def("bad_arg_def_unnamed", []{
315
- auto m = py::module::import("pybind11_tests");
316
- m.def("should_fail", [](int, UnregisteredType) {}, py::arg(), py::arg() = UnregisteredType());
317
- });
318
-
319
- // test_accepts_none
320
- py::class_<NoneTester, std::shared_ptr<NoneTester>>(m, "NoneTester")
321
- .def(py::init<>());
322
- m.def("no_none1", &none1, py::arg().none(false));
323
- m.def("no_none2", &none2, py::arg().none(false));
324
- m.def("no_none3", &none3, py::arg().none(false));
325
- m.def("no_none4", &none4, py::arg().none(false));
326
- m.def("no_none5", &none5, py::arg().none(false));
327
- m.def("ok_none1", &none1);
328
- m.def("ok_none2", &none2, py::arg().none(true));
329
- m.def("ok_none3", &none3);
330
- m.def("ok_none4", &none4, py::arg().none(true));
331
- m.def("ok_none5", &none5);
332
-
333
- // test_str_issue
334
- // Issue #283: __str__ called on uninitialized instance when constructor arguments invalid
335
- py::class_<StrIssue>(m, "StrIssue")
336
- .def(py::init<int>())
337
- .def(py::init<>())
338
- .def("__str__", [](const StrIssue &si) {
339
- return "StrIssue[" + std::to_string(si.val) + "]"; }
340
- );
341
-
342
- // test_unregistered_base_implementations
343
- //
344
- // Issues #854/910: incompatible function args when member function/pointer is in unregistered
345
- // base class The methods and member pointers below actually resolve to members/pointers in
346
- // UnregisteredBase; before this test/fix they would be registered via lambda with a first
347
- // argument of an unregistered type, and thus uncallable.
348
- py::class_<RegisteredDerived>(m, "RegisteredDerived")
349
- .def(py::init<>())
350
- .def("do_nothing", &RegisteredDerived::do_nothing)
351
- .def("increase_value", &RegisteredDerived::increase_value)
352
- .def_readwrite("rw_value", &RegisteredDerived::rw_value)
353
- .def_readonly("ro_value", &RegisteredDerived::ro_value)
354
- // These should trigger a static_assert if uncommented
355
- //.def_readwrite("fails", &UserType::value) // should trigger a static_assert if uncommented
356
- //.def_readonly("fails", &UserType::value) // should trigger a static_assert if uncommented
357
- .def_property("rw_value_prop", &RegisteredDerived::get_int, &RegisteredDerived::set_int)
358
- .def_property_readonly("ro_value_prop", &RegisteredDerived::get_double)
359
- // This one is in the registered class:
360
- .def("sum", &RegisteredDerived::sum)
361
- ;
362
-
363
- using Adapted = decltype(py::method_adaptor<RegisteredDerived>(&RegisteredDerived::do_nothing));
364
- static_assert(std::is_same<Adapted, void (RegisteredDerived::*)() const>::value, "");
365
-
366
- // test_methods_and_attributes
367
- py::class_<RefQualified>(m, "RefQualified")
368
- .def(py::init<>())
369
- .def_readonly("value", &RefQualified::value)
370
- .def("refQualified", &RefQualified::refQualified)
371
- .def("constRefQualified", &RefQualified::constRefQualified);
372
- }
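For orientation, here is a hedged sketch of how a few of the bindings defined in this deleted test file would be exercised from Python, assuming the pybind11_tests extension module has been built and is importable; the values follow from the add6 and toString definitions above.

```python
# Hypothetical usage of the bindings above; requires the built pybind11_tests extension.
from pybind11_tests import methods_and_attributes as m

a = m.ExampleMandA(5)   # ExampleMandA(int) constructor
a.add6(7)               # add6 takes an int by value
print(a.value)          # 12 (exposed via def_readwrite)
print(str(a))           # "ExampleMandA[value=12]" via the bound toString
```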
 
spaces/CVPR/LIVE/thrust/thrust/detail/complex/math_private.h DELETED
@@ -1,136 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- * Copyright 2013 Filipe RNC Maia
4
- *
5
- * Licensed under the Apache License, Version 2.0 (the "License");
6
- * you may not use this file except in compliance with the License.
7
- * You may obtain a copy of the License at
8
- *
9
- * http://www.apache.org/licenses/LICENSE-2.0
10
- *
11
- * Unless required by applicable law or agreed to in writing, software
12
- * distributed under the License is distributed on an "AS IS" BASIS,
13
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- * See the License for the specific language governing permissions and
15
- * limitations under the License.
16
- */
17
-
18
- /*
19
- * ====================================================
20
- * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
21
- *
22
- * Developed at SunPro, a Sun Microsystems, Inc. business.
23
- * Permission to use, copy, modify, and distribute this
24
- * software is freely granted, provided that this notice
25
- * is preserved.
26
- * ====================================================
27
- */
28
-
29
- /* adapted from FreeBSD:
30
- * lib/msun/src/math_private.h
31
- */
32
- #pragma once
33
-
34
- #include <thrust/detail/config.h>
35
- #include <thrust/complex.h>
36
- #include <thrust/detail/cstdint.h>
37
-
38
- namespace thrust{
39
- namespace detail{
40
- namespace complex{
41
-
42
- using thrust::complex;
43
-
44
- typedef union
45
- {
46
- float value;
47
- uint32_t word;
48
- } ieee_float_shape_type;
49
-
50
- __host__ __device__
51
- inline void get_float_word(uint32_t & i, float d){
52
- ieee_float_shape_type gf_u;
53
- gf_u.value = (d);
54
- (i) = gf_u.word;
55
- }
56
-
57
- __host__ __device__
58
- inline void get_float_word(int32_t & i, float d){
59
- ieee_float_shape_type gf_u;
60
- gf_u.value = (d);
61
- (i) = gf_u.word;
62
- }
63
-
64
- __host__ __device__
65
- inline void set_float_word(float & d, uint32_t i){
66
- ieee_float_shape_type sf_u;
67
- sf_u.word = (i);
68
- (d) = sf_u.value;
69
- }
70
-
71
- // Assumes little endian ordering
72
- typedef union
73
- {
74
- double value;
75
- struct
76
- {
77
- uint32_t lsw;
78
- uint32_t msw;
79
- } parts;
80
- struct
81
- {
82
- uint64_t w;
83
- } xparts;
84
- } ieee_double_shape_type;
85
-
86
- __host__ __device__ inline
87
- void get_high_word(uint32_t & i,double d){
88
- ieee_double_shape_type gh_u;
89
- gh_u.value = (d);
90
- (i) = gh_u.parts.msw;
91
- }
92
-
93
- /* Set the more significant 32 bits of a double from an int. */
94
- __host__ __device__ inline
95
- void set_high_word(double & d, uint32_t v){
96
- ieee_double_shape_type sh_u;
97
- sh_u.value = (d);
98
- sh_u.parts.msw = (v);
99
- (d) = sh_u.value;
100
- }
101
-
102
-
103
- __host__ __device__ inline
104
- void insert_words(double & d, uint32_t ix0, uint32_t ix1){
105
- ieee_double_shape_type iw_u;
106
- iw_u.parts.msw = (ix0);
107
- iw_u.parts.lsw = (ix1);
108
- (d) = iw_u.value;
109
- }
110
-
111
- /* Get two 32 bit ints from a double. */
112
- __host__ __device__ inline
113
- void extract_words(uint32_t & ix0,uint32_t & ix1, double d){
114
- ieee_double_shape_type ew_u;
115
- ew_u.value = (d);
116
- (ix0) = ew_u.parts.msw;
117
- (ix1) = ew_u.parts.lsw;
118
- }
119
-
120
- /* Get two 32 bit ints from a double. */
121
- __host__ __device__ inline
122
- void extract_words(int32_t & ix0,int32_t & ix1, double d){
123
- ieee_double_shape_type ew_u;
124
- ew_u.value = (d);
125
- (ix0) = ew_u.parts.msw;
126
- (ix1) = ew_u.parts.lsw;
127
- }
128
-
129
- } // namespace complex
130
-
131
- } // namespace detail
132
-
133
- } // namespace thrust
134
-
135
-
136
- #include <thrust/detail/complex/c99math.h>
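The helpers in this deleted header all rely on one idea: reinterpret an IEEE-754 float or double through a union to read or write its 32-bit words. A rough Python illustration of extract_words() using struct follows; it is not part of Thrust and is shown only to clarify the bit layout, assuming a little-endian double as the header itself does.

```python
import struct

def extract_words(d):
    # Pack the double as 8 little-endian bytes, then read two unsigned 32-bit words.
    # The first word is the least-significant half, the second the most-significant,
    # mirroring the lsw/msw members of ieee_double_shape_type above.
    lsw, msw = struct.unpack("<II", struct.pack("<d", d))
    return msw, lsw  # same order as the C++ extract_words(ix0, ix1, d)

print(tuple(hex(w) for w in extract_words(1.0)))  # ('0x3ff00000', '0x0')
```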
 
spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/arithmetic_operators.h DELETED
@@ -1,432 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/functional/actor.h>
21
- #include <thrust/detail/functional/composite.h>
22
- #include <thrust/detail/functional/operators/operator_adaptors.h>
23
- #include <thrust/functional.h>
24
-
25
- namespace thrust
26
- {
27
- namespace detail
28
- {
29
- namespace functional
30
- {
31
-
32
- template<typename Eval>
33
- __host__ __device__
34
- actor<
35
- composite<
36
- transparent_unary_operator<thrust::negate<>>,
37
- actor<Eval>
38
- >
39
- >
40
- __host__ __device__
41
- operator-(const actor<Eval> &_1)
42
- {
43
- return compose(transparent_unary_operator<thrust::negate<>>(), _1);
44
- } // end operator-()
45
-
46
- // there's no standard unary_plus functional, so roll an ad hoc one here
47
- struct unary_plus
48
- {
49
- using is_transparent = void;
50
-
51
- __thrust_exec_check_disable__
52
- template <typename T1>
53
- __host__ __device__
54
- constexpr auto operator()(T1&& t1) const
55
- noexcept(noexcept(+THRUST_FWD(t1))) -> decltype(+THRUST_FWD(t1))
56
- {
57
- return +THRUST_FWD(t1);
58
- }
59
- };
60
-
61
- template<typename Eval>
62
- __host__ __device__
63
- actor<
64
- composite<
65
- transparent_unary_operator<unary_plus>,
66
- actor<Eval>
67
- >
68
- >
69
- operator+(const actor<Eval> &_1)
70
- {
71
- return compose(transparent_unary_operator<unary_plus>(), _1);
72
- } // end operator+()
73
-
74
- template<typename T1, typename T2>
75
- __host__ __device__
76
- actor<
77
- composite<
78
- transparent_binary_operator<thrust::plus<>>,
79
- actor<T1>,
80
- typename as_actor<T2>::type
81
- >
82
- >
83
- operator+(const actor<T1> &_1, const T2 &_2)
84
- {
85
- return compose(transparent_binary_operator<thrust::plus<>>(),
86
- make_actor(_1),
87
- make_actor(_2));
88
- } // end operator+()
89
-
90
- template<typename T1, typename T2>
91
- __host__ __device__
92
- actor<
93
- composite<
94
- transparent_binary_operator<thrust::plus<>>,
95
- typename as_actor<T1>::type,
96
- actor<T2>
97
- >
98
- >
99
- operator+(const T1 &_1, const actor<T2> &_2)
100
- {
101
- return compose(transparent_binary_operator<thrust::plus<>>(),
102
- make_actor(_1),
103
- make_actor(_2));
104
- } // end operator+()
105
-
106
- template<typename T1, typename T2>
107
- __host__ __device__
108
- actor<
109
- composite<
110
- transparent_binary_operator<thrust::plus<>>,
111
- actor<T1>,
112
- actor<T2>
113
- >
114
- >
115
- operator+(const actor<T1> &_1, const actor<T2> &_2)
116
- {
117
- return compose(transparent_binary_operator<thrust::plus<>>(),
118
- make_actor(_1),
119
- make_actor(_2));
120
- } // end operator+()
121
-
122
- template<typename T1, typename T2>
123
- __host__ __device__
124
- actor<
125
- composite<
126
- transparent_binary_operator<thrust::minus<>>,
127
- typename as_actor<T1>::type,
128
- actor<T2>
129
- >
130
- >
131
- operator-(const T1 &_1, const actor<T2> &_2)
132
- {
133
- return compose(transparent_binary_operator<thrust::minus<>>(),
134
- make_actor(_1),
135
- make_actor(_2));
136
- } // end operator-()
137
-
138
- template<typename T1, typename T2>
139
- __host__ __device__
140
- actor<
141
- composite<
142
- transparent_binary_operator<thrust::minus<>>,
143
- actor<T1>,
144
- typename as_actor<T2>::type
145
- >
146
- >
147
- operator-(const actor<T1> &_1, const T2 &_2)
148
- {
149
- return compose(transparent_binary_operator<thrust::minus<>>(),
150
- make_actor(_1),
151
- make_actor(_2));
152
- } // end operator-()
153
-
154
- template<typename T1, typename T2>
155
- __host__ __device__
156
- actor<
157
- composite<
158
- transparent_binary_operator<thrust::minus<>>,
159
- actor<T1>,
160
- actor<T2>
161
- >
162
- >
163
- operator-(const actor<T1> &_1, const actor<T2> &_2)
164
- {
165
- return compose(transparent_binary_operator<thrust::minus<>>(),
166
- make_actor(_1),
167
- make_actor(_2));
168
- } // end operator-()
169
-
170
- template<typename T1, typename T2>
171
- __host__ __device__
172
- actor<
173
- composite<
174
- transparent_binary_operator<thrust::multiplies<>>,
175
- typename as_actor<T1>::type,
176
- actor<T2>
177
- >
178
- >
179
- operator*(const T1 &_1, const actor<T2> &_2)
180
- {
181
- return compose(transparent_binary_operator<thrust::multiplies<>>(),
182
- make_actor(_1),
183
- make_actor(_2));
184
- } // end operator*()
185
-
186
- template<typename T1, typename T2>
187
- __host__ __device__
188
- actor<
189
- composite<
190
- transparent_binary_operator<thrust::multiplies<>>,
191
- actor<T1>,
192
- typename as_actor<T2>::type
193
- >
194
- >
195
- operator*(const actor<T1> &_1, const T2 &_2)
196
- {
197
- return compose(transparent_binary_operator<thrust::multiplies<>>(),
198
- make_actor(_1),
199
- make_actor(_2));
200
- } // end operator*()
201
-
202
- template<typename T1, typename T2>
203
- __host__ __device__
204
- actor<
205
- composite<
206
- transparent_binary_operator<thrust::multiplies<>>,
207
- actor<T1>,
208
- actor<T2>
209
- >
210
- >
211
- operator*(const actor<T1> &_1, const actor<T2> &_2)
212
- {
213
- return compose(transparent_binary_operator<thrust::multiplies<>>(),
214
- make_actor(_1),
215
- make_actor(_2));
216
- } // end operator*()
217
-
218
- template<typename T1, typename T2>
219
- __host__ __device__
220
- actor<
221
- composite<
222
- transparent_binary_operator<thrust::divides<>>,
223
- actor<T1>,
224
- typename as_actor<T2>::type
225
- >
226
- >
227
- operator/(const actor<T1> &_1, const T2 &_2)
228
- {
229
- return compose(transparent_binary_operator<thrust::divides<>>(),
230
- make_actor(_1),
231
- make_actor(_2));
232
- } // end operator/()
233
-
234
- template<typename T1, typename T2>
235
- __host__ __device__
236
- actor<
237
- composite<
238
- transparent_binary_operator<thrust::divides<>>,
239
- typename as_actor<T1>::type,
240
- actor<T2>
241
- >
242
- >
243
- operator/(const T1 &_1, const actor<T2> &_2)
244
- {
245
- return compose(transparent_binary_operator<thrust::divides<>>(),
246
- make_actor(_1),
247
- make_actor(_2));
248
- } // end operator/()
249
-
250
- template<typename T1, typename T2>
251
- __host__ __device__
252
- actor<
253
- composite<
254
- transparent_binary_operator<thrust::divides<>>,
255
- actor<T1>,
256
- actor<T2>
257
- >
258
- >
259
- operator/(const actor<T1> &_1, const actor<T2> &_2)
260
- {
261
- return compose(transparent_binary_operator<thrust::divides<>>(),
262
- make_actor(_1),
263
- make_actor(_2));
264
- } // end operator/()
265
-
266
- template<typename T1, typename T2>
267
- __host__ __device__
268
- actor<
269
- composite<
270
- transparent_binary_operator<thrust::modulus<>>,
271
- actor<T1>,
272
- typename as_actor<T2>::type
273
- >
274
- >
275
- operator%(const actor<T1> &_1, const T2 &_2)
276
- {
277
- return compose(transparent_binary_operator<thrust::modulus<>>(),
278
- make_actor(_1),
279
- make_actor(_2));
280
- } // end operator%()
281
-
282
- template<typename T1, typename T2>
283
- __host__ __device__
284
- actor<
285
- composite<
286
- transparent_binary_operator<thrust::modulus<>>,
287
- typename as_actor<T1>::type,
288
- actor<T2>
289
- >
290
- >
291
- operator%(const T1 &_1, const actor<T2> &_2)
292
- {
293
- return compose(transparent_binary_operator<thrust::modulus<void>>(),
294
- make_actor(_1),
295
- make_actor(_2));
296
- } // end operator%()
297
-
298
- template<typename T1, typename T2>
299
- __host__ __device__
300
- actor<
301
- composite<
302
- transparent_binary_operator<thrust::modulus<>>,
303
- actor<T1>,
304
- actor<T2>
305
- >
306
- >
307
- operator%(const actor<T1> &_1, const actor<T2> &_2)
308
- {
309
- return compose(transparent_binary_operator<thrust::modulus<>>(),
310
- make_actor(_1),
311
- make_actor(_2));
312
- } // end operator%()
313
-
314
- // there's no standard prefix_increment functional, so roll an ad hoc one here
315
- struct prefix_increment
316
- {
317
- using is_transparent = void;
318
-
319
- __thrust_exec_check_disable__
320
- template <typename T1>
321
- __host__ __device__
322
- constexpr auto operator()(T1&& t1) const
323
- noexcept(noexcept(++THRUST_FWD(t1))) -> decltype(++THRUST_FWD(t1))
324
- {
325
- return ++THRUST_FWD(t1);
326
- }
327
- }; // end prefix_increment
328
-
329
- template<typename Eval>
330
- __host__ __device__
331
- actor<
332
- composite<
333
- transparent_unary_operator<prefix_increment>,
334
- actor<Eval>
335
- >
336
- >
337
- operator++(const actor<Eval> &_1)
338
- {
339
- return compose(transparent_unary_operator<prefix_increment>(), _1);
340
- } // end operator++()
341
-
342
-
343
- // there's no standard postfix_increment functional, so roll an ad hoc one here
344
- struct postfix_increment
345
- {
346
- using is_transparent = void;
347
-
348
- __thrust_exec_check_disable__
349
- template <typename T1>
350
- __host__ __device__
351
- constexpr auto operator()(T1&& t1) const
352
- noexcept(noexcept(THRUST_FWD(t1)++)) -> decltype(THRUST_FWD(t1)++)
353
- {
354
- return THRUST_FWD(t1)++;
355
- }
356
- }; // end postfix_increment
357
-
358
- template<typename Eval>
359
- __host__ __device__
360
- actor<
361
- composite<
362
- transparent_unary_operator<postfix_increment>,
363
- actor<Eval>
364
- >
365
- >
366
- operator++(const actor<Eval> &_1, int)
367
- {
368
- return compose(transparent_unary_operator<postfix_increment>(), _1);
369
- } // end operator++()
370
-
371
-
372
- // there's no standard prefix_decrement functional, so roll an ad hoc one here
373
- struct prefix_decrement
374
- {
375
- using is_transparent = void;
376
-
377
- __thrust_exec_check_disable__
378
- template <typename T1>
379
- __host__ __device__
380
- constexpr auto operator()(T1&& t1) const
381
- noexcept(noexcept(--THRUST_FWD(t1))) -> decltype(--THRUST_FWD(t1))
382
- {
383
- return --THRUST_FWD(t1);
384
- }
385
- }; // end prefix_decrement
386
-
387
- template<typename Eval>
388
- __host__ __device__
389
- actor<
390
- composite<
391
- transparent_unary_operator<prefix_decrement>,
392
- actor<Eval>
393
- >
394
- >
395
- operator--(const actor<Eval> &_1)
396
- {
397
- return compose(transparent_unary_operator<prefix_decrement>(), _1);
398
- } // end operator--()
399
-
400
-
401
- // there's no standard postfix_decrement functional, so roll an ad hoc one here
402
- struct postfix_decrement
403
- {
404
- using is_transparent = void;
405
-
406
- __thrust_exec_check_disable__
407
- template <typename T1>
408
- __host__ __device__
409
- constexpr auto operator()(T1&& t1) const
410
- noexcept(noexcept(THRUST_FWD(t1)--)) -> decltype(THRUST_FWD(t1)--)
411
- {
412
- return THRUST_FWD(t1)--;
413
- }
414
- }; // end prefix_increment
415
-
416
- template<typename Eval>
417
- __host__ __device__
418
- actor<
419
- composite<
420
- transparent_unary_operator<postfix_decrement>,
421
- actor<Eval>
422
- >
423
- >
424
- operator--(const actor<Eval> &_1, int)
425
- {
426
- return compose(transparent_unary_operator<postfix_decrement>(), _1);
427
- } // end operator--()
428
-
429
- } // end functional
430
- } // end detail
431
- } // end thrust
432
-
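The operator overloads in this deleted header are expression templates: applying +, -, *, /, % (or ++/--) to actor<> objects does not evaluate anything, it composes a new callable. A tiny Python analogy of that design, purely illustrative and not Thrust API: a placeholder whose overloaded operators return a new deferred callable.

```python
# Toy analogy of the actor/composite pattern: operators build a deferred
# callable instead of evaluating immediately. Illustrative only.
class Placeholder:
    def __init__(self, fn=lambda x: x):
        self.fn = fn

    def __call__(self, x):
        return self.fn(x)

    def __add__(self, other):
        return Placeholder(lambda x: self.fn(x) + other)

    def __mul__(self, other):
        return Placeholder(lambda x: self.fn(x) * other)

_1 = Placeholder()
expr = _1 * 3 + 2   # composes, does not evaluate
print(expr(4))      # 14
```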