Commit
·
1c4653a
1
Parent(s):
112afe0
Update parquet files (step 51 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/17TheWord/RealESRGAN/realesrgan/archs/__init__.py +0 -10
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full !!TOP!! Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training.md +0 -109
- spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2018 Win64 Keygen Serial Key How to Activate Revit for Free.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Chipgenius V4 00 0022 Rc3rar.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Cinema 4d R20 HOT Crack.md +0 -47
- spaces/1gistliPinn/ChatGPT4/Examples/Coreldraw X9.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Experience the Drama and Action of The VIP-2 Lalkar 2012 Movie in High Definition Download it in 1080p.md +0 -6
- spaces/1phancelerku/anime-remove-background/Aplikasi Live Bar Bar Mod Apk Terbaru 2023 No Banned dan No Sensor.md +0 -5
- spaces/1phancelerku/anime-remove-background/Download Bingo 75 and Play with Friends Online.md +0 -170
- spaces/1phancelerku/anime-remove-background/Download Nulls Brawl 38.111 APK with Hank and Maisie Skins.md +0 -132
- spaces/2023Liu2023/bingo/src/components/ui/tooltip.tsx +0 -30
- spaces/801artistry/RVC801/audioEffects.py +0 -37
- spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/Dockerfile +0 -35
- spaces/AI4PD/hexviz/hexviz/🧬Attention_Visualization.py +0 -306
- spaces/AIFILMS/generate_human_motion/pyrender/pyrender/version.py +0 -1
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/image_degradation/bsrgan_light.py +0 -650
- spaces/AIZerotoHero-Health4All/03-BiomedNER-1117-Gradio/app.py +0 -81
- spaces/ASJMO/freegpt/server/backend.py +0 -176
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py +0 -42
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/concatUint8Arrays.ts +0 -12
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Factory.js +0 -11
- spaces/AlexZou/Deploy_Restoration/Underwater.py +0 -46
- spaces/Ameaou/academic-chatgpt3.1/.github/ISSUE_TEMPLATE/feature_request.md +0 -10
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/conv2d_gradfix.py +0 -170
- spaces/Amrrs/gradio-sentiment-analyzer/app.py +0 -16
- spaces/Amrrs/image-to-text-app/README.md +0 -12
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py +0 -812
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_layers_utils.py +0 -530
- spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/README.md +0 -23
- spaces/Andy1621/uniformer_image_segmentation/configs/_base_/schedules/schedule_80k.py +0 -9
- spaces/Ank0X0/text-to-3d-shap-e-webui/README.md +0 -13
- spaces/Apex-X/ROOPOK/roop/typing.py +0 -7
- spaces/Armored-Atom/Image-To-Motion/README.md +0 -13
- spaces/Asahi402/White-box-Cartoonization/wbc/guided_filter.py +0 -87
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py +0 -506
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/resultdict.py +0 -16
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/utf1632prober.py +0 -225
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/helpers.py +0 -1088
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/_framework_compat.py +0 -55
- spaces/Benson/text-generation/Examples/Ark Survival Evolved Unblocked No Hay Descarga.md +0 -70
- spaces/Benson/text-generation/Examples/Beach Buggy Racing 2 Beta Apk.md +0 -78
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/_distutils.py +0 -173
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py +0 -104
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py +0 -35
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/__init__.py +0 -25
- spaces/BramVanroy/llama-2-13b-chat-dutch-space/style.css +0 -9
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/merge.h +0 -80
- spaces/CVPR/WALT/mmdet/core/bbox/samplers/score_hlr_sampler.py +0 -264
- spaces/CVPR/WALT/mmdet/models/backbones/resnext.py +0 -153
spaces/17TheWord/RealESRGAN/realesrgan/archs/__init__.py
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
import importlib
|
2 |
-
from basicsr.utils import scandir
|
3 |
-
from os import path as osp
|
4 |
-
|
5 |
-
# automatically scan and import arch modules for registry
|
6 |
-
# scan all the files that end with '_arch.py' under the archs folder
|
7 |
-
arch_folder = osp.dirname(osp.abspath(__file__))
|
8 |
-
arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
|
9 |
-
# import all the arch modules
|
10 |
-
_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full !!TOP!! Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training.md
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>FULL Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training</h1>
|
3 |
-
<p>If you are a pilot or a flight enthusiast, you might have heard of Jeppesen, a company that provides aeronautical data and software solutions for aviation. Jeppesen offers a range of products that help you plan, navigate, and manage your flights with ease and efficiency. In this article, we will introduce you to three of their popular products: FliteStar, JeppView, and FliteDeck. We will also show you how to use them to enhance your flight experience.</p>
|
4 |
-
<h2>Introduction</h2>
|
5 |
-
<p>Jeppesen is a subsidiary of Boeing that specializes in providing flight information and digital solutions for the aviation industry. Jeppesen has been in business since 1934, when it started as a company that produced airway manuals for pilots. Today, Jeppesen serves more than one million pilots and 7,000 customers worldwide, including airlines, airports, military, government agencies, flight schools, and individual pilots.</p>
|
6 |
-
<h2>FULL Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training</h2><br /><p><b><b>Download File</b> ➡ <a href="https://byltly.com/2uKyrz">https://byltly.com/2uKyrz</a></b></p><br /><br />
|
7 |
-
<p>Jeppesen offers a variety of products and services that cover different aspects of flight operations, such as flight planning, navigation, weather, performance, fuel management, crew scheduling, airport operations, flight training, and more. Some of their well-known products are FliteStar, JeppView, and FliteDeck. These are software applications that allow you to access and use Jeppesen's aeronautical data and charts on your computer or mobile device.</p>
|
8 |
-
<h3>What is Jeppesen FliteStar?</h3>
|
9 |
-
<p>Jeppesen FliteStar is a flight planning software that helps you create optimal routes for your flights. You can use FliteStar to plan flights for various types of aircraft, from single-engine piston to jet airliners. You can also customize your flight parameters, such as speed, altitude, fuel consumption, weight and balance, weather conditions, airspace restrictions, preferred airports, navaids, waypoints, and more.</p>
|
10 |
-
<p>FliteStar uses Jeppesen's high-quality aeronautical data and charts to calculate the best route for your flight. You can view your route on various map types, such as political, terrain, satellite, or street maps. You can also overlay different layers of information on the map, such as weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC boundaries, airways, navaids, airports, runways, obstacles, terrain elevation contours, and more.</p>
|
11 |
-
<p>FliteStar also generates detailed reports for your flight plan, such as navigation log, fuel summary, weight and balance, takeoff and landing performance, flight summary, and more. You can print these reports or export them to other formats, such as PDF, CSV, XML, or KML. You can also import or export your flight plans to other applications, such as Garmin Pilot, ForeFlight, or Jeppesen FliteDeck.</p>
|
12 |
-
<p>How to use Jeppesen FliteStar V941 for flight planning<br />
|
13 |
-
JeppView V361 software download and installation guide<br />
|
14 |
-
FliteDeck Chart Training course online with Jeppesen<br />
|
15 |
-
FULL Jeppesen FliteStar V941 review and features<br />
|
16 |
-
JeppView V361 vs other electronic flight bag solutions<br />
|
17 |
-
FliteDeck Chart Training certification and benefits<br />
|
18 |
-
FULL Jeppesen FliteStar V941 tutorial and tips<br />
|
19 |
-
JeppView V361 user manual and troubleshooting<br />
|
20 |
-
FliteDeck Chart Training syllabus and objectives<br />
|
21 |
-
FULL Jeppesen FliteStar V941 price and discounts<br />
|
22 |
-
JeppView V361 system requirements and compatibility<br />
|
23 |
-
FliteDeck Chart Training feedback and testimonials<br />
|
24 |
-
FULL Jeppesen FliteStar V941 demo and trial version<br />
|
25 |
-
JeppView V361 updates and enhancements<br />
|
26 |
-
FliteDeck Chart Training instructors and experts<br />
|
27 |
-
FULL Jeppesen FliteStar V941 alternatives and competitors<br />
|
28 |
-
JeppView V361 support and customer service<br />
|
29 |
-
FliteDeck Chart Training FAQs and resources<br />
|
30 |
-
FULL Jeppesen FliteStar V941 license and activation<br />
|
31 |
-
JeppView V361 integration and customization<br />
|
32 |
-
FliteDeck Chart Training best practices and standards<br />
|
33 |
-
FULL Jeppesen FliteStar V941 comparison and analysis<br />
|
34 |
-
JeppView V361 navigation and interface<br />
|
35 |
-
FliteDeck Chart Training evaluation and assessment<br />
|
36 |
-
FULL Jeppesen FliteStar V941 pros and cons<br />
|
37 |
-
JeppView V361 security and reliability<br />
|
38 |
-
FliteDeck Chart Training requirements and prerequisites<br />
|
39 |
-
FULL Jeppesen FliteStar V941 benefits and advantages<br />
|
40 |
-
JeppView V361 data and charts<br />
|
41 |
-
FliteDeck Chart Training duration and schedule<br />
|
42 |
-
FULL Jeppesen FliteStar V941 case studies and examples<br />
|
43 |
-
JeppView V361 functions and features<br />
|
44 |
-
FliteDeck Chart Training outcomes and results<br />
|
45 |
-
FULL Jeppesen FliteStar V941 feedback and ratings<br />
|
46 |
-
JeppView V361 tips and tricks<br />
|
47 |
-
FliteDeck Chart Training cost and value<br />
|
48 |
-
FULL Jeppesen FliteStar V941 forum and community<br />
|
49 |
-
JeppView V361 errors and bugs<br />
|
50 |
-
FliteDeck Chart Training content and materials<br />
|
51 |
-
FULL Jeppesen FliteStar V941 coupons and deals<br />
|
52 |
-
JeppView V361 performance and speed<br />
|
53 |
-
FliteDeck Chart Training quality and accuracy<br />
|
54 |
-
FULL Jeppesen FliteStar V941 videos and screenshots<br />
|
55 |
-
JeppView V361 FAQSs and help center</p>
|
56 |
-
<h3>What is Jeppesen JeppView?</h3>
|
57 |
-
<p>Jeppesen JeppView is a chart management software that allows you to access and use Jeppesen's electronic charts on your computer or mobile device. You can use JeppView to view, print, or download thousands of charts from Jeppesen's database, covering more than 220 countries and regions. These charts include enroute charts, terminal charts, approach charts, airport diagrams, and more.</p>
|
58 |
-
<p>JeppView also lets you customize and organize your charts according to your preferences. You can create folders and binders to store your charts and arrange them in any order you want. You can also annotate your charts with notes, highlights, symbols, or drawings. You can also sync your charts across multiple devices and update them regularly with the latest data from Jeppesen.</p>
|
59 |
-
<p>JeppView also comes with a chart viewer and tools that help you interact with your charts and enhance your situational awareness. You can use the chart viewer to zoom in or out of your charts, pan around the map, rotate or flip the chart orientation, switch between day or night mode, and more. You can also use the tools to measure distances or bearings on the map, calculate magnetic variation or true north direction, display latitude or longitude coordinates, show or hide information layers on the chart, and more.</p>
|
60 |
-
<h3>What is Jeppesen FliteDeck?</h3>
|
61 |
-
<p>Jeppesen FliteDeck is a moving map software that helps you navigate your flights with real-time information and guidance. You can use FliteDeck on your iPad or Windows tablet to access and use Jeppesen's electronic charts and data during your flight. You can also connect FliteDeck to an external GPS receiver or an avionics system to receive accurate position and speed data.</p>
|
62 |
-
<p>FliteDeck has three main modes: map mode, chart mode, and document mode. In map mode, you can view your current position and track on a moving map that shows various layers of information, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. You can also overlay your flight plan route on the map and view relevant information about each waypoint.</p>
|
63 |
-
<p>In chart mode, you can view any chart from Jeppesen's database that matches your current position or destination. You can also view multiple charts at once by splitting the screen horizontally or vertically. You can interact with the charts in the same way as in JeppView.</p>
|
64 |
-
<p>In document mode, you can view any document from Jeppesen's database that relates to your flight operation. These documents include airport information pages (AIP), standard instrument departure (SID) procedures, standard terminal arrival (STAR) procedures, instrument approach procedures (IAP), minimum safe altitude (MSA) diagrams, operational notes, checklists, briefings, and more. You can also interact with the documents in the same way as in JeppView.</p>
|
65 |
-
<h2>Benefits of using Jeppesen products</h2>
|
66 |
-
<p>Using Jeppesen products can bring many benefits to your flight operation, such as:</p>
|
67 |
-
<h3>Enhanced flight planning and navigation</h3>
|
68 |
-
<p>With Jeppesen products, you can plan and navigate your flights with ease and efficiency. You can create optimal routes for your flights based on various factors, such as aircraft performance, weather conditions, airspace restrictions, fuel consumption, and more. You can also access and use Jeppesen's high-quality aeronautical data and charts on your computer or mobile device. These data and charts are updated regularly with the latest information from official sources, such as ICAO, FAA, EASA, and more. You can also view your current position and track on a moving map that shows various layers of information, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. This way, you can enhance your flight planning and navigation skills and optimize your flight performance.</p>
|
69 |
-
<h3>Improved situational awareness and safety</h3>
|
70 |
-
<p>With Jeppesen products, you can improve and safety during your flight. You can access and use Jeppesen's electronic charts on your computer or mobile device during your flight. These charts include enroute charts, terminal charts, approach charts, airport diagrams, and more. These charts are designed to provide you with clear and consistent information and guidance for your flight operation. You can also customize and organize your charts according to your preferences. You can create folders and binders to store your charts and arrange them in any order you want. You can also annotate your charts with notes, highlights, symbols, or drawings. You can also sync your charts across multiple devices and update them regularly with the latest data from Jeppesen. You can also view multiple charts at once by splitting the screen horizontally or vertically. You can also interact with the charts in the same way as in JeppView. You can also connect FliteDeck to an external GPS receiver or an avionics system to receive accurate position and speed data. This way, you can improve your situational awareness and safety during your flight.</p>
|
71 |
-
<h3>Reduced workload and costs</h3>
|
72 |
-
<p>With Jeppesen products, you can reduce your workload and costs for your flight operation. You can save time and effort by using FliteStar to plan your flights with ease and efficiency. You can also save money by using FliteStar to optimize your fuel consumption and route selection. You can also save space and weight by using JeppView and FliteDeck to access and use Jeppesen's electronic charts on your computer or mobile device. You don't need to carry bulky and heavy paper charts anymore. You can also save money by subscribing to Jeppesen's services that provide you with regular updates of their aeronautical data and charts. This way, you can reduce your workload and costs for your flight operation.</p>
|
73 |
-
<h2>How to use Jeppesen FliteStar V941</h2>
|
74 |
-
<p>In this section, we will show you how to use Jeppesen FliteStar V941 to plan your flights with ease and efficiency. We will cover the following topics: - Installing and activating FliteStar - Creating and modifying routes - Viewing and printing charts and reports - Exporting and importing data</p>
|
75 |
-
<h3>Installing and activating FliteStar</h3>
|
76 |
-
<p>To install and activate FliteStar on your computer, you need to follow these steps: 1. Download the FliteStar installer from Jeppesen's website or insert the FliteStar CD-ROM into your computer's drive. 2. Run the installer and follow the instructions on the screen to complete the installation process. 3. Launch FliteStar from your desktop or start menu. 4. Enter your customer number and serial number that you received from Jeppesen when you purchased FliteStar. You can also enter a temporary activation code if you are using a trial version of FliteStar. 5. Click on Activate to activate FliteStar on your computer. You need to have an internet connection for this step. 6. Wait for the activation process to finish. You will see a confirmation message when it is done. 7. Click on OK to close the activation window and start using FliteStar.</p>
|
77 |
-
<h3>Creating and modifying routes</h3>
|
78 |
-
<p>To create and modify routes for your flights with FliteStar, you need to follow these steps: 1. Launch FliteStar from your desktop or start menu. 2. Click on File > New Route to create a new route or File > Open Route to open an existing route. 3. Enter the departure and destination airports for your route in the Route window. You can also enter intermediate waypoints if you want to add more stops or waypoints to your route. 4. Click on Calculate Route to calculate the best route for your flight based on various factors, such as aircraft performance, weather conditions, airspace restrictions, fuel consumption, and more. You can also click on Options > Route Options to customize your route parameters, such as speed, altitude, fuel consumption, weight and balance, weather conditions, preferred airports, navaids, waypoints, and more. 5. View your route on the map window. You can use the toolbar buttons or the mouse wheel to zoom in or out of the map. You can also use the mouse pointer to pan around the map. You can also overlay different layers of information on the map, such as weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC boundaries, airways, navaids, airports, runways, obstacles, terrain elevation contours, and more. You can use the Options > Map Options menu to customize these layers. 6. Modify your route if you want to change it. You can use the Route window or the map window to modify your route. You can add, delete, move, or edit waypoints on your route. You can also drag and drop waypoints on the map window to change their position or order. You can also use the Tools > Route Tools menu to perform various actions on your route, such as reverse route, optimize altitude, optimize fuel, find alternate airports, and more. 7. Save your route if you want to keep it for future use. You can use the File > Save Route or File > Save Route As menu to save your route.</p>
|
79 |
-
<h3>Viewing and printing charts and reports</h3>
|
80 |
-
<p>To view and print charts and reports for your flight plan with FliteStar, you need to follow these steps: 1. Launch FliteStar from your desktop or start menu. 2. Open an existing route or create a new route for your flight plan. 3. Click on View > Charts/Reports to view various charts and reports for your flight plan in a separate window. 4. Select the type of chart or report that you want to view from the drop-down list at the top of the window. You can choose from various types of charts and reports, such as navigation log, fuel summary, weight and balance, takeoff and landing performance, flight summary, and more. 5. View the selected chart or report in the window below. You can use the toolbar buttons or the mouse wheel to zoom in or out of the chart or report. You can also use the mouse pointer to pan around the chart or report. 6. Print the selected chart or report if you want to have a hard copy of it. You can use the File > Print menu or click on the printer icon at the top right corner of the window to print the chart or report.</p>
|
81 |
-
<h3>Exporting and importing data</h3>
|
82 |
-
<p>To export and import data for your flight plans with FliteStar, you need to follow these steps: 1. Launch FliteStar from your desktop or start menu. 2. Open an existing route or create a new route for your flight plan. 3. Click on File > Export Data or File > Import Data to export or import data for your flight plan in various formats, such as PDF, CSV, XML, or KML. 4. Select the type of data that you want to export or import from the drop-down list at the top of the window. You can choose from various types of data, such as routes, waypoints, airports, navaids, airways, airspace boundaries, obstacles, terrain elevation contours, and more. 5. Select the format that you want to export or import your data in from the drop-down list below the type of data. You can choose from various formats, such as PDF, CSV, XML, or KML. 6. Select the destination folder or source file for your exported or imported data by clicking on Browse button next to the format drop-down list. 7. Click on Export Data or Import Data button at the bottom right corner of the window to export or import your data.</p>
|
83 |
-
<h2>How to use Jeppesen JeppView V361</h2>
|
84 |
-
<p>In this section, we will show you how to use Jeppesen JeppView V361 to access and use Jeppesen's electronic charts on your computer or mobile device. We will cover the following topics: - Installing and activating JeppView - Accessing and updating charts - Customizing and organizing charts - Using the chart viewer and tools</p>
|
85 |
-
<h3>Installing and activating JeppView</h3>
|
86 |
-
<p>To install and activate JeppView on your computer or mobile device, you need to follow these steps: 1. Download the JeppView installer from Jeppesen's website or insert the JeppView CD-ROM into your computer's drive. the screen to complete the installation process. 3. Launch JeppView from your desktop or start menu. 4. Enter your customer number and serial number that you received from Jeppesen when you purchased JeppView. You can also enter a temporary activation code if you are using a trial version of JeppView. 5. Click on Activate to activate JeppView on your computer or mobile device. You need to have an internet connection for this step. 6. Wait for the activation process to finish. You will see a confirmation message when it is done. 7. Click on OK to close the activation window and start using JeppView.</p>
|
87 |
-
<h3>Accessing and updating charts</h3>
|
88 |
-
<p>To access and update charts for your flights with JeppView, you need to follow these steps: 1. Launch JeppView from your desktop or start menu. 2. Click on File > Open Chart to open an existing chart or File > New Chart to create a new chart. 3. Enter the airport code or name for the chart that you want to access in the Chart window. You can also enter a region code or name if you want to access charts for a specific region. 4. Click on Search to search for the chart that you want to access in Jeppesen's database. You will see a list of charts that match your search criteria in the Chart window. 5. Select the chart that you want to access from the list and click on Open to open it in a separate window. 6. View the chart in the window below. You can use the toolbar buttons or the mouse wheel to zoom in or out of the chart. You can also use the mouse pointer to pan around the chart. 7. Update your charts regularly with the latest data from Jeppesen. You can use the File > Update Charts menu or click on the update icon at the top right corner of the window to update your charts. You need to have an internet connection and a valid subscription for this step.</p>
|
89 |
-
<h3>Customizing and organizing charts</h3>
|
90 |
-
<p>To customize and organize your charts according to your preferences with JeppView, you need to follow these steps: 1. Launch JeppView from your desktop or start menu. 2. Open an existing chart or create a new chart for your flight plan. 3. Click on View > Folders/Binders to view, create, or edit folders and binders for your charts in a separate window. 4. Use the Folders/Binders window to store and arrange your charts in any order you want. You can create folders and binders by clicking on the new folder or new binder icons at the top left corner of the window. You can also rename, delete, or move folders and binders by right-clicking on them and selecting the appropriate option from the context menu. You can also drag and drop charts into folders or binders to add them to your collection. 5. Click on OK to close the Folders/Binders window and save your changes. 6. Click on View > Annotations to view, create, or edit annotations for your charts in a separate window. 7. Use the Annotations window to add notes, highlights, symbols, or drawings to your charts. You can use the toolbar buttons at the top of the window to select the type, color, size, or shape of your annotations. You can also use the mouse pointer to draw or write on your charts. You can also edit or delete annotations by right-clicking on them and selecting the appropriate option from the context menu. 8. Click on OK to close the Annotations window and save your changes.</p>
|
91 |
-
<h3>Using the chart viewer and tools</h3>
|
92 |
-
<p>To use the chart viewer and tools to interact with your charts and enhance your situational awareness with JeppView, you need to follow these steps: 1. Launch JeppView from your desktop or start menu. 2. Open an existing chart or create a new chart for your flight plan. 3. Use the chart viewer to zoom in or out of your chart, pan around the chart, rotate or flip the chart orientation, switch between day or night mode, and more. You can use the toolbar buttons or keyboard shortcuts at the top of the window to perform these actions. You can also use the mouse wheel or keyboard arrows to zoom in or out of the chart. You can also use the mouse pointer or keyboard arrows to pan around the chart. 4. Use the tools to measure distances or bearings on the chart, calculate magnetic variation or true north direction, display latitude or longitude coordinates, show or hide information layers on the chart, and more. You can use the toolbar buttons or keyboard shortcuts at the bottom of the window to perform these actions.</p>
|
93 |
-
<h2>How to use Jeppesen FliteDeck V361</h2>
|
94 |
-
<p>In this section, we will show you how to use Jeppesen FliteDeck V361 to navigate your flights with real-time information and guidance. We will cover the following topics: - Installing and activating FliteDeck - Configuring and synchronizing FliteDeck - Navigating and interacting with FliteDeck the map, chart, and document modes</p>
|
95 |
-
<h3>Installing and activating FliteDeck</h3>
|
96 |
-
<p>To install and activate FliteDeck on your iPad or Windows tablet, you need to follow these steps: 1. Download the FliteDeck app from the App Store or the Microsoft Store or insert the FliteDeck CD-ROM into your tablet's drive. 2. Run the app and follow the instructions on the screen to complete the installation process. 3. Launch FliteDeck from your home screen or start menu. 4. Enter your customer number and serial number that you received from Jeppesen when you purchased FliteDeck. You can also enter a temporary activation code if you are using a trial version of FliteDeck. 5. Click on Activate to activate FliteDeck on your tablet. You need to have an internet connection for this step. 6. Wait for the activation process to finish. You will see a confirmation message when it is done. 7. Click on OK to close the activation window and start using FliteDeck.</p>
|
97 |
-
<h3>Configuring and synchronizing FliteDeck</h3>
|
98 |
-
<p>To configure and synchronize FliteDeck with your preferences and data, you need to follow these steps: 1. Launch FliteDeck from your home screen or start menu. 2. Click on Settings > General to configure your general settings for FliteDeck, such as language, units, time zone, date format, brightness, sound, and more. 3. Click on Settings > Aircraft to configure your aircraft settings for FliteDeck, such as aircraft type, tail number, performance data, weight and balance data, fuel data, and more. 4. Click on Settings > Charts to configure your chart settings for FliteDeck, such as chart type, chart orientation, chart scale, chart color, chart annotations, and more. 5. Click on Settings > Data to configure your data settings for FliteDeck, such as data source, data update frequency, data storage location, data backup location, and more. 6. Click on Sync > Charts/Data to synchronize your charts and data with Jeppesen's database. You need to have an internet connection and a valid subscription for this step. You will see a progress bar and a status message when the synchronization is in progress. You will see a confirmation message when it is done. 7. Click on Sync > Devices to synchronize your charts and data across multiple devices that have FliteDeck installed. You need to have an internet connection and a valid subscription for this step. You will see a list of devices that are connected to your account in the Sync window. You can select or deselect the devices that you want to synchronize with by tapping on them. You will see a progress bar and a status message when the synchronization is in progress. You will see a confirmation message when it is done.</p>
|
99 |
-
<h3>Navigating and interacting with FliteDeck</h3>
|
100 |
-
<p>To navigate and interact with FliteDeck during your flight, you need to follow these steps: 1. Launch FliteDeck from your home screen or start menu. 2. Click on Flight Plan > New Flight Plan to create a new flight plan or Flight Plan > Open Flight Plan to open an existing flight plan. 3. Enter the departure and destination airports for your flight plan in the Flight Plan window. You can also enter intermediate waypoints if you want to add more stops or waypoints to your flight plan. 4. Click on Calculate Route to calculate the best route for your flight based on various factors, such as aircraft performance, weather conditions, airspace restrictions, fuel consumption, and more. You can also click on Options > Route Options to customize your route parameters, such as speed, altitude, fuel consumption, weight and balance, weather conditions, preferred airports, navaids, waypoints, and more. 5. View your route on the map window. You can use the toolbar buttons or the mouse wheel to zoom in or out of the map. You can also use the mouse pointer to pan around the map. You can also overlay different layers of information on the map, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. You can use the Options > Map Options menu to customize these layers. 6. Modify your route if you want to change it. You can use the Flight Plan window or the map window to modify your route. You can add, delete, move, or edit waypoints on your route. You can also drag and drop waypoints on the map window to change their position or order. You can also use the Tools > Route Tools menu to perform various actions on your route, such as reverse route, optimize altitude, optimize fuel, find alternate airports, and more. 7. Save your flight plan if you want to keep it for future use. 
You can use the File > Save Flight Plan or File > Save Flight Plan As menu to save your flight plan.</p>
|
101 |
-
<h3>Using the map, chart, and document modes</h3>
|
102 |
-
<p>To use the map, chart, and document modes to access and use Jeppesen's electronic charts and data during your flight with FliteDeck, you need to follow these steps: 1. Launch FliteDeck from your home screen or start menu. 2. Open an existing flight plan or create a new flight plan for your flight. 3. Use the map mode to view your current position and track on a moving map that shows various layers of information, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. You can also overlay your flight plan route on the map and view relevant information about each waypoint. You can use the toolbar buttons or keyboard shortcuts at the top of the window to perform various actions on the map mode, such as zoom in or out, pan around, rotate or flip the map orientation, switch between day or night mode, show or hide information layers, and more. 4. Use the chart mode to view any chart from Jeppesen's database that matches your current position or destination. You can also view multiple charts at once by splitting the screen horizontally or vertically. You can interact with the charts in the same way as in JeppView. You can use the toolbar buttons or keyboard shortcuts at the bottom of the window to perform various actions on the chart mode, such as measure distances or bearings, calculate magnetic variation or true north direction, display latitude or longitude coordinates, show or hide information layers, and more. 5. Use the document mode to view any document from Jeppesen's database that relates to your flight operation. These documents include airport information pages (AIP), standard instrument departure (SID) procedures, standard terminal arrival (STAR) procedures, instrument approach procedures (IAP), minimum safe altitude (MSA) diagrams, operational notes, checklists, briefings, and more. 
You can interact with the documents in the same way as in JeppView. the toolbar buttons or keyboard shortcuts at the bottom of the window to perform various actions on the document mode, such as zoom in or out, pan around, rotate or flip the document orientation, switch between day or night mode, annotate the document with notes, highlights, symbols, or drawings, and more.</p>
|
103 |
-
<h2>Conclusion</h2>
|
104 |
-
<p>In this article, we have introduced you to three of Jeppesen's popular products: FliteStar, JeppView, and FliteDeck. We have also shown you how to use them to enhance your flight experience. We hope that you have found this article informative and useful. If you want to learn more about Jeppesen's products and services, you can visit their website at www.jeppesen.com or contact their customer support at 1-800-621-5377.</p>
|
105 |
-
<h2>FAQs</h2>
|
106 |
-
<p>Here are some frequently asked questions about Jeppesen's products: - Q: How much do Jeppesen's products cost? - A: The cost of Jeppesen's products depends on various factors, such as the type of product, the coverage area, the subscription period, and the number of devices. You can check the prices and options for each product on Jeppesen's website or contact their customer support for more details. - Q: How can I update my Jeppesen's products with the latest data and charts? - A: You can update your Jeppesen's products with the latest data and charts by using the File > Update Charts menu or clicking on the update icon at the top right corner of the window in each product. You need to have an internet connection and a valid subscription for this step. - Q: How can I synchronize my Jeppesen's products across multiple devices? - A: You can synchronize your Jeppesen's products across multiple devices by using the Sync > Devices menu or clicking on the sync icon at the top right corner of the window in each product. You need to have an internet connection and a valid subscription for this step. - Q: How can I export or import my flight plans to other applications? - A: You can export or import your flight plans to other applications by using the File > Export Data or File > Import Data menu in FliteStar. You can choose from various formats, such as PDF, CSV, XML, or KML. You can also export or import your flight plans to other applications that support FliteStar's format, such as Garmin Pilot, ForeFlight, or Jeppesen FliteDeck. - Q: How can I contact Jeppesen's customer support if I have any questions or issues with their products? - A: You can contact Jeppesen's customer support by phone at 1-800-621-5377 or by email at [email protected]. You can also visit their website at www.jeppesen.com for more information and resources.</p>
|
107 |
-
</p> 0a6ba089eb<br />
|
108 |
-
<br />
|
109 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2018 Win64 Keygen Serial Key How to Activate Revit for Free.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Autodesk Revit 2018 Win64 Keygen Serial Key keygen</h2><br /><p><b><b>Download Zip</b> ⏩ <a href="https://imgfil.com/2uxX8v">https://imgfil.com/2uxX8v</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Chipgenius V4 00 0022 Rc3rar.md
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
<h2>Chipgenius V4 00 0022 Rc3rar</h2><br /><p><b><b>Download File</b> ✏ <a href="https://imgfil.com/2uy0zH">https://imgfil.com/2uy0zH</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
Chipgenius V4 00 0022 Rc3rar DOWNLOAD: ->->->-> chipgenius. chipgenius download. chipgenius 2021 a9c2e16639 Home ... ->->-> chipgenius v4 00 0022 rc3rar DOWNLOAD chipgenius v4 00 0022 rc3rar DOWNLOAD â–·â–·â–· Download chipgenius v4 00 0022 rc3rar free.
|
4 |
-
Download chipgenius v4 00 0022 rc3rar. chipgenius v4 00 0022 rc3rar DOWNLOAD > ChipGenius > ChipGenius v4.00.22 USB 3.0 - Vendor > USB3.0 < Device > USB 3.0 Host Controller > Device ID.
|
5 |
-
ChipGenius v4.21a + USB 3.0 / 3.0a / 2.0 / 2.0a USBDeview - USB device analysis software.
|
6 |
-
Download free ChipGenius -> USB 8a78ff9644<br />
|
7 |
-
<br />
|
8 |
-
<br />
|
9 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Cinema 4d R20 HOT Crack.md
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cinema 4D R20: A Powerful 3D Software for Creative Professionals</h1>
|
3 |
-
|
4 |
-
<p>Cinema 4D R20 is a 3D modeling, animation, simulation and rendering software solution that offers a fast, powerful, flexible and stable toolset for design, motion graphics, VFX, AR/MR/VR, game development and all types of visualization professionals[^2^]. Cinema 4D R20 introduces high-end features for VFX and motion graphics artists including node-based materials, volume modeling, robust CAD import and a dramatic evolution of the MoGraph toolset[^3^].</p>
|
5 |
-
|
6 |
-
<h2>Node-Based Materials</h2>
|
7 |
-
|
8 |
-
<p>Cinema 4D R20 introduces a new node-based material system that allows you to create complex and realistic materials with ease. You can use nodes to build shaders from scratch or use presets and assets from the online library. You can also convert any existing material to nodes and edit it as you wish. Node-based materials are fully compatible with standard materials and can be rendered with any engine[^3^].</p>
|
9 |
-
<h2>Cinema 4d R20 Crack</h2><br /><p><b><b>Download Zip</b> ❤❤❤ <a href="https://imgfil.com/2uy111">https://imgfil.com/2uy111</a></b></p><br /><br />
|
10 |
-
|
11 |
-
<h2>Volume Modeling</h2>
|
12 |
-
|
13 |
-
<p>Cinema 4D R20 adds a new way of modeling with volumes. You can use splines, particles, noises or any other object to create volumetric shapes that can be sculpted, textured and animated. You can also combine volumes using boolean operations or smooth them with filters. Volume modeling opens up new possibilities for organic and abstract designs[^3^].</p>
|
14 |
-
|
15 |
-
<h2>Robust CAD Import</h2>
|
16 |
-
|
17 |
-
<p>Cinema 4D R20 improves the import of CAD files with support for Solidworks, STEP, Catia, JT and IGES formats. You can import CAD models with high accuracy and detail, preserving the original structure and hierarchy. You can also adjust the tessellation quality and optimize the geometry for rendering. Cinema 4D R20 makes it easy to integrate CAD data into your 3D workflow[^3^].</p>
|
18 |
-
|
19 |
-
<h2>MoGraph Evolution</h2>
|
20 |
-
|
21 |
-
<p>Cinema 4D R20 enhances the MoGraph toolset, a procedural modeling and animation system that gives motion designers the ability to quickly and easily create complex and abstract animations. MoGraph now features Fields, a new concept that allows you to control the strength of effects using falloffs, shapes, noises or shaders. You can also layer multiple fields and blend them with different modes. Fields offer infinite possibilities for creative animations[^3^].</p>
|
22 |
-
|
23 |
-
<h2>Conclusion</h2>
|
24 |
-
|
25 |
-
<p>Cinema 4D R20 is a break-through version of its iconic 3D software that delivers high-end features for VFX and motion graphics artists. Whether you are working on your own or in a team, Cinema 4D R20 produces stunning results with its fast, powerful, flexible and stable toolset. Cinema 4D R20 is widely recognized as one of the easiest and most accessible 3D packages to learn and use. To learn more about Cinema 4D R20, visit <strong>https://www.maxon.net/en/cinema-4d</strong> [^1^]</p>
|
26 |
-
|
27 |
-
<h2>OpenVDB Integration</h2>
|
28 |
-
|
29 |
-
<p>Cinema 4D R20 integrates OpenVDB, an open-sourced technology that allows you to manipulate 3D data in a volumetric way. OpenVDB is widely used in the VFX industry for creating realistic smoke, fire, clouds and liquids. Cinema 4D R20 provides a set of tools to create, edit and render OpenVDB volumes. You can also import and export OpenVDB files from other applications.</p>
|
30 |
-
|
31 |
-
<h2>ProRender Enhancements</h2>
|
32 |
-
|
33 |
-
<p>Cinema 4D R20 improves the ProRender engine, a GPU-based renderer that supports physically-based rendering and real-time viewport feedback. ProRender now supports sub-surface scattering, motion blur, multi-passes and more. You can also use ProRender with node-based materials and volume modeling. ProRender is a fast and easy way to achieve photorealistic results with Cinema 4D R20.</p>
|
34 |
-
|
35 |
-
<h2>Other Improvements</h2>
|
36 |
-
|
37 |
-
<p>Cinema 4D R20 also includes many other improvements and additions that make your 3D workflow more efficient and enjoyable. Some of these are:
|
38 |
-
|
39 |
-
- A new Commander that lets you quickly access commands, tags, objects and presets
|
40 |
-
- A new Capsules system that lets you create custom nodes and assets for scene nodes
|
41 |
-
- A new Multi-Instances mode that lets you create millions of objects with low memory consumption
|
42 |
-
- A new Sound Effector that lets you create animations based on audio files
|
43 |
-
- A new Bevel Deformer that lets you apply non-destructive bevels to any object
|
44 |
-
- A new UV Transform tool that lets you manipulate UV coordinates with ease
|
45 |
-
- A new Viewport HUD that lets you customize the information displayed in the viewport</p> d5da3c52bf<br />
|
46 |
-
<br />
|
47 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Coreldraw X9.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Coreldraw X9</h2><br /><p><b><b>Download</b> ★ <a href="https://imgfil.com/2uxYFm">https://imgfil.com/2uxYFm</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
Corel has released a new version of its flagship image editing app CorelDRAW, which is actually a bundle of multiple applications sold under ... 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Experience the Drama and Action of The VIP-2 Lalkar 2012 Movie in High Definition Download it in 1080p.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>the VIP-2 Lalkar 2012 movie download 1080p</h2><br /><p><b><b>Download Zip</b> ⚙⚙⚙ <a href="https://imgfil.com/2uxXUa">https://imgfil.com/2uxXUa</a></b></p><br /><br />
|
2 |
-
|
3 |
-
aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Aplikasi Live Bar Bar Mod Apk Terbaru 2023 No Banned dan No Sensor.md
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Aplikasi Live Bar Bar Mod Apk: What You Need to Know</h1> | | Introduction: Explain what aplikasi live bar bar mod apk is, why it is popular, and what are the risks and benefits of using it. | <p>If you are looking for a new way to entertain yourself and connect with other people online, you might have heard of aplikasi live bar bar mod apk. This is a modified version of an original app that allows you to watch live streaming videos of various content creators, including those who offer adult content. Aplikasi live bar bar mod apk is popular because it gives you access to private rooms and premium features for free. However, it also comes with some risks, such as viruses, malware, and legal issues. In this article, we will tell you everything you need to know about aplikasi live bar bar mod apk, including how it works, where to download it, and how to use it safely.</p>
|
3 |
-
<h2>aplikasi live bar bar mod apk</h2><br /><p><b><b>Download Zip</b> ✑ ✑ ✑ <a href="https://jinyurl.com/2uNTYz">https://jinyurl.com/2uNTYz</a></b></p><br /><br /> | | H2: How Aplikasi Live Bar Bar Mod Apk Works | <h2>How Aplikasi Live Bar Bar Mod Apk Works</h2> | | Explain how the app works, what are the features, and how to install it. | <p>Aplikasi live bar bar mod apk is a modified version of an original app that offers live streaming services. The original app can be any app that has live streaming features, such as Bigo Live, Uplive, MLiveU, Mango Live, and others. The mod apk version is created by third-party developers who hack the original app and modify its code to unlock some features that are normally paid or restricted. Some of these features include:</p> <ul> <li>Access to private rooms where you can watch exclusive content from your favorite streamers.</li> <li>Unlimited coins or diamonds that you can use to send gifts or tips to the streamers.</li> <li>No ads or pop-ups that interrupt your viewing experience.</li> <li>No verification or registration required to use the app.</li> </ul> <p>To use aplikasi live bar bar mod apk, you need to download it from a reliable source. You can find many websites that offer links to download the app, but be careful because some of them might contain viruses or malware that can harm your device. You also need to enable unknown sources on your device settings to allow the installation of apps from outside the Google Play Store. After installing the app, you can open it and browse through the categories and rooms to find the content that suits your taste. 
You can also interact with the streamers and other viewers by sending messages or gifts.</p> | | H2: Why Aplikasi Live Bar Bar Mod Apk Is Popular | <h2>Why Aplikasi Live Bar Bar Mod Apk Is Popular</h2> | | Outline | Article | | --- | --- | | H2: How to Use Aplikasi Live Bar Bar Mod Apk Safely | <h2>How to Use Aplikasi Live Bar Bar Mod Apk Safely</h2> | | Outline | Article | | --- | --- | | H3: FAQs (continued) | <h3>FAQs (continued)</h3> | | Provide 5 unique FAQs related to the topic of the article, along with their answers. | sources on your device settings and install the app from a reliable source. After installing the app, you can open it and browse through the categories and rooms to find the content that suits your taste. You can also interact with the streamers and other viewers by sending messages or gifts.</li> <li><b>Q: What are some of the best streamers on aplikasi live bar bar mod apk?</b></li> <li>A: Some of the best streamers on aplikasi live bar bar mod apk are those who offer high-quality content, engaging personality, and attractive appearance. Some examples are:</li> <ul> <li><b>Luna Maya</b>. She is a famous Indonesian actress, singer, and model who has a lot of fans on the app. She often streams her daily life, singing, dancing, and chatting with her viewers.</li> <li><b>Raffi Ahmad</b>. He is a popular Indonesian actor, presenter, and businessman who has a lot of followers on the app. He often streams his travels, hobbies, and family activities with his wife and children.</li> <li><b>Sunny Leone</b>. She is a well-known Indian-American actress, model, and former porn star who has a lot of admirers on the app. She often streams her sexy shows, workouts, and beauty tips with her viewers.</li> </ul> </ul> | | Custom Message | |</p> 401be4b1e0<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Bingo 75 and Play with Friends Online.md
DELETED
@@ -1,170 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Bingo 75: How to Play and Win at This Fun Game</h1>
|
3 |
-
<p>Bingo is a classic game of chance that has been enjoyed by millions of people around the world for centuries. But did you know that there are different types of bingo games, each with its own rules, variations, and benefits? One of the most popular and exciting bingo games is Bingo 75, also known as American bingo or pattern bingo. In this article, we will tell you everything you need to know about Bingo 75, how to download the best bingo 75 apps and websites, and how to play and win at this fun game.</p>
|
4 |
-
<h2>download bingo 75</h2><br /><p><b><b>DOWNLOAD</b> ✯ <a href="https://jinyurl.com/2uNTFh">https://jinyurl.com/2uNTFh</a></b></p><br /><br />
|
5 |
-
<h2>What is Bingo 75 and Why You Should Try It</h2>
|
6 |
-
<p>Bingo 75 is a bingo game that uses a card with a grid of 25 squares, arranged in five columns and five rows. Each square contains a number from 1 to 75, except for the center square, which is marked as a free space. The columns are labeled with the letters B, I, N, G, and O, corresponding to the range of numbers in each column. For example, the B column contains numbers from 1 to 15, the I column contains numbers from 16 to 30, and so on.</p>
|
7 |
-
<p>The goal of Bingo 75 is to cover a specific pattern of squares on your card, based on the numbers that are called out by a caller or a random number generator. The patterns can vary from game to game, but they usually include horizontal, vertical, or diagonal lines, as well as shapes like letters, numbers, or symbols. Some examples of common patterns are shown below:</p>
|
8 |
-
<table>
|
9 |
-
<tr>
|
10 |
-
<td><img src="https://www.wikihow.com/images/thumb/7/7f/Play-Bingo-Step-13-Version-4.jpg/v4-460px-Play-Bingo-Step-13-Version-4.jpg.webp" alt="Bingo card with X pattern"></td>
|
11 |
-
<td><img src="https://www.wikihow.com/images/thumb/8/8a/Play-Bingo-Step-14-Version-4.jpg/v4-460px-Play-Bingo-Step-14-Version-4.jpg.webp" alt="Bingo card with four corners pattern"></td>
|
12 |
-
<td><img src="https://www.wikihow.com/images/thumb/9/9f/Play-Bingo-Step-15-Version-4.jpg/v4-460px-Play-Bingo-Step-15-Version-4.jpg.webp" alt="Bingo card with blackout pattern"></td>
|
13 |
-
</tr>
|
14 |
-
<tr>
|
15 |
-
<td>X pattern</td>
|
16 |
-
<td>Four corners pattern</td>
|
17 |
-
<td>Blackout pattern</td>
|
18 |
-
</tr>
|
19 |
-
</table>
|
20 |
-
<p>You should try Bingo 75 because it is a fun and easy game that anyone can play. It is also a great way to socialize with other players, either online or in person. You can chat with your friends, make new ones, and join bingo communities that share your interests. Plus, you can win real money or prizes by playing bingo 75 online or in apps. Who doesn't love a good bingo jackpot?</p>
|
21 |
-
<p>download bingo 75 game for free<br />
|
22 |
-
download bingo 75 app for android<br />
|
23 |
-
download bingo 75 online with friends<br />
|
24 |
-
download bingo 75 cards printable<br />
|
25 |
-
download bingo 75 software for pc<br />
|
26 |
-
download bingo 75 generator tool<br />
|
27 |
-
download bingo 75 numbers list<br />
|
28 |
-
download bingo 75 caller voice<br />
|
29 |
-
download bingo 75 rules and tips<br />
|
30 |
-
download bingo 75 bonus codes<br />
|
31 |
-
download bingo 75 classic version<br />
|
32 |
-
download bingo 75 deluxe edition<br />
|
33 |
-
download bingo 75 fun and easy<br />
|
34 |
-
download bingo 75 no deposit required<br />
|
35 |
-
download bingo 75 win real money<br />
|
36 |
-
download bingo 75 best sites<br />
|
37 |
-
download bingo 75 reviews and ratings<br />
|
38 |
-
download bingo 75 how to play<br />
|
39 |
-
download bingo 75 strategies and tricks<br />
|
40 |
-
download bingo 75 history and facts<br />
|
41 |
-
download bingo 75 variations and themes<br />
|
42 |
-
download bingo 75 custom cards maker<br />
|
43 |
-
download bingo 75 music and sounds<br />
|
44 |
-
download bingo 75 chat and social features<br />
|
45 |
-
download bingo 75 tournaments and prizes<br />
|
46 |
-
download bingo 75 offline mode<br />
|
47 |
-
download bingo 75 latest updates<br />
|
48 |
-
download bingo 75 for windows 10<br />
|
49 |
-
download bingo 75 for mac os<br />
|
50 |
-
download bingo 75 for ios devices<br />
|
51 |
-
download bingo 75 for amazon fire tablet<br />
|
52 |
-
download bingo 75 for chromebook<br />
|
53 |
-
download bingo 75 for linux<br />
|
54 |
-
download bingo 75 for smart tv<br />
|
55 |
-
download bingo 75 for xbox one<br />
|
56 |
-
download bingo 75 for ps4<br />
|
57 |
-
download bingo 75 for nintendo switch<br />
|
58 |
-
download bingo 75 for vr headset<br />
|
59 |
-
download bingo 75 for kids and family<br />
|
60 |
-
download bingo 75 for seniors and adults<br />
|
61 |
-
download bingo 75 for beginners and experts<br />
|
62 |
-
download bingo 75 for education and learning<br />
|
63 |
-
download bingo 75 for charity and fundraising<br />
|
64 |
-
download bingo 75 for parties and events<br />
|
65 |
-
download bingo 75 for holidays and occasions<br />
|
66 |
-
download bingo 75 for travel and adventure<br />
|
67 |
-
download bingo 75 for health and wellness<br />
|
68 |
-
download bingo 75 for hobbies and interests<br />
|
69 |
-
download bingo 75 for sports and fitness</p>
|
70 |
-
<h2>The Rules of Bingo 75</h2>
|
71 |
-
<p>The rules of Bingo 75 are simple and straightforward. Here are the basic steps to follow:</p>
|
72 |
-
<ol>
|
73 |
-
<li>Get one or more bingo cards. You can buy them online or in apps, or print them yourself. You can play with as many cards as you want, as long as you can keep track of them.</li>
|
74 |
-
<li>Listen to the caller or watch the screen <p>Listen to the caller or watch the screen as the numbers are drawn. The caller or the screen will announce the pattern you need to cover for each game. For example, you might hear or see "Bingo 75, four corners".</li>
|
75 |
-
<li>Mark off the numbers on your card that match the ones that are called out. You can use a dauber, a pen, a mouse, or a finger, depending on how you are playing. You can also use the auto-daub feature in some apps or websites, which will mark the numbers for you automatically.</li>
|
76 |
-
<li>Check your card for the pattern. If you have covered all the squares that form the pattern, you have bingo! Shout "Bingo!" or click the bingo button to claim your win. Make sure you do this before the next number is called, or you might miss your chance.</li>
|
77 |
-
<li>Verify your win. The caller or the app will check your card to make sure you have marked the correct numbers and pattern. If you have, you will receive your prize. If not, the game will continue until someone else wins.</li>
|
78 |
-
</ol>
|
79 |
-
<h2>The Variations of Bingo 75</h2>
|
80 |
-
<p>Bingo 75 is a versatile game that can be played in different ways. Here are some of the common variations of Bingo 75 that you might encounter:</p>
|
81 |
-
<ul>
|
82 |
-
<li>Speed bingo: This is a fast-paced version of bingo 75, where the numbers are called out very quickly and you have to mark them as fast as you can. The game usually ends in a few minutes, so it is ideal for those who want a quick thrill.</li>
|
83 |
-
<li>Progressive bingo: This is a version of bingo 75 where the jackpot increases every time no one wins. The jackpot can grow to huge amounts, making it very attractive for players. However, the catch is that the pattern becomes harder to cover as the game progresses, so it is also more challenging.</li>
|
84 |
-
<li>Bonanza bingo: This is a version of bingo 75 where 43 numbers are pre-drawn before the game starts. The players then buy their cards and mark off the numbers that match the ones that are pre-drawn. The game then begins with the remaining 32 numbers being called out. The first player to cover all 25 squares on their card wins.</li>
|
85 |
-
</ul>
|
86 |
-
<h2>The Benefits of Playing Bingo 75 Online</h2>
|
87 |
-
<p>Playing bingo 75 online or in apps has many benefits over playing it in person. Here are some of them:</p>
|
88 |
-
<ul>
|
89 |
-
<li>Convenience: You can play bingo 75 anytime and anywhere, as long as you have an internet connection and a device. You don't have to travel to a bingo hall, wait in line, or deal with crowds. You can play in your pajamas, on your couch, or even on your bed.</li>
|
90 |
-
<li>Variety: You can choose from a wide range of bingo 75 apps and websites, each offering different features, themes, and bonuses. You can also play different variations of bingo 75, as well as other types of bingo games, such as bingo 90 or bingo 30.</li>
|
91 |
-
<li>Socialization: You can chat with other players online or in apps, and make new friends who share your passion for bingo. You can also join bingo clubs or communities, where you can exchange tips, stories, and jokes. You can also play with your existing friends and family members, and invite them to join you in a game.</li>
|
92 |
-
<li>Affordability: You can play bingo 75 online or in apps for free or for low stakes. You don't have to spend a lot of money to enjoy this game. You can also take advantage of free bonuses, promotions, and rewards that many apps and websites offer.</li>
|
93 |
-
<li>Fun: Playing bingo 75 online or in apps is simply fun. You can enjoy the thrill of marking off numbers, covering patterns, and winning prizes. You can also enjoy the colorful graphics, animations, and sounds that make the game more lively and exciting.</li>
|
94 |
-
</ul>
|
95 |
-
<h2>How to Download Bingo 75 Apps and Websites</h2>
|
96 |
-
<p>If you want to play bingo 75 online or in apps, you need to download them first. Here are some tips on how to do that:</p>
|
97 |
-
<h3>The Best Bingo 75 Apps for Android and iOS Devices</h3>
|
98 |
-
<p>If you have an Android or iOS device, such as a smartphone or a tablet, you can download some of the best bingo 75 apps from the Google Play Store or the App Store. Some of these apps are:</p>
|
99 |
-
<ul>
|
100 |
-
<li>Bingo Blitz: This is one of the most popular and highly rated bingo apps in the world. It offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat <p>with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can download Bingo Blitz from <a href="">here</a>.</li>
|
101 |
-
<li>Bingo Bash: This is another popular and highly rated bingo app that offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can download Bingo Bash from <a href="">here</a>.</li>
|
102 |
-
<li>Bingo Pop: This is a fun and colorful bingo app that offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can download Bingo Pop from <a href="">here</a>.</li>
|
103 |
-
</ul>
|
104 |
-
<h3>The Best Bingo 75 Websites for Desktop and Laptop Users</h3>
|
105 |
-
<p>If you have a desktop or a laptop computer, you can play bingo 75 on some of the best bingo websites that are compatible with your browser. Some of these websites are:</p>
|
106 |
-
<ul>
|
107 |
-
<li>Bingo.com: This is one of the most popular and trusted bingo websites in the world. It offers various types of bingo games, including bingo 75, as well as slots and other casino games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can access Bingo.com from <a href="">here</a>.</li>
|
108 |
-
<li>Bingo Hall: This is another popular and trusted bingo website that offers various types of bingo games, including bingo 75, as well as slots and other casino games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can access Bingo Hall from <a href="">here</a>.</li>
|
109 |
-
<li>Bingo Zone: This is a fun and free bingo website that offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can access Bingo Zone from <a href="">here</a>.</li>
|
110 |
-
</ul>
|
111 |
-
<h3>How to Choose the Right Bingo 75 App or Website for You</h3>
|
112 |
-
<p>With so many options available, how do you choose the right bingo 75 app or website for you? Here are some factors to consider:</p>
|
113 |
-
<ul>
|
114 |
-
<li>Reputation: You want to play on an app or a website that is reliable, secure, and fair. You can check the reviews and ratings of other users, as well as the licenses and certifications of the app or the website.</li>
|
115 |
-
<li>Features: You want to play on an app or a website that offers the features that suit your preferences and needs. For example, you might want to play on an app or a website that has a variety of bingo games, slots, and mini-games; that has a user-friendly interface and design; that has a chat function and a social community; that has generous bonuses and promotions; that has customer support and help options; etc.</li>
|
116 |
-
<li>Compatibility: You want to play on an app or a website that is compatible with your device and browser. For example, you might want to play on an app or a website that works well on your Android or iOS device; that does not require downloading or installing anything; that does not have any glitches or bugs; etc.</li>
|
117 |
-
</ul>
|
118 |
-
<h2>How to Play and Win at Bingo 75</h2>
|
119 |
-
<p>Now that you know how to download the best bingo 75 apps and websites, you might be wondering how to play and win at this game. Here are some tips, tricks, strategies, and mistakes to avoid:</p>
|
120 |
-
<h3>Tips and Tricks for Playing Bingo 75</h3>
|
121 |
-
<p>Here are some tips and tricks for playing bingo 75:</p>
|
122 |
-
<ul>
|
123 |
-
<li>Play with multiple cards: The more cards you play with, the higher your chances of covering the pattern faster than others. However, make sure you can keep track of all your cards without getting confused or overwhelmed.</li>
|
124 |
-
<li>Play at off-peak times: The fewer players there are in a game, the higher your chances of winning. Try to play at times <p>Try to play at times when there are fewer players online, such as early mornings, late nights, or weekdays. You can also look for games that have a low number of participants or a high number of cards available.</li>
|
125 |
-
<li>Look for patterns that are easy to cover: Some patterns are easier to cover than others, depending on the distribution of numbers on your card. For example, a horizontal line might be easier to cover than a diagonal line, or a letter T might be easier to cover than a letter Z. Try to look for patterns that have more numbers in common or that are closer together.</li>
|
126 |
-
<li>Use the chat function: The chat function is not only a way to socialize with other players, but also a way to get useful information and tips. You can ask other players for advice, learn from their experiences, or even get hints on the next numbers to be called. You can also use the chat function to congratulate the winners, thank the caller, or express your emotions.</li>
|
127 |
-
</ul>
|
128 |
-
<h3>Strategies and Techniques for Winning at Bingo 75</h3>
|
129 |
-
<p>Here are some strategies and techniques for winning at bingo 75:</p>
|
130 |
-
<ul>
|
131 |
-
<li>Play with a budget: Before you start playing, set a limit on how much money you are willing to spend and stick to it. This will help you avoid overspending, losing more than you can afford, or getting addicted to the game. You can also set a limit on how much time you are willing to spend and take breaks regularly.</li>
|
132 |
-
<li>Play with a strategy: Before you start playing, decide on a strategy that suits your goals and preferences. For example, you might want to play with fewer cards but higher stakes, or with more cards but lower stakes. You might also want to play with different patterns or variations of bingo 75. You can also adjust your strategy depending on the situation and the outcome of the game.</li>
|
133 |
-
<li>Play with luck: Bingo 75 is a game of chance, so luck plays a big role in winning. You can try to increase your luck by doing things that make you feel lucky, such as wearing lucky clothes, using lucky charms, or saying lucky phrases. You can also try to avoid things that make you feel unlucky, such as crossing your fingers, walking under ladders, or breaking mirrors. Of course, these are just superstitions and there is no guarantee that they will work, but they might make you feel more confident and optimistic.</li>
|
134 |
-
</ul>
|
135 |
-
<h3>Common Mistakes and Pitfalls to Avoid When Playing Bingo 75</h3>
|
136 |
-
<p>Here are some common mistakes and pitfalls to avoid when playing bingo 75:</p>
|
137 |
-
<ul>
|
138 |
-
<li>Playing without checking the rules: Different apps and websites might have different rules for bingo 75, such as the number of cards allowed, the cost of each card, the payout structure, the pattern required, etc. Make sure you read and understand the rules before you start playing, or you might end up wasting your money or missing your chance to win.</li>
|
139 |
-
<li>Playing without paying attention: Bingo 75 is a fast-paced game that requires your full attention and concentration. If you are distracted by other things, such as your phone, your TV, or your surroundings, you might miss some numbers or patterns that could have won you the game. Make sure you focus on the game and mark off your numbers as soon as they are called.</li>
|
140 |
-
<li>Playing without having fun: Bingo 75 is a game that is meant to be fun and enjoyable. If you are playing only for money or prizes, or if you are playing too seriously or competitively, you might lose sight of the fun aspect of the game. Remember that bingo 75 is a game of chance, not skill, and that winning is not guaranteed. Enjoy the game for what it is, and don't let it affect your mood or your relationships.</li>
|
141 |
-
</ul>
|
142 |
-
<h2>Conclusion</h2>
|
143 |
-
<p>Bingo 75 is a fun and exciting game that anyone can play and win. It is also a great way to socialize with other players online or in apps. All you need to do is download the best bingo 75 apps or websites, follow the rules of the game, and use some tips, tricks, <p>tips, tricks, strategies, and mistakes to avoid. You will have a blast playing and winning at this game. So what are you waiting for? Download bingo 75 today and join the fun!</p>
|
144 |
-
<h3>Summary of the Main Points</h3>
|
145 |
-
<p>Here are the main points of this article:</p>
|
146 |
-
<ul>
|
147 |
-
<li>Bingo 75 is a bingo game that uses a card with a grid of 25 squares, each containing a number from 1 to 75, except for the center square, which is a free space.</li>
|
148 |
-
<li>The goal of bingo 75 is to cover a specific pattern of squares on your card, based on the numbers that are called out by a caller or a random number generator.</li>
|
149 |
-
<li>Bingo 75 is a fun and easy game that anyone can play. It is also a great way to socialize with other players, either online or in person.</li>
|
150 |
-
<li>You can download the best bingo 75 apps and websites from the Google Play Store or the App Store for Android and iOS devices, or from the internet for desktop and laptop users.</li>
|
151 |
-
<li>You can play and win at bingo 75 by following the rules of the game, and using some tips, tricks, strategies, and mistakes to avoid.</li>
|
152 |
-
</ul>
|
153 |
-
<h3>Call to Action</h3>
|
154 |
-
<p>If you enjoyed this article, please share it with your friends and family who might also be interested in playing bingo 75. You can also leave us a comment below and let us know what you think about this game. We would love to hear from you!</p>
|
155 |
-
<h2>FAQs</h2>
|
156 |
-
<p>Here are some frequently asked questions about bingo 75:</p>
|
157 |
-
<ol>
|
158 |
-
<li>How many numbers are there in bingo 75?</li>
|
159 |
-
<p>There are 75 numbers in bingo 75, ranging from 1 to 75. Each column on the card corresponds to a range of numbers, as follows: B (1-15), I (16-30), N (31-45), G (46-60), O (61-75).</p>
|
160 |
-
<li>How many patterns are there in bingo 75?</li>
|
161 |
-
<p>There are many patterns in bingo 75, depending on the game and the app or website you are playing on. Some of the common patterns are horizontal, vertical, or diagonal lines; four corners; X; blackout; letters; numbers; symbols; etc.</p>
|
162 |
-
<li>How do I win at bingo 75?</li>
|
163 |
-
<p>You win at bingo 75 by covering the pattern that is required for each game on your card before anyone else does. You have to mark off the numbers on your card that match the ones that are called out by the caller or the random number generator. You have to shout "Bingo!" or click the bingo button to claim your win before the next number is called.</p>
|
164 |
-
<li>How much does it cost to play bingo 75?</li>
|
165 |
-
<p>The cost of playing bingo 75 varies depending on the app or website you are playing on, and the number of cards you are buying. Some apps or websites offer free games or bonuses, while others charge a fee per card or per game. The fee can range from a few cents to a few dollars.</p>
|
166 |
-
<li>What are the benefits of playing bingo 75 online?</li>
|
167 |
-
<p>The benefits of playing bingo 75 online are convenience, variety, socialization, affordability, and fun. You can play anytime and anywhere, choose from different apps and websites, chat with other players, play for free or low stakes, and enjoy the thrill of the game.</p>
|
168 |
-
</ol></p> 401be4b1e0<br />
|
169 |
-
<br />
|
170 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Nulls Brawl 38.111 APK with Hank and Maisie Skins.md
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Nulls Brawl 38.111 APK Indir: How to Download and Play the Latest Version of Brawl Stars on a Private Server</h1>
|
3 |
-
<p>Brawl Stars is one of the most popular online action games at present, with millions of players around the world. But what if you want to mod it or play with cheats without risking a ban? That's where Nulls Brawl comes in. In this article, we will tell you everything you need to know about Nulls Brawl 38.111 APK indir, the latest version of the private server for Brawl Stars. You will learn what Nulls Brawl is, what features it offers, how to download and install it, why you should play it, and some tips and tricks for having fun with it.</p>
|
4 |
-
<h2>What is Nulls Brawl?</h2>
|
5 |
-
<p>Nulls Brawl is a private server for Brawl Stars that allows you to play the game with unlimited resources, unlocked brawlers, skins, pins, gears, and more. It is not affiliated with Supercell, the official developer of Brawl Stars, and it does not affect your progress or account on the original game. You can play Nulls Brawl on your Android device or on your PC using an emulator.</p>
|
6 |
-
<h2>nulls brawl 38.111 apk indir</h2><br /><p><b><b>Download Zip</b> ✪✪✪ <a href="https://jinyurl.com/2uNK0z">https://jinyurl.com/2uNK0z</a></b></p><br /><br />
|
7 |
-
<h3>Features of Nulls Brawl 38.111</h3>
|
8 |
-
<p>The latest version of Nulls Brawl, 38.111, was released on June 21, 2023, and it includes many new features and improvements. Here are some of them:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Two new brawlers: Hank and Maisie, who are based on Woody and Buzz Lightyear from Toy Story.</li>
|
11 |
-
<li>A new catalog of skins and pins for all brawlers.</li>
|
12 |
-
<li>Two new gears: Rocket Laces and Fidget Spinner, which give you extra speed and damage.</li>
|
13 |
-
<li>A new game mode: Knockout, where you have to eliminate all the enemies in three rounds.</li>
|
14 |
-
<li>A new map maker: where you can create your own maps and share them with other players.</li>
|
15 |
-
<li>A new chat system: where you can chat with other players in different languages.</li>
|
16 |
-
<li>A new clan system: where you can join or create a clan and participate in clan wars.</li>
|
17 |
-
<li>A new leaderboard system: where you can see your rank and stats among other players.</li>
|
18 |
-
</ul>
|
19 |
-
<h3>How to download and install Nulls Brawl 38.111 APK</h3>
|
20 |
-
<p>If you want to play Nulls Brawl 38.111 on your Android device, you need to follow these steps:</p>
|
21 |
-
<ol>
|
22 |
-
<li>Go to [Null's website](^1^) and download the APK file for Nulls Brawl 38.111.</li>
|
23 |
-
<li>Enable the installation of apps from unknown sources on your device settings.</li>
|
24 |
-
<li>Locate the downloaded APK file and tap on it to install it.</li>
|
25 |
-
<li>Launch the app and enjoy playing Nulls Brawl.</li>
|
26 |
-
</ol>
|
27 |
-
<p>If you want to play Nulls Brawl 38.111 on your PC, you need to follow these steps:</p>
|
28 |
-
<ol>
|
29 |
-
<li>Download and install an Android emulator on your PC, such as [LDPlayer](^3^).</li>
|
30 |
-
<li>Open the emulator and search for Nulls Brawl on the Google Play Store or download the APK file from [Null's website](^1^).</li>
|
31 |
-
<li>Install and launch the app on the emulator and enjoy playing Nulls Brawl.</li>
|
32 |
-
</ol>
|
33 |
-
<h2>Why play Nulls Brawl?</h2>
|
34 |
-
<p>Nulls Brawl is a great way to enjoy Brawl Stars with more freedom and fun. Here are some reasons why you should play it:</p>
|
35 |
-
<h3>Pros and cons of Nulls Brawl</h3 <p>Pros of Nulls Brawl:</p>
|
36 |
-
<p>nulls brawl 38.111 apk download free<br />
|
37 |
-
nulls brawl 38.111 mod apk unlimited gems<br />
|
38 |
-
nulls brawl 38.111 latest version apk<br />
|
39 |
-
nulls brawl 38.111 android oyun club<br />
|
40 |
-
nulls brawl 38.111 update apk<br />
|
41 |
-
nulls brawl 38.111 hack apk download<br />
|
42 |
-
nulls brawl 38.111 private server apk<br />
|
43 |
-
nulls brawl 38.111 apk indir cepde<br />
|
44 |
-
nulls brawl 38.111 apk indir mediafıre<br />
|
45 |
-
nulls brawl 38.111 apk indir son sürüm<br />
|
46 |
-
nulls brawl 38.111 apk indir güncel<br />
|
47 |
-
nulls brawl 38.111 apk indir hileli<br />
|
48 |
-
nulls brawl 38.111 apk indir tamindir<br />
|
49 |
-
nulls brawl 38.111 apk indir android<br />
|
50 |
-
nulls brawl 38.111 apk indir pc<br />
|
51 |
-
nulls brawl 38.111 apk indir ios<br />
|
52 |
-
nulls brawl 38.111 apk indir ücretsiz<br />
|
53 |
-
nulls brawl 38.111 apk indir kurulumu<br />
|
54 |
-
nulls brawl 38.111 apk indir yandex disk<br />
|
55 |
-
nulls brawl 38.111 apk indir mega link<br />
|
56 |
-
nulls brawl 38.111 apk indir google drive<br />
|
57 |
-
nulls brawl 38.111 apk indir türkçe<br />
|
58 |
-
nulls brawl 38.111 apk indir nasıl yapılır<br />
|
59 |
-
nulls brawl 38.111 apk indir online oyna<br />
|
60 |
-
nulls brawl 38.111 apk indir yeni karakterler<br />
|
61 |
-
nulls brawl 38.111 apk indir buzz lightyear<br />
|
62 |
-
nulls brawl 38.111 apk indir amber skin<br />
|
63 |
-
nulls brawl 38.111 apk indir colette skin<br />
|
64 |
-
nulls brawl 38.111 apk indir edgar skin<br />
|
65 |
-
nulls brawl 38.111 apk indir griff skin<br />
|
66 |
-
nulls brawl 38.111 apk indir leon skin<br />
|
67 |
-
nulls brawl 38.111 apk indir luo skin<br />
|
68 |
-
nulls brawl 38.111 apk indir meg skin<br />
|
69 |
-
nulls brawl 38.111 apk indir ruffs skin<br />
|
70 |
-
nulls brawl 38.111 apk indir sandy skin<br />
|
71 |
-
nulls brawl 38.111 apk indir surge skin<br />
|
72 |
-
nulls brawl 38.111 apk indir sürpriz kutusu hilesi<br />
|
73 |
-
nulls brawl 38.111 apk indir alt��n hilesi<br />
|
74 |
-
nulls brawl 38.111 apk indir elmas hilesi<br />
|
75 |
-
nulls brawl 38.111 apk indir enerji hilesi<br />
|
76 |
-
nulls brawl 38.111 apk indir kupa hilesi<br />
|
77 |
-
nulls brawl 38.111 apk indir karakter açma hilesi<br />
|
78 |
-
nulls brawl 38.111 apk indir kostüm hilesi<br />
|
79 |
-
nulls brawl 38.111 apk indir mod menu hilesi<br />
|
80 |
-
nulls brawl 38.111 apk indir para hilesi</p>
|
81 |
-
<ul>
|
82 |
-
<li>You can play with unlimited gems, coins, tickets, and star points, which you can use to buy anything you want in the game.</li>
|
83 |
-
<li>You can unlock and try all the brawlers, skins, pins, and gears without spending any money or waiting for them to be available.</li>
|
84 |
-
<li>You can access all the game modes, maps, and events without any restrictions or timers.</li>
|
85 |
-
<li>You can mod the game and customize it to your liking, such as changing the graphics, sounds, or gameplay.</li>
|
86 |
-
<li>You can play with other players who are also using Nulls Brawl and have fun together.</li>
|
87 |
-
</ul>
|
88 |
-
<p>Cons of Nulls Brawl:</p>
|
89 |
-
<ul>
|
90 |
-
<li>You cannot play with players who are using the official version of Brawl Stars, as they are on different servers.</li>
|
91 |
-
<li>You cannot sync your progress or account with the original game, as they are separate and independent.</li>
|
92 |
-
<li>You may encounter some bugs, glitches, or errors while playing Nulls Brawl, as it is not an official product and may not be updated regularly.</li>
|
93 |
-
<li>You may risk getting banned from the official game if you use the same device or account for both versions.</li>
|
94 |
-
<li>You may violate the terms of service of Supercell by using Nulls Brawl, as it is an unauthorized modification of their game.</li>
|
95 |
-
</ul>
|
96 |
-
<h3>Tips and tricks for playing Nulls Brawl</h3>
|
97 |
-
<p>If you want to make the most out of Nulls Brawl, here are some tips and tricks that you can follow:</p>
|
98 |
-
<ul>
|
99 |
-
<li>Experiment with different brawlers, skins, pins, and gears and find out which ones suit your playstyle and preferences.</li>
|
100 |
-
<li>Practice your skills and strategies in different game modes and maps and learn from your mistakes and successes.</li>
|
101 |
-
<li>Join or create a clan and chat with other players who are also using Nulls Brawl. You can share tips, ideas, feedback, or just have fun together.</li>
|
102 |
-
<li>Participate in clan wars and compete with other clans for glory and rewards. You can also challenge other players to friendly battles or duels.</li>
|
103 |
-
<li>Create your own maps using the map maker and share them with other players. You can also play on the maps created by others and rate them.</li>
|
104 |
-
</ul>
|
105 |
-
<h2>Conclusion</h2>
|
106 |
-
<p>Nulls Brawl 38.111 APK indir is a private server for Brawl Stars that lets you play the game with unlimited resources, unlocked brawlers, skins, pins, gears, and more. It is a great way to enjoy Brawl Stars with more freedom and fun. However, it also has some drawbacks, such as being incompatible with the official game, having some bugs or errors, and violating the terms of service of Supercell. Therefore, you should use it at your own risk and discretion. If you want to try Nulls Brawl 38.111 APK indir, you can download it from [Null's website] and install it on your Android device or PC using an emulator.</p>
|
107 |
-
<h3>Summary of the main points</h3>
|
108 |
-
<p>In this article, we have covered the following points:</p>
|
109 |
-
<ul>
|
110 |
-
<li>What is Nulls Brawl?</li>
|
111 |
-
<li>Features of Nulls Brawl 38.111</li>
|
112 |
-
<li>How to download and install Nulls Brawl 38.111 APK</li>
|
113 |
-
<li>Why play Nulls Brawl?</li>
|
114 |
-
<li>Pros and cons of Nulls Brawl</li>
|
115 |
-
<li>Tips and tricks for playing Nulls Brawl</li>
|
116 |
-
</ul>
|
117 |
-
<h3>FAQs</h3>
|
118 |
-
<p>Here are some frequently asked questions about Nulls Brawl 38.111 APK indir:</p>
|
119 |
-
<ol>
|
120 |
-
<li><b>Is Nulls Brawl safe to use?</b><br/>
|
121 |
-
Nulls Brawl is not an official product of Supercell and it may contain some bugs or errors that could harm your device or data. Therefore, you should use it at your own risk and discretion. You should also backup your data before installing it and scan it for viruses or malware.</li>
|
122 |
-
<li><b>Is Nulls Brawl free to use?</b><br/>
|
123 |
-
Yes, Nulls Brawl is free to use and you do not need to pay anything to download or play it. However, you may need to watch some ads or complete some surveys to access some features or content.</li>
|
124 |
-
<li><b>Can I play Nulls Brawl offline?</b><br/>
|
125 |
-
No, Nulls Brawl requires an internet connection to work properly. You need to connect to the private server to play the game and access its features.</li>
|
126 |
-
<li><b>Can I update Nulls Brawl?</b><br/>
|
127 |
-
Yes Yes, you can update Nulls Brawl whenever a new version is released by the developers. You can check for updates on [Null's website] or on their [Telegram channel]. You can also enable the auto-update feature on the app settings. However, you may need to uninstall and reinstall the app to update it.</li>
|
128 |
-
<li><b>Can I play Nulls Brawl with my friends?</b><br/>
|
129 |
-
Yes, you can play Nulls Brawl with your friends who are also using the same version of the private server. You can invite them to join your clan or team and chat with them in the game. You can also play against them in friendly battles or duels.</li>
|
130 |
-
</ol></p> 197e85843d<br />
|
131 |
-
<br />
|
132 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2023Liu2023/bingo/src/components/ui/tooltip.tsx
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import * as React from 'react'
|
4 |
-
import * as TooltipPrimitive from '@radix-ui/react-tooltip'
|
5 |
-
|
6 |
-
import { cn } from '@/lib/utils'
|
7 |
-
|
8 |
-
const TooltipProvider = TooltipPrimitive.Provider
|
9 |
-
|
10 |
-
const Tooltip = TooltipPrimitive.Root
|
11 |
-
|
12 |
-
const TooltipTrigger = TooltipPrimitive.Trigger
|
13 |
-
|
14 |
-
const TooltipContent = React.forwardRef<
|
15 |
-
React.ElementRef<typeof TooltipPrimitive.Content>,
|
16 |
-
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
|
17 |
-
>(({ className, sideOffset = 4, ...props }, ref) => (
|
18 |
-
<TooltipPrimitive.Content
|
19 |
-
ref={ref}
|
20 |
-
sideOffset={sideOffset}
|
21 |
-
className={cn(
|
22 |
-
'z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-xs font-medium text-popover-foreground shadow-md animate-in fade-in-50 data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1',
|
23 |
-
className
|
24 |
-
)}
|
25 |
-
{...props}
|
26 |
-
/>
|
27 |
-
))
|
28 |
-
TooltipContent.displayName = TooltipPrimitive.Content.displayName
|
29 |
-
|
30 |
-
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/audioEffects.py
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
from pedalboard import Pedalboard, Compressor, Reverb, NoiseGate
|
2 |
-
from pedalboard.io import AudioFile
|
3 |
-
import sys
|
4 |
-
import os
|
5 |
-
now_dir = os.getcwd()
|
6 |
-
sys.path.append(now_dir)
|
7 |
-
from i18n import I18nAuto
|
8 |
-
i18n = I18nAuto()
|
9 |
-
from pydub import AudioSegment
|
10 |
-
import numpy as np
|
11 |
-
import soundfile as sf
|
12 |
-
from pydub.playback import play
|
13 |
-
|
14 |
-
def process_audio(input_path, output_path, reverb_enabled, compressor_enabled, noise_gate_enabled, ):
|
15 |
-
print(reverb_enabled)
|
16 |
-
print(compressor_enabled)
|
17 |
-
print(noise_gate_enabled)
|
18 |
-
effects = []
|
19 |
-
if reverb_enabled:
|
20 |
-
effects.append(Reverb(room_size=0.01))
|
21 |
-
if compressor_enabled:
|
22 |
-
effects.append(Compressor(threshold_db=-10, ratio=25))
|
23 |
-
if noise_gate_enabled:
|
24 |
-
effects.append(NoiseGate(threshold_db=-16, ratio=1.5, release_ms=250))
|
25 |
-
|
26 |
-
board = Pedalboard(effects)
|
27 |
-
|
28 |
-
with AudioFile(input_path) as f:
|
29 |
-
with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o:
|
30 |
-
while f.tell() < f.frames:
|
31 |
-
chunk = f.read(f.samplerate)
|
32 |
-
effected = board(chunk, f.samplerate, reset=False)
|
33 |
-
o.write(effected)
|
34 |
-
|
35 |
-
result = i18n("Processed audio saved at: ") + output_path
|
36 |
-
print(result)
|
37 |
-
return output_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/Dockerfile
DELETED
@@ -1,35 +0,0 @@
|
|
1 |
-
# Grab a fresh copy of the Python image
|
2 |
-
FROM python:3.10-slim
|
3 |
-
|
4 |
-
# Install build and runtime dependencies
|
5 |
-
RUN apt-get update && \
|
6 |
-
apt-get install -y \
|
7 |
-
libopenblas-dev \
|
8 |
-
ninja-build \
|
9 |
-
build-essential \
|
10 |
-
pkg-config \
|
11 |
-
curl
|
12 |
-
|
13 |
-
RUN pip install -U pip setuptools wheel && \
|
14 |
-
CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 pip install --verbose llama-cpp-python[server]
|
15 |
-
|
16 |
-
# Download model
|
17 |
-
RUN mkdir model && \
|
18 |
-
curl -L https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardlm-13b-v1.2.Q4_0.gguf -o model/gguf-model.bin
|
19 |
-
|
20 |
-
COPY ./start_server.sh ./
|
21 |
-
COPY ./main.py ./
|
22 |
-
COPY ./index.html ./
|
23 |
-
|
24 |
-
# Make the server start script executable
|
25 |
-
RUN chmod +x ./start_server.sh
|
26 |
-
|
27 |
-
# Set environment variable for the host
|
28 |
-
ENV HOST=0.0.0.0
|
29 |
-
ENV PORT=7860
|
30 |
-
|
31 |
-
# Expose a port for the server
|
32 |
-
EXPOSE ${PORT}
|
33 |
-
|
34 |
-
# Run the server start script
|
35 |
-
CMD ["/bin/sh", "./start_server.sh"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI4PD/hexviz/hexviz/🧬Attention_Visualization.py
DELETED
@@ -1,306 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import pandas as pd
|
5 |
-
import py3Dmol
|
6 |
-
import stmol
|
7 |
-
import streamlit as st
|
8 |
-
from stmol import showmol
|
9 |
-
|
10 |
-
from hexviz.attention import clean_and_validate_sequence, get_attention_pairs, get_chains
|
11 |
-
from hexviz.config import URL
|
12 |
-
from hexviz.ec_number import ECNumber
|
13 |
-
from hexviz.models import Model, ModelType
|
14 |
-
from hexviz.view import menu_items, select_model, select_pdb, select_protein
|
15 |
-
|
16 |
-
st.set_page_config(layout="centered", menu_items=menu_items)
|
17 |
-
st.title("Attention Visualization on proteins")
|
18 |
-
|
19 |
-
|
20 |
-
for k, v in st.session_state.items():
|
21 |
-
st.session_state[k] = v
|
22 |
-
|
23 |
-
models = [
|
24 |
-
Model(name=ModelType.TAPE_BERT, layers=12, heads=12),
|
25 |
-
Model(name=ModelType.ZymCTRL, layers=36, heads=16),
|
26 |
-
Model(name=ModelType.PROT_BERT, layers=30, heads=16),
|
27 |
-
Model(name=ModelType.PROT_T5, layers=24, heads=32),
|
28 |
-
]
|
29 |
-
|
30 |
-
with st.expander("Input a PDB id, upload a PDB file or input a sequence", expanded=True):
|
31 |
-
pdb_id = select_pdb()
|
32 |
-
uploaded_file = st.file_uploader("2.Upload PDB", type=["pdb"])
|
33 |
-
input_sequence = st.text_area("3.Input sequence", "", key="input_sequence", max_chars=400)
|
34 |
-
sequence, error = clean_and_validate_sequence(input_sequence)
|
35 |
-
if error:
|
36 |
-
st.error(error)
|
37 |
-
pdb_str, structure, source = select_protein(pdb_id, uploaded_file, sequence)
|
38 |
-
st.write(f"Visualizing: {source}")
|
39 |
-
|
40 |
-
st.sidebar.markdown(
|
41 |
-
"""
|
42 |
-
Configure visualization
|
43 |
-
---
|
44 |
-
"""
|
45 |
-
)
|
46 |
-
chains = get_chains(structure)
|
47 |
-
|
48 |
-
if "selected_chains" not in st.session_state:
|
49 |
-
st.session_state.selected_chains = chains
|
50 |
-
selected_chains = st.sidebar.multiselect(
|
51 |
-
label="Select Chain(s)", options=chains, key="selected_chains"
|
52 |
-
)
|
53 |
-
|
54 |
-
if "show_ligands" not in st.session_state:
|
55 |
-
st.session_state.show_ligands = True
|
56 |
-
show_ligands = st.sidebar.checkbox("Show ligands", key="show_ligands")
|
57 |
-
if "color_protein" not in st.session_state:
|
58 |
-
st.session_state.color_protein = False
|
59 |
-
color_protein = st.sidebar.checkbox("Color protein", key="color_protein")
|
60 |
-
|
61 |
-
|
62 |
-
st.sidebar.markdown(
|
63 |
-
"""
|
64 |
-
Attention parameters
|
65 |
-
---
|
66 |
-
"""
|
67 |
-
)
|
68 |
-
min_attn = st.sidebar.slider("Minimum attention", min_value=0.0, max_value=0.4, value=0.1)
|
69 |
-
n_highest_resis = st.sidebar.number_input(
|
70 |
-
"Num highest attention resis to label", value=2, min_value=1, max_value=100
|
71 |
-
)
|
72 |
-
label_highest = st.sidebar.checkbox("Label highest attention residues", value=True)
|
73 |
-
sidechain_highest = st.sidebar.checkbox("Show sidechains", value=True)
|
74 |
-
|
75 |
-
|
76 |
-
with st.sidebar.expander("Label residues manually"):
|
77 |
-
hl_chain = st.selectbox(label="Chain to label", options=selected_chains, index=0)
|
78 |
-
hl_resi_list = st.multiselect(label="Selected Residues", options=list(range(1, 5000)))
|
79 |
-
|
80 |
-
label_resi = st.checkbox(label="Label Residues", value=True)
|
81 |
-
|
82 |
-
|
83 |
-
left, mid, right = st.columns(3)
|
84 |
-
with left:
|
85 |
-
selected_model = select_model(models)
|
86 |
-
with mid:
|
87 |
-
if "selected_layer" not in st.session_state:
|
88 |
-
st.session_state["selected_layer"] = 5
|
89 |
-
layer_one = (
|
90 |
-
st.selectbox(
|
91 |
-
"Layer",
|
92 |
-
options=[i for i in range(1, selected_model.layers + 1)],
|
93 |
-
key="selected_layer",
|
94 |
-
)
|
95 |
-
or 5
|
96 |
-
)
|
97 |
-
layer = layer_one - 1
|
98 |
-
with right:
|
99 |
-
if "selected_head" not in st.session_state:
|
100 |
-
st.session_state["selected_head"] = 1
|
101 |
-
head_one = st.selectbox(
|
102 |
-
"Head",
|
103 |
-
options=[i for i in range(1, selected_model.heads + 1)],
|
104 |
-
key="selected_head",
|
105 |
-
)
|
106 |
-
head = head_one - 1
|
107 |
-
|
108 |
-
ec_number = ""
|
109 |
-
if selected_model.name == ModelType.ZymCTRL:
|
110 |
-
st.sidebar.markdown(
|
111 |
-
"""
|
112 |
-
ZymCTRL EC number
|
113 |
-
---
|
114 |
-
"""
|
115 |
-
)
|
116 |
-
try:
|
117 |
-
ec_number = structure.header["compound"]["1"]["ec"]
|
118 |
-
except KeyError:
|
119 |
-
pass
|
120 |
-
ec_number = st.sidebar.text_input("Enzyme Comission number (EC)", ec_number)
|
121 |
-
|
122 |
-
# Validate EC number
|
123 |
-
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ec_number):
|
124 |
-
st.sidebar.error(
|
125 |
-
"Please enter a valid Enzyme Commission number in the format of 4 integers separated by periods (e.g., 1.2.3.21)"
|
126 |
-
)
|
127 |
-
|
128 |
-
if ec_number:
|
129 |
-
if selected_chains:
|
130 |
-
shown_chains = [ch for ch in structure.get_chains() if ch.id in selected_chains]
|
131 |
-
else:
|
132 |
-
shown_chains = list(structure.get_chains())
|
133 |
-
|
134 |
-
EC_tags = []
|
135 |
-
colors = ["blue", "green", "orange", "red"]
|
136 |
-
radius = 1
|
137 |
-
EC_numbers = ec_number.split(".")
|
138 |
-
for ch in shown_chains:
|
139 |
-
first_residues = []
|
140 |
-
i = 1
|
141 |
-
while len(first_residues) < 2:
|
142 |
-
try:
|
143 |
-
first_residues.append(ch[i]["CA"].coord.tolist())
|
144 |
-
except KeyError:
|
145 |
-
pass
|
146 |
-
i += 1
|
147 |
-
res_1, res_2 = first_residues
|
148 |
-
|
149 |
-
# Calculate the vector from res_1 to res_2
|
150 |
-
vector = [res_2[i] - res_1[i] for i in range(3)]
|
151 |
-
|
152 |
-
# Reverse the vector
|
153 |
-
reverse_vector = [-v for v in vector]
|
154 |
-
|
155 |
-
# Normalize the reverse vector
|
156 |
-
reverse_vector_normalized = np.array(reverse_vector) / np.linalg.norm(reverse_vector)
|
157 |
-
# Reveres coordinates to have the first EC number be furthers from the start of the
|
158 |
-
# protein like it is in the sequence with the EC serial number next to the sequence
|
159 |
-
# and the EC main class at the the very start, further from the sequence.
|
160 |
-
coordinates = reversed(
|
161 |
-
[
|
162 |
-
[res_1[j] + i * 2 * radius * reverse_vector_normalized[j] for j in range(3)]
|
163 |
-
for i in range(4)
|
164 |
-
]
|
165 |
-
)
|
166 |
-
EC_tag = [
|
167 |
-
ECNumber(number=num, coordinate=coord, color=color, radius=radius)
|
168 |
-
for num, coord, color in zip(EC_numbers, coordinates, colors)
|
169 |
-
]
|
170 |
-
EC_tags.append(EC_tag)
|
171 |
-
|
172 |
-
EC_colored = [f":{color}[{num}]" for num, color in zip(EC_numbers, colors)]
|
173 |
-
st.sidebar.write("Visualized as colored spheres: " + ".".join(EC_colored))
|
174 |
-
|
175 |
-
|
176 |
-
attention_pairs, top_residues = get_attention_pairs(
|
177 |
-
pdb_str=pdb_str,
|
178 |
-
chain_ids=selected_chains,
|
179 |
-
layer=layer,
|
180 |
-
head=head,
|
181 |
-
threshold=min_attn,
|
182 |
-
model_type=selected_model.name,
|
183 |
-
top_n=n_highest_resis,
|
184 |
-
ec_numbers=EC_tags if ec_number else None,
|
185 |
-
)
|
186 |
-
|
187 |
-
sorted_by_attention = sorted(attention_pairs, key=lambda x: x[0], reverse=True)
|
188 |
-
|
189 |
-
|
190 |
-
def get_3dview(pdb):
|
191 |
-
xyzview = py3Dmol.view(height=800, width=800) # TODO you can set the pixel dims here!
|
192 |
-
xyzview.addModel(pdb_str, "pdb")
|
193 |
-
xyzview.setStyle({"cartoon": {"color": "spectrum" if color_protein else "white"}})
|
194 |
-
stmol.add_hover(xyzview, backgroundColor="black", fontColor="white")
|
195 |
-
|
196 |
-
# Show all ligands as stick (heteroatoms)
|
197 |
-
if show_ligands:
|
198 |
-
xyzview.addStyle({"hetflag": True}, {"stick": {"radius": 0.2}})
|
199 |
-
|
200 |
-
# If no chains are selected, show all chains
|
201 |
-
if selected_chains:
|
202 |
-
hidden_chains = [x for x in chains if x not in selected_chains]
|
203 |
-
for chain in hidden_chains:
|
204 |
-
xyzview.setStyle({"chain": chain}, {"cross": {"hidden": "true"}})
|
205 |
-
# Hide ligands for chain too
|
206 |
-
xyzview.addStyle({"chain": chain, "hetflag": True}, {"cross": {"hidden": "true"}})
|
207 |
-
|
208 |
-
if len(selected_chains) == 1:
|
209 |
-
xyzview.zoomTo({"chain": f"{selected_chains[0]}"})
|
210 |
-
else:
|
211 |
-
xyzview.zoomTo()
|
212 |
-
|
213 |
-
for att_weight, first, second in attention_pairs:
|
214 |
-
stmol.add_cylinder(
|
215 |
-
xyzview,
|
216 |
-
start=first,
|
217 |
-
end=second,
|
218 |
-
cylradius=att_weight,
|
219 |
-
cylColor="red",
|
220 |
-
dashed=False,
|
221 |
-
)
|
222 |
-
|
223 |
-
if selected_model.name == ModelType.ZymCTRL and ec_number:
|
224 |
-
for EC_tag in EC_tags:
|
225 |
-
for EC_num in EC_tag:
|
226 |
-
stmol.add_sphere(
|
227 |
-
xyzview,
|
228 |
-
spcenter=EC_num.coordinate,
|
229 |
-
radius=EC_num.radius,
|
230 |
-
spColor=EC_num.color,
|
231 |
-
)
|
232 |
-
|
233 |
-
if label_resi:
|
234 |
-
for hl_resi in hl_resi_list:
|
235 |
-
xyzview.addResLabels(
|
236 |
-
{"chain": hl_chain, "resi": hl_resi},
|
237 |
-
{
|
238 |
-
"backgroundColor": "lightgray",
|
239 |
-
"fontColor": "black",
|
240 |
-
"backgroundOpacity": 0.5,
|
241 |
-
},
|
242 |
-
)
|
243 |
-
|
244 |
-
if label_highest:
|
245 |
-
for _, chain, res in top_residues:
|
246 |
-
one_indexed_res = res + 1
|
247 |
-
xyzview.addResLabels(
|
248 |
-
{"chain": chain, "resi": one_indexed_res},
|
249 |
-
{
|
250 |
-
"backgroundColor": "lightgray",
|
251 |
-
"fontColor": "black",
|
252 |
-
"backgroundOpacity": 0.5,
|
253 |
-
},
|
254 |
-
)
|
255 |
-
if sidechain_highest:
|
256 |
-
xyzview.addStyle({"chain": chain, "resi": res}, {"stick": {"radius": 0.2}})
|
257 |
-
return xyzview
|
258 |
-
|
259 |
-
|
260 |
-
xyzview = get_3dview(pdb_id)
|
261 |
-
showmol(xyzview, height=800, width=800)
|
262 |
-
|
263 |
-
st.markdown(
|
264 |
-
f"""
|
265 |
-
Pick a PDB ID, layer and head to visualize attention from the selected protein language model ({selected_model.name.value}).
|
266 |
-
""",
|
267 |
-
unsafe_allow_html=True,
|
268 |
-
)
|
269 |
-
|
270 |
-
chain_dict = {f"{chain.id}": list(chain.get_residues()) for chain in list(structure.get_chains())}
|
271 |
-
data = []
|
272 |
-
for fraction_of_total_attention, chain, resi in top_residues:
|
273 |
-
try:
|
274 |
-
res = chain_dict[chain][resi]
|
275 |
-
except KeyError:
|
276 |
-
continue
|
277 |
-
pct_of_total_attention = round(fraction_of_total_attention * 100, 3)
|
278 |
-
el = (pct_of_total_attention, f"{res.resname:3}{res.id[1]}({chain})")
|
279 |
-
data.append(el)
|
280 |
-
|
281 |
-
df = pd.DataFrame(data, columns=["% of total attention", "Residue"])
|
282 |
-
df = df.style.format(
|
283 |
-
{"% of total attention": "{:.3f}"} # Set 3 decimal places for "% of total attention"
|
284 |
-
)
|
285 |
-
st.markdown(
|
286 |
-
f"The {n_highest_resis} residues (per chain) with the highest attention to them are labeled in the visualization and listed here:"
|
287 |
-
)
|
288 |
-
st.table(df)
|
289 |
-
|
290 |
-
st.markdown(
|
291 |
-
f"""
|
292 |
-
### Check out the other pages
|
293 |
-
<a href="{URL}Identify_Interesting_Heads" target="_self">🗺️Identify Interesting Heads</a> plots attention heatmapt for many heads and
|
294 |
-
layers from a model. This can help you pick what specific attention heads to look at for your protein.
|
295 |
-
|
296 |
-
<a href="{URL}Birds_Eye_View" target="_self">🦅Birds Eye View</a> visualizes attention on structures in a large grid, see how attention patterns
|
297 |
-
change through layers and varies accross several heads.
|
298 |
-
|
299 |
-
<a href="{URL}Documentation" target="_self">📄Documentation</a> has information on protein
|
300 |
-
language models, attention analysis and hexviz.""",
|
301 |
-
unsafe_allow_html=True,
|
302 |
-
)
|
303 |
-
|
304 |
-
"""
|
305 |
-
The attention visualization is inspired by [provis](https://github.com/salesforce/provis#provis-attention-visualizer).
|
306 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/version.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
__version__ = '0.1.45'
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/image_degradation/bsrgan_light.py
DELETED
@@ -1,650 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
import numpy as np
|
3 |
-
import cv2
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from functools import partial
|
7 |
-
import random
|
8 |
-
from scipy import ndimage
|
9 |
-
import scipy
|
10 |
-
import scipy.stats as ss
|
11 |
-
from scipy.interpolate import interp2d
|
12 |
-
from scipy.linalg import orth
|
13 |
-
import albumentations
|
14 |
-
|
15 |
-
import ldm.modules.image_degradation.utils_image as util
|
16 |
-
|
17 |
-
"""
|
18 |
-
# --------------------------------------------
|
19 |
-
# Super-Resolution
|
20 |
-
# --------------------------------------------
|
21 |
-
#
|
22 |
-
# Kai Zhang ([email protected])
|
23 |
-
# https://github.com/cszn
|
24 |
-
# From 2019/03--2021/08
|
25 |
-
# --------------------------------------------
|
26 |
-
"""
|
27 |
-
|
28 |
-
|
29 |
-
def modcrop_np(img, sf):
|
30 |
-
'''
|
31 |
-
Args:
|
32 |
-
img: numpy image, WxH or WxHxC
|
33 |
-
sf: scale factor
|
34 |
-
Return:
|
35 |
-
cropped image
|
36 |
-
'''
|
37 |
-
w, h = img.shape[:2]
|
38 |
-
im = np.copy(img)
|
39 |
-
return im[:w - w % sf, :h - h % sf, ...]
|
40 |
-
|
41 |
-
|
42 |
-
"""
|
43 |
-
# --------------------------------------------
|
44 |
-
# anisotropic Gaussian kernels
|
45 |
-
# --------------------------------------------
|
46 |
-
"""
|
47 |
-
|
48 |
-
|
49 |
-
def analytic_kernel(k):
|
50 |
-
"""Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
|
51 |
-
k_size = k.shape[0]
|
52 |
-
# Calculate the big kernels size
|
53 |
-
big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
|
54 |
-
# Loop over the small kernel to fill the big one
|
55 |
-
for r in range(k_size):
|
56 |
-
for c in range(k_size):
|
57 |
-
big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
|
58 |
-
# Crop the edges of the big kernel to ignore very small values and increase run time of SR
|
59 |
-
crop = k_size // 2
|
60 |
-
cropped_big_k = big_k[crop:-crop, crop:-crop]
|
61 |
-
# Normalize to 1
|
62 |
-
return cropped_big_k / cropped_big_k.sum()
|
63 |
-
|
64 |
-
|
65 |
-
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
|
66 |
-
""" generate an anisotropic Gaussian kernel
|
67 |
-
Args:
|
68 |
-
ksize : e.g., 15, kernel size
|
69 |
-
theta : [0, pi], rotation angle range
|
70 |
-
l1 : [0.1,50], scaling of eigenvalues
|
71 |
-
l2 : [0.1,l1], scaling of eigenvalues
|
72 |
-
If l1 = l2, will get an isotropic Gaussian kernel.
|
73 |
-
Returns:
|
74 |
-
k : kernel
|
75 |
-
"""
|
76 |
-
|
77 |
-
v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
|
78 |
-
V = np.array([[v[0], v[1]], [v[1], -v[0]]])
|
79 |
-
D = np.array([[l1, 0], [0, l2]])
|
80 |
-
Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
|
81 |
-
k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
|
82 |
-
|
83 |
-
return k
|
84 |
-
|
85 |
-
|
86 |
-
def gm_blur_kernel(mean, cov, size=15):
|
87 |
-
center = size / 2.0 + 0.5
|
88 |
-
k = np.zeros([size, size])
|
89 |
-
for y in range(size):
|
90 |
-
for x in range(size):
|
91 |
-
cy = y - center + 1
|
92 |
-
cx = x - center + 1
|
93 |
-
k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
|
94 |
-
|
95 |
-
k = k / np.sum(k)
|
96 |
-
return k
|
97 |
-
|
98 |
-
|
99 |
-
def shift_pixel(x, sf, upper_left=True):
|
100 |
-
"""shift pixel for super-resolution with different scale factors
|
101 |
-
Args:
|
102 |
-
x: WxHxC or WxH
|
103 |
-
sf: scale factor
|
104 |
-
upper_left: shift direction
|
105 |
-
"""
|
106 |
-
h, w = x.shape[:2]
|
107 |
-
shift = (sf - 1) * 0.5
|
108 |
-
xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
|
109 |
-
if upper_left:
|
110 |
-
x1 = xv + shift
|
111 |
-
y1 = yv + shift
|
112 |
-
else:
|
113 |
-
x1 = xv - shift
|
114 |
-
y1 = yv - shift
|
115 |
-
|
116 |
-
x1 = np.clip(x1, 0, w - 1)
|
117 |
-
y1 = np.clip(y1, 0, h - 1)
|
118 |
-
|
119 |
-
if x.ndim == 2:
|
120 |
-
x = interp2d(xv, yv, x)(x1, y1)
|
121 |
-
if x.ndim == 3:
|
122 |
-
for i in range(x.shape[-1]):
|
123 |
-
x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
|
124 |
-
|
125 |
-
return x
|
126 |
-
|
127 |
-
|
128 |
-
def blur(x, k):
|
129 |
-
'''
|
130 |
-
x: image, NxcxHxW
|
131 |
-
k: kernel, Nx1xhxw
|
132 |
-
'''
|
133 |
-
n, c = x.shape[:2]
|
134 |
-
p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
|
135 |
-
x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
|
136 |
-
k = k.repeat(1, c, 1, 1)
|
137 |
-
k = k.view(-1, 1, k.shape[2], k.shape[3])
|
138 |
-
x = x.view(1, -1, x.shape[2], x.shape[3])
|
139 |
-
x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
|
140 |
-
x = x.view(n, c, x.shape[2], x.shape[3])
|
141 |
-
|
142 |
-
return x
|
143 |
-
|
144 |
-
|
145 |
-
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
|
146 |
-
""""
|
147 |
-
# modified version of https://github.com/assafshocher/BlindSR_dataset_generator
|
148 |
-
# Kai Zhang
|
149 |
-
# min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
|
150 |
-
# max_var = 2.5 * sf
|
151 |
-
"""
|
152 |
-
# Set random eigen-vals (lambdas) and angle (theta) for COV matrix
|
153 |
-
lambda_1 = min_var + np.random.rand() * (max_var - min_var)
|
154 |
-
lambda_2 = min_var + np.random.rand() * (max_var - min_var)
|
155 |
-
theta = np.random.rand() * np.pi # random theta
|
156 |
-
noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
|
157 |
-
|
158 |
-
# Set COV matrix using Lambdas and Theta
|
159 |
-
LAMBDA = np.diag([lambda_1, lambda_2])
|
160 |
-
Q = np.array([[np.cos(theta), -np.sin(theta)],
|
161 |
-
[np.sin(theta), np.cos(theta)]])
|
162 |
-
SIGMA = Q @ LAMBDA @ Q.T
|
163 |
-
INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
|
164 |
-
|
165 |
-
# Set expectation position (shifting kernel for aligned image)
|
166 |
-
MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
|
167 |
-
MU = MU[None, None, :, None]
|
168 |
-
|
169 |
-
# Create meshgrid for Gaussian
|
170 |
-
[X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
|
171 |
-
Z = np.stack([X, Y], 2)[:, :, :, None]
|
172 |
-
|
173 |
-
# Calcualte Gaussian for every pixel of the kernel
|
174 |
-
ZZ = Z - MU
|
175 |
-
ZZ_t = ZZ.transpose(0, 1, 3, 2)
|
176 |
-
raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
|
177 |
-
|
178 |
-
# shift the kernel so it will be centered
|
179 |
-
# raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
|
180 |
-
|
181 |
-
# Normalize the kernel and return
|
182 |
-
# kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
|
183 |
-
kernel = raw_kernel / np.sum(raw_kernel)
|
184 |
-
return kernel
|
185 |
-
|
186 |
-
|
187 |
-
def fspecial_gaussian(hsize, sigma):
|
188 |
-
hsize = [hsize, hsize]
|
189 |
-
siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
|
190 |
-
std = sigma
|
191 |
-
[x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
|
192 |
-
arg = -(x * x + y * y) / (2 * std * std)
|
193 |
-
h = np.exp(arg)
|
194 |
-
h[h < scipy.finfo(float).eps * h.max()] = 0
|
195 |
-
sumh = h.sum()
|
196 |
-
if sumh != 0:
|
197 |
-
h = h / sumh
|
198 |
-
return h
|
199 |
-
|
200 |
-
|
201 |
-
def fspecial_laplacian(alpha):
|
202 |
-
alpha = max([0, min([alpha, 1])])
|
203 |
-
h1 = alpha / (alpha + 1)
|
204 |
-
h2 = (1 - alpha) / (alpha + 1)
|
205 |
-
h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
|
206 |
-
h = np.array(h)
|
207 |
-
return h
|
208 |
-
|
209 |
-
|
210 |
-
def fspecial(filter_type, *args, **kwargs):
|
211 |
-
'''
|
212 |
-
python code from:
|
213 |
-
https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
|
214 |
-
'''
|
215 |
-
if filter_type == 'gaussian':
|
216 |
-
return fspecial_gaussian(*args, **kwargs)
|
217 |
-
if filter_type == 'laplacian':
|
218 |
-
return fspecial_laplacian(*args, **kwargs)
|
219 |
-
|
220 |
-
|
221 |
-
"""
|
222 |
-
# --------------------------------------------
|
223 |
-
# degradation models
|
224 |
-
# --------------------------------------------
|
225 |
-
"""
|
226 |
-
|
227 |
-
|
228 |
-
def bicubic_degradation(x, sf=3):
|
229 |
-
'''
|
230 |
-
Args:
|
231 |
-
x: HxWxC image, [0, 1]
|
232 |
-
sf: down-scale factor
|
233 |
-
Return:
|
234 |
-
bicubicly downsampled LR image
|
235 |
-
'''
|
236 |
-
x = util.imresize_np(x, scale=1 / sf)
|
237 |
-
return x
|
238 |
-
|
239 |
-
|
240 |
-
def srmd_degradation(x, k, sf=3):
|
241 |
-
''' blur + bicubic downsampling
|
242 |
-
Args:
|
243 |
-
x: HxWxC image, [0, 1]
|
244 |
-
k: hxw, double
|
245 |
-
sf: down-scale factor
|
246 |
-
Return:
|
247 |
-
downsampled LR image
|
248 |
-
Reference:
|
249 |
-
@inproceedings{zhang2018learning,
|
250 |
-
title={Learning a single convolutional super-resolution network for multiple degradations},
|
251 |
-
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
|
252 |
-
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
|
253 |
-
pages={3262--3271},
|
254 |
-
year={2018}
|
255 |
-
}
|
256 |
-
'''
|
257 |
-
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
|
258 |
-
x = bicubic_degradation(x, sf=sf)
|
259 |
-
return x
|
260 |
-
|
261 |
-
|
262 |
-
def dpsr_degradation(x, k, sf=3):
|
263 |
-
''' bicubic downsampling + blur
|
264 |
-
Args:
|
265 |
-
x: HxWxC image, [0, 1]
|
266 |
-
k: hxw, double
|
267 |
-
sf: down-scale factor
|
268 |
-
Return:
|
269 |
-
downsampled LR image
|
270 |
-
Reference:
|
271 |
-
@inproceedings{zhang2019deep,
|
272 |
-
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
|
273 |
-
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
|
274 |
-
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
|
275 |
-
pages={1671--1681},
|
276 |
-
year={2019}
|
277 |
-
}
|
278 |
-
'''
|
279 |
-
x = bicubic_degradation(x, sf=sf)
|
280 |
-
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
|
281 |
-
return x
|
282 |
-
|
283 |
-
|
284 |
-
def classical_degradation(x, k, sf=3):
|
285 |
-
''' blur + downsampling
|
286 |
-
Args:
|
287 |
-
x: HxWxC image, [0, 1]/[0, 255]
|
288 |
-
k: hxw, double
|
289 |
-
sf: down-scale factor
|
290 |
-
Return:
|
291 |
-
downsampled LR image
|
292 |
-
'''
|
293 |
-
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
|
294 |
-
# x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
|
295 |
-
st = 0
|
296 |
-
return x[st::sf, st::sf, ...]
|
297 |
-
|
298 |
-
|
299 |
-
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
|
300 |
-
"""USM sharpening. borrowed from real-ESRGAN
|
301 |
-
Input image: I; Blurry image: B.
|
302 |
-
1. K = I + weight * (I - B)
|
303 |
-
2. Mask = 1 if abs(I - B) > threshold, else: 0
|
304 |
-
3. Blur mask:
|
305 |
-
4. Out = Mask * K + (1 - Mask) * I
|
306 |
-
Args:
|
307 |
-
img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
|
308 |
-
weight (float): Sharp weight. Default: 1.
|
309 |
-
radius (float): Kernel size of Gaussian blur. Default: 50.
|
310 |
-
threshold (int):
|
311 |
-
"""
|
312 |
-
if radius % 2 == 0:
|
313 |
-
radius += 1
|
314 |
-
blur = cv2.GaussianBlur(img, (radius, radius), 0)
|
315 |
-
residual = img - blur
|
316 |
-
mask = np.abs(residual) * 255 > threshold
|
317 |
-
mask = mask.astype('float32')
|
318 |
-
soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
|
319 |
-
|
320 |
-
K = img + weight * residual
|
321 |
-
K = np.clip(K, 0, 1)
|
322 |
-
return soft_mask * K + (1 - soft_mask) * img
|
323 |
-
|
324 |
-
|
325 |
-
def add_blur(img, sf=4):
|
326 |
-
wd2 = 4.0 + sf
|
327 |
-
wd = 2.0 + 0.2 * sf
|
328 |
-
|
329 |
-
wd2 = wd2/4
|
330 |
-
wd = wd/4
|
331 |
-
|
332 |
-
if random.random() < 0.5:
|
333 |
-
l1 = wd2 * random.random()
|
334 |
-
l2 = wd2 * random.random()
|
335 |
-
k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
|
336 |
-
else:
|
337 |
-
k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
|
338 |
-
img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
|
339 |
-
|
340 |
-
return img
|
341 |
-
|
342 |
-
|
343 |
-
def add_resize(img, sf=4):
|
344 |
-
rnum = np.random.rand()
|
345 |
-
if rnum > 0.8: # up
|
346 |
-
sf1 = random.uniform(1, 2)
|
347 |
-
elif rnum < 0.7: # down
|
348 |
-
sf1 = random.uniform(0.5 / sf, 1)
|
349 |
-
else:
|
350 |
-
sf1 = 1.0
|
351 |
-
img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
|
352 |
-
img = np.clip(img, 0.0, 1.0)
|
353 |
-
|
354 |
-
return img
|
355 |
-
|
356 |
-
|
357 |
-
# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
|
358 |
-
# noise_level = random.randint(noise_level1, noise_level2)
|
359 |
-
# rnum = np.random.rand()
|
360 |
-
# if rnum > 0.6: # add color Gaussian noise
|
361 |
-
# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
|
362 |
-
# elif rnum < 0.4: # add grayscale Gaussian noise
|
363 |
-
# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
|
364 |
-
# else: # add noise
|
365 |
-
# L = noise_level2 / 255.
|
366 |
-
# D = np.diag(np.random.rand(3))
|
367 |
-
# U = orth(np.random.rand(3, 3))
|
368 |
-
# conv = np.dot(np.dot(np.transpose(U), D), U)
|
369 |
-
# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
|
370 |
-
# img = np.clip(img, 0.0, 1.0)
|
371 |
-
# return img
|
372 |
-
|
373 |
-
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
|
374 |
-
noise_level = random.randint(noise_level1, noise_level2)
|
375 |
-
rnum = np.random.rand()
|
376 |
-
if rnum > 0.6: # add color Gaussian noise
|
377 |
-
img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
|
378 |
-
elif rnum < 0.4: # add grayscale Gaussian noise
|
379 |
-
img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
|
380 |
-
else: # add noise
|
381 |
-
L = noise_level2 / 255.
|
382 |
-
D = np.diag(np.random.rand(3))
|
383 |
-
U = orth(np.random.rand(3, 3))
|
384 |
-
conv = np.dot(np.dot(np.transpose(U), D), U)
|
385 |
-
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
|
386 |
-
img = np.clip(img, 0.0, 1.0)
|
387 |
-
return img
|
388 |
-
|
389 |
-
|
390 |
-
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
    """Add multiplicative (speckle) noise: img + img * noise_field.

    The noise field is chosen exactly as in add_Gaussian_noise (color,
    grayscale, or channel-correlated). Output is clipped to [0, 1].
    """
    noise_level = random.randint(noise_level1, noise_level2)
    img = np.clip(img, 0.0, 1.0)
    rnum = random.random()
    if rnum > 0.6:
        field = np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    elif rnum < 0.4:
        field = np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:
        L = noise_level2 / 255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        field = np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    img = img + img * field
    return np.clip(img, 0.0, 1.0)
|
406 |
-
|
407 |
-
|
408 |
-
def add_Poisson_noise(img):
    """Add shot (Poisson) noise to an image in [0, 1].

    Half the time each channel is resampled from a Poisson distribution;
    otherwise only a luminance noise component is added to all channels.
    Output is clipped to [0, 1].
    """
    # Quantize to 8-bit levels first so the Poisson rates are realistic.
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    vals = 10 ** (2 * random.random() + 2.0)  # exponent uniform in [2, 4]
    if random.random() < 0.5:
        # Per-channel Poisson resampling.
        img = np.random.poisson(img * vals).astype(np.float32) / vals
    else:
        # Luminance-only noise, added back to every channel.
        gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
        gray = np.clip((gray * 255.0).round(), 0, 255) / 255.
        gray_noise = np.random.poisson(gray * vals).astype(np.float32) / vals - gray
        img = img + gray_noise[:, :, np.newaxis]
    return np.clip(img, 0.0, 1.0)
|
420 |
-
|
421 |
-
|
422 |
-
def add_JPEG_noise(img):
    """Simulate JPEG compression artifacts at a random quality in [80, 95].

    Input/output are RGB float images in [0, 1]; the round-trip goes
    through OpenCV's BGR uint8 encode/decode.
    """
    quality_factor = random.randint(80, 95)
    bgr = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
    result, encimg = cv2.imencode('.jpg', bgr, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
    decoded = cv2.imdecode(encimg, 1)
    return cv2.cvtColor(util.uint2single(decoded), cv2.COLOR_BGR2RGB)
|
429 |
-
|
430 |
-
|
431 |
-
def random_crop(lq, hq, sf=4, lq_patchsize=64):
    """Crop a random, spatially aligned LQ/HQ patch pair.

    The LQ patch is lq_patchsize x lq_patchsize; the HQ patch is the
    corresponding sf-times-larger region.
    """
    h, w = lq.shape[:2]
    top = random.randint(0, h - lq_patchsize)
    left = random.randint(0, w - lq_patchsize)
    lq = lq[top:top + lq_patchsize, left:left + lq_patchsize, :]

    # Scale the crop origin into HQ coordinates.
    top_hq, left_hq = int(top * sf), int(left * sf)
    hq = hq[top_hq:top_hq + lq_patchsize * sf, left_hq:left_hq + lq_patchsize * sf, :]
    return lq, hq
|
440 |
-
|
441 |
-
|
442 |
-
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf  # remember the requested scale; sf may be halved below

    h1, w1 = img.shape[:2]
    # Mod crop so both spatial dims divide evenly by sf.
    # BUG FIX: the original sliced the height axis by w1 and the width axis
    # by h1 (swapped), mis-cropping any non-square input.
    img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')

    hq = img.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
                             interpolation=random.choice([1, 2, 3]))
        else:
            img = util.imresize_np(img, 1 / 2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2

    # Randomize the degradation order, but force downsample2 (2) before
    # downsample3 (3) so `a, b` are defined when branch 3 runs.
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:

        if i == 0:
            img = add_blur(img, sf=sf)

        elif i == 1:
            img = add_blur(img, sf=sf)

        elif i == 2:
            a, b = img.shape[1], img.shape[0]
            # downsample2: either random-factor resize or blur + nearest subsample
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
                                 interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)

        elif i == 3:
            # downsample3: back to the exact target size recorded in branch 2
            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            img = np.clip(img, 0.0, 1.0)

        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)

        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)

        elif i == 6:
            # add processed camera sensor noise
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop (use the original scale so LQ/HQ patch sizes stay aligned)
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)

    return img, hq
|
531 |
-
|
532 |
-
|
533 |
-
# todo no isp_model?
|
534 |
-
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
    """
    Variant of the BSRGAN degradation model from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    image: HXWXC uint8 image
    sf: scale factor
    isp_model: camera ISP model (unused in this variant; kept for interface parity)
    Returns
    -------
    example: dict with key "image" holding the degraded uint8 image
    """
    image = util.uint2single(image)
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25

    h1, w1 = image.shape[:2]
    # Mod crop so both spatial dims divide evenly by sf.
    # BUG FIX: the original sliced the height axis by w1 and the width axis
    # by h1 (swapped), mis-cropping any non-square input.
    image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop

    hq = image.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
                               interpolation=random.choice([1, 2, 3]))
        else:
            image = util.imresize_np(image, 1 / 2, True)
        image = np.clip(image, 0.0, 1.0)
        sf = 2

    # Randomize the degradation order, but force downsample2 (2) before
    # downsample3 (3) so `a, b` are defined when branch 3 runs.
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:

        if i == 0:
            image = add_blur(image, sf=sf)

        elif i == 1:
            # second blur stage intentionally disabled in this variant
            pass

        elif i == 2:
            a, b = image.shape[1], image.shape[0]
            # downsample2: either random-factor resize or blur + nearest subsample
            if random.random() < 0.8:
                sf1 = random.uniform(1, 2 * sf)
                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
                                   interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
                image = image[0::sf, 0::sf, ...]  # nearest downsampling

            image = np.clip(image, 0.0, 1.0)

        elif i == 3:
            # downsample3: back to the exact target size recorded in branch 2
            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            image = np.clip(image, 0.0, 1.0)

        elif i == 4:
            # add Gaussian noise
            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)

        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                image = add_JPEG_noise(image)

        # i == 6 (processed camera sensor noise) is disabled in this variant.

    # add final JPEG compression noise
    image = add_JPEG_noise(image)
    image = util.single2uint(image)
    example = {"image": image}
    return example
|
622 |
-
|
623 |
-
|
624 |
-
|
625 |
-
|
626 |
-
if __name__ == '__main__':
    # Smoke test: apply the variant degradation 20 times to a fixed crop and
    # save side-by-side comparison strips (bicubic | degraded | ground truth).
    print("hey")
    img = util.imread_uint('utils/test.png', 3)
    img = img[:448, :448]
    h = img.shape[0] // 4  # target LQ size for the bicubic reference
    print("resizing to", h)
    sf = 4
    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
    for i in range(20):
        print(i)
        img_hq = img
        img_lq = deg_fn(img)["image"]
        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
        print(img_lq)
        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
        print(img_lq.shape)
        print("bicubic", img_lq_bicubic.shape)
        print(img_hq.shape)
        # Upscale both LQ images with nearest-neighbour (interpolation=0) so
        # the degradation artifacts stay visible in the comparison strip.
        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                interpolation=0)
        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                        interpolation=0)
        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
        util.imsave(img_concat, str(i) + '.png')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIZerotoHero-Health4All/03-BiomedNER-1117-Gradio/app.py
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import pandas as pd
|
3 |
-
import json
|
4 |
-
from collections import defaultdict
|
5 |
-
|
6 |
-
# Create tokenizer for biomed model
|
7 |
-
from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification
|
8 |
-
tokenizer = AutoTokenizer.from_pretrained("d4data/biomedical-ner-all")
|
9 |
-
model = AutoModelForTokenClassification.from_pretrained("d4data/biomedical-ner-all")
|
10 |
-
pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
|
11 |
-
|
12 |
-
# Matplotlib for entity graph
|
13 |
-
import matplotlib.pyplot as plt
|
14 |
-
plt.switch_backend("Agg")
|
15 |
-
|
16 |
-
# Load examples from JSON
|
17 |
-
EXAMPLES = {}
|
18 |
-
with open("examples.json", "r") as f:
|
19 |
-
example_json = json.load(f)
|
20 |
-
EXAMPLES = {x["text"]: x["label"] for x in example_json}
|
21 |
-
|
22 |
-
def group_by_entity(raw):
    """Count NER hits per entity group.

    raw: list of pipeline results, each a dict with an "entity_group" key.
    Returns a defaultdict mapping entity group name -> occurrence count.
    """
    counts = defaultdict(int)
    for entity in raw:
        counts[entity["entity_group"]] += 1
    return counts
|
28 |
-
|
29 |
-
|
30 |
-
def plot_to_figure(grouped):
    """Render per-entity counts as a matplotlib bar chart and return the figure."""
    fig = plt.figure()
    labels = list(grouped.keys())
    heights = list(grouped.values())
    plt.bar(x=labels, height=heights)
    plt.margins(0.2)
    # Leave room for the rotated x-axis labels.
    plt.subplots_adjust(bottom=0.4)
    plt.xticks(rotation=90)
    return fig
|
37 |
-
|
38 |
-
|
39 |
-
def ner(text):
    """Run biomedical NER over *text* and build the Gradio outputs.

    Returns a 4-tuple: (highlighted-text payload, metadata dict,
    example label, bar-chart figure).
    """
    raw = pipe(text)

    # Payload for gr.HighlightedText: original text plus entity spans.
    entities = []
    for item in raw:
        entities.append({
            "entity": item["entity_group"],
            "word": item["word"],
            "score": item["score"],
            "start": item["start"],
            "end": item["end"],
        })
    ner_content = {"text": text, "entities": entities}

    grouped = group_by_entity(raw)
    figure = plot_to_figure(grouped)
    # Known examples carry a rating label; anything else is "Unknown".
    label = EXAMPLES.get(text, "Unknown")

    meta = {
        "entity_counts": grouped,
        "entities": len(set(grouped.keys())),
        "counts": sum(grouped.values()),
    }

    return (ner_content, meta, label, figure)
|
66 |
-
|
67 |
-
|
68 |
-
interface = gr.Interface(
|
69 |
-
ner,
|
70 |
-
inputs=gr.Textbox(label="Note text", value=""),
|
71 |
-
outputs=[
|
72 |
-
gr.HighlightedText(label="NER", combine_adjacent=True),
|
73 |
-
gr.JSON(label="Entity Counts"),
|
74 |
-
gr.Label(label="Rating"),
|
75 |
-
gr.Plot(label="Bar"),
|
76 |
-
],
|
77 |
-
examples=list(EXAMPLES.keys()),
|
78 |
-
allow_flagging="never",
|
79 |
-
)
|
80 |
-
|
81 |
-
interface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ASJMO/freegpt/server/backend.py
DELETED
@@ -1,176 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
from datetime import datetime
|
3 |
-
from g4f import ChatCompletion
|
4 |
-
from flask import request, Response, stream_with_context
|
5 |
-
from requests import get
|
6 |
-
from server.config import special_instructions
|
7 |
-
|
8 |
-
|
9 |
-
class Backend_Api:
    def __init__(self, bp, config: dict) -> None:
        """
        Initialize the Backend_Api class.
        :param bp: Flask blueprint instance the routes will be attached to
        :param config: Configuration dictionary
        """
        self.bp = bp
        # Route table consumed elsewhere when wiring endpoints onto the blueprint.
        self.routes = {
            '/backend-api/v2/conversation': {
                'function': self._conversation,
                'methods': ['POST']
            }
        }

    def _conversation(self):
        """
        Handles the conversation route.

        :return: Response object containing the generated conversation stream
        """
        conversation_id = request.json['conversation_id']

        try:
            jailbreak = request.json['jailbreak']
            model = request.json['model']
            messages = build_messages(jailbreak)

            # Generate response
            response = ChatCompletion.create(
                model=model,
                chatId=conversation_id,
                messages=messages
            )

            # Stream tokens back to the client as server-sent events.
            return Response(stream_with_context(generate_stream(response, jailbreak)), mimetype='text/event-stream')

        except Exception as e:
            # Broad catch at the request boundary: report the failure to the
            # client as a 400 payload instead of crashing the worker.
            print(e)
            print(e.__traceback__.tb_next)

            return {
                '_action': '_ask',
                'success': False,
                "error": f"an error occurred {str(e)}"
            }, 400
|
55 |
-
|
56 |
-
|
57 |
-
def build_messages(jailbreak):
    """
    Build the message list for the conversation.

    :param jailbreak: Jailbreak instruction key (or "default" for none)
    :return: List of messages for the conversation
    """
    _conversation = request.json['meta']['content']['conversation']
    internet_access = request.json['meta']['content']['internet_access']
    prompt = request.json['meta']['content']['parts'][0]

    # Work on a copy of the incoming history.
    # BUG FIX: the original aliased and then extend()-ed the request
    # payload's list, mutating request.json in place.
    conversation = list(_conversation)

    # Add web results if enabled
    if internet_access:
        current_date = datetime.now().strftime("%Y-%m-%d")
        query = f'Current date: {current_date}. ' + prompt["content"]
        search_results = fetch_search_results(query)
        conversation.extend(search_results)

    # Add jailbreak instructions if enabled
    if jailbreak_instructions := getJailbreak(jailbreak):
        conversation.extend(jailbreak_instructions)

    # Add the prompt
    conversation.append(prompt)

    # Reduce conversation size to avoid API token-limit errors
    if len(conversation) > 3:
        conversation = conversation[-4:]

    return conversation
|
90 |
-
|
91 |
-
|
92 |
-
def fetch_search_results(query):
    """
    Fetch search results for a given query.

    :param query: Search query string
    :return: List of search results
    """
    # NOTE(review): no HTTP status or schema check here — assumes the endpoint
    # is reachable and returns a JSON list of {"snippet", "link"} dicts;
    # confirm before relying on this in production.
    search = get('https://ddg-api.herokuapp.com/search',
                 params={
                     'query': query,
                     'limit': 3,
                 })

    # Concatenate numbered snippets into one block of context.
    snippets = ""
    for index, result in enumerate(search.json()):
        snippet = f'[{index + 1}] "{result["snippet"]}" URL:{result["link"]}.'
        snippets += snippet

    # Wrap everything in a single system message for the model.
    response = "Here are some updated web searches. Use this to improve user response:"
    response += snippets

    return [{'role': 'system', 'content': response}]
|
114 |
-
|
115 |
-
|
116 |
-
def generate_stream(response, jailbreak):
    """
    Generate the conversation stream.

    :param response: Response object from ChatCompletion.create
    :param jailbreak: Jailbreak instruction string
    :return: Generator object yielding messages in the conversation
    """
    if getJailbreak(jailbreak):
        response_jailbreak = ''
        jailbroken_checked = False
        for message in response:
            response_jailbreak += message
            if jailbroken_checked:
                yield message
            else:
                # Buffer the output until we can tell whether the jailbreak
                # took effect: on success stream from here on, on failure
                # flush the whole buffer at once.
                if response_jailbroken_success(response_jailbreak):
                    jailbroken_checked = True
                if response_jailbroken_failed(response_jailbreak):
                    yield response_jailbreak
                    jailbroken_checked = True
    else:
        # No jailbreak: pass the stream through untouched.
        yield from response
|
139 |
-
|
140 |
-
|
141 |
-
def response_jailbroken_success(response: str) -> bool:
    """Check if the response has been jailbroken.

    :param response: Response string
    :return: True when the buffered response contains an 'ACT:' marker
    """
    return re.search(r'ACT:', response, flags=re.DOTALL) is not None
|
149 |
-
|
150 |
-
|
151 |
-
def response_jailbroken_failed(response):
    """
    Check if the response has not been jailbroken.

    :param response: Response string
    :return: True when enough text has arrived and it starts with neither
             the 'GPT:' nor the 'ACT:' marker
    """
    if len(response) < 4:
        # Too short to decide yet.
        return False
    return not response.startswith(("GPT:", "ACT:"))
|
159 |
-
|
160 |
-
|
161 |
-
def getJailbreak(jailbreak):
    """
    Look up the jailbreak instructions for a given key.

    :param jailbreak: Jailbreak instruction key, or "default" for none
    :return: List of instruction messages if the key is known, otherwise None
    """
    if jailbreak == "default":
        return None
    # BUG FIX: check membership before indexing — the original did
    # special_instructions[jailbreak] first and raised KeyError for any
    # unknown key.
    base = special_instructions.get(jailbreak)
    if base is None:
        return None
    # BUG FIX: append the two-responses preamble on a copy. The original
    # mutated the shared special_instructions entry in place, so the suffix
    # accumulated again on every call.
    first = dict(base[0])
    first['content'] = first['content'] + special_instructions['two_responses_instruction']
    return [first] + list(base[1:])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py
DELETED
@@ -1,42 +0,0 @@
|
|
1 |
-
# Fine-tune YOLOv5-s on the single-class balloon dataset; everything not
# overridden here comes from the COCO base config.
_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

# ========================modified parameters======================
data_root = 'data/balloon/'
# Path of train annotation file
train_ann_file = 'train.json'
train_data_prefix = 'train/'  # Prefix of train image path
# Path of val annotation file
val_ann_file = 'val.json'
val_data_prefix = 'val/'  # Prefix of val image path
# Dataset meta info: one 'balloon' class and its display palette color.
metainfo = {
    'classes': ('balloon', ),
    'palette': [
        (220, 20, 60),
    ]
}
num_classes = 1

train_batch_size_per_gpu = 4
train_num_workers = 2
log_interval = 1  # log every iteration (dataset is tiny)

# =======================Unmodified in most cases==================
train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        data_prefix=dict(img=train_data_prefix),
        ann_file=train_ann_file))
val_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        data_prefix=dict(img=val_data_prefix),
        ann_file=val_ann_file))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + val_ann_file)
test_evaluator = val_evaluator
# Shrink the detection head to the single balloon class.
model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))
default_hooks = dict(logger=dict(interval=log_interval))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/concatUint8Arrays.ts
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
import { sum } from "./sum";
|
2 |
-
|
3 |
-
export function concatUint8Arrays(arrays: Uint8Array[]): Uint8Array {
|
4 |
-
const totalLength = sum(arrays.map((a) => a.length));
|
5 |
-
const result = new Uint8Array(totalLength);
|
6 |
-
let offset = 0;
|
7 |
-
for (const array of arrays) {
|
8 |
-
result.set(array, offset);
|
9 |
-
offset += array.length;
|
10 |
-
}
|
11 |
-
return result;
|
12 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.js
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
import ScrollBar from './ScrollBar.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

// Register a 'scrollBar' creator on the UI object factory: builds a
// ScrollBar, adds it to the scene's display list, and returns it.
ObjectFactory.register('scrollBar', function (config) {
    var gameObject = new ScrollBar(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

// Expose the class globally for non-module consumers.
SetValue(window, 'RexPlugins.UI.ScrollBar', ScrollBar);

export default ScrollBar;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Factory.js
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
import Skew from './Skew.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

// Register a 'skew' creator: wraps an existing game object in a Skew
// behavior (not added to the scene itself).
ObjectFactory.register('skew', function (gameObject, config) {
    return new Skew(gameObject, config);
});

// Expose the class globally for non-module consumers.
SetValue(window, 'RexPlugins.UI.Skew', Skew);

export default Skew;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexZou/Deploy_Restoration/Underwater.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import torch
|
3 |
-
import numpy as np
|
4 |
-
from torchvision import transforms
|
5 |
-
from PIL import Image
|
6 |
-
import time
|
7 |
-
import torchvision
|
8 |
-
import cv2
|
9 |
-
import torchvision.utils as tvu
|
10 |
-
import torch.functional as F
|
11 |
-
import argparse
|
12 |
-
from net.Ushape_Trans import *
|
13 |
-
|
14 |
-
def inference_img(img_path,Net):
    """Load an image, resize it to 256x256, and run it through *Net*.

    Returns a tuple (restored_tensor, elapsed_seconds); only the forward
    pass is timed.
    """
    low_image = Image.open(img_path).convert('RGB')
    enhance_transforms = transforms.Compose([
        transforms.Resize((256,256)),
        transforms.ToTensor()
    ])

    with torch.no_grad():
        low_image = enhance_transforms(low_image)
        low_image = low_image.unsqueeze(0)  # add batch dimension
        start = time.time()
        restored2 = Net(low_image)
        end = time.time()

    return restored2,end-start
|
31 |
-
|
32 |
-
if __name__ == '__main__':
    # CLI entry point: restore one underwater image with a CPU checkpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_path', type=str, required=True, help='Path to test')
    parser.add_argument('--save_path', type=str, required=True, help='Path to save')
    parser.add_argument('--pk_path', type=str, default='model_zoo/underwater.pth', help='Path of the checkpoint')
    opt = parser.parse_args()
    # Create the output directory (including parents); no error if it exists.
    # (The original os.mkdir failed on nested paths.)
    os.makedirs(opt.save_path, exist_ok=True)
    Net = Generator()
    Net.load_state_dict(torch.load(opt.pk_path, map_location=torch.device('cpu')))
    Net = Net.eval()
    image = opt.test_path
    print(image)
    restored2, time_num = inference_img(image, Net)
    # BUG FIX: join the path components — the original concatenated
    # save_path + 'output.png' and wrote to the wrong location whenever
    # save_path lacked a trailing separator.
    torchvision.utils.save_image(restored2, os.path.join(opt.save_path, 'output.png'))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ameaou/academic-chatgpt3.1/.github/ISSUE_TEMPLATE/feature_request.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
name: Feature request
|
3 |
-
about: Suggest an idea for this project
|
4 |
-
title: ''
|
5 |
-
labels: ''
|
6 |
-
assignees: ''
|
7 |
-
|
8 |
-
---
|
9 |
-
|
10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/conv2d_gradfix.py
DELETED
@@ -1,170 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Custom replacement for `torch.nn.functional.conv2d` that supports
|
10 |
-
arbitrarily high order gradients with zero performance penalty."""
|
11 |
-
|
12 |
-
import warnings
|
13 |
-
import contextlib
|
14 |
-
import torch
|
15 |
-
|
16 |
-
# pylint: disable=redefined-builtin
|
17 |
-
# pylint: disable=arguments-differ
|
18 |
-
# pylint: disable=protected-access
|
19 |
-
|
20 |
-
#----------------------------------------------------------------------------
|
21 |
-
|
22 |
-
enabled = False # Enable the custom op by setting this to true.
|
23 |
-
weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.
|
24 |
-
|
25 |
-
@contextlib.contextmanager
def no_weight_gradients():
    """Context manager that disables weight-gradient computation in the
    custom conv2d op for the duration of the `with` block."""
    global weight_gradients_disabled
    old = weight_gradients_disabled
    weight_gradients_disabled = True
    try:
        yield
    finally:
        # BUG FIX: restore the previous value even if the body raises; the
        # original left the flag stuck at True after an exception.
        weight_gradients_disabled = old
|
32 |
-
|
33 |
-
#----------------------------------------------------------------------------
|
34 |
-
|
35 |
-
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    # Dispatch to the custom gradfix op when supported (see
    # _should_use_custom_op); otherwise fall back to the stock implementation.
    if _should_use_custom_op(input):
        return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
    return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
|
39 |
-
|
40 |
-
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    # Transposed convolution with the same custom-op dispatch as conv2d.
    if _should_use_custom_op(input):
        return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias)
    return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
|
44 |
-
|
45 |
-
#----------------------------------------------------------------------------
|
46 |
-
|
47 |
-
def _should_use_custom_op(input):
|
48 |
-
assert isinstance(input, torch.Tensor)
|
49 |
-
if (not enabled) or (not torch.backends.cudnn.enabled):
|
50 |
-
return False
|
51 |
-
if input.device.type != 'cuda':
|
52 |
-
return False
|
53 |
-
if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']):
|
54 |
-
return True
|
55 |
-
warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
|
56 |
-
return False
|
57 |
-
|
58 |
-
def _tuple_of_ints(xs, ndim):
|
59 |
-
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
|
60 |
-
assert len(xs) == ndim
|
61 |
-
assert all(isinstance(x, int) for x in xs)
|
62 |
-
return xs
|
63 |
-
|
64 |
-
#----------------------------------------------------------------------------
|
65 |
-
|
66 |
-
_conv2d_gradfix_cache = dict()
|
67 |
-
|
68 |
-
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
|
69 |
-
# Parse arguments.
|
70 |
-
ndim = 2
|
71 |
-
weight_shape = tuple(weight_shape)
|
72 |
-
stride = _tuple_of_ints(stride, ndim)
|
73 |
-
padding = _tuple_of_ints(padding, ndim)
|
74 |
-
output_padding = _tuple_of_ints(output_padding, ndim)
|
75 |
-
dilation = _tuple_of_ints(dilation, ndim)
|
76 |
-
|
77 |
-
# Lookup from cache.
|
78 |
-
key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
|
79 |
-
if key in _conv2d_gradfix_cache:
|
80 |
-
return _conv2d_gradfix_cache[key]
|
81 |
-
|
82 |
-
# Validate arguments.
|
83 |
-
assert groups >= 1
|
84 |
-
assert len(weight_shape) == ndim + 2
|
85 |
-
assert all(stride[i] >= 1 for i in range(ndim))
|
86 |
-
assert all(padding[i] >= 0 for i in range(ndim))
|
87 |
-
assert all(dilation[i] >= 0 for i in range(ndim))
|
88 |
-
if not transpose:
|
89 |
-
assert all(output_padding[i] == 0 for i in range(ndim))
|
90 |
-
else: # transpose
|
91 |
-
assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))
|
92 |
-
|
93 |
-
# Helpers.
|
94 |
-
common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
|
95 |
-
def calc_output_padding(input_shape, output_shape):
|
96 |
-
if transpose:
|
97 |
-
return [0, 0]
|
98 |
-
return [
|
99 |
-
input_shape[i + 2]
|
100 |
-
- (output_shape[i + 2] - 1) * stride[i]
|
101 |
-
- (1 - 2 * padding[i])
|
102 |
-
- dilation[i] * (weight_shape[i + 2] - 1)
|
103 |
-
for i in range(ndim)
|
104 |
-
]
|
105 |
-
|
106 |
-
# Forward & backward.
|
107 |
-
class Conv2d(torch.autograd.Function):
|
108 |
-
@staticmethod
|
109 |
-
def forward(ctx, input, weight, bias):
|
110 |
-
assert weight.shape == weight_shape
|
111 |
-
if not transpose:
|
112 |
-
output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
|
113 |
-
else: # transpose
|
114 |
-
output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
|
115 |
-
ctx.save_for_backward(input, weight)
|
116 |
-
return output
|
117 |
-
|
118 |
-
@staticmethod
|
119 |
-
def backward(ctx, grad_output):
|
120 |
-
input, weight = ctx.saved_tensors
|
121 |
-
grad_input = None
|
122 |
-
grad_weight = None
|
123 |
-
grad_bias = None
|
124 |
-
|
125 |
-
if ctx.needs_input_grad[0]:
|
126 |
-
p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
|
127 |
-
grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
|
128 |
-
assert grad_input.shape == input.shape
|
129 |
-
|
130 |
-
if ctx.needs_input_grad[1] and not weight_gradients_disabled:
|
131 |
-
grad_weight = Conv2dGradWeight.apply(grad_output, input)
|
132 |
-
assert grad_weight.shape == weight_shape
|
133 |
-
|
134 |
-
if ctx.needs_input_grad[2]:
|
135 |
-
grad_bias = grad_output.sum([0, 2, 3])
|
136 |
-
|
137 |
-
return grad_input, grad_weight, grad_bias
|
138 |
-
|
139 |
-
# Gradient with respect to the weights.
|
140 |
-
class Conv2dGradWeight(torch.autograd.Function):
|
141 |
-
@staticmethod
|
142 |
-
def forward(ctx, grad_output, input):
|
143 |
-
op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
|
144 |
-
flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
|
145 |
-
grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
|
146 |
-
assert grad_weight.shape == weight_shape
|
147 |
-
ctx.save_for_backward(grad_output, input)
|
148 |
-
return grad_weight
|
149 |
-
|
150 |
-
@staticmethod
|
151 |
-
def backward(ctx, grad2_grad_weight):
|
152 |
-
grad_output, input = ctx.saved_tensors
|
153 |
-
grad2_grad_output = None
|
154 |
-
grad2_input = None
|
155 |
-
|
156 |
-
if ctx.needs_input_grad[0]:
|
157 |
-
grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
|
158 |
-
assert grad2_grad_output.shape == grad_output.shape
|
159 |
-
|
160 |
-
if ctx.needs_input_grad[1]:
|
161 |
-
p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
|
162 |
-
grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
|
163 |
-
assert grad2_input.shape == input.shape
|
164 |
-
|
165 |
-
return grad2_grad_output, grad2_input
|
166 |
-
|
167 |
-
_conv2d_gradfix_cache[key] = Conv2d
|
168 |
-
return Conv2d
|
169 |
-
|
170 |
-
#----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/gradio-sentiment-analyzer/app.py
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
from transformers import pipeline
|
4 |
-
|
5 |
-
sentiment = pipeline("sentiment-analysis")
|
6 |
-
|
7 |
-
def get_sentiment(input_text):
|
8 |
-
return sentiment(input_text)
|
9 |
-
|
10 |
-
iface = gr.Interface(fn = get_sentiment,
|
11 |
-
inputs = "text",
|
12 |
-
outputs = ['text'],
|
13 |
-
title = 'Sentiment Analysis',
|
14 |
-
description="Get Sentiment Negative/Positive for the given input")
|
15 |
-
|
16 |
-
iface.launch(inline = False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/image-to-text-app/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Image To Text App
|
3 |
-
emoji: 📹
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: red
|
6 |
-
sdk: streamlit
|
7 |
-
app_file: app.py
|
8 |
-
pinned: false
|
9 |
-
---
|
10 |
-
|
11 |
-
# image2textapp
|
12 |
-
demo of 🤗 spaces deployment of a streamlit python app
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py
DELETED
@@ -1,812 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import hashlib
|
3 |
-
import itertools
|
4 |
-
import math
|
5 |
-
import os
|
6 |
-
import random
|
7 |
-
from pathlib import Path
|
8 |
-
|
9 |
-
import numpy as np
|
10 |
-
import torch
|
11 |
-
import torch.nn.functional as F
|
12 |
-
import torch.utils.checkpoint
|
13 |
-
from accelerate import Accelerator
|
14 |
-
from accelerate.logging import get_logger
|
15 |
-
from accelerate.utils import ProjectConfiguration, set_seed
|
16 |
-
from huggingface_hub import create_repo, upload_folder
|
17 |
-
from PIL import Image, ImageDraw
|
18 |
-
from torch.utils.data import Dataset
|
19 |
-
from torchvision import transforms
|
20 |
-
from tqdm.auto import tqdm
|
21 |
-
from transformers import CLIPTextModel, CLIPTokenizer
|
22 |
-
|
23 |
-
from diffusers import (
|
24 |
-
AutoencoderKL,
|
25 |
-
DDPMScheduler,
|
26 |
-
StableDiffusionInpaintPipeline,
|
27 |
-
StableDiffusionPipeline,
|
28 |
-
UNet2DConditionModel,
|
29 |
-
)
|
30 |
-
from diffusers.optimization import get_scheduler
|
31 |
-
from diffusers.utils import check_min_version
|
32 |
-
|
33 |
-
|
34 |
-
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
35 |
-
check_min_version("0.13.0.dev0")
|
36 |
-
|
37 |
-
logger = get_logger(__name__)
|
38 |
-
|
39 |
-
|
40 |
-
def prepare_mask_and_masked_image(image, mask):
|
41 |
-
image = np.array(image.convert("RGB"))
|
42 |
-
image = image[None].transpose(0, 3, 1, 2)
|
43 |
-
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
44 |
-
|
45 |
-
mask = np.array(mask.convert("L"))
|
46 |
-
mask = mask.astype(np.float32) / 255.0
|
47 |
-
mask = mask[None, None]
|
48 |
-
mask[mask < 0.5] = 0
|
49 |
-
mask[mask >= 0.5] = 1
|
50 |
-
mask = torch.from_numpy(mask)
|
51 |
-
|
52 |
-
masked_image = image * (mask < 0.5)
|
53 |
-
|
54 |
-
return mask, masked_image
|
55 |
-
|
56 |
-
|
57 |
-
# generate random masks
|
58 |
-
def random_mask(im_shape, ratio=1, mask_full_image=False):
|
59 |
-
mask = Image.new("L", im_shape, 0)
|
60 |
-
draw = ImageDraw.Draw(mask)
|
61 |
-
size = (random.randint(0, int(im_shape[0] * ratio)), random.randint(0, int(im_shape[1] * ratio)))
|
62 |
-
# use this to always mask the whole image
|
63 |
-
if mask_full_image:
|
64 |
-
size = (int(im_shape[0] * ratio), int(im_shape[1] * ratio))
|
65 |
-
limits = (im_shape[0] - size[0] // 2, im_shape[1] - size[1] // 2)
|
66 |
-
center = (random.randint(size[0] // 2, limits[0]), random.randint(size[1] // 2, limits[1]))
|
67 |
-
draw_type = random.randint(0, 1)
|
68 |
-
if draw_type == 0 or mask_full_image:
|
69 |
-
draw.rectangle(
|
70 |
-
(center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2),
|
71 |
-
fill=255,
|
72 |
-
)
|
73 |
-
else:
|
74 |
-
draw.ellipse(
|
75 |
-
(center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2),
|
76 |
-
fill=255,
|
77 |
-
)
|
78 |
-
|
79 |
-
return mask
|
80 |
-
|
81 |
-
|
82 |
-
def parse_args():
|
83 |
-
parser = argparse.ArgumentParser(description="Simple example of a training script.")
|
84 |
-
parser.add_argument(
|
85 |
-
"--pretrained_model_name_or_path",
|
86 |
-
type=str,
|
87 |
-
default=None,
|
88 |
-
required=True,
|
89 |
-
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
90 |
-
)
|
91 |
-
parser.add_argument(
|
92 |
-
"--tokenizer_name",
|
93 |
-
type=str,
|
94 |
-
default=None,
|
95 |
-
help="Pretrained tokenizer name or path if not the same as model_name",
|
96 |
-
)
|
97 |
-
parser.add_argument(
|
98 |
-
"--instance_data_dir",
|
99 |
-
type=str,
|
100 |
-
default=None,
|
101 |
-
required=True,
|
102 |
-
help="A folder containing the training data of instance images.",
|
103 |
-
)
|
104 |
-
parser.add_argument(
|
105 |
-
"--class_data_dir",
|
106 |
-
type=str,
|
107 |
-
default=None,
|
108 |
-
required=False,
|
109 |
-
help="A folder containing the training data of class images.",
|
110 |
-
)
|
111 |
-
parser.add_argument(
|
112 |
-
"--instance_prompt",
|
113 |
-
type=str,
|
114 |
-
default=None,
|
115 |
-
help="The prompt with identifier specifying the instance",
|
116 |
-
)
|
117 |
-
parser.add_argument(
|
118 |
-
"--class_prompt",
|
119 |
-
type=str,
|
120 |
-
default=None,
|
121 |
-
help="The prompt to specify images in the same class as provided instance images.",
|
122 |
-
)
|
123 |
-
parser.add_argument(
|
124 |
-
"--with_prior_preservation",
|
125 |
-
default=False,
|
126 |
-
action="store_true",
|
127 |
-
help="Flag to add prior preservation loss.",
|
128 |
-
)
|
129 |
-
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
|
130 |
-
parser.add_argument(
|
131 |
-
"--num_class_images",
|
132 |
-
type=int,
|
133 |
-
default=100,
|
134 |
-
help=(
|
135 |
-
"Minimal class images for prior preservation loss. If not have enough images, additional images will be"
|
136 |
-
" sampled with class_prompt."
|
137 |
-
),
|
138 |
-
)
|
139 |
-
parser.add_argument(
|
140 |
-
"--output_dir",
|
141 |
-
type=str,
|
142 |
-
default="text-inversion-model",
|
143 |
-
help="The output directory where the model predictions and checkpoints will be written.",
|
144 |
-
)
|
145 |
-
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
146 |
-
parser.add_argument(
|
147 |
-
"--resolution",
|
148 |
-
type=int,
|
149 |
-
default=512,
|
150 |
-
help=(
|
151 |
-
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
|
152 |
-
" resolution"
|
153 |
-
),
|
154 |
-
)
|
155 |
-
parser.add_argument(
|
156 |
-
"--center_crop",
|
157 |
-
default=False,
|
158 |
-
action="store_true",
|
159 |
-
help=(
|
160 |
-
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
|
161 |
-
" cropped. The images will be resized to the resolution first before cropping."
|
162 |
-
),
|
163 |
-
)
|
164 |
-
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
|
165 |
-
parser.add_argument(
|
166 |
-
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
|
167 |
-
)
|
168 |
-
parser.add_argument(
|
169 |
-
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
|
170 |
-
)
|
171 |
-
parser.add_argument("--num_train_epochs", type=int, default=1)
|
172 |
-
parser.add_argument(
|
173 |
-
"--max_train_steps",
|
174 |
-
type=int,
|
175 |
-
default=None,
|
176 |
-
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
177 |
-
)
|
178 |
-
parser.add_argument(
|
179 |
-
"--gradient_accumulation_steps",
|
180 |
-
type=int,
|
181 |
-
default=1,
|
182 |
-
help="Number of updates steps to accumulate before performing a backward/update pass.",
|
183 |
-
)
|
184 |
-
parser.add_argument(
|
185 |
-
"--gradient_checkpointing",
|
186 |
-
action="store_true",
|
187 |
-
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
|
188 |
-
)
|
189 |
-
parser.add_argument(
|
190 |
-
"--learning_rate",
|
191 |
-
type=float,
|
192 |
-
default=5e-6,
|
193 |
-
help="Initial learning rate (after the potential warmup period) to use.",
|
194 |
-
)
|
195 |
-
parser.add_argument(
|
196 |
-
"--scale_lr",
|
197 |
-
action="store_true",
|
198 |
-
default=False,
|
199 |
-
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
|
200 |
-
)
|
201 |
-
parser.add_argument(
|
202 |
-
"--lr_scheduler",
|
203 |
-
type=str,
|
204 |
-
default="constant",
|
205 |
-
help=(
|
206 |
-
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
|
207 |
-
' "constant", "constant_with_warmup"]'
|
208 |
-
),
|
209 |
-
)
|
210 |
-
parser.add_argument(
|
211 |
-
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
|
212 |
-
)
|
213 |
-
parser.add_argument(
|
214 |
-
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
|
215 |
-
)
|
216 |
-
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
|
217 |
-
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
|
218 |
-
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
|
219 |
-
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
|
220 |
-
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
|
221 |
-
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
222 |
-
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
|
223 |
-
parser.add_argument(
|
224 |
-
"--hub_model_id",
|
225 |
-
type=str,
|
226 |
-
default=None,
|
227 |
-
help="The name of the repository to keep in sync with the local `output_dir`.",
|
228 |
-
)
|
229 |
-
parser.add_argument(
|
230 |
-
"--logging_dir",
|
231 |
-
type=str,
|
232 |
-
default="logs",
|
233 |
-
help=(
|
234 |
-
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
|
235 |
-
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
|
236 |
-
),
|
237 |
-
)
|
238 |
-
parser.add_argument(
|
239 |
-
"--mixed_precision",
|
240 |
-
type=str,
|
241 |
-
default="no",
|
242 |
-
choices=["no", "fp16", "bf16"],
|
243 |
-
help=(
|
244 |
-
"Whether to use mixed precision. Choose"
|
245 |
-
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
246 |
-
"and an Nvidia Ampere GPU."
|
247 |
-
),
|
248 |
-
)
|
249 |
-
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
|
250 |
-
parser.add_argument(
|
251 |
-
"--checkpointing_steps",
|
252 |
-
type=int,
|
253 |
-
default=500,
|
254 |
-
help=(
|
255 |
-
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
|
256 |
-
" checkpoints in case they are better than the last checkpoint and are suitable for resuming training"
|
257 |
-
" using `--resume_from_checkpoint`."
|
258 |
-
),
|
259 |
-
)
|
260 |
-
parser.add_argument(
|
261 |
-
"--checkpoints_total_limit",
|
262 |
-
type=int,
|
263 |
-
default=None,
|
264 |
-
help=(
|
265 |
-
"Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
|
266 |
-
" See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
|
267 |
-
" for more docs"
|
268 |
-
),
|
269 |
-
)
|
270 |
-
parser.add_argument(
|
271 |
-
"--resume_from_checkpoint",
|
272 |
-
type=str,
|
273 |
-
default=None,
|
274 |
-
help=(
|
275 |
-
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
|
276 |
-
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
|
277 |
-
),
|
278 |
-
)
|
279 |
-
|
280 |
-
args = parser.parse_args()
|
281 |
-
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
|
282 |
-
if env_local_rank != -1 and env_local_rank != args.local_rank:
|
283 |
-
args.local_rank = env_local_rank
|
284 |
-
|
285 |
-
if args.instance_data_dir is None:
|
286 |
-
raise ValueError("You must specify a train data directory.")
|
287 |
-
|
288 |
-
if args.with_prior_preservation:
|
289 |
-
if args.class_data_dir is None:
|
290 |
-
raise ValueError("You must specify a data directory for class images.")
|
291 |
-
if args.class_prompt is None:
|
292 |
-
raise ValueError("You must specify prompt for class images.")
|
293 |
-
|
294 |
-
return args
|
295 |
-
|
296 |
-
|
297 |
-
class DreamBoothDataset(Dataset):
|
298 |
-
"""
|
299 |
-
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
|
300 |
-
It pre-processes the images and the tokenizes prompts.
|
301 |
-
"""
|
302 |
-
|
303 |
-
def __init__(
|
304 |
-
self,
|
305 |
-
instance_data_root,
|
306 |
-
instance_prompt,
|
307 |
-
tokenizer,
|
308 |
-
class_data_root=None,
|
309 |
-
class_prompt=None,
|
310 |
-
size=512,
|
311 |
-
center_crop=False,
|
312 |
-
):
|
313 |
-
self.size = size
|
314 |
-
self.center_crop = center_crop
|
315 |
-
self.tokenizer = tokenizer
|
316 |
-
|
317 |
-
self.instance_data_root = Path(instance_data_root)
|
318 |
-
if not self.instance_data_root.exists():
|
319 |
-
raise ValueError("Instance images root doesn't exists.")
|
320 |
-
|
321 |
-
self.instance_images_path = list(Path(instance_data_root).iterdir())
|
322 |
-
self.num_instance_images = len(self.instance_images_path)
|
323 |
-
self.instance_prompt = instance_prompt
|
324 |
-
self._length = self.num_instance_images
|
325 |
-
|
326 |
-
if class_data_root is not None:
|
327 |
-
self.class_data_root = Path(class_data_root)
|
328 |
-
self.class_data_root.mkdir(parents=True, exist_ok=True)
|
329 |
-
self.class_images_path = list(self.class_data_root.iterdir())
|
330 |
-
self.num_class_images = len(self.class_images_path)
|
331 |
-
self._length = max(self.num_class_images, self.num_instance_images)
|
332 |
-
self.class_prompt = class_prompt
|
333 |
-
else:
|
334 |
-
self.class_data_root = None
|
335 |
-
|
336 |
-
self.image_transforms_resize_and_crop = transforms.Compose(
|
337 |
-
[
|
338 |
-
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
|
339 |
-
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
|
340 |
-
]
|
341 |
-
)
|
342 |
-
|
343 |
-
self.image_transforms = transforms.Compose(
|
344 |
-
[
|
345 |
-
transforms.ToTensor(),
|
346 |
-
transforms.Normalize([0.5], [0.5]),
|
347 |
-
]
|
348 |
-
)
|
349 |
-
|
350 |
-
def __len__(self):
|
351 |
-
return self._length
|
352 |
-
|
353 |
-
def __getitem__(self, index):
|
354 |
-
example = {}
|
355 |
-
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
|
356 |
-
if not instance_image.mode == "RGB":
|
357 |
-
instance_image = instance_image.convert("RGB")
|
358 |
-
instance_image = self.image_transforms_resize_and_crop(instance_image)
|
359 |
-
|
360 |
-
example["PIL_images"] = instance_image
|
361 |
-
example["instance_images"] = self.image_transforms(instance_image)
|
362 |
-
|
363 |
-
example["instance_prompt_ids"] = self.tokenizer(
|
364 |
-
self.instance_prompt,
|
365 |
-
padding="do_not_pad",
|
366 |
-
truncation=True,
|
367 |
-
max_length=self.tokenizer.model_max_length,
|
368 |
-
).input_ids
|
369 |
-
|
370 |
-
if self.class_data_root:
|
371 |
-
class_image = Image.open(self.class_images_path[index % self.num_class_images])
|
372 |
-
if not class_image.mode == "RGB":
|
373 |
-
class_image = class_image.convert("RGB")
|
374 |
-
class_image = self.image_transforms_resize_and_crop(class_image)
|
375 |
-
example["class_images"] = self.image_transforms(class_image)
|
376 |
-
example["class_PIL_images"] = class_image
|
377 |
-
example["class_prompt_ids"] = self.tokenizer(
|
378 |
-
self.class_prompt,
|
379 |
-
padding="do_not_pad",
|
380 |
-
truncation=True,
|
381 |
-
max_length=self.tokenizer.model_max_length,
|
382 |
-
).input_ids
|
383 |
-
|
384 |
-
return example
|
385 |
-
|
386 |
-
|
387 |
-
class PromptDataset(Dataset):
|
388 |
-
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
|
389 |
-
|
390 |
-
def __init__(self, prompt, num_samples):
|
391 |
-
self.prompt = prompt
|
392 |
-
self.num_samples = num_samples
|
393 |
-
|
394 |
-
def __len__(self):
|
395 |
-
return self.num_samples
|
396 |
-
|
397 |
-
def __getitem__(self, index):
|
398 |
-
example = {}
|
399 |
-
example["prompt"] = self.prompt
|
400 |
-
example["index"] = index
|
401 |
-
return example
|
402 |
-
|
403 |
-
|
404 |
-
def main():
|
405 |
-
args = parse_args()
|
406 |
-
logging_dir = Path(args.output_dir, args.logging_dir)
|
407 |
-
|
408 |
-
project_config = ProjectConfiguration(
|
409 |
-
total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
|
410 |
-
)
|
411 |
-
|
412 |
-
accelerator = Accelerator(
|
413 |
-
gradient_accumulation_steps=args.gradient_accumulation_steps,
|
414 |
-
mixed_precision=args.mixed_precision,
|
415 |
-
log_with="tensorboard",
|
416 |
-
project_config=project_config,
|
417 |
-
)
|
418 |
-
|
419 |
-
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
|
420 |
-
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
|
421 |
-
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
|
422 |
-
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
|
423 |
-
raise ValueError(
|
424 |
-
"Gradient accumulation is not supported when training the text encoder in distributed training. "
|
425 |
-
"Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
|
426 |
-
)
|
427 |
-
|
428 |
-
if args.seed is not None:
|
429 |
-
set_seed(args.seed)
|
430 |
-
|
431 |
-
if args.with_prior_preservation:
|
432 |
-
class_images_dir = Path(args.class_data_dir)
|
433 |
-
if not class_images_dir.exists():
|
434 |
-
class_images_dir.mkdir(parents=True)
|
435 |
-
cur_class_images = len(list(class_images_dir.iterdir()))
|
436 |
-
|
437 |
-
if cur_class_images < args.num_class_images:
|
438 |
-
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
|
439 |
-
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
|
440 |
-
args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None
|
441 |
-
)
|
442 |
-
pipeline.set_progress_bar_config(disable=True)
|
443 |
-
|
444 |
-
num_new_images = args.num_class_images - cur_class_images
|
445 |
-
logger.info(f"Number of class images to sample: {num_new_images}.")
|
446 |
-
|
447 |
-
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
|
448 |
-
sample_dataloader = torch.utils.data.DataLoader(
|
449 |
-
sample_dataset, batch_size=args.sample_batch_size, num_workers=1
|
450 |
-
)
|
451 |
-
|
452 |
-
sample_dataloader = accelerator.prepare(sample_dataloader)
|
453 |
-
pipeline.to(accelerator.device)
|
454 |
-
transform_to_pil = transforms.ToPILImage()
|
455 |
-
for example in tqdm(
|
456 |
-
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
|
457 |
-
):
|
458 |
-
bsz = len(example["prompt"])
|
459 |
-
fake_images = torch.rand((3, args.resolution, args.resolution))
|
460 |
-
transform_to_pil = transforms.ToPILImage()
|
461 |
-
fake_pil_images = transform_to_pil(fake_images)
|
462 |
-
|
463 |
-
fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True)
|
464 |
-
|
465 |
-
images = pipeline(prompt=example["prompt"], mask_image=fake_mask, image=fake_pil_images).images
|
466 |
-
|
467 |
-
for i, image in enumerate(images):
|
468 |
-
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
|
469 |
-
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
|
470 |
-
image.save(image_filename)
|
471 |
-
|
472 |
-
del pipeline
|
473 |
-
if torch.cuda.is_available():
|
474 |
-
torch.cuda.empty_cache()
|
475 |
-
|
476 |
-
# Handle the repository creation
|
477 |
-
if accelerator.is_main_process:
|
478 |
-
if args.output_dir is not None:
|
479 |
-
os.makedirs(args.output_dir, exist_ok=True)
|
480 |
-
|
481 |
-
if args.push_to_hub:
|
482 |
-
repo_id = create_repo(
|
483 |
-
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
|
484 |
-
).repo_id
|
485 |
-
|
486 |
-
# Load the tokenizer
|
487 |
-
if args.tokenizer_name:
|
488 |
-
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
|
489 |
-
elif args.pretrained_model_name_or_path:
|
490 |
-
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
|
491 |
-
|
492 |
-
# Load models and create wrapper for stable diffusion
|
493 |
-
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
|
494 |
-
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
|
495 |
-
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
|
496 |
-
|
497 |
-
vae.requires_grad_(False)
|
498 |
-
if not args.train_text_encoder:
|
499 |
-
text_encoder.requires_grad_(False)
|
500 |
-
|
501 |
-
if args.gradient_checkpointing:
|
502 |
-
unet.enable_gradient_checkpointing()
|
503 |
-
if args.train_text_encoder:
|
504 |
-
text_encoder.gradient_checkpointing_enable()
|
505 |
-
|
506 |
-
if args.scale_lr:
|
507 |
-
args.learning_rate = (
|
508 |
-
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
|
509 |
-
)
|
510 |
-
|
511 |
-
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
|
512 |
-
if args.use_8bit_adam:
|
513 |
-
try:
|
514 |
-
import bitsandbytes as bnb
|
515 |
-
except ImportError:
|
516 |
-
raise ImportError(
|
517 |
-
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
|
518 |
-
)
|
519 |
-
|
520 |
-
optimizer_class = bnb.optim.AdamW8bit
|
521 |
-
else:
|
522 |
-
optimizer_class = torch.optim.AdamW
|
523 |
-
|
524 |
-
params_to_optimize = (
|
525 |
-
itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
|
526 |
-
)
|
527 |
-
optimizer = optimizer_class(
|
528 |
-
params_to_optimize,
|
529 |
-
lr=args.learning_rate,
|
530 |
-
betas=(args.adam_beta1, args.adam_beta2),
|
531 |
-
weight_decay=args.adam_weight_decay,
|
532 |
-
eps=args.adam_epsilon,
|
533 |
-
)
|
534 |
-
|
535 |
-
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
|
536 |
-
|
537 |
-
train_dataset = DreamBoothDataset(
|
538 |
-
instance_data_root=args.instance_data_dir,
|
539 |
-
instance_prompt=args.instance_prompt,
|
540 |
-
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
|
541 |
-
class_prompt=args.class_prompt,
|
542 |
-
tokenizer=tokenizer,
|
543 |
-
size=args.resolution,
|
544 |
-
center_crop=args.center_crop,
|
545 |
-
)
|
546 |
-
|
547 |
-
def collate_fn(examples):
|
548 |
-
input_ids = [example["instance_prompt_ids"] for example in examples]
|
549 |
-
pixel_values = [example["instance_images"] for example in examples]
|
550 |
-
|
551 |
-
# Concat class and instance examples for prior preservation.
|
552 |
-
# We do this to avoid doing two forward passes.
|
553 |
-
if args.with_prior_preservation:
|
554 |
-
input_ids += [example["class_prompt_ids"] for example in examples]
|
555 |
-
pixel_values += [example["class_images"] for example in examples]
|
556 |
-
pior_pil = [example["class_PIL_images"] for example in examples]
|
557 |
-
|
558 |
-
masks = []
|
559 |
-
masked_images = []
|
560 |
-
for example in examples:
|
561 |
-
pil_image = example["PIL_images"]
|
562 |
-
# generate a random mask
|
563 |
-
mask = random_mask(pil_image.size, 1, False)
|
564 |
-
# prepare mask and masked image
|
565 |
-
mask, masked_image = prepare_mask_and_masked_image(pil_image, mask)
|
566 |
-
|
567 |
-
masks.append(mask)
|
568 |
-
masked_images.append(masked_image)
|
569 |
-
|
570 |
-
if args.with_prior_preservation:
|
571 |
-
for pil_image in pior_pil:
|
572 |
-
# generate a random mask
|
573 |
-
mask = random_mask(pil_image.size, 1, False)
|
574 |
-
# prepare mask and masked image
|
575 |
-
mask, masked_image = prepare_mask_and_masked_image(pil_image, mask)
|
576 |
-
|
577 |
-
masks.append(mask)
|
578 |
-
masked_images.append(masked_image)
|
579 |
-
|
580 |
-
pixel_values = torch.stack(pixel_values)
|
581 |
-
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
|
582 |
-
|
583 |
-
input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
|
584 |
-
masks = torch.stack(masks)
|
585 |
-
masked_images = torch.stack(masked_images)
|
586 |
-
batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images}
|
587 |
-
return batch
|
588 |
-
|
589 |
-
train_dataloader = torch.utils.data.DataLoader(
|
590 |
-
train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
|
591 |
-
)
|
592 |
-
|
593 |
-
# Scheduler and math around the number of training steps.
|
594 |
-
overrode_max_train_steps = False
|
595 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
596 |
-
if args.max_train_steps is None:
|
597 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
598 |
-
overrode_max_train_steps = True
|
599 |
-
|
600 |
-
lr_scheduler = get_scheduler(
|
601 |
-
args.lr_scheduler,
|
602 |
-
optimizer=optimizer,
|
603 |
-
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
|
604 |
-
num_training_steps=args.max_train_steps * accelerator.num_processes,
|
605 |
-
)
|
606 |
-
|
607 |
-
if args.train_text_encoder:
|
608 |
-
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
609 |
-
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
|
610 |
-
)
|
611 |
-
else:
|
612 |
-
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
613 |
-
unet, optimizer, train_dataloader, lr_scheduler
|
614 |
-
)
|
615 |
-
accelerator.register_for_checkpointing(lr_scheduler)
|
616 |
-
|
617 |
-
weight_dtype = torch.float32
|
618 |
-
if args.mixed_precision == "fp16":
|
619 |
-
weight_dtype = torch.float16
|
620 |
-
elif args.mixed_precision == "bf16":
|
621 |
-
weight_dtype = torch.bfloat16
|
622 |
-
|
623 |
-
# Move text_encode and vae to gpu.
|
624 |
-
# For mixed precision training we cast the text_encoder and vae weights to half-precision
|
625 |
-
# as these models are only used for inference, keeping weights in full precision is not required.
|
626 |
-
vae.to(accelerator.device, dtype=weight_dtype)
|
627 |
-
if not args.train_text_encoder:
|
628 |
-
text_encoder.to(accelerator.device, dtype=weight_dtype)
|
629 |
-
|
630 |
-
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
631 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
632 |
-
if overrode_max_train_steps:
|
633 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
634 |
-
# Afterwards we recalculate our number of training epochs
|
635 |
-
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
636 |
-
|
637 |
-
# We need to initialize the trackers we use, and also store our configuration.
|
638 |
-
# The trackers initializes automatically on the main process.
|
639 |
-
if accelerator.is_main_process:
|
640 |
-
accelerator.init_trackers("dreambooth", config=vars(args))
|
641 |
-
|
642 |
-
# Train!
|
643 |
-
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
644 |
-
|
645 |
-
logger.info("***** Running training *****")
|
646 |
-
logger.info(f" Num examples = {len(train_dataset)}")
|
647 |
-
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
|
648 |
-
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
649 |
-
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
|
650 |
-
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
|
651 |
-
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
|
652 |
-
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
653 |
-
global_step = 0
|
654 |
-
first_epoch = 0
|
655 |
-
|
656 |
-
if args.resume_from_checkpoint:
|
657 |
-
if args.resume_from_checkpoint != "latest":
|
658 |
-
path = os.path.basename(args.resume_from_checkpoint)
|
659 |
-
else:
|
660 |
-
# Get the most recent checkpoint
|
661 |
-
dirs = os.listdir(args.output_dir)
|
662 |
-
dirs = [d for d in dirs if d.startswith("checkpoint")]
|
663 |
-
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
|
664 |
-
path = dirs[-1] if len(dirs) > 0 else None
|
665 |
-
|
666 |
-
if path is None:
|
667 |
-
accelerator.print(
|
668 |
-
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
|
669 |
-
)
|
670 |
-
args.resume_from_checkpoint = None
|
671 |
-
else:
|
672 |
-
accelerator.print(f"Resuming from checkpoint {path}")
|
673 |
-
accelerator.load_state(os.path.join(args.output_dir, path))
|
674 |
-
global_step = int(path.split("-")[1])
|
675 |
-
|
676 |
-
resume_global_step = global_step * args.gradient_accumulation_steps
|
677 |
-
first_epoch = global_step // num_update_steps_per_epoch
|
678 |
-
resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
|
679 |
-
|
680 |
-
# Only show the progress bar once on each machine.
|
681 |
-
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
|
682 |
-
progress_bar.set_description("Steps")
|
683 |
-
|
684 |
-
for epoch in range(first_epoch, args.num_train_epochs):
|
685 |
-
unet.train()
|
686 |
-
for step, batch in enumerate(train_dataloader):
|
687 |
-
# Skip steps until we reach the resumed step
|
688 |
-
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
|
689 |
-
if step % args.gradient_accumulation_steps == 0:
|
690 |
-
progress_bar.update(1)
|
691 |
-
continue
|
692 |
-
|
693 |
-
with accelerator.accumulate(unet):
|
694 |
-
# Convert images to latent space
|
695 |
-
|
696 |
-
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
|
697 |
-
latents = latents * vae.config.scaling_factor
|
698 |
-
|
699 |
-
# Convert masked images to latent space
|
700 |
-
masked_latents = vae.encode(
|
701 |
-
batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype)
|
702 |
-
).latent_dist.sample()
|
703 |
-
masked_latents = masked_latents * vae.config.scaling_factor
|
704 |
-
|
705 |
-
masks = batch["masks"]
|
706 |
-
# resize the mask to latents shape as we concatenate the mask to the latents
|
707 |
-
mask = torch.stack(
|
708 |
-
[
|
709 |
-
torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8))
|
710 |
-
for mask in masks
|
711 |
-
]
|
712 |
-
)
|
713 |
-
mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8)
|
714 |
-
|
715 |
-
# Sample noise that we'll add to the latents
|
716 |
-
noise = torch.randn_like(latents)
|
717 |
-
bsz = latents.shape[0]
|
718 |
-
# Sample a random timestep for each image
|
719 |
-
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
|
720 |
-
timesteps = timesteps.long()
|
721 |
-
|
722 |
-
# Add noise to the latents according to the noise magnitude at each timestep
|
723 |
-
# (this is the forward diffusion process)
|
724 |
-
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
|
725 |
-
|
726 |
-
# concatenate the noised latents with the mask and the masked latents
|
727 |
-
latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1)
|
728 |
-
|
729 |
-
# Get the text embedding for conditioning
|
730 |
-
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
|
731 |
-
|
732 |
-
# Predict the noise residual
|
733 |
-
noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample
|
734 |
-
|
735 |
-
# Get the target for loss depending on the prediction type
|
736 |
-
if noise_scheduler.config.prediction_type == "epsilon":
|
737 |
-
target = noise
|
738 |
-
elif noise_scheduler.config.prediction_type == "v_prediction":
|
739 |
-
target = noise_scheduler.get_velocity(latents, noise, timesteps)
|
740 |
-
else:
|
741 |
-
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
|
742 |
-
|
743 |
-
if args.with_prior_preservation:
|
744 |
-
# Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
|
745 |
-
noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
|
746 |
-
target, target_prior = torch.chunk(target, 2, dim=0)
|
747 |
-
|
748 |
-
# Compute instance loss
|
749 |
-
loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
|
750 |
-
|
751 |
-
# Compute prior loss
|
752 |
-
prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean")
|
753 |
-
|
754 |
-
# Add the prior loss to the instance loss.
|
755 |
-
loss = loss + args.prior_loss_weight * prior_loss
|
756 |
-
else:
|
757 |
-
loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
|
758 |
-
|
759 |
-
accelerator.backward(loss)
|
760 |
-
if accelerator.sync_gradients:
|
761 |
-
params_to_clip = (
|
762 |
-
itertools.chain(unet.parameters(), text_encoder.parameters())
|
763 |
-
if args.train_text_encoder
|
764 |
-
else unet.parameters()
|
765 |
-
)
|
766 |
-
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
|
767 |
-
optimizer.step()
|
768 |
-
lr_scheduler.step()
|
769 |
-
optimizer.zero_grad()
|
770 |
-
|
771 |
-
# Checks if the accelerator has performed an optimization step behind the scenes
|
772 |
-
if accelerator.sync_gradients:
|
773 |
-
progress_bar.update(1)
|
774 |
-
global_step += 1
|
775 |
-
|
776 |
-
if global_step % args.checkpointing_steps == 0:
|
777 |
-
if accelerator.is_main_process:
|
778 |
-
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
|
779 |
-
accelerator.save_state(save_path)
|
780 |
-
logger.info(f"Saved state to {save_path}")
|
781 |
-
|
782 |
-
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
|
783 |
-
progress_bar.set_postfix(**logs)
|
784 |
-
accelerator.log(logs, step=global_step)
|
785 |
-
|
786 |
-
if global_step >= args.max_train_steps:
|
787 |
-
break
|
788 |
-
|
789 |
-
accelerator.wait_for_everyone()
|
790 |
-
|
791 |
-
# Create the pipeline using using the trained modules and save it.
|
792 |
-
if accelerator.is_main_process:
|
793 |
-
pipeline = StableDiffusionPipeline.from_pretrained(
|
794 |
-
args.pretrained_model_name_or_path,
|
795 |
-
unet=accelerator.unwrap_model(unet),
|
796 |
-
text_encoder=accelerator.unwrap_model(text_encoder),
|
797 |
-
)
|
798 |
-
pipeline.save_pretrained(args.output_dir)
|
799 |
-
|
800 |
-
if args.push_to_hub:
|
801 |
-
upload_folder(
|
802 |
-
repo_id=repo_id,
|
803 |
-
folder_path=args.output_dir,
|
804 |
-
commit_message="End of training",
|
805 |
-
ignore_patterns=["step_*", "epoch_*"],
|
806 |
-
)
|
807 |
-
|
808 |
-
accelerator.end_training()
|
809 |
-
|
810 |
-
|
811 |
-
if __name__ == "__main__":
|
812 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_layers_utils.py
DELETED
@@ -1,530 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
|
17 |
-
import unittest
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import torch
|
21 |
-
from torch import nn
|
22 |
-
|
23 |
-
from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU
|
24 |
-
from diffusers.models.embeddings import get_timestep_embedding
|
25 |
-
from diffusers.models.lora import LoRACompatibleLinear
|
26 |
-
from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
|
27 |
-
from diffusers.models.transformer_2d import Transformer2DModel
|
28 |
-
from diffusers.utils import torch_device
|
29 |
-
|
30 |
-
|
31 |
-
class EmbeddingsTests(unittest.TestCase):
|
32 |
-
def test_timestep_embeddings(self):
|
33 |
-
embedding_dim = 256
|
34 |
-
timesteps = torch.arange(16)
|
35 |
-
|
36 |
-
t1 = get_timestep_embedding(timesteps, embedding_dim)
|
37 |
-
|
38 |
-
# first vector should always be composed only of 0's and 1's
|
39 |
-
assert (t1[0, : embedding_dim // 2] - 0).abs().sum() < 1e-5
|
40 |
-
assert (t1[0, embedding_dim // 2 :] - 1).abs().sum() < 1e-5
|
41 |
-
|
42 |
-
# last element of each vector should be one
|
43 |
-
assert (t1[:, -1] - 1).abs().sum() < 1e-5
|
44 |
-
|
45 |
-
# For large embeddings (e.g. 128) the frequency of every vector is higher
|
46 |
-
# than the previous one which means that the gradients of later vectors are
|
47 |
-
# ALWAYS higher than the previous ones
|
48 |
-
grad_mean = np.abs(np.gradient(t1, axis=-1)).mean(axis=1)
|
49 |
-
|
50 |
-
prev_grad = 0.0
|
51 |
-
for grad in grad_mean:
|
52 |
-
assert grad > prev_grad
|
53 |
-
prev_grad = grad
|
54 |
-
|
55 |
-
def test_timestep_defaults(self):
|
56 |
-
embedding_dim = 16
|
57 |
-
timesteps = torch.arange(10)
|
58 |
-
|
59 |
-
t1 = get_timestep_embedding(timesteps, embedding_dim)
|
60 |
-
t2 = get_timestep_embedding(
|
61 |
-
timesteps, embedding_dim, flip_sin_to_cos=False, downscale_freq_shift=1, max_period=10_000
|
62 |
-
)
|
63 |
-
|
64 |
-
assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3)
|
65 |
-
|
66 |
-
def test_timestep_flip_sin_cos(self):
|
67 |
-
embedding_dim = 16
|
68 |
-
timesteps = torch.arange(10)
|
69 |
-
|
70 |
-
t1 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=True)
|
71 |
-
t1 = torch.cat([t1[:, embedding_dim // 2 :], t1[:, : embedding_dim // 2]], dim=-1)
|
72 |
-
|
73 |
-
t2 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=False)
|
74 |
-
|
75 |
-
assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3)
|
76 |
-
|
77 |
-
def test_timestep_downscale_freq_shift(self):
|
78 |
-
embedding_dim = 16
|
79 |
-
timesteps = torch.arange(10)
|
80 |
-
|
81 |
-
t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0)
|
82 |
-
t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1)
|
83 |
-
|
84 |
-
# get cosine half (vectors that are wrapped into cosine)
|
85 |
-
cosine_half = (t1 - t2)[:, embedding_dim // 2 :]
|
86 |
-
|
87 |
-
# cosine needs to be negative
|
88 |
-
assert (np.abs((cosine_half <= 0).numpy()) - 1).sum() < 1e-5
|
89 |
-
|
90 |
-
def test_sinoid_embeddings_hardcoded(self):
|
91 |
-
embedding_dim = 64
|
92 |
-
timesteps = torch.arange(128)
|
93 |
-
|
94 |
-
# standard unet, score_vde
|
95 |
-
t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1, flip_sin_to_cos=False)
|
96 |
-
# glide, ldm
|
97 |
-
t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0, flip_sin_to_cos=True)
|
98 |
-
# grad-tts
|
99 |
-
t3 = get_timestep_embedding(timesteps, embedding_dim, scale=1000)
|
100 |
-
|
101 |
-
assert torch.allclose(
|
102 |
-
t1[23:26, 47:50].flatten().cpu(),
|
103 |
-
torch.tensor([0.9646, 0.9804, 0.9892, 0.9615, 0.9787, 0.9882, 0.9582, 0.9769, 0.9872]),
|
104 |
-
1e-3,
|
105 |
-
)
|
106 |
-
assert torch.allclose(
|
107 |
-
t2[23:26, 47:50].flatten().cpu(),
|
108 |
-
torch.tensor([0.3019, 0.2280, 0.1716, 0.3146, 0.2377, 0.1790, 0.3272, 0.2474, 0.1864]),
|
109 |
-
1e-3,
|
110 |
-
)
|
111 |
-
assert torch.allclose(
|
112 |
-
t3[23:26, 47:50].flatten().cpu(),
|
113 |
-
torch.tensor([-0.9801, -0.9464, -0.9349, -0.3952, 0.8887, -0.9709, 0.5299, -0.2853, -0.9927]),
|
114 |
-
1e-3,
|
115 |
-
)
|
116 |
-
|
117 |
-
|
118 |
-
class Upsample2DBlockTests(unittest.TestCase):
|
119 |
-
def test_upsample_default(self):
|
120 |
-
torch.manual_seed(0)
|
121 |
-
sample = torch.randn(1, 32, 32, 32)
|
122 |
-
upsample = Upsample2D(channels=32, use_conv=False)
|
123 |
-
with torch.no_grad():
|
124 |
-
upsampled = upsample(sample)
|
125 |
-
|
126 |
-
assert upsampled.shape == (1, 32, 64, 64)
|
127 |
-
output_slice = upsampled[0, -1, -3:, -3:]
|
128 |
-
expected_slice = torch.tensor([-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254])
|
129 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
130 |
-
|
131 |
-
def test_upsample_with_conv(self):
|
132 |
-
torch.manual_seed(0)
|
133 |
-
sample = torch.randn(1, 32, 32, 32)
|
134 |
-
upsample = Upsample2D(channels=32, use_conv=True)
|
135 |
-
with torch.no_grad():
|
136 |
-
upsampled = upsample(sample)
|
137 |
-
|
138 |
-
assert upsampled.shape == (1, 32, 64, 64)
|
139 |
-
output_slice = upsampled[0, -1, -3:, -3:]
|
140 |
-
expected_slice = torch.tensor([0.7145, 1.3773, 0.3492, 0.8448, 1.0839, -0.3341, 0.5956, 0.1250, -0.4841])
|
141 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
142 |
-
|
143 |
-
def test_upsample_with_conv_out_dim(self):
|
144 |
-
torch.manual_seed(0)
|
145 |
-
sample = torch.randn(1, 32, 32, 32)
|
146 |
-
upsample = Upsample2D(channels=32, use_conv=True, out_channels=64)
|
147 |
-
with torch.no_grad():
|
148 |
-
upsampled = upsample(sample)
|
149 |
-
|
150 |
-
assert upsampled.shape == (1, 64, 64, 64)
|
151 |
-
output_slice = upsampled[0, -1, -3:, -3:]
|
152 |
-
expected_slice = torch.tensor([0.2703, 0.1656, -0.2538, -0.0553, -0.2984, 0.1044, 0.1155, 0.2579, 0.7755])
|
153 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
154 |
-
|
155 |
-
def test_upsample_with_transpose(self):
|
156 |
-
torch.manual_seed(0)
|
157 |
-
sample = torch.randn(1, 32, 32, 32)
|
158 |
-
upsample = Upsample2D(channels=32, use_conv=False, use_conv_transpose=True)
|
159 |
-
with torch.no_grad():
|
160 |
-
upsampled = upsample(sample)
|
161 |
-
|
162 |
-
assert upsampled.shape == (1, 32, 64, 64)
|
163 |
-
output_slice = upsampled[0, -1, -3:, -3:]
|
164 |
-
expected_slice = torch.tensor([-0.3028, -0.1582, 0.0071, 0.0350, -0.4799, -0.1139, 0.1056, -0.1153, -0.1046])
|
165 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
166 |
-
|
167 |
-
|
168 |
-
class Downsample2DBlockTests(unittest.TestCase):
|
169 |
-
def test_downsample_default(self):
|
170 |
-
torch.manual_seed(0)
|
171 |
-
sample = torch.randn(1, 32, 64, 64)
|
172 |
-
downsample = Downsample2D(channels=32, use_conv=False)
|
173 |
-
with torch.no_grad():
|
174 |
-
downsampled = downsample(sample)
|
175 |
-
|
176 |
-
assert downsampled.shape == (1, 32, 32, 32)
|
177 |
-
output_slice = downsampled[0, -1, -3:, -3:]
|
178 |
-
expected_slice = torch.tensor([-0.0513, -0.3889, 0.0640, 0.0836, -0.5460, -0.0341, -0.0169, -0.6967, 0.1179])
|
179 |
-
max_diff = (output_slice.flatten() - expected_slice).abs().sum().item()
|
180 |
-
assert max_diff <= 1e-3
|
181 |
-
# assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1)
|
182 |
-
|
183 |
-
def test_downsample_with_conv(self):
|
184 |
-
torch.manual_seed(0)
|
185 |
-
sample = torch.randn(1, 32, 64, 64)
|
186 |
-
downsample = Downsample2D(channels=32, use_conv=True)
|
187 |
-
with torch.no_grad():
|
188 |
-
downsampled = downsample(sample)
|
189 |
-
|
190 |
-
assert downsampled.shape == (1, 32, 32, 32)
|
191 |
-
output_slice = downsampled[0, -1, -3:, -3:]
|
192 |
-
|
193 |
-
expected_slice = torch.tensor(
|
194 |
-
[0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913],
|
195 |
-
)
|
196 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
197 |
-
|
198 |
-
def test_downsample_with_conv_pad1(self):
|
199 |
-
torch.manual_seed(0)
|
200 |
-
sample = torch.randn(1, 32, 64, 64)
|
201 |
-
downsample = Downsample2D(channels=32, use_conv=True, padding=1)
|
202 |
-
with torch.no_grad():
|
203 |
-
downsampled = downsample(sample)
|
204 |
-
|
205 |
-
assert downsampled.shape == (1, 32, 32, 32)
|
206 |
-
output_slice = downsampled[0, -1, -3:, -3:]
|
207 |
-
expected_slice = torch.tensor([0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913])
|
208 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
209 |
-
|
210 |
-
def test_downsample_with_conv_out_dim(self):
|
211 |
-
torch.manual_seed(0)
|
212 |
-
sample = torch.randn(1, 32, 64, 64)
|
213 |
-
downsample = Downsample2D(channels=32, use_conv=True, out_channels=16)
|
214 |
-
with torch.no_grad():
|
215 |
-
downsampled = downsample(sample)
|
216 |
-
|
217 |
-
assert downsampled.shape == (1, 16, 32, 32)
|
218 |
-
output_slice = downsampled[0, -1, -3:, -3:]
|
219 |
-
expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522])
|
220 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
221 |
-
|
222 |
-
|
223 |
-
class ResnetBlock2DTests(unittest.TestCase):
|
224 |
-
def test_resnet_default(self):
|
225 |
-
torch.manual_seed(0)
|
226 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
227 |
-
temb = torch.randn(1, 128).to(torch_device)
|
228 |
-
resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128).to(torch_device)
|
229 |
-
with torch.no_grad():
|
230 |
-
output_tensor = resnet_block(sample, temb)
|
231 |
-
|
232 |
-
assert output_tensor.shape == (1, 32, 64, 64)
|
233 |
-
output_slice = output_tensor[0, -1, -3:, -3:]
|
234 |
-
expected_slice = torch.tensor(
|
235 |
-
[-1.9010, -0.2974, -0.8245, -1.3533, 0.8742, -0.9645, -2.0584, 1.3387, -0.4746], device=torch_device
|
236 |
-
)
|
237 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
238 |
-
|
239 |
-
def test_restnet_with_use_in_shortcut(self):
|
240 |
-
torch.manual_seed(0)
|
241 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
242 |
-
temb = torch.randn(1, 128).to(torch_device)
|
243 |
-
resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, use_in_shortcut=True).to(torch_device)
|
244 |
-
with torch.no_grad():
|
245 |
-
output_tensor = resnet_block(sample, temb)
|
246 |
-
|
247 |
-
assert output_tensor.shape == (1, 32, 64, 64)
|
248 |
-
output_slice = output_tensor[0, -1, -3:, -3:]
|
249 |
-
expected_slice = torch.tensor(
|
250 |
-
[0.2226, -1.0791, -0.1629, 0.3659, -0.2889, -1.2376, 0.0582, 0.9206, 0.0044], device=torch_device
|
251 |
-
)
|
252 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
253 |
-
|
254 |
-
def test_resnet_up(self):
|
255 |
-
torch.manual_seed(0)
|
256 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
257 |
-
temb = torch.randn(1, 128).to(torch_device)
|
258 |
-
resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, up=True).to(torch_device)
|
259 |
-
with torch.no_grad():
|
260 |
-
output_tensor = resnet_block(sample, temb)
|
261 |
-
|
262 |
-
assert output_tensor.shape == (1, 32, 128, 128)
|
263 |
-
output_slice = output_tensor[0, -1, -3:, -3:]
|
264 |
-
expected_slice = torch.tensor(
|
265 |
-
[1.2130, -0.8753, -0.9027, 1.5783, -0.5362, -0.5001, 1.0726, -0.7732, -0.4182], device=torch_device
|
266 |
-
)
|
267 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
268 |
-
|
269 |
-
def test_resnet_down(self):
|
270 |
-
torch.manual_seed(0)
|
271 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
272 |
-
temb = torch.randn(1, 128).to(torch_device)
|
273 |
-
resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, down=True).to(torch_device)
|
274 |
-
with torch.no_grad():
|
275 |
-
output_tensor = resnet_block(sample, temb)
|
276 |
-
|
277 |
-
assert output_tensor.shape == (1, 32, 32, 32)
|
278 |
-
output_slice = output_tensor[0, -1, -3:, -3:]
|
279 |
-
expected_slice = torch.tensor(
|
280 |
-
[-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device
|
281 |
-
)
|
282 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
283 |
-
|
284 |
-
def test_restnet_with_kernel_fir(self):
|
285 |
-
torch.manual_seed(0)
|
286 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
287 |
-
temb = torch.randn(1, 128).to(torch_device)
|
288 |
-
resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="fir", down=True).to(torch_device)
|
289 |
-
with torch.no_grad():
|
290 |
-
output_tensor = resnet_block(sample, temb)
|
291 |
-
|
292 |
-
assert output_tensor.shape == (1, 32, 32, 32)
|
293 |
-
output_slice = output_tensor[0, -1, -3:, -3:]
|
294 |
-
expected_slice = torch.tensor(
|
295 |
-
[-0.0934, -0.5729, 0.0909, -0.2710, -0.5044, 0.0243, -0.0665, -0.5267, -0.3136], device=torch_device
|
296 |
-
)
|
297 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
298 |
-
|
299 |
-
def test_restnet_with_kernel_sde_vp(self):
|
300 |
-
torch.manual_seed(0)
|
301 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
302 |
-
temb = torch.randn(1, 128).to(torch_device)
|
303 |
-
resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="sde_vp", down=True).to(torch_device)
|
304 |
-
with torch.no_grad():
|
305 |
-
output_tensor = resnet_block(sample, temb)
|
306 |
-
|
307 |
-
assert output_tensor.shape == (1, 32, 32, 32)
|
308 |
-
output_slice = output_tensor[0, -1, -3:, -3:]
|
309 |
-
expected_slice = torch.tensor(
|
310 |
-
[-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device
|
311 |
-
)
|
312 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
313 |
-
|
314 |
-
|
315 |
-
class Transformer2DModelTests(unittest.TestCase):
|
316 |
-
def test_spatial_transformer_default(self):
|
317 |
-
torch.manual_seed(0)
|
318 |
-
if torch.cuda.is_available():
|
319 |
-
torch.cuda.manual_seed_all(0)
|
320 |
-
|
321 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
322 |
-
spatial_transformer_block = Transformer2DModel(
|
323 |
-
in_channels=32,
|
324 |
-
num_attention_heads=1,
|
325 |
-
attention_head_dim=32,
|
326 |
-
dropout=0.0,
|
327 |
-
cross_attention_dim=None,
|
328 |
-
).to(torch_device)
|
329 |
-
with torch.no_grad():
|
330 |
-
attention_scores = spatial_transformer_block(sample).sample
|
331 |
-
|
332 |
-
assert attention_scores.shape == (1, 32, 64, 64)
|
333 |
-
output_slice = attention_scores[0, -1, -3:, -3:]
|
334 |
-
|
335 |
-
expected_slice = torch.tensor(
|
336 |
-
[-1.9455, -0.0066, -1.3933, -1.5878, 0.5325, -0.6486, -1.8648, 0.7515, -0.9689], device=torch_device
|
337 |
-
)
|
338 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
339 |
-
|
340 |
-
def test_spatial_transformer_cross_attention_dim(self):
|
341 |
-
torch.manual_seed(0)
|
342 |
-
if torch.cuda.is_available():
|
343 |
-
torch.cuda.manual_seed_all(0)
|
344 |
-
|
345 |
-
sample = torch.randn(1, 64, 64, 64).to(torch_device)
|
346 |
-
spatial_transformer_block = Transformer2DModel(
|
347 |
-
in_channels=64,
|
348 |
-
num_attention_heads=2,
|
349 |
-
attention_head_dim=32,
|
350 |
-
dropout=0.0,
|
351 |
-
cross_attention_dim=64,
|
352 |
-
).to(torch_device)
|
353 |
-
with torch.no_grad():
|
354 |
-
context = torch.randn(1, 4, 64).to(torch_device)
|
355 |
-
attention_scores = spatial_transformer_block(sample, context).sample
|
356 |
-
|
357 |
-
assert attention_scores.shape == (1, 64, 64, 64)
|
358 |
-
output_slice = attention_scores[0, -1, -3:, -3:]
|
359 |
-
expected_slice = torch.tensor(
|
360 |
-
[0.0143, -0.6909, -2.1547, -1.8893, 1.4097, 0.1359, -0.2521, -1.3359, 0.2598], device=torch_device
|
361 |
-
)
|
362 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
363 |
-
|
364 |
-
def test_spatial_transformer_timestep(self):
|
365 |
-
torch.manual_seed(0)
|
366 |
-
if torch.cuda.is_available():
|
367 |
-
torch.cuda.manual_seed_all(0)
|
368 |
-
|
369 |
-
num_embeds_ada_norm = 5
|
370 |
-
|
371 |
-
sample = torch.randn(1, 64, 64, 64).to(torch_device)
|
372 |
-
spatial_transformer_block = Transformer2DModel(
|
373 |
-
in_channels=64,
|
374 |
-
num_attention_heads=2,
|
375 |
-
attention_head_dim=32,
|
376 |
-
dropout=0.0,
|
377 |
-
cross_attention_dim=64,
|
378 |
-
num_embeds_ada_norm=num_embeds_ada_norm,
|
379 |
-
).to(torch_device)
|
380 |
-
with torch.no_grad():
|
381 |
-
timestep_1 = torch.tensor(1, dtype=torch.long).to(torch_device)
|
382 |
-
timestep_2 = torch.tensor(2, dtype=torch.long).to(torch_device)
|
383 |
-
attention_scores_1 = spatial_transformer_block(sample, timestep=timestep_1).sample
|
384 |
-
attention_scores_2 = spatial_transformer_block(sample, timestep=timestep_2).sample
|
385 |
-
|
386 |
-
assert attention_scores_1.shape == (1, 64, 64, 64)
|
387 |
-
assert attention_scores_2.shape == (1, 64, 64, 64)
|
388 |
-
|
389 |
-
output_slice_1 = attention_scores_1[0, -1, -3:, -3:]
|
390 |
-
output_slice_2 = attention_scores_2[0, -1, -3:, -3:]
|
391 |
-
|
392 |
-
expected_slice = torch.tensor(
|
393 |
-
[-0.3923, -1.0923, -1.7144, -1.5570, 1.4154, 0.1738, -0.1157, -1.2998, -0.1703], device=torch_device
|
394 |
-
)
|
395 |
-
expected_slice_2 = torch.tensor(
|
396 |
-
[-0.4311, -1.1376, -1.7732, -1.5997, 1.3450, 0.0964, -0.1569, -1.3590, -0.2348], device=torch_device
|
397 |
-
)
|
398 |
-
|
399 |
-
assert torch.allclose(output_slice_1.flatten(), expected_slice, atol=1e-3)
|
400 |
-
assert torch.allclose(output_slice_2.flatten(), expected_slice_2, atol=1e-3)
|
401 |
-
|
402 |
-
def test_spatial_transformer_dropout(self):
|
403 |
-
torch.manual_seed(0)
|
404 |
-
if torch.cuda.is_available():
|
405 |
-
torch.cuda.manual_seed_all(0)
|
406 |
-
|
407 |
-
sample = torch.randn(1, 32, 64, 64).to(torch_device)
|
408 |
-
spatial_transformer_block = (
|
409 |
-
Transformer2DModel(
|
410 |
-
in_channels=32,
|
411 |
-
num_attention_heads=2,
|
412 |
-
attention_head_dim=16,
|
413 |
-
dropout=0.3,
|
414 |
-
cross_attention_dim=None,
|
415 |
-
)
|
416 |
-
.to(torch_device)
|
417 |
-
.eval()
|
418 |
-
)
|
419 |
-
with torch.no_grad():
|
420 |
-
attention_scores = spatial_transformer_block(sample).sample
|
421 |
-
|
422 |
-
assert attention_scores.shape == (1, 32, 64, 64)
|
423 |
-
output_slice = attention_scores[0, -1, -3:, -3:]
|
424 |
-
|
425 |
-
expected_slice = torch.tensor(
|
426 |
-
[-1.9380, -0.0083, -1.3771, -1.5819, 0.5209, -0.6441, -1.8545, 0.7563, -0.9615], device=torch_device
|
427 |
-
)
|
428 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
429 |
-
|
430 |
-
@unittest.skipIf(torch_device == "mps", "MPS does not support float64")
|
431 |
-
def test_spatial_transformer_discrete(self):
|
432 |
-
torch.manual_seed(0)
|
433 |
-
if torch.cuda.is_available():
|
434 |
-
torch.cuda.manual_seed_all(0)
|
435 |
-
|
436 |
-
num_embed = 5
|
437 |
-
|
438 |
-
sample = torch.randint(0, num_embed, (1, 32)).to(torch_device)
|
439 |
-
spatial_transformer_block = (
|
440 |
-
Transformer2DModel(
|
441 |
-
num_attention_heads=1,
|
442 |
-
attention_head_dim=32,
|
443 |
-
num_vector_embeds=num_embed,
|
444 |
-
sample_size=16,
|
445 |
-
)
|
446 |
-
.to(torch_device)
|
447 |
-
.eval()
|
448 |
-
)
|
449 |
-
|
450 |
-
with torch.no_grad():
|
451 |
-
attention_scores = spatial_transformer_block(sample).sample
|
452 |
-
|
453 |
-
assert attention_scores.shape == (1, num_embed - 1, 32)
|
454 |
-
|
455 |
-
output_slice = attention_scores[0, -2:, -3:]
|
456 |
-
|
457 |
-
expected_slice = torch.tensor([-1.7648, -1.0241, -2.0985, -1.8035, -1.6404, -1.2098], device=torch_device)
|
458 |
-
assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
|
459 |
-
|
460 |
-
def test_spatial_transformer_default_norm_layers(self):
|
461 |
-
spatial_transformer_block = Transformer2DModel(num_attention_heads=1, attention_head_dim=32, in_channels=32)
|
462 |
-
|
463 |
-
assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == nn.LayerNorm
|
464 |
-
assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm
|
465 |
-
|
466 |
-
def test_spatial_transformer_ada_norm_layers(self):
|
467 |
-
spatial_transformer_block = Transformer2DModel(
|
468 |
-
num_attention_heads=1,
|
469 |
-
attention_head_dim=32,
|
470 |
-
in_channels=32,
|
471 |
-
num_embeds_ada_norm=5,
|
472 |
-
)
|
473 |
-
|
474 |
-
assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == AdaLayerNorm
|
475 |
-
assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm
|
476 |
-
|
477 |
-
def test_spatial_transformer_default_ff_layers(self):
|
478 |
-
spatial_transformer_block = Transformer2DModel(
|
479 |
-
num_attention_heads=1,
|
480 |
-
attention_head_dim=32,
|
481 |
-
in_channels=32,
|
482 |
-
)
|
483 |
-
|
484 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == GEGLU
|
485 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout
|
486 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear
|
487 |
-
|
488 |
-
dim = 32
|
489 |
-
inner_dim = 128
|
490 |
-
|
491 |
-
# First dimension change
|
492 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim
|
493 |
-
# NOTE: inner_dim * 2 because GEGLU
|
494 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim * 2
|
495 |
-
|
496 |
-
# Second dimension change
|
497 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim
|
498 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim
|
499 |
-
|
500 |
-
def test_spatial_transformer_geglu_approx_ff_layers(self):
|
501 |
-
spatial_transformer_block = Transformer2DModel(
|
502 |
-
num_attention_heads=1,
|
503 |
-
attention_head_dim=32,
|
504 |
-
in_channels=32,
|
505 |
-
activation_fn="geglu-approximate",
|
506 |
-
)
|
507 |
-
|
508 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == ApproximateGELU
|
509 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout
|
510 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear
|
511 |
-
|
512 |
-
dim = 32
|
513 |
-
inner_dim = 128
|
514 |
-
|
515 |
-
# First dimension change
|
516 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim
|
517 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim
|
518 |
-
|
519 |
-
# Second dimension change
|
520 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim
|
521 |
-
assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim
|
522 |
-
|
523 |
-
def test_spatial_transformer_attention_bias(self):
|
524 |
-
spatial_transformer_block = Transformer2DModel(
|
525 |
-
num_attention_heads=1, attention_head_dim=32, in_channels=32, attention_bias=True
|
526 |
-
)
|
527 |
-
|
528 |
-
assert spatial_transformer_block.transformer_blocks[0].attn1.to_q.bias is not None
|
529 |
-
assert spatial_transformer_block.transformer_blocks[0].attn1.to_k.bias is not None
|
530 |
-
assert spatial_transformer_block.transformer_blocks[0].attn1.to_v.bias is not None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/README.md
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
# PASCAL VOC Dataset
|
2 |
-
|
3 |
-
[DATASET]
|
4 |
-
|
5 |
-
```
|
6 |
-
@Article{Everingham10,
|
7 |
-
author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.",
|
8 |
-
title = "The Pascal Visual Object Classes (VOC) Challenge",
|
9 |
-
journal = "International Journal of Computer Vision",
|
10 |
-
volume = "88",
|
11 |
-
year = "2010",
|
12 |
-
number = "2",
|
13 |
-
month = jun,
|
14 |
-
pages = "303--338",
|
15 |
-
}
|
16 |
-
```
|
17 |
-
|
18 |
-
## Results and Models
|
19 |
-
|
20 |
-
| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|
21 |
-
|:------------:|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
|
22 |
-
| Faster R-CNN | R-50 | pytorch | 1x | 2.6 | - | 79.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/20200623_015208.log.json) |
|
23 |
-
| Retinanet | R-50 | pytorch | 1x | 2.1 | - | 77.3 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200617-47cbdd0e.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200616_014642.log.json) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/schedules/schedule_80k.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
# optimizer
|
2 |
-
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
|
3 |
-
optimizer_config = dict()
|
4 |
-
# learning policy
|
5 |
-
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
|
6 |
-
# runtime settings
|
7 |
-
runner = dict(type='IterBasedRunner', max_iters=80000)
|
8 |
-
checkpoint_config = dict(by_epoch=False, interval=8000)
|
9 |
-
evaluation = dict(interval=8000, metric='mIoU')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ank0X0/text-to-3d-shap-e-webui/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Text To 3d Shap E Webui
|
3 |
-
emoji: 🏢
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: gray
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.32.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: cc0-1.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/ROOPOK/roop/typing.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
from typing import Any
|
2 |
-
|
3 |
-
from insightface.app.common import Face
|
4 |
-
import numpy
|
5 |
-
|
6 |
-
Face = Face
|
7 |
-
Frame = numpy.ndarray[Any, Any]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Armored-Atom/Image-To-Motion/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Image Animation Using Thin Plate Spline Motion Model
|
3 |
-
emoji: 👁
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.0.19
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Asahi402/White-box-Cartoonization/wbc/guided_filter.py
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
import tensorflow as tf
|
2 |
-
import numpy as np
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
def tf_box_filter(x, r):
|
8 |
-
k_size = int(2*r+1)
|
9 |
-
ch = x.get_shape().as_list()[-1]
|
10 |
-
weight = 1/(k_size**2)
|
11 |
-
box_kernel = weight*np.ones((k_size, k_size, ch, 1))
|
12 |
-
box_kernel = np.array(box_kernel).astype(np.float32)
|
13 |
-
output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME')
|
14 |
-
return output
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
def guided_filter(x, y, r, eps=1e-2):
|
19 |
-
|
20 |
-
x_shape = tf.shape(x)
|
21 |
-
#y_shape = tf.shape(y)
|
22 |
-
|
23 |
-
N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r)
|
24 |
-
|
25 |
-
mean_x = tf_box_filter(x, r) / N
|
26 |
-
mean_y = tf_box_filter(y, r) / N
|
27 |
-
cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y
|
28 |
-
var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x
|
29 |
-
|
30 |
-
A = cov_xy / (var_x + eps)
|
31 |
-
b = mean_y - A * mean_x
|
32 |
-
|
33 |
-
mean_A = tf_box_filter(A, r) / N
|
34 |
-
mean_b = tf_box_filter(b, r) / N
|
35 |
-
|
36 |
-
output = mean_A * x + mean_b
|
37 |
-
|
38 |
-
return output
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
def fast_guided_filter(lr_x, lr_y, hr_x, r=1, eps=1e-8):
|
43 |
-
|
44 |
-
#assert lr_x.shape.ndims == 4 and lr_y.shape.ndims == 4 and hr_x.shape.ndims == 4
|
45 |
-
|
46 |
-
lr_x_shape = tf.shape(lr_x)
|
47 |
-
#lr_y_shape = tf.shape(lr_y)
|
48 |
-
hr_x_shape = tf.shape(hr_x)
|
49 |
-
|
50 |
-
N = tf_box_filter(tf.ones((1, lr_x_shape[1], lr_x_shape[2], 1), dtype=lr_x.dtype), r)
|
51 |
-
|
52 |
-
mean_x = tf_box_filter(lr_x, r) / N
|
53 |
-
mean_y = tf_box_filter(lr_y, r) / N
|
54 |
-
cov_xy = tf_box_filter(lr_x * lr_y, r) / N - mean_x * mean_y
|
55 |
-
var_x = tf_box_filter(lr_x * lr_x, r) / N - mean_x * mean_x
|
56 |
-
|
57 |
-
A = cov_xy / (var_x + eps)
|
58 |
-
b = mean_y - A * mean_x
|
59 |
-
|
60 |
-
mean_A = tf.image.resize_images(A, hr_x_shape[1: 3])
|
61 |
-
mean_b = tf.image.resize_images(b, hr_x_shape[1: 3])
|
62 |
-
|
63 |
-
output = mean_A * hr_x + mean_b
|
64 |
-
|
65 |
-
return output
|
66 |
-
|
67 |
-
|
68 |
-
if __name__ == '__main__':
|
69 |
-
import cv2
|
70 |
-
from tqdm import tqdm
|
71 |
-
|
72 |
-
input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
|
73 |
-
#input_superpixel = tf.placeholder(tf.float32, [16, 256, 256, 3])
|
74 |
-
output = guided_filter(input_photo, input_photo, 5, eps=1)
|
75 |
-
image = cv2.imread('output_figure1/cartoon2.jpg')
|
76 |
-
image = image/127.5 - 1
|
77 |
-
image = np.expand_dims(image, axis=0)
|
78 |
-
|
79 |
-
config = tf.ConfigProto()
|
80 |
-
config.gpu_options.allow_growth = True
|
81 |
-
sess = tf.Session(config=config)
|
82 |
-
sess.run(tf.global_variables_initializer())
|
83 |
-
|
84 |
-
out = sess.run(output, feed_dict={input_photo: image})
|
85 |
-
out = (np.squeeze(out)+1)*127.5
|
86 |
-
out = np.clip(out, 0, 255).astype(np.uint8)
|
87 |
-
cv2.imwrite('output_figure1/cartoon2_filter.jpg', out)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py
DELETED
@@ -1,506 +0,0 @@
|
|
1 |
-
"""Backing implementation for InstallRequirement's various constructors
|
2 |
-
|
3 |
-
The idea here is that these formed a major chunk of InstallRequirement's size
|
4 |
-
so, moving them and support code dedicated to them outside of that class
|
5 |
-
helps creates for better understandability for the rest of the code.
|
6 |
-
|
7 |
-
These are meant to be used elsewhere within pip to create instances of
|
8 |
-
InstallRequirement.
|
9 |
-
"""
|
10 |
-
|
11 |
-
import logging
|
12 |
-
import os
|
13 |
-
import re
|
14 |
-
from typing import Dict, List, Optional, Set, Tuple, Union
|
15 |
-
|
16 |
-
from pip._vendor.packaging.markers import Marker
|
17 |
-
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
|
18 |
-
from pip._vendor.packaging.specifiers import Specifier
|
19 |
-
|
20 |
-
from pip._internal.exceptions import InstallationError
|
21 |
-
from pip._internal.models.index import PyPI, TestPyPI
|
22 |
-
from pip._internal.models.link import Link
|
23 |
-
from pip._internal.models.wheel import Wheel
|
24 |
-
from pip._internal.req.req_file import ParsedRequirement
|
25 |
-
from pip._internal.req.req_install import InstallRequirement
|
26 |
-
from pip._internal.utils.filetypes import is_archive_file
|
27 |
-
from pip._internal.utils.misc import is_installable_dir
|
28 |
-
from pip._internal.utils.packaging import get_requirement
|
29 |
-
from pip._internal.utils.urls import path_to_url
|
30 |
-
from pip._internal.vcs import is_url, vcs
|
31 |
-
|
32 |
-
__all__ = [
|
33 |
-
"install_req_from_editable",
|
34 |
-
"install_req_from_line",
|
35 |
-
"parse_editable",
|
36 |
-
]
|
37 |
-
|
38 |
-
logger = logging.getLogger(__name__)
|
39 |
-
operators = Specifier._operators.keys()
|
40 |
-
|
41 |
-
|
42 |
-
def _strip_extras(path: str) -> Tuple[str, Optional[str]]:
|
43 |
-
m = re.match(r"^(.+)(\[[^\]]+\])$", path)
|
44 |
-
extras = None
|
45 |
-
if m:
|
46 |
-
path_no_extras = m.group(1)
|
47 |
-
extras = m.group(2)
|
48 |
-
else:
|
49 |
-
path_no_extras = path
|
50 |
-
|
51 |
-
return path_no_extras, extras
|
52 |
-
|
53 |
-
|
54 |
-
def convert_extras(extras: Optional[str]) -> Set[str]:
|
55 |
-
if not extras:
|
56 |
-
return set()
|
57 |
-
return get_requirement("placeholder" + extras.lower()).extras
|
58 |
-
|
59 |
-
|
60 |
-
def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]:
|
61 |
-
"""Parses an editable requirement into:
|
62 |
-
- a requirement name
|
63 |
-
- an URL
|
64 |
-
- extras
|
65 |
-
- editable options
|
66 |
-
Accepted requirements:
|
67 |
-
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
|
68 |
-
.[some_extra]
|
69 |
-
"""
|
70 |
-
|
71 |
-
url = editable_req
|
72 |
-
|
73 |
-
# If a file path is specified with extras, strip off the extras.
|
74 |
-
url_no_extras, extras = _strip_extras(url)
|
75 |
-
|
76 |
-
if os.path.isdir(url_no_extras):
|
77 |
-
# Treating it as code that has already been checked out
|
78 |
-
url_no_extras = path_to_url(url_no_extras)
|
79 |
-
|
80 |
-
if url_no_extras.lower().startswith("file:"):
|
81 |
-
package_name = Link(url_no_extras).egg_fragment
|
82 |
-
if extras:
|
83 |
-
return (
|
84 |
-
package_name,
|
85 |
-
url_no_extras,
|
86 |
-
get_requirement("placeholder" + extras.lower()).extras,
|
87 |
-
)
|
88 |
-
else:
|
89 |
-
return package_name, url_no_extras, set()
|
90 |
-
|
91 |
-
for version_control in vcs:
|
92 |
-
if url.lower().startswith(f"{version_control}:"):
|
93 |
-
url = f"{version_control}+{url}"
|
94 |
-
break
|
95 |
-
|
96 |
-
link = Link(url)
|
97 |
-
|
98 |
-
if not link.is_vcs:
|
99 |
-
backends = ", ".join(vcs.all_schemes)
|
100 |
-
raise InstallationError(
|
101 |
-
f"{editable_req} is not a valid editable requirement. "
|
102 |
-
f"It should either be a path to a local project or a VCS URL "
|
103 |
-
f"(beginning with {backends})."
|
104 |
-
)
|
105 |
-
|
106 |
-
package_name = link.egg_fragment
|
107 |
-
if not package_name:
|
108 |
-
raise InstallationError(
|
109 |
-
"Could not detect requirement name for '{}', please specify one "
|
110 |
-
"with #egg=your_package_name".format(editable_req)
|
111 |
-
)
|
112 |
-
return package_name, url, set()
|
113 |
-
|
114 |
-
|
115 |
-
def check_first_requirement_in_file(filename: str) -> None:
|
116 |
-
"""Check if file is parsable as a requirements file.
|
117 |
-
|
118 |
-
This is heavily based on ``pkg_resources.parse_requirements``, but
|
119 |
-
simplified to just check the first meaningful line.
|
120 |
-
|
121 |
-
:raises InvalidRequirement: If the first meaningful line cannot be parsed
|
122 |
-
as an requirement.
|
123 |
-
"""
|
124 |
-
with open(filename, encoding="utf-8", errors="ignore") as f:
|
125 |
-
# Create a steppable iterator, so we can handle \-continuations.
|
126 |
-
lines = (
|
127 |
-
line
|
128 |
-
for line in (line.strip() for line in f)
|
129 |
-
if line and not line.startswith("#") # Skip blank lines/comments.
|
130 |
-
)
|
131 |
-
|
132 |
-
for line in lines:
|
133 |
-
# Drop comments -- a hash without a space may be in a URL.
|
134 |
-
if " #" in line:
|
135 |
-
line = line[: line.find(" #")]
|
136 |
-
# If there is a line continuation, drop it, and append the next line.
|
137 |
-
if line.endswith("\\"):
|
138 |
-
line = line[:-2].strip() + next(lines, "")
|
139 |
-
Requirement(line)
|
140 |
-
return
|
141 |
-
|
142 |
-
|
143 |
-
def deduce_helpful_msg(req: str) -> str:
|
144 |
-
"""Returns helpful msg in case requirements file does not exist,
|
145 |
-
or cannot be parsed.
|
146 |
-
|
147 |
-
:params req: Requirements file path
|
148 |
-
"""
|
149 |
-
if not os.path.exists(req):
|
150 |
-
return f" File '{req}' does not exist."
|
151 |
-
msg = " The path does exist. "
|
152 |
-
# Try to parse and check if it is a requirements file.
|
153 |
-
try:
|
154 |
-
check_first_requirement_in_file(req)
|
155 |
-
except InvalidRequirement:
|
156 |
-
logger.debug("Cannot parse '%s' as requirements file", req)
|
157 |
-
else:
|
158 |
-
msg += (
|
159 |
-
f"The argument you provided "
|
160 |
-
f"({req}) appears to be a"
|
161 |
-
f" requirements file. If that is the"
|
162 |
-
f" case, use the '-r' flag to install"
|
163 |
-
f" the packages specified within it."
|
164 |
-
)
|
165 |
-
return msg
|
166 |
-
|
167 |
-
|
168 |
-
class RequirementParts:
|
169 |
-
def __init__(
|
170 |
-
self,
|
171 |
-
requirement: Optional[Requirement],
|
172 |
-
link: Optional[Link],
|
173 |
-
markers: Optional[Marker],
|
174 |
-
extras: Set[str],
|
175 |
-
):
|
176 |
-
self.requirement = requirement
|
177 |
-
self.link = link
|
178 |
-
self.markers = markers
|
179 |
-
self.extras = extras
|
180 |
-
|
181 |
-
|
182 |
-
def parse_req_from_editable(editable_req: str) -> RequirementParts:
|
183 |
-
name, url, extras_override = parse_editable(editable_req)
|
184 |
-
|
185 |
-
if name is not None:
|
186 |
-
try:
|
187 |
-
req: Optional[Requirement] = Requirement(name)
|
188 |
-
except InvalidRequirement:
|
189 |
-
raise InstallationError(f"Invalid requirement: '{name}'")
|
190 |
-
else:
|
191 |
-
req = None
|
192 |
-
|
193 |
-
link = Link(url)
|
194 |
-
|
195 |
-
return RequirementParts(req, link, None, extras_override)
|
196 |
-
|
197 |
-
|
198 |
-
# ---- The actual constructors follow ----
|
199 |
-
|
200 |
-
|
201 |
-
def install_req_from_editable(
|
202 |
-
editable_req: str,
|
203 |
-
comes_from: Optional[Union[InstallRequirement, str]] = None,
|
204 |
-
*,
|
205 |
-
use_pep517: Optional[bool] = None,
|
206 |
-
isolated: bool = False,
|
207 |
-
global_options: Optional[List[str]] = None,
|
208 |
-
hash_options: Optional[Dict[str, List[str]]] = None,
|
209 |
-
constraint: bool = False,
|
210 |
-
user_supplied: bool = False,
|
211 |
-
permit_editable_wheels: bool = False,
|
212 |
-
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
|
213 |
-
) -> InstallRequirement:
|
214 |
-
parts = parse_req_from_editable(editable_req)
|
215 |
-
|
216 |
-
return InstallRequirement(
|
217 |
-
parts.requirement,
|
218 |
-
comes_from=comes_from,
|
219 |
-
user_supplied=user_supplied,
|
220 |
-
editable=True,
|
221 |
-
permit_editable_wheels=permit_editable_wheels,
|
222 |
-
link=parts.link,
|
223 |
-
constraint=constraint,
|
224 |
-
use_pep517=use_pep517,
|
225 |
-
isolated=isolated,
|
226 |
-
global_options=global_options,
|
227 |
-
hash_options=hash_options,
|
228 |
-
config_settings=config_settings,
|
229 |
-
extras=parts.extras,
|
230 |
-
)
|
231 |
-
|
232 |
-
|
233 |
-
def _looks_like_path(name: str) -> bool:
|
234 |
-
"""Checks whether the string "looks like" a path on the filesystem.
|
235 |
-
|
236 |
-
This does not check whether the target actually exists, only judge from the
|
237 |
-
appearance.
|
238 |
-
|
239 |
-
Returns true if any of the following conditions is true:
|
240 |
-
* a path separator is found (either os.path.sep or os.path.altsep);
|
241 |
-
* a dot is found (which represents the current directory).
|
242 |
-
"""
|
243 |
-
if os.path.sep in name:
|
244 |
-
return True
|
245 |
-
if os.path.altsep is not None and os.path.altsep in name:
|
246 |
-
return True
|
247 |
-
if name.startswith("."):
|
248 |
-
return True
|
249 |
-
return False
|
250 |
-
|
251 |
-
|
252 |
-
def _get_url_from_path(path: str, name: str) -> Optional[str]:
|
253 |
-
"""
|
254 |
-
First, it checks whether a provided path is an installable directory. If it
|
255 |
-
is, returns the path.
|
256 |
-
|
257 |
-
If false, check if the path is an archive file (such as a .whl).
|
258 |
-
The function checks if the path is a file. If false, if the path has
|
259 |
-
an @, it will treat it as a PEP 440 URL requirement and return the path.
|
260 |
-
"""
|
261 |
-
if _looks_like_path(name) and os.path.isdir(path):
|
262 |
-
if is_installable_dir(path):
|
263 |
-
return path_to_url(path)
|
264 |
-
# TODO: The is_installable_dir test here might not be necessary
|
265 |
-
# now that it is done in load_pyproject_toml too.
|
266 |
-
raise InstallationError(
|
267 |
-
f"Directory {name!r} is not installable. Neither 'setup.py' "
|
268 |
-
"nor 'pyproject.toml' found."
|
269 |
-
)
|
270 |
-
if not is_archive_file(path):
|
271 |
-
return None
|
272 |
-
if os.path.isfile(path):
|
273 |
-
return path_to_url(path)
|
274 |
-
urlreq_parts = name.split("@", 1)
|
275 |
-
if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
|
276 |
-
# If the path contains '@' and the part before it does not look
|
277 |
-
# like a path, try to treat it as a PEP 440 URL req instead.
|
278 |
-
return None
|
279 |
-
logger.warning(
|
280 |
-
"Requirement %r looks like a filename, but the file does not exist",
|
281 |
-
name,
|
282 |
-
)
|
283 |
-
return path_to_url(path)
|
284 |
-
|
285 |
-
|
286 |
-
def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts:
|
287 |
-
if is_url(name):
|
288 |
-
marker_sep = "; "
|
289 |
-
else:
|
290 |
-
marker_sep = ";"
|
291 |
-
if marker_sep in name:
|
292 |
-
name, markers_as_string = name.split(marker_sep, 1)
|
293 |
-
markers_as_string = markers_as_string.strip()
|
294 |
-
if not markers_as_string:
|
295 |
-
markers = None
|
296 |
-
else:
|
297 |
-
markers = Marker(markers_as_string)
|
298 |
-
else:
|
299 |
-
markers = None
|
300 |
-
name = name.strip()
|
301 |
-
req_as_string = None
|
302 |
-
path = os.path.normpath(os.path.abspath(name))
|
303 |
-
link = None
|
304 |
-
extras_as_string = None
|
305 |
-
|
306 |
-
if is_url(name):
|
307 |
-
link = Link(name)
|
308 |
-
else:
|
309 |
-
p, extras_as_string = _strip_extras(path)
|
310 |
-
url = _get_url_from_path(p, name)
|
311 |
-
if url is not None:
|
312 |
-
link = Link(url)
|
313 |
-
|
314 |
-
# it's a local file, dir, or url
|
315 |
-
if link:
|
316 |
-
# Handle relative file URLs
|
317 |
-
if link.scheme == "file" and re.search(r"\.\./", link.url):
|
318 |
-
link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path))))
|
319 |
-
# wheel file
|
320 |
-
if link.is_wheel:
|
321 |
-
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
|
322 |
-
req_as_string = f"{wheel.name}=={wheel.version}"
|
323 |
-
else:
|
324 |
-
# set the req to the egg fragment. when it's not there, this
|
325 |
-
# will become an 'unnamed' requirement
|
326 |
-
req_as_string = link.egg_fragment
|
327 |
-
|
328 |
-
# a requirement specifier
|
329 |
-
else:
|
330 |
-
req_as_string = name
|
331 |
-
|
332 |
-
extras = convert_extras(extras_as_string)
|
333 |
-
|
334 |
-
def with_source(text: str) -> str:
|
335 |
-
if not line_source:
|
336 |
-
return text
|
337 |
-
return f"{text} (from {line_source})"
|
338 |
-
|
339 |
-
def _parse_req_string(req_as_string: str) -> Requirement:
|
340 |
-
try:
|
341 |
-
req = get_requirement(req_as_string)
|
342 |
-
except InvalidRequirement:
|
343 |
-
if os.path.sep in req_as_string:
|
344 |
-
add_msg = "It looks like a path."
|
345 |
-
add_msg += deduce_helpful_msg(req_as_string)
|
346 |
-
elif "=" in req_as_string and not any(
|
347 |
-
op in req_as_string for op in operators
|
348 |
-
):
|
349 |
-
add_msg = "= is not a valid operator. Did you mean == ?"
|
350 |
-
else:
|
351 |
-
add_msg = ""
|
352 |
-
msg = with_source(f"Invalid requirement: {req_as_string!r}")
|
353 |
-
if add_msg:
|
354 |
-
msg += f"\nHint: {add_msg}"
|
355 |
-
raise InstallationError(msg)
|
356 |
-
else:
|
357 |
-
# Deprecate extras after specifiers: "name>=1.0[extras]"
|
358 |
-
# This currently works by accident because _strip_extras() parses
|
359 |
-
# any extras in the end of the string and those are saved in
|
360 |
-
# RequirementParts
|
361 |
-
for spec in req.specifier:
|
362 |
-
spec_str = str(spec)
|
363 |
-
if spec_str.endswith("]"):
|
364 |
-
msg = f"Extras after version '{spec_str}'."
|
365 |
-
raise InstallationError(msg)
|
366 |
-
return req
|
367 |
-
|
368 |
-
if req_as_string is not None:
|
369 |
-
req: Optional[Requirement] = _parse_req_string(req_as_string)
|
370 |
-
else:
|
371 |
-
req = None
|
372 |
-
|
373 |
-
return RequirementParts(req, link, markers, extras)
|
374 |
-
|
375 |
-
|
376 |
-
def install_req_from_line(
|
377 |
-
name: str,
|
378 |
-
comes_from: Optional[Union[str, InstallRequirement]] = None,
|
379 |
-
*,
|
380 |
-
use_pep517: Optional[bool] = None,
|
381 |
-
isolated: bool = False,
|
382 |
-
global_options: Optional[List[str]] = None,
|
383 |
-
hash_options: Optional[Dict[str, List[str]]] = None,
|
384 |
-
constraint: bool = False,
|
385 |
-
line_source: Optional[str] = None,
|
386 |
-
user_supplied: bool = False,
|
387 |
-
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
|
388 |
-
) -> InstallRequirement:
|
389 |
-
"""Creates an InstallRequirement from a name, which might be a
|
390 |
-
requirement, directory containing 'setup.py', filename, or URL.
|
391 |
-
|
392 |
-
:param line_source: An optional string describing where the line is from,
|
393 |
-
for logging purposes in case of an error.
|
394 |
-
"""
|
395 |
-
parts = parse_req_from_line(name, line_source)
|
396 |
-
|
397 |
-
return InstallRequirement(
|
398 |
-
parts.requirement,
|
399 |
-
comes_from,
|
400 |
-
link=parts.link,
|
401 |
-
markers=parts.markers,
|
402 |
-
use_pep517=use_pep517,
|
403 |
-
isolated=isolated,
|
404 |
-
global_options=global_options,
|
405 |
-
hash_options=hash_options,
|
406 |
-
config_settings=config_settings,
|
407 |
-
constraint=constraint,
|
408 |
-
extras=parts.extras,
|
409 |
-
user_supplied=user_supplied,
|
410 |
-
)
|
411 |
-
|
412 |
-
|
413 |
-
def install_req_from_req_string(
|
414 |
-
req_string: str,
|
415 |
-
comes_from: Optional[InstallRequirement] = None,
|
416 |
-
isolated: bool = False,
|
417 |
-
use_pep517: Optional[bool] = None,
|
418 |
-
user_supplied: bool = False,
|
419 |
-
) -> InstallRequirement:
|
420 |
-
try:
|
421 |
-
req = get_requirement(req_string)
|
422 |
-
except InvalidRequirement:
|
423 |
-
raise InstallationError(f"Invalid requirement: '{req_string}'")
|
424 |
-
|
425 |
-
domains_not_allowed = [
|
426 |
-
PyPI.file_storage_domain,
|
427 |
-
TestPyPI.file_storage_domain,
|
428 |
-
]
|
429 |
-
if (
|
430 |
-
req.url
|
431 |
-
and comes_from
|
432 |
-
and comes_from.link
|
433 |
-
and comes_from.link.netloc in domains_not_allowed
|
434 |
-
):
|
435 |
-
# Explicitly disallow pypi packages that depend on external urls
|
436 |
-
raise InstallationError(
|
437 |
-
"Packages installed from PyPI cannot depend on packages "
|
438 |
-
"which are not also hosted on PyPI.\n"
|
439 |
-
"{} depends on {} ".format(comes_from.name, req)
|
440 |
-
)
|
441 |
-
|
442 |
-
return InstallRequirement(
|
443 |
-
req,
|
444 |
-
comes_from,
|
445 |
-
isolated=isolated,
|
446 |
-
use_pep517=use_pep517,
|
447 |
-
user_supplied=user_supplied,
|
448 |
-
)
|
449 |
-
|
450 |
-
|
451 |
-
def install_req_from_parsed_requirement(
|
452 |
-
parsed_req: ParsedRequirement,
|
453 |
-
isolated: bool = False,
|
454 |
-
use_pep517: Optional[bool] = None,
|
455 |
-
user_supplied: bool = False,
|
456 |
-
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
|
457 |
-
) -> InstallRequirement:
|
458 |
-
if parsed_req.is_editable:
|
459 |
-
req = install_req_from_editable(
|
460 |
-
parsed_req.requirement,
|
461 |
-
comes_from=parsed_req.comes_from,
|
462 |
-
use_pep517=use_pep517,
|
463 |
-
constraint=parsed_req.constraint,
|
464 |
-
isolated=isolated,
|
465 |
-
user_supplied=user_supplied,
|
466 |
-
config_settings=config_settings,
|
467 |
-
)
|
468 |
-
|
469 |
-
else:
|
470 |
-
req = install_req_from_line(
|
471 |
-
parsed_req.requirement,
|
472 |
-
comes_from=parsed_req.comes_from,
|
473 |
-
use_pep517=use_pep517,
|
474 |
-
isolated=isolated,
|
475 |
-
global_options=(
|
476 |
-
parsed_req.options.get("global_options", [])
|
477 |
-
if parsed_req.options
|
478 |
-
else []
|
479 |
-
),
|
480 |
-
hash_options=(
|
481 |
-
parsed_req.options.get("hashes", {}) if parsed_req.options else {}
|
482 |
-
),
|
483 |
-
constraint=parsed_req.constraint,
|
484 |
-
line_source=parsed_req.line_source,
|
485 |
-
user_supplied=user_supplied,
|
486 |
-
config_settings=config_settings,
|
487 |
-
)
|
488 |
-
return req
|
489 |
-
|
490 |
-
|
491 |
-
def install_req_from_link_and_ireq(
|
492 |
-
link: Link, ireq: InstallRequirement
|
493 |
-
) -> InstallRequirement:
|
494 |
-
return InstallRequirement(
|
495 |
-
req=ireq.req,
|
496 |
-
comes_from=ireq.comes_from,
|
497 |
-
editable=ireq.editable,
|
498 |
-
link=link,
|
499 |
-
markers=ireq.markers,
|
500 |
-
use_pep517=ireq.use_pep517,
|
501 |
-
isolated=ireq.isolated,
|
502 |
-
global_options=ireq.global_options,
|
503 |
-
hash_options=ireq.hash_options,
|
504 |
-
config_settings=ireq.config_settings,
|
505 |
-
user_supplied=ireq.user_supplied,
|
506 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/resultdict.py
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
from typing import TYPE_CHECKING, Optional
|
2 |
-
|
3 |
-
if TYPE_CHECKING:
|
4 |
-
# TypedDict was introduced in Python 3.8.
|
5 |
-
#
|
6 |
-
# TODO: Remove the else block and TYPE_CHECKING check when dropping support
|
7 |
-
# for Python 3.7.
|
8 |
-
from typing import TypedDict
|
9 |
-
|
10 |
-
class ResultDict(TypedDict):
|
11 |
-
encoding: Optional[str]
|
12 |
-
confidence: float
|
13 |
-
language: Optional[str]
|
14 |
-
|
15 |
-
else:
|
16 |
-
ResultDict = dict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/utf1632prober.py
DELETED
@@ -1,225 +0,0 @@
|
|
1 |
-
######################## BEGIN LICENSE BLOCK ########################
|
2 |
-
#
|
3 |
-
# Contributor(s):
|
4 |
-
# Jason Zavaglia
|
5 |
-
#
|
6 |
-
# This library is free software; you can redistribute it and/or
|
7 |
-
# modify it under the terms of the GNU Lesser General Public
|
8 |
-
# License as published by the Free Software Foundation; either
|
9 |
-
# version 2.1 of the License, or (at your option) any later version.
|
10 |
-
#
|
11 |
-
# This library is distributed in the hope that it will be useful,
|
12 |
-
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
13 |
-
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
14 |
-
# Lesser General Public License for more details.
|
15 |
-
#
|
16 |
-
# You should have received a copy of the GNU Lesser General Public
|
17 |
-
# License along with this library; if not, write to the Free Software
|
18 |
-
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
|
19 |
-
# 02110-1301 USA
|
20 |
-
######################### END LICENSE BLOCK #########################
|
21 |
-
from typing import List, Union
|
22 |
-
|
23 |
-
from .charsetprober import CharSetProber
|
24 |
-
from .enums import ProbingState
|
25 |
-
|
26 |
-
|
27 |
-
class UTF1632Prober(CharSetProber):
|
28 |
-
"""
|
29 |
-
This class simply looks for occurrences of zero bytes, and infers
|
30 |
-
whether the file is UTF16 or UTF32 (low-endian or big-endian)
|
31 |
-
For instance, files looking like ( \0 \0 \0 [nonzero] )+
|
32 |
-
have a good probability to be UTF32BE. Files looking like ( \0 [nonzero] )+
|
33 |
-
may be guessed to be UTF16BE, and inversely for little-endian varieties.
|
34 |
-
"""
|
35 |
-
|
36 |
-
# how many logical characters to scan before feeling confident of prediction
|
37 |
-
MIN_CHARS_FOR_DETECTION = 20
|
38 |
-
# a fixed constant ratio of expected zeros or non-zeros in modulo-position.
|
39 |
-
EXPECTED_RATIO = 0.94
|
40 |
-
|
41 |
-
def __init__(self) -> None:
|
42 |
-
super().__init__()
|
43 |
-
self.position = 0
|
44 |
-
self.zeros_at_mod = [0] * 4
|
45 |
-
self.nonzeros_at_mod = [0] * 4
|
46 |
-
self._state = ProbingState.DETECTING
|
47 |
-
self.quad = [0, 0, 0, 0]
|
48 |
-
self.invalid_utf16be = False
|
49 |
-
self.invalid_utf16le = False
|
50 |
-
self.invalid_utf32be = False
|
51 |
-
self.invalid_utf32le = False
|
52 |
-
self.first_half_surrogate_pair_detected_16be = False
|
53 |
-
self.first_half_surrogate_pair_detected_16le = False
|
54 |
-
self.reset()
|
55 |
-
|
56 |
-
def reset(self) -> None:
|
57 |
-
super().reset()
|
58 |
-
self.position = 0
|
59 |
-
self.zeros_at_mod = [0] * 4
|
60 |
-
self.nonzeros_at_mod = [0] * 4
|
61 |
-
self._state = ProbingState.DETECTING
|
62 |
-
self.invalid_utf16be = False
|
63 |
-
self.invalid_utf16le = False
|
64 |
-
self.invalid_utf32be = False
|
65 |
-
self.invalid_utf32le = False
|
66 |
-
self.first_half_surrogate_pair_detected_16be = False
|
67 |
-
self.first_half_surrogate_pair_detected_16le = False
|
68 |
-
self.quad = [0, 0, 0, 0]
|
69 |
-
|
70 |
-
@property
|
71 |
-
def charset_name(self) -> str:
|
72 |
-
if self.is_likely_utf32be():
|
73 |
-
return "utf-32be"
|
74 |
-
if self.is_likely_utf32le():
|
75 |
-
return "utf-32le"
|
76 |
-
if self.is_likely_utf16be():
|
77 |
-
return "utf-16be"
|
78 |
-
if self.is_likely_utf16le():
|
79 |
-
return "utf-16le"
|
80 |
-
# default to something valid
|
81 |
-
return "utf-16"
|
82 |
-
|
83 |
-
@property
|
84 |
-
def language(self) -> str:
|
85 |
-
return ""
|
86 |
-
|
87 |
-
def approx_32bit_chars(self) -> float:
|
88 |
-
return max(1.0, self.position / 4.0)
|
89 |
-
|
90 |
-
def approx_16bit_chars(self) -> float:
|
91 |
-
return max(1.0, self.position / 2.0)
|
92 |
-
|
93 |
-
def is_likely_utf32be(self) -> bool:
|
94 |
-
approx_chars = self.approx_32bit_chars()
|
95 |
-
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
|
96 |
-
self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
|
97 |
-
and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
|
98 |
-
and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
|
99 |
-
and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
|
100 |
-
and not self.invalid_utf32be
|
101 |
-
)
|
102 |
-
|
103 |
-
def is_likely_utf32le(self) -> bool:
|
104 |
-
approx_chars = self.approx_32bit_chars()
|
105 |
-
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
|
106 |
-
self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
|
107 |
-
and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
|
108 |
-
and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
|
109 |
-
and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
|
110 |
-
and not self.invalid_utf32le
|
111 |
-
)
|
112 |
-
|
113 |
-
def is_likely_utf16be(self) -> bool:
|
114 |
-
approx_chars = self.approx_16bit_chars()
|
115 |
-
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
|
116 |
-
(self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
|
117 |
-
> self.EXPECTED_RATIO
|
118 |
-
and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
|
119 |
-
> self.EXPECTED_RATIO
|
120 |
-
and not self.invalid_utf16be
|
121 |
-
)
|
122 |
-
|
123 |
-
def is_likely_utf16le(self) -> bool:
|
124 |
-
approx_chars = self.approx_16bit_chars()
|
125 |
-
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
|
126 |
-
(self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
|
127 |
-
> self.EXPECTED_RATIO
|
128 |
-
and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
|
129 |
-
> self.EXPECTED_RATIO
|
130 |
-
and not self.invalid_utf16le
|
131 |
-
)
|
132 |
-
|
133 |
-
def validate_utf32_characters(self, quad: List[int]) -> None:
|
134 |
-
"""
|
135 |
-
Validate if the quad of bytes is valid UTF-32.
|
136 |
-
|
137 |
-
UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
|
138 |
-
excluding 0x0000D800 - 0x0000DFFF
|
139 |
-
|
140 |
-
https://en.wikipedia.org/wiki/UTF-32
|
141 |
-
"""
|
142 |
-
if (
|
143 |
-
quad[0] != 0
|
144 |
-
or quad[1] > 0x10
|
145 |
-
or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
|
146 |
-
):
|
147 |
-
self.invalid_utf32be = True
|
148 |
-
if (
|
149 |
-
quad[3] != 0
|
150 |
-
or quad[2] > 0x10
|
151 |
-
or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
|
152 |
-
):
|
153 |
-
self.invalid_utf32le = True
|
154 |
-
|
155 |
-
def validate_utf16_characters(self, pair: List[int]) -> None:
|
156 |
-
"""
|
157 |
-
Validate if the pair of bytes is valid UTF-16.
|
158 |
-
|
159 |
-
UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xFFFF
|
160 |
-
with an exception for surrogate pairs, which must be in the range
|
161 |
-
0xD800-0xDBFF followed by 0xDC00-0xDFFF
|
162 |
-
|
163 |
-
https://en.wikipedia.org/wiki/UTF-16
|
164 |
-
"""
|
165 |
-
if not self.first_half_surrogate_pair_detected_16be:
|
166 |
-
if 0xD8 <= pair[0] <= 0xDB:
|
167 |
-
self.first_half_surrogate_pair_detected_16be = True
|
168 |
-
elif 0xDC <= pair[0] <= 0xDF:
|
169 |
-
self.invalid_utf16be = True
|
170 |
-
else:
|
171 |
-
if 0xDC <= pair[0] <= 0xDF:
|
172 |
-
self.first_half_surrogate_pair_detected_16be = False
|
173 |
-
else:
|
174 |
-
self.invalid_utf16be = True
|
175 |
-
|
176 |
-
if not self.first_half_surrogate_pair_detected_16le:
|
177 |
-
if 0xD8 <= pair[1] <= 0xDB:
|
178 |
-
self.first_half_surrogate_pair_detected_16le = True
|
179 |
-
elif 0xDC <= pair[1] <= 0xDF:
|
180 |
-
self.invalid_utf16le = True
|
181 |
-
else:
|
182 |
-
if 0xDC <= pair[1] <= 0xDF:
|
183 |
-
self.first_half_surrogate_pair_detected_16le = False
|
184 |
-
else:
|
185 |
-
self.invalid_utf16le = True
|
186 |
-
|
187 |
-
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
|
188 |
-
for c in byte_str:
|
189 |
-
mod4 = self.position % 4
|
190 |
-
self.quad[mod4] = c
|
191 |
-
if mod4 == 3:
|
192 |
-
self.validate_utf32_characters(self.quad)
|
193 |
-
self.validate_utf16_characters(self.quad[0:2])
|
194 |
-
self.validate_utf16_characters(self.quad[2:4])
|
195 |
-
if c == 0:
|
196 |
-
self.zeros_at_mod[mod4] += 1
|
197 |
-
else:
|
198 |
-
self.nonzeros_at_mod[mod4] += 1
|
199 |
-
self.position += 1
|
200 |
-
return self.state
|
201 |
-
|
202 |
-
@property
|
203 |
-
def state(self) -> ProbingState:
|
204 |
-
if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
|
205 |
-
# terminal, decided states
|
206 |
-
return self._state
|
207 |
-
if self.get_confidence() > 0.80:
|
208 |
-
self._state = ProbingState.FOUND_IT
|
209 |
-
elif self.position > 4 * 1024:
|
210 |
-
# if we get to 4kb into the file, and we can't conclude it's UTF,
|
211 |
-
# let's give up
|
212 |
-
self._state = ProbingState.NOT_ME
|
213 |
-
return self._state
|
214 |
-
|
215 |
-
def get_confidence(self) -> float:
|
216 |
-
return (
|
217 |
-
0.85
|
218 |
-
if (
|
219 |
-
self.is_likely_utf16le()
|
220 |
-
or self.is_likely_utf16be()
|
221 |
-
or self.is_likely_utf32le()
|
222 |
-
or self.is_likely_utf32be()
|
223 |
-
)
|
224 |
-
else 0.00
|
225 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/helpers.py
DELETED
@@ -1,1088 +0,0 @@
|
|
1 |
-
# helpers.py
|
2 |
-
import html.entities
|
3 |
-
import re
|
4 |
-
import typing
|
5 |
-
|
6 |
-
from . import __diag__
|
7 |
-
from .core import *
|
8 |
-
from .util import _bslash, _flatten, _escape_regex_range_chars
|
9 |
-
|
10 |
-
|
11 |
-
#
|
12 |
-
# global helpers
|
13 |
-
#
|
14 |
-
def delimited_list(
|
15 |
-
expr: Union[str, ParserElement],
|
16 |
-
delim: Union[str, ParserElement] = ",",
|
17 |
-
combine: bool = False,
|
18 |
-
min: typing.Optional[int] = None,
|
19 |
-
max: typing.Optional[int] = None,
|
20 |
-
*,
|
21 |
-
allow_trailing_delim: bool = False,
|
22 |
-
) -> ParserElement:
|
23 |
-
"""Helper to define a delimited list of expressions - the delimiter
|
24 |
-
defaults to ','. By default, the list elements and delimiters can
|
25 |
-
have intervening whitespace, and comments, but this can be
|
26 |
-
overridden by passing ``combine=True`` in the constructor. If
|
27 |
-
``combine`` is set to ``True``, the matching tokens are
|
28 |
-
returned as a single token string, with the delimiters included;
|
29 |
-
otherwise, the matching tokens are returned as a list of tokens,
|
30 |
-
with the delimiters suppressed.
|
31 |
-
|
32 |
-
If ``allow_trailing_delim`` is set to True, then the list may end with
|
33 |
-
a delimiter.
|
34 |
-
|
35 |
-
Example::
|
36 |
-
|
37 |
-
delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
|
38 |
-
delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
|
39 |
-
"""
|
40 |
-
if isinstance(expr, str_type):
|
41 |
-
expr = ParserElement._literalStringClass(expr)
|
42 |
-
|
43 |
-
dlName = "{expr} [{delim} {expr}]...{end}".format(
|
44 |
-
expr=str(expr.copy().streamline()),
|
45 |
-
delim=str(delim),
|
46 |
-
end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
|
47 |
-
)
|
48 |
-
|
49 |
-
if not combine:
|
50 |
-
delim = Suppress(delim)
|
51 |
-
|
52 |
-
if min is not None:
|
53 |
-
if min < 1:
|
54 |
-
raise ValueError("min must be greater than 0")
|
55 |
-
min -= 1
|
56 |
-
if max is not None:
|
57 |
-
if min is not None and max <= min:
|
58 |
-
raise ValueError("max must be greater than, or equal to min")
|
59 |
-
max -= 1
|
60 |
-
delimited_list_expr = expr + (delim + expr)[min, max]
|
61 |
-
|
62 |
-
if allow_trailing_delim:
|
63 |
-
delimited_list_expr += Opt(delim)
|
64 |
-
|
65 |
-
if combine:
|
66 |
-
return Combine(delimited_list_expr).set_name(dlName)
|
67 |
-
else:
|
68 |
-
return delimited_list_expr.set_name(dlName)
|
69 |
-
|
70 |
-
|
71 |
-
def counted_array(
|
72 |
-
expr: ParserElement,
|
73 |
-
int_expr: typing.Optional[ParserElement] = None,
|
74 |
-
*,
|
75 |
-
intExpr: typing.Optional[ParserElement] = None,
|
76 |
-
) -> ParserElement:
|
77 |
-
"""Helper to define a counted list of expressions.
|
78 |
-
|
79 |
-
This helper defines a pattern of the form::
|
80 |
-
|
81 |
-
integer expr expr expr...
|
82 |
-
|
83 |
-
where the leading integer tells how many expr expressions follow.
|
84 |
-
The matched tokens returns the array of expr tokens as a list - the
|
85 |
-
leading count token is suppressed.
|
86 |
-
|
87 |
-
If ``int_expr`` is specified, it should be a pyparsing expression
|
88 |
-
that produces an integer value.
|
89 |
-
|
90 |
-
Example::
|
91 |
-
|
92 |
-
counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
|
93 |
-
|
94 |
-
# in this parser, the leading integer value is given in binary,
|
95 |
-
# '10' indicating that 2 values are in the array
|
96 |
-
binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
|
97 |
-
counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
|
98 |
-
|
99 |
-
# if other fields must be parsed after the count but before the
|
100 |
-
# list items, give the fields results names and they will
|
101 |
-
# be preserved in the returned ParseResults:
|
102 |
-
count_with_metadata = integer + Word(alphas)("type")
|
103 |
-
typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
|
104 |
-
result = typed_array.parse_string("3 bool True True False")
|
105 |
-
print(result.dump())
|
106 |
-
|
107 |
-
# prints
|
108 |
-
# ['True', 'True', 'False']
|
109 |
-
# - items: ['True', 'True', 'False']
|
110 |
-
# - type: 'bool'
|
111 |
-
"""
|
112 |
-
intExpr = intExpr or int_expr
|
113 |
-
array_expr = Forward()
|
114 |
-
|
115 |
-
def count_field_parse_action(s, l, t):
|
116 |
-
nonlocal array_expr
|
117 |
-
n = t[0]
|
118 |
-
array_expr <<= (expr * n) if n else Empty()
|
119 |
-
# clear list contents, but keep any named results
|
120 |
-
del t[:]
|
121 |
-
|
122 |
-
if intExpr is None:
|
123 |
-
intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
|
124 |
-
else:
|
125 |
-
intExpr = intExpr.copy()
|
126 |
-
intExpr.set_name("arrayLen")
|
127 |
-
intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
|
128 |
-
return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
|
129 |
-
|
130 |
-
|
131 |
-
def match_previous_literal(expr: ParserElement) -> ParserElement:
|
132 |
-
"""Helper to define an expression that is indirectly defined from
|
133 |
-
the tokens matched in a previous expression, that is, it looks for
|
134 |
-
a 'repeat' of a previous expression. For example::
|
135 |
-
|
136 |
-
first = Word(nums)
|
137 |
-
second = match_previous_literal(first)
|
138 |
-
match_expr = first + ":" + second
|
139 |
-
|
140 |
-
will match ``"1:1"``, but not ``"1:2"``. Because this
|
141 |
-
matches a previous literal, will also match the leading
|
142 |
-
``"1:1"`` in ``"1:10"``. If this is not desired, use
|
143 |
-
:class:`match_previous_expr`. Do *not* use with packrat parsing
|
144 |
-
enabled.
|
145 |
-
"""
|
146 |
-
rep = Forward()
|
147 |
-
|
148 |
-
def copy_token_to_repeater(s, l, t):
|
149 |
-
if t:
|
150 |
-
if len(t) == 1:
|
151 |
-
rep << t[0]
|
152 |
-
else:
|
153 |
-
# flatten t tokens
|
154 |
-
tflat = _flatten(t.as_list())
|
155 |
-
rep << And(Literal(tt) for tt in tflat)
|
156 |
-
else:
|
157 |
-
rep << Empty()
|
158 |
-
|
159 |
-
expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
|
160 |
-
rep.set_name("(prev) " + str(expr))
|
161 |
-
return rep
|
162 |
-
|
163 |
-
|
164 |
-
def match_previous_expr(expr: ParserElement) -> ParserElement:
|
165 |
-
"""Helper to define an expression that is indirectly defined from
|
166 |
-
the tokens matched in a previous expression, that is, it looks for
|
167 |
-
a 'repeat' of a previous expression. For example::
|
168 |
-
|
169 |
-
first = Word(nums)
|
170 |
-
second = match_previous_expr(first)
|
171 |
-
match_expr = first + ":" + second
|
172 |
-
|
173 |
-
will match ``"1:1"``, but not ``"1:2"``. Because this
|
174 |
-
matches by expressions, will *not* match the leading ``"1:1"``
|
175 |
-
in ``"1:10"``; the expressions are evaluated first, and then
|
176 |
-
compared, so ``"1"`` is compared with ``"10"``. Do *not* use
|
177 |
-
with packrat parsing enabled.
|
178 |
-
"""
|
179 |
-
rep = Forward()
|
180 |
-
e2 = expr.copy()
|
181 |
-
rep <<= e2
|
182 |
-
|
183 |
-
def copy_token_to_repeater(s, l, t):
|
184 |
-
matchTokens = _flatten(t.as_list())
|
185 |
-
|
186 |
-
def must_match_these_tokens(s, l, t):
|
187 |
-
theseTokens = _flatten(t.as_list())
|
188 |
-
if theseTokens != matchTokens:
|
189 |
-
raise ParseException(
|
190 |
-
s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
|
191 |
-
)
|
192 |
-
|
193 |
-
rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
|
194 |
-
|
195 |
-
expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
|
196 |
-
rep.set_name("(prev) " + str(expr))
|
197 |
-
return rep
|
198 |
-
|
199 |
-
|
200 |
-
def one_of(
|
201 |
-
strs: Union[typing.Iterable[str], str],
|
202 |
-
caseless: bool = False,
|
203 |
-
use_regex: bool = True,
|
204 |
-
as_keyword: bool = False,
|
205 |
-
*,
|
206 |
-
useRegex: bool = True,
|
207 |
-
asKeyword: bool = False,
|
208 |
-
) -> ParserElement:
|
209 |
-
"""Helper to quickly define a set of alternative :class:`Literal` s,
|
210 |
-
and makes sure to do longest-first testing when there is a conflict,
|
211 |
-
regardless of the input order, but returns
|
212 |
-
a :class:`MatchFirst` for best performance.
|
213 |
-
|
214 |
-
Parameters:
|
215 |
-
|
216 |
-
- ``strs`` - a string of space-delimited literals, or a collection of
|
217 |
-
string literals
|
218 |
-
- ``caseless`` - treat all literals as caseless - (default= ``False``)
|
219 |
-
- ``use_regex`` - as an optimization, will
|
220 |
-
generate a :class:`Regex` object; otherwise, will generate
|
221 |
-
a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
|
222 |
-
creating a :class:`Regex` raises an exception) - (default= ``True``)
|
223 |
-
- ``as_keyword`` - enforce :class:`Keyword`-style matching on the
|
224 |
-
generated expressions - (default= ``False``)
|
225 |
-
- ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
|
226 |
-
but will be removed in a future release
|
227 |
-
|
228 |
-
Example::
|
229 |
-
|
230 |
-
comp_oper = one_of("< = > <= >= !=")
|
231 |
-
var = Word(alphas)
|
232 |
-
number = Word(nums)
|
233 |
-
term = var | number
|
234 |
-
comparison_expr = term + comp_oper + term
|
235 |
-
print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
|
236 |
-
|
237 |
-
prints::
|
238 |
-
|
239 |
-
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
|
240 |
-
"""
|
241 |
-
asKeyword = asKeyword or as_keyword
|
242 |
-
useRegex = useRegex and use_regex
|
243 |
-
|
244 |
-
if (
|
245 |
-
isinstance(caseless, str_type)
|
246 |
-
and __diag__.warn_on_multiple_string_args_to_oneof
|
247 |
-
):
|
248 |
-
warnings.warn(
|
249 |
-
"More than one string argument passed to one_of, pass"
|
250 |
-
" choices as a list or space-delimited string",
|
251 |
-
stacklevel=2,
|
252 |
-
)
|
253 |
-
|
254 |
-
if caseless:
|
255 |
-
isequal = lambda a, b: a.upper() == b.upper()
|
256 |
-
masks = lambda a, b: b.upper().startswith(a.upper())
|
257 |
-
parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
|
258 |
-
else:
|
259 |
-
isequal = lambda a, b: a == b
|
260 |
-
masks = lambda a, b: b.startswith(a)
|
261 |
-
parseElementClass = Keyword if asKeyword else Literal
|
262 |
-
|
263 |
-
symbols: List[str] = []
|
264 |
-
if isinstance(strs, str_type):
|
265 |
-
symbols = strs.split()
|
266 |
-
elif isinstance(strs, Iterable):
|
267 |
-
symbols = list(strs)
|
268 |
-
else:
|
269 |
-
raise TypeError("Invalid argument to one_of, expected string or iterable")
|
270 |
-
if not symbols:
|
271 |
-
return NoMatch()
|
272 |
-
|
273 |
-
# reorder given symbols to take care to avoid masking longer choices with shorter ones
|
274 |
-
# (but only if the given symbols are not just single characters)
|
275 |
-
if any(len(sym) > 1 for sym in symbols):
|
276 |
-
i = 0
|
277 |
-
while i < len(symbols) - 1:
|
278 |
-
cur = symbols[i]
|
279 |
-
for j, other in enumerate(symbols[i + 1 :]):
|
280 |
-
if isequal(other, cur):
|
281 |
-
del symbols[i + j + 1]
|
282 |
-
break
|
283 |
-
elif masks(cur, other):
|
284 |
-
del symbols[i + j + 1]
|
285 |
-
symbols.insert(i, other)
|
286 |
-
break
|
287 |
-
else:
|
288 |
-
i += 1
|
289 |
-
|
290 |
-
if useRegex:
|
291 |
-
re_flags: int = re.IGNORECASE if caseless else 0
|
292 |
-
|
293 |
-
try:
|
294 |
-
if all(len(sym) == 1 for sym in symbols):
|
295 |
-
# symbols are just single characters, create range regex pattern
|
296 |
-
patt = "[{}]".format(
|
297 |
-
"".join(_escape_regex_range_chars(sym) for sym in symbols)
|
298 |
-
)
|
299 |
-
else:
|
300 |
-
patt = "|".join(re.escape(sym) for sym in symbols)
|
301 |
-
|
302 |
-
# wrap with \b word break markers if defining as keywords
|
303 |
-
if asKeyword:
|
304 |
-
patt = r"\b(?:{})\b".format(patt)
|
305 |
-
|
306 |
-
ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
|
307 |
-
|
308 |
-
if caseless:
|
309 |
-
# add parse action to return symbols as specified, not in random
|
310 |
-
# casing as found in input string
|
311 |
-
symbol_map = {sym.lower(): sym for sym in symbols}
|
312 |
-
ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
|
313 |
-
|
314 |
-
return ret
|
315 |
-
|
316 |
-
except re.error:
|
317 |
-
warnings.warn(
|
318 |
-
"Exception creating Regex for one_of, building MatchFirst", stacklevel=2
|
319 |
-
)
|
320 |
-
|
321 |
-
# last resort, just use MatchFirst
|
322 |
-
return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
|
323 |
-
" | ".join(symbols)
|
324 |
-
)
|
325 |
-
|
326 |
-
|
327 |
-
def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
|
328 |
-
"""Helper to easily and clearly define a dictionary by specifying
|
329 |
-
the respective patterns for the key and value. Takes care of
|
330 |
-
defining the :class:`Dict`, :class:`ZeroOrMore`, and
|
331 |
-
:class:`Group` tokens in the proper order. The key pattern
|
332 |
-
can include delimiting markers or punctuation, as long as they are
|
333 |
-
suppressed, thereby leaving the significant key text. The value
|
334 |
-
pattern can include named results, so that the :class:`Dict` results
|
335 |
-
can include named token fields.
|
336 |
-
|
337 |
-
Example::
|
338 |
-
|
339 |
-
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
|
340 |
-
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
|
341 |
-
print(attr_expr[1, ...].parse_string(text).dump())
|
342 |
-
|
343 |
-
attr_label = label
|
344 |
-
attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
|
345 |
-
|
346 |
-
# similar to Dict, but simpler call format
|
347 |
-
result = dict_of(attr_label, attr_value).parse_string(text)
|
348 |
-
print(result.dump())
|
349 |
-
print(result['shape'])
|
350 |
-
print(result.shape) # object attribute access works too
|
351 |
-
print(result.as_dict())
|
352 |
-
|
353 |
-
prints::
|
354 |
-
|
355 |
-
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
|
356 |
-
- color: 'light blue'
|
357 |
-
- posn: 'upper left'
|
358 |
-
- shape: 'SQUARE'
|
359 |
-
- texture: 'burlap'
|
360 |
-
SQUARE
|
361 |
-
SQUARE
|
362 |
-
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
|
363 |
-
"""
|
364 |
-
return Dict(OneOrMore(Group(key + value)))
|
365 |
-
|
366 |
-
|
367 |
-
def original_text_for(
|
368 |
-
expr: ParserElement, as_string: bool = True, *, asString: bool = True
|
369 |
-
) -> ParserElement:
|
370 |
-
"""Helper to return the original, untokenized text for a given
|
371 |
-
expression. Useful to restore the parsed fields of an HTML start
|
372 |
-
tag into the raw tag text itself, or to revert separate tokens with
|
373 |
-
intervening whitespace back to the original matching input text. By
|
374 |
-
default, returns astring containing the original parsed text.
|
375 |
-
|
376 |
-
If the optional ``as_string`` argument is passed as
|
377 |
-
``False``, then the return value is
|
378 |
-
a :class:`ParseResults` containing any results names that
|
379 |
-
were originally matched, and a single token containing the original
|
380 |
-
matched text from the input string. So if the expression passed to
|
381 |
-
:class:`original_text_for` contains expressions with defined
|
382 |
-
results names, you must set ``as_string`` to ``False`` if you
|
383 |
-
want to preserve those results name values.
|
384 |
-
|
385 |
-
The ``asString`` pre-PEP8 argument is retained for compatibility,
|
386 |
-
but will be removed in a future release.
|
387 |
-
|
388 |
-
Example::
|
389 |
-
|
390 |
-
src = "this is test <b> bold <i>text</i> </b> normal text "
|
391 |
-
for tag in ("b", "i"):
|
392 |
-
opener, closer = make_html_tags(tag)
|
393 |
-
patt = original_text_for(opener + SkipTo(closer) + closer)
|
394 |
-
print(patt.search_string(src)[0])
|
395 |
-
|
396 |
-
prints::
|
397 |
-
|
398 |
-
['<b> bold <i>text</i> </b>']
|
399 |
-
['<i>text</i>']
|
400 |
-
"""
|
401 |
-
asString = asString and as_string
|
402 |
-
|
403 |
-
locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
|
404 |
-
endlocMarker = locMarker.copy()
|
405 |
-
endlocMarker.callPreparse = False
|
406 |
-
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
|
407 |
-
if asString:
|
408 |
-
extractText = lambda s, l, t: s[t._original_start : t._original_end]
|
409 |
-
else:
|
410 |
-
|
411 |
-
def extractText(s, l, t):
|
412 |
-
t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
|
413 |
-
|
414 |
-
matchExpr.set_parse_action(extractText)
|
415 |
-
matchExpr.ignoreExprs = expr.ignoreExprs
|
416 |
-
matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
|
417 |
-
return matchExpr
|
418 |
-
|
419 |
-
|
420 |
-
def ungroup(expr: ParserElement) -> ParserElement:
|
421 |
-
"""Helper to undo pyparsing's default grouping of And expressions,
|
422 |
-
even if all but one are non-empty.
|
423 |
-
"""
|
424 |
-
return TokenConverter(expr).add_parse_action(lambda t: t[0])
|
425 |
-
|
426 |
-
|
427 |
-
def locatedExpr(expr: ParserElement) -> ParserElement:
|
428 |
-
"""
|
429 |
-
(DEPRECATED - future code should use the Located class)
|
430 |
-
Helper to decorate a returned token with its starting and ending
|
431 |
-
locations in the input string.
|
432 |
-
|
433 |
-
This helper adds the following results names:
|
434 |
-
|
435 |
-
- ``locn_start`` - location where matched expression begins
|
436 |
-
- ``locn_end`` - location where matched expression ends
|
437 |
-
- ``value`` - the actual parsed results
|
438 |
-
|
439 |
-
Be careful if the input text contains ``<TAB>`` characters, you
|
440 |
-
may want to call :class:`ParserElement.parseWithTabs`
|
441 |
-
|
442 |
-
Example::
|
443 |
-
|
444 |
-
wd = Word(alphas)
|
445 |
-
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
|
446 |
-
print(match)
|
447 |
-
|
448 |
-
prints::
|
449 |
-
|
450 |
-
[[0, 'ljsdf', 5]]
|
451 |
-
[[8, 'lksdjjf', 15]]
|
452 |
-
[[18, 'lkkjj', 23]]
|
453 |
-
"""
|
454 |
-
locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
|
455 |
-
return Group(
|
456 |
-
locator("locn_start")
|
457 |
-
+ expr("value")
|
458 |
-
+ locator.copy().leaveWhitespace()("locn_end")
|
459 |
-
)
|
460 |
-
|
461 |
-
|
462 |
-
def nested_expr(
|
463 |
-
opener: Union[str, ParserElement] = "(",
|
464 |
-
closer: Union[str, ParserElement] = ")",
|
465 |
-
content: typing.Optional[ParserElement] = None,
|
466 |
-
ignore_expr: ParserElement = quoted_string(),
|
467 |
-
*,
|
468 |
-
ignoreExpr: ParserElement = quoted_string(),
|
469 |
-
) -> ParserElement:
|
470 |
-
"""Helper method for defining nested lists enclosed in opening and
|
471 |
-
closing delimiters (``"("`` and ``")"`` are the default).
|
472 |
-
|
473 |
-
Parameters:
|
474 |
-
- ``opener`` - opening character for a nested list
|
475 |
-
(default= ``"("``); can also be a pyparsing expression
|
476 |
-
- ``closer`` - closing character for a nested list
|
477 |
-
(default= ``")"``); can also be a pyparsing expression
|
478 |
-
- ``content`` - expression for items within the nested lists
|
479 |
-
(default= ``None``)
|
480 |
-
- ``ignore_expr`` - expression for ignoring opening and closing delimiters
|
481 |
-
(default= :class:`quoted_string`)
|
482 |
-
- ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
|
483 |
-
but will be removed in a future release
|
484 |
-
|
485 |
-
If an expression is not provided for the content argument, the
|
486 |
-
nested expression will capture all whitespace-delimited content
|
487 |
-
between delimiters as a list of separate values.
|
488 |
-
|
489 |
-
Use the ``ignore_expr`` argument to define expressions that may
|
490 |
-
contain opening or closing characters that should not be treated as
|
491 |
-
opening or closing characters for nesting, such as quoted_string or
|
492 |
-
a comment expression. Specify multiple expressions using an
|
493 |
-
:class:`Or` or :class:`MatchFirst`. The default is
|
494 |
-
:class:`quoted_string`, but if no expressions are to be ignored, then
|
495 |
-
pass ``None`` for this argument.
|
496 |
-
|
497 |
-
Example::
|
498 |
-
|
499 |
-
data_type = one_of("void int short long char float double")
|
500 |
-
decl_data_type = Combine(data_type + Opt(Word('*')))
|
501 |
-
ident = Word(alphas+'_', alphanums+'_')
|
502 |
-
number = pyparsing_common.number
|
503 |
-
arg = Group(decl_data_type + ident)
|
504 |
-
LPAR, RPAR = map(Suppress, "()")
|
505 |
-
|
506 |
-
code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
|
507 |
-
|
508 |
-
c_function = (decl_data_type("type")
|
509 |
-
+ ident("name")
|
510 |
-
+ LPAR + Opt(delimited_list(arg), [])("args") + RPAR
|
511 |
-
+ code_body("body"))
|
512 |
-
c_function.ignore(c_style_comment)
|
513 |
-
|
514 |
-
source_code = '''
|
515 |
-
int is_odd(int x) {
|
516 |
-
return (x%2);
|
517 |
-
}
|
518 |
-
|
519 |
-
int dec_to_hex(char hchar) {
|
520 |
-
if (hchar >= '0' && hchar <= '9') {
|
521 |
-
return (ord(hchar)-ord('0'));
|
522 |
-
} else {
|
523 |
-
return (10+ord(hchar)-ord('A'));
|
524 |
-
}
|
525 |
-
}
|
526 |
-
'''
|
527 |
-
for func in c_function.search_string(source_code):
|
528 |
-
print("%(name)s (%(type)s) args: %(args)s" % func)
|
529 |
-
|
530 |
-
|
531 |
-
prints::
|
532 |
-
|
533 |
-
is_odd (int) args: [['int', 'x']]
|
534 |
-
dec_to_hex (int) args: [['char', 'hchar']]
|
535 |
-
"""
|
536 |
-
if ignoreExpr != ignore_expr:
|
537 |
-
ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
|
538 |
-
if opener == closer:
|
539 |
-
raise ValueError("opening and closing strings cannot be the same")
|
540 |
-
if content is None:
|
541 |
-
if isinstance(opener, str_type) and isinstance(closer, str_type):
|
542 |
-
if len(opener) == 1 and len(closer) == 1:
|
543 |
-
if ignoreExpr is not None:
|
544 |
-
content = Combine(
|
545 |
-
OneOrMore(
|
546 |
-
~ignoreExpr
|
547 |
-
+ CharsNotIn(
|
548 |
-
opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
|
549 |
-
exact=1,
|
550 |
-
)
|
551 |
-
)
|
552 |
-
).set_parse_action(lambda t: t[0].strip())
|
553 |
-
else:
|
554 |
-
content = empty.copy() + CharsNotIn(
|
555 |
-
opener + closer + ParserElement.DEFAULT_WHITE_CHARS
|
556 |
-
).set_parse_action(lambda t: t[0].strip())
|
557 |
-
else:
|
558 |
-
if ignoreExpr is not None:
|
559 |
-
content = Combine(
|
560 |
-
OneOrMore(
|
561 |
-
~ignoreExpr
|
562 |
-
+ ~Literal(opener)
|
563 |
-
+ ~Literal(closer)
|
564 |
-
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
|
565 |
-
)
|
566 |
-
).set_parse_action(lambda t: t[0].strip())
|
567 |
-
else:
|
568 |
-
content = Combine(
|
569 |
-
OneOrMore(
|
570 |
-
~Literal(opener)
|
571 |
-
+ ~Literal(closer)
|
572 |
-
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
|
573 |
-
)
|
574 |
-
).set_parse_action(lambda t: t[0].strip())
|
575 |
-
else:
|
576 |
-
raise ValueError(
|
577 |
-
"opening and closing arguments must be strings if no content expression is given"
|
578 |
-
)
|
579 |
-
ret = Forward()
|
580 |
-
if ignoreExpr is not None:
|
581 |
-
ret <<= Group(
|
582 |
-
Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
|
583 |
-
)
|
584 |
-
else:
|
585 |
-
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
|
586 |
-
ret.set_name("nested %s%s expression" % (opener, closer))
|
587 |
-
return ret
|
588 |
-
|
589 |
-
|
590 |
-
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
|
591 |
-
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
|
592 |
-
if isinstance(tagStr, str_type):
|
593 |
-
resname = tagStr
|
594 |
-
tagStr = Keyword(tagStr, caseless=not xml)
|
595 |
-
else:
|
596 |
-
resname = tagStr.name
|
597 |
-
|
598 |
-
tagAttrName = Word(alphas, alphanums + "_-:")
|
599 |
-
if xml:
|
600 |
-
tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
|
601 |
-
openTag = (
|
602 |
-
suppress_LT
|
603 |
-
+ tagStr("tag")
|
604 |
-
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
|
605 |
-
+ Opt("/", default=[False])("empty").set_parse_action(
|
606 |
-
lambda s, l, t: t[0] == "/"
|
607 |
-
)
|
608 |
-
+ suppress_GT
|
609 |
-
)
|
610 |
-
else:
|
611 |
-
tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
|
612 |
-
printables, exclude_chars=">"
|
613 |
-
)
|
614 |
-
openTag = (
|
615 |
-
suppress_LT
|
616 |
-
+ tagStr("tag")
|
617 |
-
+ Dict(
|
618 |
-
ZeroOrMore(
|
619 |
-
Group(
|
620 |
-
tagAttrName.set_parse_action(lambda t: t[0].lower())
|
621 |
-
+ Opt(Suppress("=") + tagAttrValue)
|
622 |
-
)
|
623 |
-
)
|
624 |
-
)
|
625 |
-
+ Opt("/", default=[False])("empty").set_parse_action(
|
626 |
-
lambda s, l, t: t[0] == "/"
|
627 |
-
)
|
628 |
-
+ suppress_GT
|
629 |
-
)
|
630 |
-
closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
|
631 |
-
|
632 |
-
openTag.set_name("<%s>" % resname)
|
633 |
-
# add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
|
634 |
-
openTag.add_parse_action(
|
635 |
-
lambda t: t.__setitem__(
|
636 |
-
"start" + "".join(resname.replace(":", " ").title().split()), t.copy()
|
637 |
-
)
|
638 |
-
)
|
639 |
-
closeTag = closeTag(
|
640 |
-
"end" + "".join(resname.replace(":", " ").title().split())
|
641 |
-
).set_name("</%s>" % resname)
|
642 |
-
openTag.tag = resname
|
643 |
-
closeTag.tag = resname
|
644 |
-
openTag.tag_body = SkipTo(closeTag())
|
645 |
-
return openTag, closeTag
|
646 |
-
|
647 |
-
|
648 |
-
def make_html_tags(
|
649 |
-
tag_str: Union[str, ParserElement]
|
650 |
-
) -> Tuple[ParserElement, ParserElement]:
|
651 |
-
"""Helper to construct opening and closing tag expressions for HTML,
|
652 |
-
given a tag name. Matches tags in either upper or lower case,
|
653 |
-
attributes with namespaces and with quoted or unquoted values.
|
654 |
-
|
655 |
-
Example::
|
656 |
-
|
657 |
-
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
|
658 |
-
# make_html_tags returns pyparsing expressions for the opening and
|
659 |
-
# closing tags as a 2-tuple
|
660 |
-
a, a_end = make_html_tags("A")
|
661 |
-
link_expr = a + SkipTo(a_end)("link_text") + a_end
|
662 |
-
|
663 |
-
for link in link_expr.search_string(text):
|
664 |
-
# attributes in the <A> tag (like "href" shown here) are
|
665 |
-
# also accessible as named results
|
666 |
-
print(link.link_text, '->', link.href)
|
667 |
-
|
668 |
-
prints::
|
669 |
-
|
670 |
-
pyparsing -> https://github.com/pyparsing/pyparsing/wiki
|
671 |
-
"""
|
672 |
-
return _makeTags(tag_str, False)
|
673 |
-
|
674 |
-
|
675 |
-
def make_xml_tags(
|
676 |
-
tag_str: Union[str, ParserElement]
|
677 |
-
) -> Tuple[ParserElement, ParserElement]:
|
678 |
-
"""Helper to construct opening and closing tag expressions for XML,
|
679 |
-
given a tag name. Matches tags only in the given upper/lower case.
|
680 |
-
|
681 |
-
Example: similar to :class:`make_html_tags`
|
682 |
-
"""
|
683 |
-
return _makeTags(tag_str, True)
|
684 |
-
|
685 |
-
|
686 |
-
any_open_tag: ParserElement
|
687 |
-
any_close_tag: ParserElement
|
688 |
-
any_open_tag, any_close_tag = make_html_tags(
|
689 |
-
Word(alphas, alphanums + "_:").set_name("any tag")
|
690 |
-
)
|
691 |
-
|
692 |
-
_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
|
693 |
-
common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
|
694 |
-
"common HTML entity"
|
695 |
-
)
|
696 |
-
|
697 |
-
|
698 |
-
def replace_html_entity(t):
|
699 |
-
"""Helper parser action to replace common HTML entities with their special characters"""
|
700 |
-
return _htmlEntityMap.get(t.entity)
|
701 |
-
|
702 |
-
|
703 |
-
class OpAssoc(Enum):
|
704 |
-
LEFT = 1
|
705 |
-
RIGHT = 2
|
706 |
-
|
707 |
-
|
708 |
-
InfixNotationOperatorArgType = Union[
|
709 |
-
ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
|
710 |
-
]
|
711 |
-
InfixNotationOperatorSpec = Union[
|
712 |
-
Tuple[
|
713 |
-
InfixNotationOperatorArgType,
|
714 |
-
int,
|
715 |
-
OpAssoc,
|
716 |
-
typing.Optional[ParseAction],
|
717 |
-
],
|
718 |
-
Tuple[
|
719 |
-
InfixNotationOperatorArgType,
|
720 |
-
int,
|
721 |
-
OpAssoc,
|
722 |
-
],
|
723 |
-
]
|
724 |
-
|
725 |
-
|
726 |
-
def infix_notation(
|
727 |
-
base_expr: ParserElement,
|
728 |
-
op_list: List[InfixNotationOperatorSpec],
|
729 |
-
lpar: Union[str, ParserElement] = Suppress("("),
|
730 |
-
rpar: Union[str, ParserElement] = Suppress(")"),
|
731 |
-
) -> ParserElement:
|
732 |
-
"""Helper method for constructing grammars of expressions made up of
|
733 |
-
operators working in a precedence hierarchy. Operators may be unary
|
734 |
-
or binary, left- or right-associative. Parse actions can also be
|
735 |
-
attached to operator expressions. The generated parser will also
|
736 |
-
recognize the use of parentheses to override operator precedences
|
737 |
-
(see example below).
|
738 |
-
|
739 |
-
Note: if you define a deep operator list, you may see performance
|
740 |
-
issues when using infix_notation. See
|
741 |
-
:class:`ParserElement.enable_packrat` for a mechanism to potentially
|
742 |
-
improve your parser performance.
|
743 |
-
|
744 |
-
Parameters:
|
745 |
-
- ``base_expr`` - expression representing the most basic operand to
|
746 |
-
be used in the expression
|
747 |
-
- ``op_list`` - list of tuples, one for each operator precedence level
|
748 |
-
in the expression grammar; each tuple is of the form ``(op_expr,
|
749 |
-
num_operands, right_left_assoc, (optional)parse_action)``, where:
|
750 |
-
|
751 |
-
- ``op_expr`` is the pyparsing expression for the operator; may also
|
752 |
-
be a string, which will be converted to a Literal; if ``num_operands``
|
753 |
-
is 3, ``op_expr`` is a tuple of two expressions, for the two
|
754 |
-
operators separating the 3 terms
|
755 |
-
- ``num_operands`` is the number of terms for this operator (must be 1,
|
756 |
-
2, or 3)
|
757 |
-
- ``right_left_assoc`` is the indicator whether the operator is right
|
758 |
-
or left associative, using the pyparsing-defined constants
|
759 |
-
``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
|
760 |
-
- ``parse_action`` is the parse action to be associated with
|
761 |
-
expressions matching this operator expression (the parse action
|
762 |
-
tuple member may be omitted); if the parse action is passed
|
763 |
-
a tuple or list of functions, this is equivalent to calling
|
764 |
-
``set_parse_action(*fn)``
|
765 |
-
(:class:`ParserElement.set_parse_action`)
|
766 |
-
- ``lpar`` - expression for matching left-parentheses; if passed as a
|
767 |
-
str, then will be parsed as Suppress(lpar). If lpar is passed as
|
768 |
-
an expression (such as ``Literal('(')``), then it will be kept in
|
769 |
-
the parsed results, and grouped with them. (default= ``Suppress('(')``)
|
770 |
-
- ``rpar`` - expression for matching right-parentheses; if passed as a
|
771 |
-
str, then will be parsed as Suppress(rpar). If rpar is passed as
|
772 |
-
an expression (such as ``Literal(')')``), then it will be kept in
|
773 |
-
the parsed results, and grouped with them. (default= ``Suppress(')')``)
|
774 |
-
|
775 |
-
Example::
|
776 |
-
|
777 |
-
# simple example of four-function arithmetic with ints and
|
778 |
-
# variable names
|
779 |
-
integer = pyparsing_common.signed_integer
|
780 |
-
varname = pyparsing_common.identifier
|
781 |
-
|
782 |
-
arith_expr = infix_notation(integer | varname,
|
783 |
-
[
|
784 |
-
('-', 1, OpAssoc.RIGHT),
|
785 |
-
(one_of('* /'), 2, OpAssoc.LEFT),
|
786 |
-
(one_of('+ -'), 2, OpAssoc.LEFT),
|
787 |
-
])
|
788 |
-
|
789 |
-
arith_expr.run_tests('''
|
790 |
-
5+3*6
|
791 |
-
(5+3)*6
|
792 |
-
-2--11
|
793 |
-
''', full_dump=False)
|
794 |
-
|
795 |
-
prints::
|
796 |
-
|
797 |
-
5+3*6
|
798 |
-
[[5, '+', [3, '*', 6]]]
|
799 |
-
|
800 |
-
(5+3)*6
|
801 |
-
[[[5, '+', 3], '*', 6]]
|
802 |
-
|
803 |
-
-2--11
|
804 |
-
[[['-', 2], '-', ['-', 11]]]
|
805 |
-
"""
|
806 |
-
# captive version of FollowedBy that does not do parse actions or capture results names
|
807 |
-
class _FB(FollowedBy):
|
808 |
-
def parseImpl(self, instring, loc, doActions=True):
|
809 |
-
self.expr.try_parse(instring, loc)
|
810 |
-
return loc, []
|
811 |
-
|
812 |
-
_FB.__name__ = "FollowedBy>"
|
813 |
-
|
814 |
-
ret = Forward()
|
815 |
-
if isinstance(lpar, str):
|
816 |
-
lpar = Suppress(lpar)
|
817 |
-
if isinstance(rpar, str):
|
818 |
-
rpar = Suppress(rpar)
|
819 |
-
|
820 |
-
# if lpar and rpar are not suppressed, wrap in group
|
821 |
-
if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)):
|
822 |
-
lastExpr = base_expr | Group(lpar + ret + rpar)
|
823 |
-
else:
|
824 |
-
lastExpr = base_expr | (lpar + ret + rpar)
|
825 |
-
|
826 |
-
for i, operDef in enumerate(op_list):
|
827 |
-
opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
|
828 |
-
if isinstance(opExpr, str_type):
|
829 |
-
opExpr = ParserElement._literalStringClass(opExpr)
|
830 |
-
if arity == 3:
|
831 |
-
if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
|
832 |
-
raise ValueError(
|
833 |
-
"if numterms=3, opExpr must be a tuple or list of two expressions"
|
834 |
-
)
|
835 |
-
opExpr1, opExpr2 = opExpr
|
836 |
-
term_name = "{}{} term".format(opExpr1, opExpr2)
|
837 |
-
else:
|
838 |
-
term_name = "{} term".format(opExpr)
|
839 |
-
|
840 |
-
if not 1 <= arity <= 3:
|
841 |
-
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
|
842 |
-
|
843 |
-
if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
|
844 |
-
raise ValueError("operator must indicate right or left associativity")
|
845 |
-
|
846 |
-
thisExpr: Forward = Forward().set_name(term_name)
|
847 |
-
if rightLeftAssoc is OpAssoc.LEFT:
|
848 |
-
if arity == 1:
|
849 |
-
matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
|
850 |
-
elif arity == 2:
|
851 |
-
if opExpr is not None:
|
852 |
-
matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
|
853 |
-
lastExpr + (opExpr + lastExpr)[1, ...]
|
854 |
-
)
|
855 |
-
else:
|
856 |
-
matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
|
857 |
-
elif arity == 3:
|
858 |
-
matchExpr = _FB(
|
859 |
-
lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
|
860 |
-
) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
|
861 |
-
elif rightLeftAssoc is OpAssoc.RIGHT:
|
862 |
-
if arity == 1:
|
863 |
-
# try to avoid LR with this extra test
|
864 |
-
if not isinstance(opExpr, Opt):
|
865 |
-
opExpr = Opt(opExpr)
|
866 |
-
matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
|
867 |
-
elif arity == 2:
|
868 |
-
if opExpr is not None:
|
869 |
-
matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
|
870 |
-
lastExpr + (opExpr + thisExpr)[1, ...]
|
871 |
-
)
|
872 |
-
else:
|
873 |
-
matchExpr = _FB(lastExpr + thisExpr) + Group(
|
874 |
-
lastExpr + thisExpr[1, ...]
|
875 |
-
)
|
876 |
-
elif arity == 3:
|
877 |
-
matchExpr = _FB(
|
878 |
-
lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
|
879 |
-
) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
|
880 |
-
if pa:
|
881 |
-
if isinstance(pa, (tuple, list)):
|
882 |
-
matchExpr.set_parse_action(*pa)
|
883 |
-
else:
|
884 |
-
matchExpr.set_parse_action(pa)
|
885 |
-
thisExpr <<= (matchExpr | lastExpr).setName(term_name)
|
886 |
-
lastExpr = thisExpr
|
887 |
-
ret <<= lastExpr
|
888 |
-
return ret
|
889 |
-
|
890 |
-
|
891 |
-
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
|
892 |
-
"""
|
893 |
-
(DEPRECATED - use IndentedBlock class instead)
|
894 |
-
Helper method for defining space-delimited indentation blocks,
|
895 |
-
such as those used to define block statements in Python source code.
|
896 |
-
|
897 |
-
Parameters:
|
898 |
-
|
899 |
-
- ``blockStatementExpr`` - expression defining syntax of statement that
|
900 |
-
is repeated within the indented block
|
901 |
-
- ``indentStack`` - list created by caller to manage indentation stack
|
902 |
-
(multiple ``statementWithIndentedBlock`` expressions within a single
|
903 |
-
grammar should share a common ``indentStack``)
|
904 |
-
- ``indent`` - boolean indicating whether block must be indented beyond
|
905 |
-
the current level; set to ``False`` for block of left-most statements
|
906 |
-
(default= ``True``)
|
907 |
-
|
908 |
-
A valid block must contain at least one ``blockStatement``.
|
909 |
-
|
910 |
-
(Note that indentedBlock uses internal parse actions which make it
|
911 |
-
incompatible with packrat parsing.)
|
912 |
-
|
913 |
-
Example::
|
914 |
-
|
915 |
-
data = '''
|
916 |
-
def A(z):
|
917 |
-
A1
|
918 |
-
B = 100
|
919 |
-
G = A2
|
920 |
-
A2
|
921 |
-
A3
|
922 |
-
B
|
923 |
-
def BB(a,b,c):
|
924 |
-
BB1
|
925 |
-
def BBA():
|
926 |
-
bba1
|
927 |
-
bba2
|
928 |
-
bba3
|
929 |
-
C
|
930 |
-
D
|
931 |
-
def spam(x,y):
|
932 |
-
def eggs(z):
|
933 |
-
pass
|
934 |
-
'''
|
935 |
-
|
936 |
-
|
937 |
-
indentStack = [1]
|
938 |
-
stmt = Forward()
|
939 |
-
|
940 |
-
identifier = Word(alphas, alphanums)
|
941 |
-
funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
|
942 |
-
func_body = indentedBlock(stmt, indentStack)
|
943 |
-
funcDef = Group(funcDecl + func_body)
|
944 |
-
|
945 |
-
rvalue = Forward()
|
946 |
-
funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
|
947 |
-
rvalue << (funcCall | identifier | Word(nums))
|
948 |
-
assignment = Group(identifier + "=" + rvalue)
|
949 |
-
stmt << (funcDef | assignment | identifier)
|
950 |
-
|
951 |
-
module_body = stmt[1, ...]
|
952 |
-
|
953 |
-
parseTree = module_body.parseString(data)
|
954 |
-
parseTree.pprint()
|
955 |
-
|
956 |
-
prints::
|
957 |
-
|
958 |
-
[['def',
|
959 |
-
'A',
|
960 |
-
['(', 'z', ')'],
|
961 |
-
':',
|
962 |
-
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
|
963 |
-
'B',
|
964 |
-
['def',
|
965 |
-
'BB',
|
966 |
-
['(', 'a', 'b', 'c', ')'],
|
967 |
-
':',
|
968 |
-
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
|
969 |
-
'C',
|
970 |
-
'D',
|
971 |
-
['def',
|
972 |
-
'spam',
|
973 |
-
['(', 'x', 'y', ')'],
|
974 |
-
':',
|
975 |
-
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
|
976 |
-
"""
|
977 |
-
backup_stacks.append(indentStack[:])
|
978 |
-
|
979 |
-
def reset_stack():
|
980 |
-
indentStack[:] = backup_stacks[-1]
|
981 |
-
|
982 |
-
def checkPeerIndent(s, l, t):
|
983 |
-
if l >= len(s):
|
984 |
-
return
|
985 |
-
curCol = col(l, s)
|
986 |
-
if curCol != indentStack[-1]:
|
987 |
-
if curCol > indentStack[-1]:
|
988 |
-
raise ParseException(s, l, "illegal nesting")
|
989 |
-
raise ParseException(s, l, "not a peer entry")
|
990 |
-
|
991 |
-
def checkSubIndent(s, l, t):
|
992 |
-
curCol = col(l, s)
|
993 |
-
if curCol > indentStack[-1]:
|
994 |
-
indentStack.append(curCol)
|
995 |
-
else:
|
996 |
-
raise ParseException(s, l, "not a subentry")
|
997 |
-
|
998 |
-
def checkUnindent(s, l, t):
|
999 |
-
if l >= len(s):
|
1000 |
-
return
|
1001 |
-
curCol = col(l, s)
|
1002 |
-
if not (indentStack and curCol in indentStack):
|
1003 |
-
raise ParseException(s, l, "not an unindent")
|
1004 |
-
if curCol < indentStack[-1]:
|
1005 |
-
indentStack.pop()
|
1006 |
-
|
1007 |
-
NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
|
1008 |
-
INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
|
1009 |
-
PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
|
1010 |
-
UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
|
1011 |
-
if indent:
|
1012 |
-
smExpr = Group(
|
1013 |
-
Opt(NL)
|
1014 |
-
+ INDENT
|
1015 |
-
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
|
1016 |
-
+ UNDENT
|
1017 |
-
)
|
1018 |
-
else:
|
1019 |
-
smExpr = Group(
|
1020 |
-
Opt(NL)
|
1021 |
-
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
|
1022 |
-
+ Opt(UNDENT)
|
1023 |
-
)
|
1024 |
-
|
1025 |
-
# add a parse action to remove backup_stack from list of backups
|
1026 |
-
smExpr.add_parse_action(
|
1027 |
-
lambda: backup_stacks.pop(-1) and None if backup_stacks else None
|
1028 |
-
)
|
1029 |
-
smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
|
1030 |
-
blockStatementExpr.ignore(_bslash + LineEnd())
|
1031 |
-
return smExpr.set_name("indented block")
|
1032 |
-
|
1033 |
-
|
1034 |
-
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
|
1035 |
-
c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
|
1036 |
-
"C style comment"
|
1037 |
-
)
|
1038 |
-
"Comment of the form ``/* ... */``"
|
1039 |
-
|
1040 |
-
html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
|
1041 |
-
"Comment of the form ``<!-- ... -->``"
|
1042 |
-
|
1043 |
-
rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
|
1044 |
-
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
|
1045 |
-
"Comment of the form ``// ... (to end of line)``"
|
1046 |
-
|
1047 |
-
cpp_style_comment = Combine(
|
1048 |
-
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
|
1049 |
-
).set_name("C++ style comment")
|
1050 |
-
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
|
1051 |
-
|
1052 |
-
java_style_comment = cpp_style_comment
|
1053 |
-
"Same as :class:`cpp_style_comment`"
|
1054 |
-
|
1055 |
-
python_style_comment = Regex(r"#.*").set_name("Python style comment")
|
1056 |
-
"Comment of the form ``# ... (to end of line)``"
|
1057 |
-
|
1058 |
-
|
1059 |
-
# build list of built-in expressions, for future reference if a global default value
|
1060 |
-
# gets updated
|
1061 |
-
_builtin_exprs: List[ParserElement] = [
|
1062 |
-
v for v in vars().values() if isinstance(v, ParserElement)
|
1063 |
-
]
|
1064 |
-
|
1065 |
-
|
1066 |
-
# pre-PEP8 compatible names
|
1067 |
-
delimitedList = delimited_list
|
1068 |
-
countedArray = counted_array
|
1069 |
-
matchPreviousLiteral = match_previous_literal
|
1070 |
-
matchPreviousExpr = match_previous_expr
|
1071 |
-
oneOf = one_of
|
1072 |
-
dictOf = dict_of
|
1073 |
-
originalTextFor = original_text_for
|
1074 |
-
nestedExpr = nested_expr
|
1075 |
-
makeHTMLTags = make_html_tags
|
1076 |
-
makeXMLTags = make_xml_tags
|
1077 |
-
anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
|
1078 |
-
commonHTMLEntity = common_html_entity
|
1079 |
-
replaceHTMLEntity = replace_html_entity
|
1080 |
-
opAssoc = OpAssoc
|
1081 |
-
infixNotation = infix_notation
|
1082 |
-
cStyleComment = c_style_comment
|
1083 |
-
htmlComment = html_comment
|
1084 |
-
restOfLine = rest_of_line
|
1085 |
-
dblSlashComment = dbl_slash_comment
|
1086 |
-
cppStyleComment = cpp_style_comment
|
1087 |
-
javaStyleComment = java_style_comment
|
1088 |
-
pythonStyleComment = python_style_comment
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/_framework_compat.py
DELETED
@@ -1,55 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Backward compatibility for homebrew builds on macOS.
|
3 |
-
"""
|
4 |
-
|
5 |
-
|
6 |
-
import sys
|
7 |
-
import os
|
8 |
-
import functools
|
9 |
-
import subprocess
|
10 |
-
import sysconfig
|
11 |
-
|
12 |
-
|
13 |
-
@functools.lru_cache()
|
14 |
-
def enabled():
|
15 |
-
"""
|
16 |
-
Only enabled for Python 3.9 framework homebrew builds
|
17 |
-
except ensurepip and venv.
|
18 |
-
"""
|
19 |
-
PY39 = (3, 9) < sys.version_info < (3, 10)
|
20 |
-
framework = sys.platform == 'darwin' and sys._framework
|
21 |
-
homebrew = "Cellar" in sysconfig.get_config_var('projectbase')
|
22 |
-
venv = sys.prefix != sys.base_prefix
|
23 |
-
ensurepip = os.environ.get("ENSUREPIP_OPTIONS")
|
24 |
-
return PY39 and framework and homebrew and not venv and not ensurepip
|
25 |
-
|
26 |
-
|
27 |
-
schemes = dict(
|
28 |
-
osx_framework_library=dict(
|
29 |
-
stdlib='{installed_base}/{platlibdir}/python{py_version_short}',
|
30 |
-
platstdlib='{platbase}/{platlibdir}/python{py_version_short}',
|
31 |
-
purelib='{homebrew_prefix}/lib/python{py_version_short}/site-packages',
|
32 |
-
platlib='{homebrew_prefix}/{platlibdir}/python{py_version_short}/site-packages',
|
33 |
-
include='{installed_base}/include/python{py_version_short}{abiflags}',
|
34 |
-
platinclude='{installed_platbase}/include/python{py_version_short}{abiflags}',
|
35 |
-
scripts='{homebrew_prefix}/bin',
|
36 |
-
data='{homebrew_prefix}',
|
37 |
-
)
|
38 |
-
)
|
39 |
-
|
40 |
-
|
41 |
-
@functools.lru_cache()
|
42 |
-
def vars():
|
43 |
-
if not enabled():
|
44 |
-
return {}
|
45 |
-
homebrew_prefix = subprocess.check_output(['brew', '--prefix'], text=True).strip()
|
46 |
-
return locals()
|
47 |
-
|
48 |
-
|
49 |
-
def scheme(name):
|
50 |
-
"""
|
51 |
-
Override the selected scheme for posix_prefix.
|
52 |
-
"""
|
53 |
-
if not enabled() or not name.endswith('_prefix'):
|
54 |
-
return name
|
55 |
-
return 'osx_framework_library'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Ark Survival Evolved Unblocked No Hay Descarga.md
DELETED
@@ -1,70 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>ARK: Survival Evolved - Cómo jugar en línea sin descargar</h1>
|
3 |
-
<p>¿Te gustan los dinosaurios y los juegos de supervivencia? ¿Quieres explorar una tierra vasta y misteriosa llena de peligros y maravillas? ¿Quieres jugar con o contra cientos de otros jugadores en una aventura multijugador en línea? Si respondiste sí a cualquiera de estas preguntas, entonces podrías estar interesado en ARK: Survival Evolved, un juego que te ofrece una emocionante experiencia de supervivencia en un mundo prehistórico. Pero lo que si usted no tiene suficiente espacio o tiempo para descargar e instalar el juego en su dispositivo? No te preocupes, hay formas de jugar en línea sin descargar. En este artículo, te mostraremos cómo hacerlo, y cuáles son los pros y los contras de jugar en línea sin descargar. </p>
|
4 |
-
<h2>ark survival evolved unblocked no hay descarga</h2><br /><p><b><b>DOWNLOAD</b> ✫✫✫ <a href="https://bltlly.com/2v6Lk0">https://bltlly.com/2v6Lk0</a></b></p><br /><br />
|
5 |
-
<h2>Introducción</h2>
|
6 |
-
<h3>¿Qué es ARK: Supervivencia Evolucionada? </h3>
|
7 |
-
<p>ARK: Survival Evolved es un juego que fue lanzado en 2017 por Studio Wildcard. Es un juego de supervivencia sandbox que te permite crear tu propio personaje y explorar una isla masiva llamada ARK, donde puedes encontrar más de 80 especies diferentes de dinosaurios y otras criaturas prehistóricas. Puedes domar, criar y montar a algunos de ellos, o usarlos como aliados o enemigos en tu búsqueda de la supervivencia. También puedes crear armas, herramientas, ropa y estructuras con los recursos que reúnas o saquees de otros jugadores. Puedes jugar solo o unirte a una tribu con otros jugadores, y cooperar o competir con ellos en varios modos de juego. También puedes personalizar los ajustes y mods del juego para adaptarlos a tus preferencias. </p>
|
8 |
-
<h3>¿Por qué jugar en línea sin descargar? </h3>
|
9 |
-
|
10 |
-
<p>Por eso algunos jugadores prefieren jugar online sin descargar. De esta manera, pueden ahorrar espacio y tiempo, y disfrutar del juego en cualquier dispositivo con conexión a Internet. También pueden evitar la molestia de actualizar el juego o instalar mods manualmente. Jugar en línea sin descargar también puede mejorar su rendimiento y calidad de juego, ya que no tiene que depender del hardware de su dispositivo, sino del servidor en la nube. </p>
|
11 |
-
<h2>Cómo jugar online sin descargar</h2>
|
12 |
-
<h3>Opción 1: Usar un servicio de juegos en la nube</h3>
|
13 |
-
<h4>¿Qué es el juego en la nube? </h4>
|
14 |
-
<p>Juegos en la nube es una tecnología que le permite transmitir juegos desde un servidor remoto a su dispositivo a través de Internet. No tienes que descargar ni instalar nada en tu dispositivo, ya que el juego se ejecuta en el servidor y envía la salida de vídeo y audio al dispositivo. Solo necesita un dispositivo compatible (como un PC, computadora portátil, tableta, teléfono inteligente o televisor inteligente), una conexión a Internet (preferiblemente de alta velocidad y estable), y una suscripción o cuenta con el servicio de juegos en la nube. </p>
|
15 |
-
<h4>Cómo usar Playkey.net para jugar ARK: Survival Evolved online</h4>
|
16 |
-
<p>Uno de los servicios de juegos en la nube que puede utilizar para jugar ARK: Survival Evolved en línea sin necesidad de descargar es Playkey.net. Playkey.net es una plataforma que te permite acceder a una biblioteca de más de 200 juegos, incluyendo ARK: Survival Evolved, y transmitirlos a tu dispositivo. Puedes jugar en cualquier dispositivo compatible con HTML5, como Windows, Mac, Linux, Android o iOS. También puede usar cualquier controlador o teclado y ratón que prefiera. Estos son los pasos para usar Playkey.net para jugar a ARK: Survival Evolved online:</p>
|
17 |
-
<p></p>
|
18 |
-
<ol>
|
19 |
-
<li>Vaya a <a href=">Playkey.net</a> y cree una cuenta o inicie sesión con su cuenta existente. </li>
|
20 |
-
<li>Elija un plan de suscripción que se adapte a sus necesidades y presupuesto. Puede elegir entre planes mensuales, trimestrales o anuales, a partir de $ 9.99 por mes. También puede probar el servicio de forma gratuita durante 14 días. </li>
|
21 |
-
|
22 |
-
<li>Disfruta jugando ARK: Survival Evolved en línea sin descargar. Puede ajustar la configuración de los gráficos, guardar su progreso y chatear con otros jugadores. </li>
|
23 |
-
</ol>
|
24 |
-
<h3>Opción 2: Usar una extensión de navegador</h3>
|
25 |
-
<h4>¿Qué es una extensión de navegador? </h4>
|
26 |
-
<p>Una extensión de navegador es un programa de software que añade funcionalidad a su navegador web. Puede instalar extensiones de navegador de varias fuentes, como la Chrome Web Store o los complementos de Firefox. Las extensiones del navegador pueden mejorar su experiencia de navegación agregando características, como bloquear anuncios, administrar contraseñas o descargar videos. Algunas extensiones de navegador también pueden ayudarte a descargar juegos gratis o jugarlos en línea sin necesidad de descargarlos. </p>
|
27 |
-
<h4>Cómo usar WizCase para descargar ARK: Survival Evolved gratis</h4>
|
28 |
-
<p>Una de las extensiones del navegador que puedes usar para descargar ARK: Survival Evolved gratis es WizCase. WizCase es una herramienta que le ayuda a encontrar y descargar juegos, películas, música y software de varias fuentes. También protege su privacidad y seguridad al cifrar su tráfico y ocultar su dirección IP. Estos son los pasos para usar WizCase para descargar ARK: Survival Evolved gratis:</p>
|
29 |
-
<ol>
|
30 |
-
<li>Ir a <a href=">WizCase</a> y descargar la extensión para su navegador (Chrome o Firefox). </li>
|
31 |
-
<li>Instale la extensión y cree una cuenta o inicie sesión con su cuenta existente. </li>
|
32 |
-
<li>Una vez que haya instalado la extensión, vaya a la sección de juegos y encuentre ARK: Survival Evolved. Haga clic en el botón de descarga y elija una fuente de la lista. </li>
|
33 |
-
<li>Espera a que termine la descarga y luego ejecuta el instalador del juego en tu dispositivo. </li>
|
34 |
-
<li>Disfruta jugando ARK: Survival Evolved gratis. </li>
|
35 |
-
</ol>
|
36 |
-
<h2>Pros y contras de jugar online sin descargar</h2>
|
37 |
-
<h3>Pros</h3>
|
38 |
-
<p>Jugar en línea sin descargar tiene algunas ventajas sobre jugar sin conexión después de descargar. Algunos de ellos son:</p>
|
39 |
-
<ul>
|
40 |
-
|
41 |
-
<li>Puedes jugar en cualquier dispositivo con conexión a Internet. Usted no tiene que pegarse a un dispositivo o comprar uno nuevo si el actual no cumple con los requisitos del juego. </li>
|
42 |
-
<li> Puede disfrutar de un mejor rendimiento y calidad de juego. No tienes que sufrir retrasos, caídas o baja calidad gráfica debido a las limitaciones de hardware de tu dispositivo. </li>
|
43 |
-
<li>Puedes acceder a las últimas actualizaciones y mods automáticamente. No tienes que actualizar manualmente el juego o instalar mods tú mismo. </li>
|
44 |
-
</ul>
|
45 |
-
<h3>Contras</h3>
|
46 |
-
<p>Jugar en línea sin descargar también tiene algunas desventajas sobre jugar sin conexión después de descargar. Algunos de ellos son:</p>
|
47 |
-
<ul>
|
48 |
-
<li>Necesitas una conexión a Internet confiable. Si tu Internet es lento, inestable o no está disponible, no podrás jugar en línea sin descargar. </li>
|
49 |
-
<li>Necesita una suscripción o cuenta con un servicio de juegos en la nube o una extensión de navegador. Si no tienes uno, no podrás jugar online sin descargar. </li>
|
50 |
-
<li>Es posible que experimente alguna latencia o retraso debido a problemas de red o sobrecarga del servidor. Si su velocidad de Internet o la capacidad del servidor no es suficiente, puede enfrentar algunos retrasos o interrupciones mientras juega en línea sin descargar. </li>
|
51 |
-
<li>Es posible que pierda algún control o opciones de personalización sobre la configuración del juego o mods. Si juegas en línea sin descargar, es posible que tengas que seguir la configuración predeterminada o los mods proporcionados por el servicio de juegos en la nube o la extensión del navegador. Es posible que no pueda cambiarlos según sus preferencias. </li>
|
52 |
-
</ul>
|
53 |
-
<h2>Conclusión</h2>
|
54 |
-
<h3>Resumen de los puntos principales</h3>
|
55 |
-
|
56 |
-
<h3>Llamada a la acción</h3>
|
57 |
-
<p>Ahora que sabes cómo jugar a ARK: Survival Evolved online sin necesidad de descargarlo, ¿por qué no intentarlo? Puedes elegir la opción que más te convenga y disfrutar del juego en cualquier dispositivo con conexión a Internet. También puede compartir su experiencia con nosotros en la sección de comentarios a continuación. ¡Nos encantaría saber de usted! </p>
|
58 |
-
<h2>Preguntas frecuentes</h2>
|
59 |
-
<h3>¿Cuáles son los requisitos mínimos para jugar a ARK: Survival Evolved online? </h3>
|
60 |
-
<p>Para jugar ARK: Survival Evolved en línea sin descargar, necesita un dispositivo compatible (como un PC, portátil, tableta, teléfono inteligente o TV inteligente), una conexión a Internet (preferiblemente de alta velocidad y estable), y una suscripción o cuenta con un servicio de juegos en la nube o una extensión de navegador. </p>
|
61 |
-
<h3>¿Cuánto cuesta jugar a ARK: Survival Evolved online sin descargar? </h3>
|
62 |
-
<p>El coste de jugar a ARK: Survival Evolved online sin descargar depende de la opción que elijas. Si utiliza un servicio de juegos en la nube, debe pagar un plan de suscripción que oscila entre $ 9.99 y $ 49.99 por mes, dependiendo del servicio y el plan. Si utiliza una extensión de navegador, es posible que pueda descargar el juego de forma gratuita o por un precio reducido, dependiendo de la extensión y la fuente. </p>
|
63 |
-
<h3>¿Es seguro jugar ARK: Survival Evolved online sin descargar? </h3>
|
64 |
-
<p>Por lo general, es seguro jugar ARK: Survival Evolved en línea sin necesidad de descargar, siempre y cuando utilice un servicio de juegos en la nube de buena reputación y fiable o una extensión del navegador. Sin embargo, siempre debe tener cuidado con su privacidad y seguridad al usar cualquier servicio o extensión en línea. Debe leer los términos y condiciones, la política de privacidad y los comentarios de los usuarios antes de registrarse o instalar nada. También debe usar una VPN o software antivirus para proteger su dispositivo y los datos de hackers o malware. </p>
|
65 |
-
<h3>¿Puedo jugar ARK: Survival Evolved online sin descargar con mis amigos? </h3>
|
66 |
-
|
67 |
-
<h3>¿Puedo jugar ARK: Survival Evolved sin conexión después de la descarga? </h3>
|
68 |
-
<p>Sí, puedes jugar a ARK: Survival Evolved sin conexión después de descargarlo, si prefieres hacerlo. Puedes descargar el juego desde Steam u otras plataformas e instalarlo en tu dispositivo. A continuación, puede jugar sin conexión en el modo de un solo jugador, o en línea en el modo multijugador si tiene una conexión a Internet. </p> 64aa2da5cf<br />
|
69 |
-
<br />
|
70 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Beach Buggy Racing 2 Beta Apk.md
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Beach Buggy Racing 2 Beta APK: Todo lo que necesita saber</h1>
|
3 |
-
<p>Si eres un fan de los juegos de carreras de karts, es posible que hayas oído hablar de Beach Buggy Racing, un popular juego móvil que introdujo más de 100 millones de jugadores a las carreras de karts de consola con un divertido giro offroad. Ahora, la secuela, Beach Buggy Racing 2, está aquí para llevarte a otro emocionante viaje a través de lugares exóticos, con nuevos coches, conductores, powerups y modos de juego. </p>
|
4 |
-
<p>Beach Buggy Racing 2 está disponible para Android, iOS y consolas, pero si desea obtener un vistazo de lo que el juego tiene para ofrecer antes de su lanzamiento oficial, puede probar la versión beta APK para dispositivos Android. En este artículo, le diremos todo lo que necesita saber sobre Beach Buggy Racing 2 beta APK, incluyendo sus características, cómo descargarlo e instalarlo, consejos y trucos para jugarlo, y nuestra revisión del juego. </p>
|
5 |
-
<h2>beach buggy racing 2 beta apk</h2><br /><p><b><b>Download File</b> ☆☆☆ <a href="https://bltlly.com/2v6L6q">https://bltlly.com/2v6L6q</a></b></p><br /><br />
|
6 |
-
<h2>Características de Beach Buggy Racing 2</h2>
|
7 |
-
<p>Beach Buggy Racing 2 es un juego de carreras de karts todoterreno en 3D con una física increíble, coches y personajes detallados y armas espectaculares. ¡Es como un juego de consola en la palma de tu mano! Estas son algunas de las características que hacen de este juego tan divertido y adictivo:</p>
|
8 |
-
<ul>
|
9 |
-
<li><strong>Espectacular acción de carreras de karts con increíbles física y gráficos</strong>: Carrera a través de pirámides egipcias, castillos infestados de dragones, naufragios de barcos piratas y bio-laboratorios alienígenas experimentales. Siente la velocidad y la emoción a medida que la deriva, saltar, y romper su camino a la victoria. </li>
|
10 |
-
<li><strong>Más de 45 potenciadores para descubrir y actualizar</strong>: Con Beach Buggy Racing 2, puede crear su propia cubierta de alimentación personalizada con habilidades fuera de este mundo como "Chain Lightning", "Donut Tires", "Boost Juice" y "Killer Bees". Actualiza tus powerups para que sean más potentes y eficaces. </li>
|
11 |
-
|
12 |
-
<li><strong>Recoger más de 55 coches de diferentes tipos y estilos</strong>: Recoger un garaje lleno de buggies de playa, camiones monstruo, coches musculares, pickups clásicos y supercars fórmula. Todos los coches clásicos Beach Buggy volver -- además de docenas de coches nuevos para descubrir! </li <p>Seguir escribiendo el artículo. </p>
|
13 |
-
<ul>
|
14 |
-
<li><strong>Juega contra el mundo en competiciones y torneos online</strong>: Únete a la Beach Buggy Racing League y compite contra pilotos y coches de todo el mundo. Carrera hombro a hombro con hasta 8 jugadores en el móvil, o 4 jugadores en la consola. ¡Pon a prueba tus habilidades en 6 modos de juego diferentes en 15 imaginativas pistas de carreras en 3D, contra un grupo de rivales amantes de lo tropical con un serio caso de furia en el camino! </li>
|
15 |
-
<li><strong>Personaliza tu viaje con pinturas, calcomanías y más</strong>: Haz que tu coche destaque con más de 1500 combinaciones diferentes de pintura y calcomanías. También puede cambiar la apariencia de su conductor con trajes y accesorios frescos. </li>
|
16 |
-
<li><strong>Nuevos modos de juego impresionantes para disfrutar</strong>: Además del modo de carrera clásico, también puedes probar el nuevo modo de aventura, donde exploras un enorme mundo abierto de desafíos y secretos. O puedes jugar el nuevo modo Arena, donde te enfrentas a otros jugadores en un frenesí libre de potenciadores y caos. </li>
|
17 |
-
</ul>
|
18 |
-
<h2>Cómo descargar e instalar Beach Buggy Racing 2 Beta APK</h2>
|
19 |
-
<p>Si estás ansioso por probar Beach Buggy Racing 2 antes de su lanzamiento oficial, puedes descargar e instalar la versión beta de APK para dispositivos Android. Estos son los pasos que debes seguir:</p>
|
20 |
-
<ol>
|
21 |
-
<li><strong>Encontrar una fuente confiable para el archivo beta APK</strong>: Puede buscar en línea para sitios web que ofrecen el archivo beta APK para Beach Buggy Racing 2. Asegúrese de elegir una fuente confiable y segura, ya que algunos sitios web pueden contener malware o virus. También puede consultar las opiniones y valoraciones de otros usuarios que han descargado el archivo. </li>
|
22 |
-
|
23 |
-
<li><strong>Descargar e instalar el archivo beta APK</strong>: Una vez que haya encontrado una fuente confiable y habilitado fuentes desconocidas, puede descargar el archivo beta APK a su dispositivo. Una vez completada la descarga, busque el archivo en el almacenamiento del dispositivo y toque en él para instalarlo. Siga las instrucciones de la pantalla para completar la instalación. </li>
|
24 |
-
<li><strong>Iniciar el juego y disfrutar</strong>: Después de la instalación se hace, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. Ahora puedes disfrutar de Beach Buggy Racing 2 beta APK y experimentar sus características antes que nadie. </li>
|
25 |
-
</ol>
|
26 |
-
<h2>Consejos y trucos para carreras de buggys de playa 2</h2>
|
27 |
-
<p>Beach Buggy Racing 2 es un juego divertido y desafiante que requiere habilidad, estrategia y suerte. Aquí hay algunos consejos y trucos que pueden ayudarte a mejorar tu rendimiento y ganar más carreras:</p>
|
28 |
-
<ul>
|
29 |
-
<li><strong>Dominar las técnicas de deriva y powerslide</strong>: A la deriva y powersliding son habilidades esenciales para cualquier corredor de karts. Le permiten tomar giros bruscos sin perder velocidad, y también llenar su medidor de impulso más rápido. Para la deriva, toque el botón de freno mientras gira. Para powerslide, pulse el botón de freno dos veces mientras gira. También puede ajustar la sensibilidad de la deriva en el menú de configuración. </li>
|
30 |
-
<li><strong>Usa la habilidad del piloto en el momento adecuado</strong>: Cada piloto tiene una habilidad especial única que puede darte una ventaja en la carrera. Por ejemplo, Rez puede teletransportarse por delante de otros corredores, McSkelly puede convocar una horda de esqueletos para frenar a sus oponentes, y Roxie puede destruir a todos con su guitarra. Sin embargo, estas habilidades tienen un tiempo de reutilización, así que úsalas sabiamente y estratégicamente. </li>
|
31 |
-
|
32 |
-
<li><strong>Construye la mejor baraja de potenciadores locos para cada carrera</strong>: Antes de cada carrera, puedes elegir qué potenciadores quieres usar de tu colección. Puedes tener hasta seis potenciadores en tu mazo, pero solo puedes usar uno a la vez durante la carrera. Por lo tanto, es importante elegir sabiamente y equilibrar su mazo de acuerdo a su estrategia y preferencia. Por ejemplo, si quieres ser más agresivo, puedes elegir potenciadores que causen daño o perturben a otros corredores, como <p>Continuar escribiendo el artículo. </p>
|
33 |
-
<ul>
|
34 |
-
<li><strong>Construye la mejor baraja de potenciadores locos para cada carrera</strong>: Antes de cada carrera, puedes elegir qué potenciadores quieres usar de tu colección. Puedes tener hasta seis potenciadores en tu mazo, pero solo puedes usar uno a la vez durante la carrera. Por lo tanto, es importante elegir sabiamente y equilibrar su mazo de acuerdo a su estrategia y preferencia. Por ejemplo, si quieres ser más agresivo, puedes elegir potenciadores que causen daño o perturben a otros corredores, como "Bola de fuego", "Mancha de aceite" o "Buscadores de Tiki". Si quieres estar más a la defensiva, puedes elegir potenciadores que te protejan o te curen, como "Shield", "Repair Kit" o "Boost Juice". También puede mezclar y combinar diferentes potenciadores para crear combos y sinergias. </li>
|
35 |
-
<li><strong>Coge esas burbujas rápidas para aumentar la velocidad extra</strong>: Durante la carrera, verás algunas burbujas azules flotando alrededor de la pista. Estas son burbujas rápidas que le dan un impulso de velocidad temporal cuando conduce a través de ellas. Pueden ayudarte a alcanzar a otros corredores, escapar del peligro o alcanzar atajos. Trata de agarrar tantas burbujas rápidas como puedas, pero ten cuidado de no chocar contra obstáculos u otros corredores mientras lo haces. </li>
|
36 |
-
|
37 |
-
</ul>
|
38 |
-
<h2>Reseña de Beach Buggy Racing 2</h2>
|
39 |
-
<p>Beach Buggy Racing 2 es un divertido y emocionante juego de carreras de karts que te mantendrá entretenido durante horas. Tiene muchas características y contenido que hacen que valga la pena jugar, pero también tiene algunos inconvenientes que pueden afectar su disfrute. Estos son algunos de los pros y los contras de Beach Buggy Racing 2:</p>
|
40 |
-
<tabla>
|
41 |
-
<tr>
|
42 |
-
<th>Pros</th>
|
43 |
-
<th>Contras</th>
|
44 |
-
</tr>
|
45 |
-
<tr>
|
46 |
-
<td><ul>
|
47 |
-
<li>Juego divertido, colorido y adictivo</li>
|
48 |
-
<li>Variedad de coches, conductores, powerups y pistas</li>
|
49 |
-
<li>Grandes gráficos y efectos de sonido</li>
|
50 |
-
<li>Multijugador en línea y opciones de personalización</li>
|
51 |
-
<li>Precio bajo para la versión de consola</li>
|
52 |
-
</ul></td>
|
53 |
-
<td><ul>
|
54 |
-
<li>Repetitivo a veces</li>
|
55 |
-
<li>No hay multijugador en línea para la versión móvil</li>
|
56 |
-
<li>Algunos powerups son demasiado frustrantes</li>
|
57 |
-
<li>Algunos errores y fallos en la versión beta</li>
|
58 |
-
</ul></td>
|
59 |
-
</tr>
|
60 |
-
</tabla>
|
61 |
-
<h2>Conclusión</h2>
|
62 |
-
<p>Beach Buggy Racing 2 es un juego de carreras de karts que ofrece mucha diversión y emoción para jugadores de todas las edades y preferencias. Tiene muchas características y contenido que lo hacen destacar de otros juegos de carreras de karts, como física y gráficos espectaculares, más de 45 potenciadores para descubrir y actualizar, más de 55 coches para coleccionar y personalizar, competiciones y torneos en línea, nuevos modos de juego y más. También tiene algunos inconvenientes que pueden afectar su disfrute, como la repetición, la falta de multijugador en línea para la versión móvil, algunos powerups frustrantes, y algunos errores y fallos en la versión beta. </p>
|
63 |
-
<p></p>
|
64 |
-
<p>Si usted está buscando un juego de carreras de karts que es divertido, colorido y adictivo, Beach Buggy Racing 2 es una gran opción para usted. Puedes probar la versión beta de APK para dispositivos Android antes de su lanzamiento oficial, o comprar la versión de consola por un precio bajo. De cualquier manera, usted tendrá una explosión de carreras a través de lugares exóticos con poderes locos y personajes. </p>
|
65 |
-
<p>Entonces, ¿qué estás esperando? Descargar Beach Buggy Racing 2 beta APK ahora y unirse a la Beach Buggy Racing League! </p>
|
66 |
-
|
67 |
-
<p>Aquí están algunas de las preguntas más comunes que la gente hace acerca de Beach Buggy Racing 2:</p>
|
68 |
-
<ol>
|
69 |
-
<li><strong>Es Beach Buggy Racing 2 gratis? </strong>: Beach Buggy Racing 2 es gratis para descargar y jugar en dispositivos Android e iOS. Sin embargo, contiene compras en la aplicación que le permiten comprar monedas, gemas, coches, conductores, powerups y otros artículos con dinero real. También puedes ver anuncios para ganar monedas o gemas gratis. La versión de consola de Beach Buggy Racing 2 cuesta $9.99 en PlayStation <p>Continuar escribiendo el artículo. </p>
|
70 |
-
<ol>
|
71 |
-
<li><strong>Es Beach Buggy Racing 2 gratis? </strong>: Beach Buggy Racing 2 es gratis para descargar y jugar en dispositivos Android e iOS. Sin embargo, contiene compras en la aplicación que le permiten comprar monedas, gemas, coches, conductores, powerups y otros artículos con dinero real. También puedes ver anuncios para ganar monedas o gemas gratis. La versión de consola de Beach Buggy Racing 2 cuesta $9.99 en PlayStation 4, Xbox One y Nintendo Switch.</li>
|
72 |
-
<li><strong>¿Está Beach Buggy Racing 2 en línea? </strong>: Beach Buggy Racing 2 tiene funciones multijugador en línea para versiones móviles y de consola. En el móvil, puedes jugar contra otros jugadores de todo el mundo en competiciones y torneos online. También puede unirse a un club y chatear con otros miembros. En la consola, puede jugar con hasta 4 jugadores en la misma pantalla o en línea. </li>
|
73 |
-
<li><strong>¿Beach Buggy Racing 2 está fuera de línea? </strong>: Beach Buggy Racing 2 también se puede jugar sin conexión en las versiones móvil y consola. En el móvil, puedes jugar al modo Aventura, donde exploras un enorme mundo abierto de desafíos y secretos. También puedes jugar en el modo Arena, donde te enfrentas a otros jugadores en un frenesí libre de potenciadores y caos. En la consola, puedes jugar el modo Carrera, donde compites contra oponentes de IA en 15 pistas diferentes. </li>
|
74 |
-
|
75 |
-
<li><strong>Cómo desbloquear nuevos coches y conductores en Beach Buggy Racing 2?</strong>: Hay más de 55 coches y más de 25 conductores para desbloquear en Beach Buggy Racing 2. Puedes desbloquearlos jugando, completando desafíos o comprándolos con monedas o gemas. Algunos coches y conductores son exclusivos de ciertos modos de juego o plataformas, por lo que es posible que tenga que jugar diferentes modos o versiones para obtener todos. </li>
|
76 |
-
</ol></p> 64aa2da5cf<br />
|
77 |
-
<br />
|
78 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/_distutils.py
DELETED
@@ -1,173 +0,0 @@
|
|
1 |
-
"""Locations where we look for configs, install stuff, etc"""
|
2 |
-
|
3 |
-
# The following comment should be removed at some point in the future.
|
4 |
-
# mypy: strict-optional=False
|
5 |
-
|
6 |
-
# If pip's going to use distutils, it should not be using the copy that setuptools
|
7 |
-
# might have injected into the environment. This is done by removing the injected
|
8 |
-
# shim, if it's injected.
|
9 |
-
#
|
10 |
-
# See https://github.com/pypa/pip/issues/8761 for the original discussion and
|
11 |
-
# rationale for why this is done within pip.
|
12 |
-
try:
|
13 |
-
__import__("_distutils_hack").remove_shim()
|
14 |
-
except (ImportError, AttributeError):
|
15 |
-
pass
|
16 |
-
|
17 |
-
import logging
|
18 |
-
import os
|
19 |
-
import sys
|
20 |
-
from distutils.cmd import Command as DistutilsCommand
|
21 |
-
from distutils.command.install import SCHEME_KEYS
|
22 |
-
from distutils.command.install import install as distutils_install_command
|
23 |
-
from distutils.sysconfig import get_python_lib
|
24 |
-
from typing import Dict, List, Optional, Union, cast
|
25 |
-
|
26 |
-
from pip._internal.models.scheme import Scheme
|
27 |
-
from pip._internal.utils.compat import WINDOWS
|
28 |
-
from pip._internal.utils.virtualenv import running_under_virtualenv
|
29 |
-
|
30 |
-
from .base import get_major_minor_version
|
31 |
-
|
32 |
-
logger = logging.getLogger(__name__)
|
33 |
-
|
34 |
-
|
35 |
-
def distutils_scheme(
|
36 |
-
dist_name: str,
|
37 |
-
user: bool = False,
|
38 |
-
home: Optional[str] = None,
|
39 |
-
root: Optional[str] = None,
|
40 |
-
isolated: bool = False,
|
41 |
-
prefix: Optional[str] = None,
|
42 |
-
*,
|
43 |
-
ignore_config_files: bool = False,
|
44 |
-
) -> Dict[str, str]:
|
45 |
-
"""
|
46 |
-
Return a distutils install scheme
|
47 |
-
"""
|
48 |
-
from distutils.dist import Distribution
|
49 |
-
|
50 |
-
dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name}
|
51 |
-
if isolated:
|
52 |
-
dist_args["script_args"] = ["--no-user-cfg"]
|
53 |
-
|
54 |
-
d = Distribution(dist_args)
|
55 |
-
if not ignore_config_files:
|
56 |
-
try:
|
57 |
-
d.parse_config_files()
|
58 |
-
except UnicodeDecodeError:
|
59 |
-
# Typeshed does not include find_config_files() for some reason.
|
60 |
-
paths = d.find_config_files() # type: ignore
|
61 |
-
logger.warning(
|
62 |
-
"Ignore distutils configs in %s due to encoding errors.",
|
63 |
-
", ".join(os.path.basename(p) for p in paths),
|
64 |
-
)
|
65 |
-
obj: Optional[DistutilsCommand] = None
|
66 |
-
obj = d.get_command_obj("install", create=True)
|
67 |
-
assert obj is not None
|
68 |
-
i = cast(distutils_install_command, obj)
|
69 |
-
# NOTE: setting user or home has the side-effect of creating the home dir
|
70 |
-
# or user base for installations during finalize_options()
|
71 |
-
# ideally, we'd prefer a scheme class that has no side-effects.
|
72 |
-
assert not (user and prefix), f"user={user} prefix={prefix}"
|
73 |
-
assert not (home and prefix), f"home={home} prefix={prefix}"
|
74 |
-
i.user = user or i.user
|
75 |
-
if user or home:
|
76 |
-
i.prefix = ""
|
77 |
-
i.prefix = prefix or i.prefix
|
78 |
-
i.home = home or i.home
|
79 |
-
i.root = root or i.root
|
80 |
-
i.finalize_options()
|
81 |
-
|
82 |
-
scheme = {}
|
83 |
-
for key in SCHEME_KEYS:
|
84 |
-
scheme[key] = getattr(i, "install_" + key)
|
85 |
-
|
86 |
-
# install_lib specified in setup.cfg should install *everything*
|
87 |
-
# into there (i.e. it takes precedence over both purelib and
|
88 |
-
# platlib). Note, i.install_lib is *always* set after
|
89 |
-
# finalize_options(); we only want to override here if the user
|
90 |
-
# has explicitly requested it hence going back to the config
|
91 |
-
if "install_lib" in d.get_option_dict("install"):
|
92 |
-
scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
|
93 |
-
|
94 |
-
if running_under_virtualenv():
|
95 |
-
if home:
|
96 |
-
prefix = home
|
97 |
-
elif user:
|
98 |
-
prefix = i.install_userbase
|
99 |
-
else:
|
100 |
-
prefix = i.prefix
|
101 |
-
scheme["headers"] = os.path.join(
|
102 |
-
prefix,
|
103 |
-
"include",
|
104 |
-
"site",
|
105 |
-
f"python{get_major_minor_version()}",
|
106 |
-
dist_name,
|
107 |
-
)
|
108 |
-
|
109 |
-
if root is not None:
|
110 |
-
path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1]
|
111 |
-
scheme["headers"] = os.path.join(root, path_no_drive[1:])
|
112 |
-
|
113 |
-
return scheme
|
114 |
-
|
115 |
-
|
116 |
-
def get_scheme(
|
117 |
-
dist_name: str,
|
118 |
-
user: bool = False,
|
119 |
-
home: Optional[str] = None,
|
120 |
-
root: Optional[str] = None,
|
121 |
-
isolated: bool = False,
|
122 |
-
prefix: Optional[str] = None,
|
123 |
-
) -> Scheme:
|
124 |
-
"""
|
125 |
-
Get the "scheme" corresponding to the input parameters. The distutils
|
126 |
-
documentation provides the context for the available schemes:
|
127 |
-
https://docs.python.org/3/install/index.html#alternate-installation
|
128 |
-
|
129 |
-
:param dist_name: the name of the package to retrieve the scheme for, used
|
130 |
-
in the headers scheme path
|
131 |
-
:param user: indicates to use the "user" scheme
|
132 |
-
:param home: indicates to use the "home" scheme and provides the base
|
133 |
-
directory for the same
|
134 |
-
:param root: root under which other directories are re-based
|
135 |
-
:param isolated: equivalent to --no-user-cfg, i.e. do not consider
|
136 |
-
~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for
|
137 |
-
scheme paths
|
138 |
-
:param prefix: indicates to use the "prefix" scheme and provides the
|
139 |
-
base directory for the same
|
140 |
-
"""
|
141 |
-
scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix)
|
142 |
-
return Scheme(
|
143 |
-
platlib=scheme["platlib"],
|
144 |
-
purelib=scheme["purelib"],
|
145 |
-
headers=scheme["headers"],
|
146 |
-
scripts=scheme["scripts"],
|
147 |
-
data=scheme["data"],
|
148 |
-
)
|
149 |
-
|
150 |
-
|
151 |
-
def get_bin_prefix() -> str:
|
152 |
-
# XXX: In old virtualenv versions, sys.prefix can contain '..' components,
|
153 |
-
# so we need to call normpath to eliminate them.
|
154 |
-
prefix = os.path.normpath(sys.prefix)
|
155 |
-
if WINDOWS:
|
156 |
-
bin_py = os.path.join(prefix, "Scripts")
|
157 |
-
# buildout uses 'bin' on Windows too?
|
158 |
-
if not os.path.exists(bin_py):
|
159 |
-
bin_py = os.path.join(prefix, "bin")
|
160 |
-
return bin_py
|
161 |
-
# Forcing to use /usr/local/bin for standard macOS framework installs
|
162 |
-
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
|
163 |
-
if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/":
|
164 |
-
return "/usr/local/bin"
|
165 |
-
return os.path.join(prefix, "bin")
|
166 |
-
|
167 |
-
|
168 |
-
def get_purelib() -> str:
|
169 |
-
return get_python_lib(plat_specific=False)
|
170 |
-
|
171 |
-
|
172 |
-
def get_platlib() -> str:
|
173 |
-
return get_python_lib(plat_specific=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py
DELETED
@@ -1,104 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import pathlib
|
3 |
-
import tempfile
|
4 |
-
import functools
|
5 |
-
import contextlib
|
6 |
-
import types
|
7 |
-
import importlib
|
8 |
-
|
9 |
-
from typing import Union, Optional
|
10 |
-
from .abc import ResourceReader, Traversable
|
11 |
-
|
12 |
-
from ._compat import wrap_spec
|
13 |
-
|
14 |
-
Package = Union[types.ModuleType, str]
|
15 |
-
|
16 |
-
|
17 |
-
def files(package):
|
18 |
-
# type: (Package) -> Traversable
|
19 |
-
"""
|
20 |
-
Get a Traversable resource from a package
|
21 |
-
"""
|
22 |
-
return from_package(get_package(package))
|
23 |
-
|
24 |
-
|
25 |
-
def get_resource_reader(package):
|
26 |
-
# type: (types.ModuleType) -> Optional[ResourceReader]
|
27 |
-
"""
|
28 |
-
Return the package's loader if it's a ResourceReader.
|
29 |
-
"""
|
30 |
-
# We can't use
|
31 |
-
# a issubclass() check here because apparently abc.'s __subclasscheck__()
|
32 |
-
# hook wants to create a weak reference to the object, but
|
33 |
-
# zipimport.zipimporter does not support weak references, resulting in a
|
34 |
-
# TypeError. That seems terrible.
|
35 |
-
spec = package.__spec__
|
36 |
-
reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
|
37 |
-
if reader is None:
|
38 |
-
return None
|
39 |
-
return reader(spec.name) # type: ignore
|
40 |
-
|
41 |
-
|
42 |
-
def resolve(cand):
|
43 |
-
# type: (Package) -> types.ModuleType
|
44 |
-
return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
|
45 |
-
|
46 |
-
|
47 |
-
def get_package(package):
|
48 |
-
# type: (Package) -> types.ModuleType
|
49 |
-
"""Take a package name or module object and return the module.
|
50 |
-
|
51 |
-
Raise an exception if the resolved module is not a package.
|
52 |
-
"""
|
53 |
-
resolved = resolve(package)
|
54 |
-
if wrap_spec(resolved).submodule_search_locations is None:
|
55 |
-
raise TypeError(f'{package!r} is not a package')
|
56 |
-
return resolved
|
57 |
-
|
58 |
-
|
59 |
-
def from_package(package):
|
60 |
-
"""
|
61 |
-
Return a Traversable object for the given package.
|
62 |
-
|
63 |
-
"""
|
64 |
-
spec = wrap_spec(package)
|
65 |
-
reader = spec.loader.get_resource_reader(spec.name)
|
66 |
-
return reader.files()
|
67 |
-
|
68 |
-
|
69 |
-
@contextlib.contextmanager
|
70 |
-
def _tempfile(reader, suffix=''):
|
71 |
-
# Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
|
72 |
-
# blocks due to the need to close the temporary file to work on Windows
|
73 |
-
# properly.
|
74 |
-
fd, raw_path = tempfile.mkstemp(suffix=suffix)
|
75 |
-
try:
|
76 |
-
try:
|
77 |
-
os.write(fd, reader())
|
78 |
-
finally:
|
79 |
-
os.close(fd)
|
80 |
-
del reader
|
81 |
-
yield pathlib.Path(raw_path)
|
82 |
-
finally:
|
83 |
-
try:
|
84 |
-
os.remove(raw_path)
|
85 |
-
except FileNotFoundError:
|
86 |
-
pass
|
87 |
-
|
88 |
-
|
89 |
-
@functools.singledispatch
|
90 |
-
def as_file(path):
|
91 |
-
"""
|
92 |
-
Given a Traversable object, return that object as a
|
93 |
-
path on the local file system in a context manager.
|
94 |
-
"""
|
95 |
-
return _tempfile(path.read_bytes, suffix=path.name)
|
96 |
-
|
97 |
-
|
98 |
-
@as_file.register(pathlib.Path)
|
99 |
-
@contextlib.contextmanager
|
100 |
-
def _(path):
|
101 |
-
"""
|
102 |
-
Degenerate behavior for pathlib.Path objects.
|
103 |
-
"""
|
104 |
-
yield path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py
DELETED
@@ -1,35 +0,0 @@
|
|
1 |
-
from itertools import filterfalse
|
2 |
-
|
3 |
-
from typing import (
|
4 |
-
Callable,
|
5 |
-
Iterable,
|
6 |
-
Iterator,
|
7 |
-
Optional,
|
8 |
-
Set,
|
9 |
-
TypeVar,
|
10 |
-
Union,
|
11 |
-
)
|
12 |
-
|
13 |
-
# Type and type variable definitions
|
14 |
-
_T = TypeVar('_T')
|
15 |
-
_U = TypeVar('_U')
|
16 |
-
|
17 |
-
|
18 |
-
def unique_everseen(
|
19 |
-
iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
|
20 |
-
) -> Iterator[_T]:
|
21 |
-
"List unique elements, preserving order. Remember all elements ever seen."
|
22 |
-
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
|
23 |
-
# unique_everseen('ABBCcAD', str.lower) --> A B C D
|
24 |
-
seen: Set[Union[_T, _U]] = set()
|
25 |
-
seen_add = seen.add
|
26 |
-
if key is None:
|
27 |
-
for element in filterfalse(seen.__contains__, iterable):
|
28 |
-
seen_add(element)
|
29 |
-
yield element
|
30 |
-
else:
|
31 |
-
for element in iterable:
|
32 |
-
k = key(element)
|
33 |
-
if k not in seen:
|
34 |
-
seen_add(k)
|
35 |
-
yield element
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/__init__.py
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
-
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
-
# for complete details.
|
4 |
-
|
5 |
-
from .__about__ import (
|
6 |
-
__author__,
|
7 |
-
__copyright__,
|
8 |
-
__email__,
|
9 |
-
__license__,
|
10 |
-
__summary__,
|
11 |
-
__title__,
|
12 |
-
__uri__,
|
13 |
-
__version__,
|
14 |
-
)
|
15 |
-
|
16 |
-
__all__ = [
|
17 |
-
"__title__",
|
18 |
-
"__summary__",
|
19 |
-
"__uri__",
|
20 |
-
"__version__",
|
21 |
-
"__author__",
|
22 |
-
"__email__",
|
23 |
-
"__license__",
|
24 |
-
"__copyright__",
|
25 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BramVanroy/llama-2-13b-chat-dutch-space/style.css
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
h1 {
|
2 |
-
text-align: center;
|
3 |
-
}
|
4 |
-
|
5 |
-
#component-0 {
|
6 |
-
max-width: 900px;
|
7 |
-
margin: auto;
|
8 |
-
padding-top: 1.5rem;
|
9 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/merge.h
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file merge.h
|
19 |
-
* \brief Sequential implementation of merge algorithms.
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
#include <thrust/system/detail/sequential/execution_policy.h>
|
26 |
-
|
27 |
-
namespace thrust
|
28 |
-
{
|
29 |
-
namespace system
|
30 |
-
{
|
31 |
-
namespace detail
|
32 |
-
{
|
33 |
-
namespace sequential
|
34 |
-
{
|
35 |
-
|
36 |
-
|
37 |
-
template<typename DerivedPolicy,
|
38 |
-
typename InputIterator1,
|
39 |
-
typename InputIterator2,
|
40 |
-
typename OutputIterator,
|
41 |
-
typename StrictWeakOrdering>
|
42 |
-
__host__ __device__
|
43 |
-
OutputIterator merge(sequential::execution_policy<DerivedPolicy> &exec,
|
44 |
-
InputIterator1 first1,
|
45 |
-
InputIterator1 last1,
|
46 |
-
InputIterator2 first2,
|
47 |
-
InputIterator2 last2,
|
48 |
-
OutputIterator result,
|
49 |
-
StrictWeakOrdering comp);
|
50 |
-
|
51 |
-
|
52 |
-
template<typename DerivedPolicy,
|
53 |
-
typename InputIterator1,
|
54 |
-
typename InputIterator2,
|
55 |
-
typename InputIterator3,
|
56 |
-
typename InputIterator4,
|
57 |
-
typename OutputIterator1,
|
58 |
-
typename OutputIterator2,
|
59 |
-
typename StrictWeakOrdering>
|
60 |
-
__host__ __device__
|
61 |
-
thrust::pair<OutputIterator1,OutputIterator2>
|
62 |
-
merge_by_key(sequential::execution_policy<DerivedPolicy> &exec,
|
63 |
-
InputIterator1 keys_first1,
|
64 |
-
InputIterator1 keys_last1,
|
65 |
-
InputIterator2 keys_first2,
|
66 |
-
InputIterator2 keys_last2,
|
67 |
-
InputIterator3 values_first1,
|
68 |
-
InputIterator4 values_first2,
|
69 |
-
OutputIterator1 keys_result,
|
70 |
-
OutputIterator2 values_result,
|
71 |
-
StrictWeakOrdering comp);
|
72 |
-
|
73 |
-
|
74 |
-
} // end namespace sequential
|
75 |
-
} // end namespace detail
|
76 |
-
} // end namespace system
|
77 |
-
} // end namespace thrust
|
78 |
-
|
79 |
-
#include <thrust/system/detail/sequential/merge.inl>
|
80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/core/bbox/samplers/score_hlr_sampler.py
DELETED
@@ -1,264 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from mmcv.ops import nms_match
|
3 |
-
|
4 |
-
from ..builder import BBOX_SAMPLERS
|
5 |
-
from ..transforms import bbox2roi
|
6 |
-
from .base_sampler import BaseSampler
|
7 |
-
from .sampling_result import SamplingResult
|
8 |
-
|
9 |
-
|
10 |
-
@BBOX_SAMPLERS.register_module()
|
11 |
-
class ScoreHLRSampler(BaseSampler):
|
12 |
-
r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
|
13 |
-
Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
|
14 |
-
|
15 |
-
Score hierarchical local rank (HLR) differentiates with RandomSampler in
|
16 |
-
negative part. It firstly computes Score-HLR in a two-step way,
|
17 |
-
then linearly maps score hlr to the loss weights.
|
18 |
-
|
19 |
-
Args:
|
20 |
-
num (int): Total number of sampled RoIs.
|
21 |
-
pos_fraction (float): Fraction of positive samples.
|
22 |
-
context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
|
23 |
-
neg_pos_ub (int): Upper bound of the ratio of num negative to num
|
24 |
-
positive, -1 means no upper bound.
|
25 |
-
add_gt_as_proposals (bool): Whether to add ground truth as proposals.
|
26 |
-
k (float): Power of the non-linear mapping.
|
27 |
-
bias (float): Shift of the non-linear mapping.
|
28 |
-
score_thr (float): Minimum score that a negative sample is to be
|
29 |
-
considered as valid bbox.
|
30 |
-
"""
|
31 |
-
|
32 |
-
def __init__(self,
|
33 |
-
num,
|
34 |
-
pos_fraction,
|
35 |
-
context,
|
36 |
-
neg_pos_ub=-1,
|
37 |
-
add_gt_as_proposals=True,
|
38 |
-
k=0.5,
|
39 |
-
bias=0,
|
40 |
-
score_thr=0.05,
|
41 |
-
iou_thr=0.5,
|
42 |
-
**kwargs):
|
43 |
-
super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
|
44 |
-
self.k = k
|
45 |
-
self.bias = bias
|
46 |
-
self.score_thr = score_thr
|
47 |
-
self.iou_thr = iou_thr
|
48 |
-
self.context = context
|
49 |
-
# context of cascade detectors is a list, so distinguish them here.
|
50 |
-
if not hasattr(context, 'num_stages'):
|
51 |
-
self.bbox_roi_extractor = context.bbox_roi_extractor
|
52 |
-
self.bbox_head = context.bbox_head
|
53 |
-
self.with_shared_head = context.with_shared_head
|
54 |
-
if self.with_shared_head:
|
55 |
-
self.shared_head = context.shared_head
|
56 |
-
else:
|
57 |
-
self.bbox_roi_extractor = context.bbox_roi_extractor[
|
58 |
-
context.current_stage]
|
59 |
-
self.bbox_head = context.bbox_head[context.current_stage]
|
60 |
-
|
61 |
-
@staticmethod
|
62 |
-
def random_choice(gallery, num):
|
63 |
-
"""Randomly select some elements from the gallery.
|
64 |
-
|
65 |
-
If `gallery` is a Tensor, the returned indices will be a Tensor;
|
66 |
-
If `gallery` is a ndarray or list, the returned indices will be a
|
67 |
-
ndarray.
|
68 |
-
|
69 |
-
Args:
|
70 |
-
gallery (Tensor | ndarray | list): indices pool.
|
71 |
-
num (int): expected sample num.
|
72 |
-
|
73 |
-
Returns:
|
74 |
-
Tensor or ndarray: sampled indices.
|
75 |
-
"""
|
76 |
-
assert len(gallery) >= num
|
77 |
-
|
78 |
-
is_tensor = isinstance(gallery, torch.Tensor)
|
79 |
-
if not is_tensor:
|
80 |
-
if torch.cuda.is_available():
|
81 |
-
device = torch.cuda.current_device()
|
82 |
-
else:
|
83 |
-
device = 'cpu'
|
84 |
-
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
|
85 |
-
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
|
86 |
-
rand_inds = gallery[perm]
|
87 |
-
if not is_tensor:
|
88 |
-
rand_inds = rand_inds.cpu().numpy()
|
89 |
-
return rand_inds
|
90 |
-
|
91 |
-
def _sample_pos(self, assign_result, num_expected, **kwargs):
|
92 |
-
"""Randomly sample some positive samples."""
|
93 |
-
pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
|
94 |
-
if pos_inds.numel() <= num_expected:
|
95 |
-
return pos_inds
|
96 |
-
else:
|
97 |
-
return self.random_choice(pos_inds, num_expected)
|
98 |
-
|
99 |
-
def _sample_neg(self,
|
100 |
-
assign_result,
|
101 |
-
num_expected,
|
102 |
-
bboxes,
|
103 |
-
feats=None,
|
104 |
-
img_meta=None,
|
105 |
-
**kwargs):
|
106 |
-
"""Sample negative samples.
|
107 |
-
|
108 |
-
Score-HLR sampler is done in the following steps:
|
109 |
-
1. Take the maximum positive score prediction of each negative samples
|
110 |
-
as s_i.
|
111 |
-
2. Filter out negative samples whose s_i <= score_thr, the left samples
|
112 |
-
are called valid samples.
|
113 |
-
3. Use NMS-Match to divide valid samples into different groups,
|
114 |
-
samples in the same group will greatly overlap with each other
|
115 |
-
4. Rank the matched samples in two-steps to get Score-HLR.
|
116 |
-
(1) In the same group, rank samples with their scores.
|
117 |
-
(2) In the same score rank across different groups,
|
118 |
-
rank samples with their scores again.
|
119 |
-
5. Linearly map Score-HLR to the final label weights.
|
120 |
-
|
121 |
-
Args:
|
122 |
-
assign_result (:obj:`AssignResult`): result of assigner.
|
123 |
-
num_expected (int): Expected number of samples.
|
124 |
-
bboxes (Tensor): bbox to be sampled.
|
125 |
-
feats (Tensor): Features come from FPN.
|
126 |
-
img_meta (dict): Meta information dictionary.
|
127 |
-
"""
|
128 |
-
neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
|
129 |
-
num_neg = neg_inds.size(0)
|
130 |
-
if num_neg == 0:
|
131 |
-
return neg_inds, None
|
132 |
-
with torch.no_grad():
|
133 |
-
neg_bboxes = bboxes[neg_inds]
|
134 |
-
neg_rois = bbox2roi([neg_bboxes])
|
135 |
-
bbox_result = self.context._bbox_forward(feats, neg_rois)
|
136 |
-
cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
|
137 |
-
'bbox_pred']
|
138 |
-
|
139 |
-
ori_loss = self.bbox_head.loss(
|
140 |
-
cls_score=cls_score,
|
141 |
-
bbox_pred=None,
|
142 |
-
rois=None,
|
143 |
-
labels=neg_inds.new_full((num_neg, ),
|
144 |
-
self.bbox_head.num_classes),
|
145 |
-
label_weights=cls_score.new_ones(num_neg),
|
146 |
-
bbox_targets=None,
|
147 |
-
bbox_weights=None,
|
148 |
-
reduction_override='none')['loss_cls']
|
149 |
-
|
150 |
-
# filter out samples with the max score lower than score_thr
|
151 |
-
max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
|
152 |
-
valid_inds = (max_score > self.score_thr).nonzero().view(-1)
|
153 |
-
invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
|
154 |
-
num_valid = valid_inds.size(0)
|
155 |
-
num_invalid = invalid_inds.size(0)
|
156 |
-
|
157 |
-
num_expected = min(num_neg, num_expected)
|
158 |
-
num_hlr = min(num_valid, num_expected)
|
159 |
-
num_rand = num_expected - num_hlr
|
160 |
-
if num_valid > 0:
|
161 |
-
valid_rois = neg_rois[valid_inds]
|
162 |
-
valid_max_score = max_score[valid_inds]
|
163 |
-
valid_argmax_score = argmax_score[valid_inds]
|
164 |
-
valid_bbox_pred = bbox_pred[valid_inds]
|
165 |
-
|
166 |
-
# valid_bbox_pred shape: [num_valid, #num_classes, 4]
|
167 |
-
valid_bbox_pred = valid_bbox_pred.view(
|
168 |
-
valid_bbox_pred.size(0), -1, 4)
|
169 |
-
selected_bbox_pred = valid_bbox_pred[range(num_valid),
|
170 |
-
valid_argmax_score]
|
171 |
-
pred_bboxes = self.bbox_head.bbox_coder.decode(
|
172 |
-
valid_rois[:, 1:], selected_bbox_pred)
|
173 |
-
pred_bboxes_with_score = torch.cat(
|
174 |
-
[pred_bboxes, valid_max_score[:, None]], -1)
|
175 |
-
group = nms_match(pred_bboxes_with_score, self.iou_thr)
|
176 |
-
|
177 |
-
# imp: importance
|
178 |
-
imp = cls_score.new_zeros(num_valid)
|
179 |
-
for g in group:
|
180 |
-
g_score = valid_max_score[g]
|
181 |
-
# g_score has already sorted
|
182 |
-
rank = g_score.new_tensor(range(g_score.size(0)))
|
183 |
-
imp[g] = num_valid - rank + g_score
|
184 |
-
_, imp_rank_inds = imp.sort(descending=True)
|
185 |
-
_, imp_rank = imp_rank_inds.sort()
|
186 |
-
hlr_inds = imp_rank_inds[:num_expected]
|
187 |
-
|
188 |
-
if num_rand > 0:
|
189 |
-
rand_inds = torch.randperm(num_invalid)[:num_rand]
|
190 |
-
select_inds = torch.cat(
|
191 |
-
[valid_inds[hlr_inds], invalid_inds[rand_inds]])
|
192 |
-
else:
|
193 |
-
select_inds = valid_inds[hlr_inds]
|
194 |
-
|
195 |
-
neg_label_weights = cls_score.new_ones(num_expected)
|
196 |
-
|
197 |
-
up_bound = max(num_expected, num_valid)
|
198 |
-
imp_weights = (up_bound -
|
199 |
-
imp_rank[hlr_inds].float()) / up_bound
|
200 |
-
neg_label_weights[:num_hlr] = imp_weights
|
201 |
-
neg_label_weights[num_hlr:] = imp_weights.min()
|
202 |
-
neg_label_weights = (self.bias +
|
203 |
-
(1 - self.bias) * neg_label_weights).pow(
|
204 |
-
self.k)
|
205 |
-
ori_selected_loss = ori_loss[select_inds]
|
206 |
-
new_loss = ori_selected_loss * neg_label_weights
|
207 |
-
norm_ratio = ori_selected_loss.sum() / new_loss.sum()
|
208 |
-
neg_label_weights *= norm_ratio
|
209 |
-
else:
|
210 |
-
neg_label_weights = cls_score.new_ones(num_expected)
|
211 |
-
select_inds = torch.randperm(num_neg)[:num_expected]
|
212 |
-
|
213 |
-
return neg_inds[select_inds], neg_label_weights
|
214 |
-
|
215 |
-
def sample(self,
|
216 |
-
assign_result,
|
217 |
-
bboxes,
|
218 |
-
gt_bboxes,
|
219 |
-
gt_labels=None,
|
220 |
-
img_meta=None,
|
221 |
-
**kwargs):
|
222 |
-
"""Sample positive and negative bboxes.
|
223 |
-
|
224 |
-
This is a simple implementation of bbox sampling given candidates,
|
225 |
-
assigning results and ground truth bboxes.
|
226 |
-
|
227 |
-
Args:
|
228 |
-
assign_result (:obj:`AssignResult`): Bbox assigning results.
|
229 |
-
bboxes (Tensor): Boxes to be sampled from.
|
230 |
-
gt_bboxes (Tensor): Ground truth bboxes.
|
231 |
-
gt_labels (Tensor, optional): Class labels of ground truth bboxes.
|
232 |
-
|
233 |
-
Returns:
|
234 |
-
tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negetive
|
235 |
-
label weights.
|
236 |
-
"""
|
237 |
-
bboxes = bboxes[:, :4]
|
238 |
-
|
239 |
-
gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
|
240 |
-
if self.add_gt_as_proposals:
|
241 |
-
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
|
242 |
-
assign_result.add_gt_(gt_labels)
|
243 |
-
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
|
244 |
-
gt_flags = torch.cat([gt_ones, gt_flags])
|
245 |
-
|
246 |
-
num_expected_pos = int(self.num * self.pos_fraction)
|
247 |
-
pos_inds = self.pos_sampler._sample_pos(
|
248 |
-
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
|
249 |
-
num_sampled_pos = pos_inds.numel()
|
250 |
-
num_expected_neg = self.num - num_sampled_pos
|
251 |
-
if self.neg_pos_ub >= 0:
|
252 |
-
_pos = max(1, num_sampled_pos)
|
253 |
-
neg_upper_bound = int(self.neg_pos_ub * _pos)
|
254 |
-
if num_expected_neg > neg_upper_bound:
|
255 |
-
num_expected_neg = neg_upper_bound
|
256 |
-
neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
|
257 |
-
assign_result,
|
258 |
-
num_expected_neg,
|
259 |
-
bboxes,
|
260 |
-
img_meta=img_meta,
|
261 |
-
**kwargs)
|
262 |
-
|
263 |
-
return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
|
264 |
-
assign_result, gt_flags), neg_label_weights
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/models/backbones/resnext.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
from mmcv.cnn import build_conv_layer, build_norm_layer
|
4 |
-
|
5 |
-
from ..builder import BACKBONES
|
6 |
-
from ..utils import ResLayer
|
7 |
-
from .resnet import Bottleneck as _Bottleneck
|
8 |
-
from .resnet import ResNet
|
9 |
-
|
10 |
-
|
11 |
-
class Bottleneck(_Bottleneck):
|
12 |
-
expansion = 4
|
13 |
-
|
14 |
-
def __init__(self,
|
15 |
-
inplanes,
|
16 |
-
planes,
|
17 |
-
groups=1,
|
18 |
-
base_width=4,
|
19 |
-
base_channels=64,
|
20 |
-
**kwargs):
|
21 |
-
"""Bottleneck block for ResNeXt.
|
22 |
-
|
23 |
-
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
|
24 |
-
it is "caffe", the stride-two layer is the first 1x1 conv layer.
|
25 |
-
"""
|
26 |
-
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
|
27 |
-
|
28 |
-
if groups == 1:
|
29 |
-
width = self.planes
|
30 |
-
else:
|
31 |
-
width = math.floor(self.planes *
|
32 |
-
(base_width / base_channels)) * groups
|
33 |
-
|
34 |
-
self.norm1_name, norm1 = build_norm_layer(
|
35 |
-
self.norm_cfg, width, postfix=1)
|
36 |
-
self.norm2_name, norm2 = build_norm_layer(
|
37 |
-
self.norm_cfg, width, postfix=2)
|
38 |
-
self.norm3_name, norm3 = build_norm_layer(
|
39 |
-
self.norm_cfg, self.planes * self.expansion, postfix=3)
|
40 |
-
|
41 |
-
self.conv1 = build_conv_layer(
|
42 |
-
self.conv_cfg,
|
43 |
-
self.inplanes,
|
44 |
-
width,
|
45 |
-
kernel_size=1,
|
46 |
-
stride=self.conv1_stride,
|
47 |
-
bias=False)
|
48 |
-
self.add_module(self.norm1_name, norm1)
|
49 |
-
fallback_on_stride = False
|
50 |
-
self.with_modulated_dcn = False
|
51 |
-
if self.with_dcn:
|
52 |
-
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
|
53 |
-
if not self.with_dcn or fallback_on_stride:
|
54 |
-
self.conv2 = build_conv_layer(
|
55 |
-
self.conv_cfg,
|
56 |
-
width,
|
57 |
-
width,
|
58 |
-
kernel_size=3,
|
59 |
-
stride=self.conv2_stride,
|
60 |
-
padding=self.dilation,
|
61 |
-
dilation=self.dilation,
|
62 |
-
groups=groups,
|
63 |
-
bias=False)
|
64 |
-
else:
|
65 |
-
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
|
66 |
-
self.conv2 = build_conv_layer(
|
67 |
-
self.dcn,
|
68 |
-
width,
|
69 |
-
width,
|
70 |
-
kernel_size=3,
|
71 |
-
stride=self.conv2_stride,
|
72 |
-
padding=self.dilation,
|
73 |
-
dilation=self.dilation,
|
74 |
-
groups=groups,
|
75 |
-
bias=False)
|
76 |
-
|
77 |
-
self.add_module(self.norm2_name, norm2)
|
78 |
-
self.conv3 = build_conv_layer(
|
79 |
-
self.conv_cfg,
|
80 |
-
width,
|
81 |
-
self.planes * self.expansion,
|
82 |
-
kernel_size=1,
|
83 |
-
bias=False)
|
84 |
-
self.add_module(self.norm3_name, norm3)
|
85 |
-
|
86 |
-
if self.with_plugins:
|
87 |
-
self._del_block_plugins(self.after_conv1_plugin_names +
|
88 |
-
self.after_conv2_plugin_names +
|
89 |
-
self.after_conv3_plugin_names)
|
90 |
-
self.after_conv1_plugin_names = self.make_block_plugins(
|
91 |
-
width, self.after_conv1_plugins)
|
92 |
-
self.after_conv2_plugin_names = self.make_block_plugins(
|
93 |
-
width, self.after_conv2_plugins)
|
94 |
-
self.after_conv3_plugin_names = self.make_block_plugins(
|
95 |
-
self.planes * self.expansion, self.after_conv3_plugins)
|
96 |
-
|
97 |
-
def _del_block_plugins(self, plugin_names):
|
98 |
-
"""delete plugins for block if exist.
|
99 |
-
|
100 |
-
Args:
|
101 |
-
plugin_names (list[str]): List of plugins name to delete.
|
102 |
-
"""
|
103 |
-
assert isinstance(plugin_names, list)
|
104 |
-
for plugin_name in plugin_names:
|
105 |
-
del self._modules[plugin_name]
|
106 |
-
|
107 |
-
|
108 |
-
@BACKBONES.register_module()
|
109 |
-
class ResNeXt(ResNet):
|
110 |
-
"""ResNeXt backbone.
|
111 |
-
|
112 |
-
Args:
|
113 |
-
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
|
114 |
-
in_channels (int): Number of input image channels. Default: 3.
|
115 |
-
num_stages (int): Resnet stages. Default: 4.
|
116 |
-
groups (int): Group of resnext.
|
117 |
-
base_width (int): Base width of resnext.
|
118 |
-
strides (Sequence[int]): Strides of the first block of each stage.
|
119 |
-
dilations (Sequence[int]): Dilation of each stage.
|
120 |
-
out_indices (Sequence[int]): Output from which stages.
|
121 |
-
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
|
122 |
-
layer is the 3x3 conv layer, otherwise the stride-two layer is
|
123 |
-
the first 1x1 conv layer.
|
124 |
-
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
|
125 |
-
not freezing any parameters.
|
126 |
-
norm_cfg (dict): dictionary to construct and config norm layer.
|
127 |
-
norm_eval (bool): Whether to set norm layers to eval mode, namely,
|
128 |
-
freeze running stats (mean and var). Note: Effect on Batch Norm
|
129 |
-
and its variants only.
|
130 |
-
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
|
131 |
-
memory while slowing down the training speed.
|
132 |
-
zero_init_residual (bool): whether to use zero init for last norm layer
|
133 |
-
in resblocks to let them behave as identity.
|
134 |
-
"""
|
135 |
-
|
136 |
-
arch_settings = {
|
137 |
-
50: (Bottleneck, (3, 4, 6, 3)),
|
138 |
-
101: (Bottleneck, (3, 4, 23, 3)),
|
139 |
-
152: (Bottleneck, (3, 8, 36, 3))
|
140 |
-
}
|
141 |
-
|
142 |
-
def __init__(self, groups=1, base_width=4, **kwargs):
|
143 |
-
self.groups = groups
|
144 |
-
self.base_width = base_width
|
145 |
-
super(ResNeXt, self).__init__(**kwargs)
|
146 |
-
|
147 |
-
def make_res_layer(self, **kwargs):
|
148 |
-
"""Pack all blocks in a stage into a ``ResLayer``"""
|
149 |
-
return ResLayer(
|
150 |
-
groups=self.groups,
|
151 |
-
base_width=self.base_width,
|
152 |
-
base_channels=self.base_channels,
|
153 |
-
**kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|