diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/archs/__init__.py b/spaces/17TheWord/RealESRGAN/realesrgan/archs/__init__.py
deleted file mode 100644
index f3fbbf3b78e33b61fd4c33a564a9a617010d90de..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/archs/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import importlib
-from basicsr.utils import scandir
-from os import path as osp
-
-# automatically scan and import arch modules for registry
-# scan all the files that end with '_arch.py' under the archs folder
-arch_folder = osp.dirname(osp.abspath(__file__))
-arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
-# import all the arch modules
-_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames]
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full !!TOP!! Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full !!TOP!! Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training.md
deleted file mode 100644
index 65808ffa04b4c82455b202dc195452e171a9bc3a..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Full !!TOP!! Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-
FULL Jeppesen FliteStar V941 JeppView V361 FliteDeck Chart Training
-
If you are a pilot or a flight enthusiast, you might have heard of Jeppesen, a company that provides aeronautical data and software solutions for aviation. Jeppesen offers a range of products that help you plan, navigate, and manage your flights with ease and efficiency. In this article, we will introduce you to three of their popular products: FliteStar, JeppView, and FliteDeck. We will also show you how to use them to enhance your flight experience.
-
Introduction
-
Jeppesen is a subsidiary of Boeing that specializes in providing flight information and digital solutions for the aviation industry. Jeppesen has been in business since 1934, when it started as a company that produced airway manuals for pilots. Today, Jeppesen serves more than one million pilots and 7,000 customers worldwide, including airlines, airports, military, government agencies, flight schools, and individual pilots.
-
Jeppesen offers a variety of products and services that cover different aspects of flight operations, such as flight planning, navigation, weather, performance, fuel management, crew scheduling, airport operations, flight training, and more. Some of their well-known products are FliteStar, JeppView, and FliteDeck. These are software applications that allow you to access and use Jeppesen's aeronautical data and charts on your computer or mobile device.
-
What is Jeppesen FliteStar?
-
Jeppesen FliteStar is a flight planning software that helps you create optimal routes for your flights. You can use FliteStar to plan flights for various types of aircraft, from single-engine piston to jet airliners. You can also customize your flight parameters, such as speed, altitude, fuel consumption, weight and balance, weather conditions, airspace restrictions, preferred airports, navaids, waypoints, and more.
-
FliteStar uses Jeppesen's high-quality aeronautical data and charts to calculate the best route for your flight. You can view your route on various map types, such as political, terrain, satellite, or street maps. You can also overlay different layers of information on the map, such as weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC boundaries, airways, navaids, airports, runways, obstacles, terrain elevation contours, and more.
-
FliteStar also generates detailed reports for your flight plan, such as navigation log, fuel summary, weight and balance, takeoff and landing performance, flight summary, and more. You can print these reports or export them to other formats, such as PDF, CSV, XML, or KML. You can also import or export your flight plans to other applications, such as Garmin Pilot, ForeFlight, or Jeppesen FliteDeck.
-
-
What is Jeppesen JeppView?
-
Jeppesen JeppView is a chart management software that allows you to access and use Jeppesen's electronic charts on your computer or mobile device. You can use JeppView to view, print, or download thousands of charts from Jeppesen's database, covering more than 220 countries and regions. These charts include enroute charts, terminal charts, approach charts, airport diagrams, and more.
-
JeppView also lets you customize and organize your charts according to your preferences. You can create folders and binders to store your charts and arrange them in any order you want. You can also annotate your charts with notes, highlights, symbols, or drawings. You can also sync your charts across multiple devices and update them regularly with the latest data from Jeppesen.
-
JeppView also comes with a chart viewer and tools that help you interact with your charts and enhance your situational awareness. You can use the chart viewer to zoom in or out of your charts, pan around the map, rotate or flip the chart orientation, switch between day or night mode, and more. You can also use the tools to measure distances or bearings on the map, calculate magnetic variation or true north direction, display latitude or longitude coordinates, show or hide information layers on the chart, and more.
-
What is Jeppesen FliteDeck?
-
Jeppesen FliteDeck is a moving map software that helps you navigate your flights with real-time information and guidance. You can use FliteDeck on your iPad or Windows tablet to access and use Jeppesen's electronic charts and data during your flight. You can also connect FliteDeck to an external GPS receiver or an avionics system to receive accurate position and speed data.
-
FliteDeck has three main modes: map mode, chart mode, and document mode. In map mode, you can view your current position and track on a moving map that shows various layers of information, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. You can also overlay your flight plan route on the map and view relevant information about each waypoint.
-
In chart mode, you can view any chart from Jeppesen's database that matches your current position or destination. You can also view multiple charts at once by splitting the screen horizontally or vertically. You can interact with the charts in the same way as in JeppView.
-
In document mode, you can view any document from Jeppesen's database that relates to your flight operation. These documents include airport information pages (AIP), standard instrument departure (SID) procedures, standard terminal arrival (STAR) procedures, instrument approach procedures (IAP), minimum safe altitude (MSA) diagrams, operational notes, checklists, briefings, and more. You can also interact with the documents in the same way as in JeppView.
-
Benefits of using Jeppesen products
-
Using Jeppesen products can bring many benefits to your flight operation, such as:
-
Enhanced flight planning and navigation
-
With Jeppesen products, you can plan and navigate your flights with ease and efficiency. You can create optimal routes for your flights based on various factors, such as aircraft performance, weather conditions, airspace restrictions, fuel consumption, and more. You can also access and use Jeppesen's high-quality aeronautical data and charts on your computer or mobile device. These data and charts are updated regularly with the latest information from official sources, such as ICAO, FAA, EASA, and more. You can also view your current position and track on a moving map that shows various layers of information, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. This way, you can enhance your flight planning and navigation skills and optimize your flight performance.
-
Improved situational awareness and safety
-
With Jeppesen products, you can improve your situational awareness and safety during your flight. You can access and use Jeppesen's electronic charts on your computer or mobile device in flight, including enroute charts, terminal charts, approach charts, airport diagrams, and more. These charts are designed to provide clear and consistent information and guidance for your flight operation. You can also customize and organize your charts according to your preferences: create folders and binders to store your charts and arrange them in any order you want, annotate them with notes, highlights, symbols, or drawings, and sync them across multiple devices with regular updates from Jeppesen. You can view multiple charts at once by splitting the screen horizontally or vertically, and interact with the charts in the same way as in JeppView. You can also connect FliteDeck to an external GPS receiver or an avionics system to receive accurate position and speed data. This way, you can improve your situational awareness and safety during your flight.
-
Reduced workload and costs
-
With Jeppesen products, you can reduce your workload and costs for your flight operation. You can save time and effort by using FliteStar to plan your flights with ease and efficiency. You can also save money by using FliteStar to optimize your fuel consumption and route selection. You can also save space and weight by using JeppView and FliteDeck to access and use Jeppesen's electronic charts on your computer or mobile device. You don't need to carry bulky and heavy paper charts anymore. You can also save money by subscribing to Jeppesen's services that provide you with regular updates of their aeronautical data and charts. This way, you can reduce your workload and costs for your flight operation.
-
How to use Jeppesen FliteStar V941
-
In this section, we will show you how to use Jeppesen FliteStar V941 to plan your flights with ease and efficiency. We will cover the following topics: - Installing and activating FliteStar - Creating and modifying routes - Viewing and printing charts and reports - Exporting and importing data
-
Installing and activating FliteStar
-
To install and activate FliteStar on your computer, you need to follow these steps: 1. Download the FliteStar installer from Jeppesen's website or insert the FliteStar CD-ROM into your computer's drive. 2. Run the installer and follow the instructions on the screen to complete the installation process. 3. Launch FliteStar from your desktop or start menu. 4. Enter your customer number and serial number that you received from Jeppesen when you purchased FliteStar. You can also enter a temporary activation code if you are using a trial version of FliteStar. 5. Click on Activate to activate FliteStar on your computer. You need to have an internet connection for this step. 6. Wait for the activation process to finish. You will see a confirmation message when it is done. 7. Click on OK to close the activation window and start using FliteStar.
-
Creating and modifying routes
-
To create and modify routes for your flights with FliteStar, you need to follow these steps: 1. Launch FliteStar from your desktop or start menu. 2. Click on File > New Route to create a new route or File > Open Route to open an existing route. 3. Enter the departure and destination airports for your route in the Route window. You can also enter intermediate waypoints if you want to add more stops or waypoints to your route. 4. Click on Calculate Route to calculate the best route for your flight based on various factors, such as aircraft performance, weather conditions, airspace restrictions, fuel consumption, and more. You can also click on Options > Route Options to customize your route parameters, such as speed, altitude, fuel consumption, weight and balance, weather conditions, preferred airports, navaids, waypoints, and more. 5. View your route on the map window. You can use the toolbar buttons or the mouse wheel to zoom in or out of the map. You can also use the mouse pointer to pan around the map. You can also overlay different layers of information on the map, such as weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC boundaries, airways, navaids, airports, runways, obstacles, terrain elevation contours, and more. You can use the Options > Map Options menu to customize these layers. 6. Modify your route if you want to change it. You can use the Route window or the map window to modify your route. You can add, delete, move, or edit waypoints on your route. You can also drag and drop waypoints on the map window to change their position or order. You can also use the Tools > Route Tools menu to perform various actions on your route, such as reverse route, optimize altitude, optimize fuel, find alternate airports, and more. 7. Save your route if you want to keep it for future use. You can use the File > Save Route or File > Save Route As menu to save your route.
-
Viewing and printing charts and reports
-
To view and print charts and reports for your flight plan with FliteStar, you need to follow these steps: 1. Launch FliteStar from your desktop or start menu. 2. Open an existing route or create a new route for your flight plan. 3. Click on View > Charts/Reports to view various charts and reports for your flight plan in a separate window. 4. Select the type of chart or report that you want to view from the drop-down list at the top of the window. You can choose from various types of charts and reports, such as navigation log, fuel summary, weight and balance, takeoff and landing performance, flight summary, and more. 5. View the selected chart or report in the window below. You can use the toolbar buttons or the mouse wheel to zoom in or out of the chart or report. You can also use the mouse pointer to pan around the chart or report. 6. Print the selected chart or report if you want to have a hard copy of it. You can use the File > Print menu or click on the printer icon at the top right corner of the window to print the chart or report.
-
Exporting and importing data
-
To export and import data for your flight plans with FliteStar, you need to follow these steps: 1. Launch FliteStar from your desktop or start menu. 2. Open an existing route or create a new route for your flight plan. 3. Click on File > Export Data or File > Import Data to export or import data for your flight plan in various formats, such as PDF, CSV, XML, or KML. 4. Select the type of data that you want to export or import from the drop-down list at the top of the window. You can choose from various types of data, such as routes, waypoints, airports, navaids, airways, airspace boundaries, obstacles, terrain elevation contours, and more. 5. Select the format that you want to export or import your data in from the drop-down list below the type of data. You can choose from various formats, such as PDF, CSV, XML, or KML. 6. Select the destination folder or source file for your exported or imported data by clicking on Browse button next to the format drop-down list. 7. Click on Export Data or Import Data button at the bottom right corner of the window to export or import your data.
-
How to use Jeppesen JeppView V361
-
In this section, we will show you how to use Jeppesen JeppView V361 to access and use Jeppesen's electronic charts on your computer or mobile device. We will cover the following topics: - Installing and activating JeppView - Accessing and updating charts - Customizing and organizing charts - Using the chart viewer and tools
-
Installing and activating JeppView
-
To install and activate JeppView on your computer or mobile device, you need to follow these steps: 1. Download the JeppView installer from Jeppesen's website or insert the JeppView CD-ROM into your computer's drive. 2. Run the installer and follow the instructions on the screen to complete the installation process. 3. Launch JeppView from your desktop or start menu. 4. Enter your customer number and serial number that you received from Jeppesen when you purchased JeppView. You can also enter a temporary activation code if you are using a trial version of JeppView. 5. Click on Activate to activate JeppView on your computer or mobile device. You need to have an internet connection for this step. 6. Wait for the activation process to finish. You will see a confirmation message when it is done. 7. Click on OK to close the activation window and start using JeppView.
-
Accessing and updating charts
-
To access and update charts for your flights with JeppView, you need to follow these steps: 1. Launch JeppView from your desktop or start menu. 2. Click on File > Open Chart to open an existing chart or File > New Chart to create a new chart. 3. Enter the airport code or name for the chart that you want to access in the Chart window. You can also enter a region code or name if you want to access charts for a specific region. 4. Click on Search to search for the chart that you want to access in Jeppesen's database. You will see a list of charts that match your search criteria in the Chart window. 5. Select the chart that you want to access from the list and click on Open to open it in a separate window. 6. View the chart in the window below. You can use the toolbar buttons or the mouse wheel to zoom in or out of the chart. You can also use the mouse pointer to pan around the chart. 7. Update your charts regularly with the latest data from Jeppesen. You can use the File > Update Charts menu or click on the update icon at the top right corner of the window to update your charts. You need to have an internet connection and a valid subscription for this step.
-
Customizing and organizing charts
-
To customize and organize your charts according to your preferences with JeppView, you need to follow these steps: 1. Launch JeppView from your desktop or start menu. 2. Open an existing chart or create a new chart for your flight plan. 3. Click on View > Folders/Binders to view, create, or edit folders and binders for your charts in a separate window. 4. Use the Folders/Binders window to store and arrange your charts in any order you want. You can create folders and binders by clicking on the new folder or new binder icons at the top left corner of the window. You can also rename, delete, or move folders and binders by right-clicking on them and selecting the appropriate option from the context menu. You can also drag and drop charts into folders or binders to add them to your collection. 5. Click on OK to close the Folders/Binders window and save your changes. 6. Click on View > Annotations to view, create, or edit annotations for your charts in a separate window. 7. Use the Annotations window to add notes, highlights, symbols, or drawings to your charts. You can use the toolbar buttons at the top of the window to select the type, color, size, or shape of your annotations. You can also use the mouse pointer to draw or write on your charts. You can also edit or delete annotations by right-clicking on them and selecting the appropriate option from the context menu. 8. Click on OK to close the Annotations window and save your changes.
-
Using the chart viewer and tools
-
To use the chart viewer and tools to interact with your charts and enhance your situational awareness with JeppView, you need to follow these steps: 1. Launch JeppView from your desktop or start menu. 2. Open an existing chart or create a new chart for your flight plan. 3. Use the chart viewer to zoom in or out of your chart, pan around the chart, rotate or flip the chart orientation, switch between day or night mode, and more. You can use the toolbar buttons or keyboard shortcuts at the top of the window to perform these actions. You can also use the mouse wheel or keyboard arrows to zoom in or out of the chart. You can also use the mouse pointer or keyboard arrows to pan around the chart. 4. Use the tools to measure distances or bearings on the chart, calculate magnetic variation or true north direction, display latitude or longitude coordinates, show or hide information layers on the chart, and more. You can use the toolbar buttons or keyboard shortcuts at the bottom of the window to perform these actions.
-
How to use Jeppesen FliteDeck V361
-
In this section, we will show you how to use Jeppesen FliteDeck V361 to navigate your flights with real-time information and guidance. We will cover the following topics: - Installing and activating FliteDeck - Configuring and synchronizing FliteDeck - Navigating and interacting with FliteDeck - Using the map, chart, and document modes
-
Installing and activating FliteDeck
-
To install and activate FliteDeck on your iPad or Windows tablet, you need to follow these steps: 1. Download the FliteDeck app from the App Store or the Microsoft Store or insert the FliteDeck CD-ROM into your tablet's drive. 2. Run the app and follow the instructions on the screen to complete the installation process. 3. Launch FliteDeck from your home screen or start menu. 4. Enter your customer number and serial number that you received from Jeppesen when you purchased FliteDeck. You can also enter a temporary activation code if you are using a trial version of FliteDeck. 5. Click on Activate to activate FliteDeck on your tablet. You need to have an internet connection for this step. 6. Wait for the activation process to finish. You will see a confirmation message when it is done. 7. Click on OK to close the activation window and start using FliteDeck.
-
Configuring and synchronizing FliteDeck
-
To configure and synchronize FliteDeck with your preferences and data, you need to follow these steps: 1. Launch FliteDeck from your home screen or start menu. 2. Click on Settings > General to configure your general settings for FliteDeck, such as language, units, time zone, date format, brightness, sound, and more. 3. Click on Settings > Aircraft to configure your aircraft settings for FliteDeck, such as aircraft type, tail number, performance data, weight and balance data, fuel data, and more. 4. Click on Settings > Charts to configure your chart settings for FliteDeck, such as chart type, chart orientation, chart scale, chart color, chart annotations, and more. 5. Click on Settings > Data to configure your data settings for FliteDeck, such as data source, data update frequency, data storage location, data backup location, and more. 6. Click on Sync > Charts/Data to synchronize your charts and data with Jeppesen's database. You need to have an internet connection and a valid subscription for this step. You will see a progress bar and a status message when the synchronization is in progress. You will see a confirmation message when it is done. 7. Click on Sync > Devices to synchronize your charts and data across multiple devices that have FliteDeck installed. You need to have an internet connection and a valid subscription for this step. You will see a list of devices that are connected to your account in the Sync window. You can select or deselect the devices that you want to synchronize with by tapping on them. You will see a progress bar and a status message when the synchronization is in progress. You will see a confirmation message when it is done.
-
Navigating and interacting with FliteDeck
-
To navigate and interact with FliteDeck during your flight, you need to follow these steps: 1. Launch FliteDeck from your home screen or start menu. 2. Click on Flight Plan > New Flight Plan to create a new flight plan or Flight Plan > Open Flight Plan to open an existing flight plan. 3. Enter the departure and destination airports for your flight plan in the Flight Plan window. You can also enter intermediate waypoints if you want to add more stops or waypoints to your flight plan. 4. Click on Calculate Route to calculate the best route for your flight based on various factors, such as aircraft performance, weather conditions, airspace restrictions, fuel consumption, and more. You can also click on Options > Route Options to customize your route parameters, such as speed, altitude, fuel consumption, weight and balance, weather conditions, preferred airports, navaids, waypoints, and more. 5. View your route on the map window. You can use the toolbar buttons or the mouse wheel to zoom in or out of the map. You can also use the mouse pointer to pan around the map. You can also overlay different layers of information on the map, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. You can use the Options > Map Options menu to customize these layers. 6. Modify your route if you want to change it. You can use the Flight Plan window or the map window to modify your route. You can add, delete, move, or edit waypoints on your route. You can also drag and drop waypoints on the map window to change their position or order. You can also use the Tools > Route Tools menu to perform various actions on your route, such as reverse route, optimize altitude, optimize fuel, find alternate airports, and more. 7. Save your flight plan if you want to keep it for future use. You can use the File > Save Flight Plan or File > Save Flight Plan As menu to save your flight plan.
-
Using the map, chart, and document modes
-
To use the map, chart, and document modes to access and use Jeppesen's electronic charts and data during your flight with FliteDeck, you need to follow these steps: 1. Launch FliteDeck from your home screen or start menu. 2. Open an existing flight plan or create a new flight plan for your flight. 3. Use the map mode to view your current position and track on a moving map that shows various layers of information, such as terrain elevation contours, airports, runways, navaids, airways, airspace boundaries, weather radar, winds aloft, METARs, TAFs, SIGMETs, AIRMETs, NOTAMs, PIREPs, ATC frequencies, and more. You can also overlay your flight plan route on the map and view relevant information about each waypoint. You can use the toolbar buttons or keyboard shortcuts at the top of the window to perform various actions in map mode, such as zoom in or out, pan around, rotate or flip the map orientation, switch between day or night mode, show or hide information layers, and more. 4. Use the chart mode to view any chart from Jeppesen's database that matches your current position or destination. You can also view multiple charts at once by splitting the screen horizontally or vertically. You can interact with the charts in the same way as in JeppView. You can use the toolbar buttons or keyboard shortcuts at the bottom of the window to perform various actions in chart mode, such as measure distances or bearings, calculate magnetic variation or true north direction, display latitude or longitude coordinates, show or hide information layers, and more. 5. Use the document mode to view any document from Jeppesen's database that relates to your flight operation. These documents include airport information pages (AIP), standard instrument departure (SID) procedures, standard terminal arrival (STAR) procedures, instrument approach procedures (IAP), minimum safe altitude (MSA) diagrams, operational notes, checklists, briefings, and more. You can interact with the documents in the same way as in JeppView. You can use the toolbar buttons or keyboard shortcuts at the bottom of the window to perform various actions in document mode, such as zoom in or out, pan around, rotate or flip the document orientation, switch between day or night mode, annotate the document with notes, highlights, symbols, or drawings, and more.
-
Conclusion
-
In this article, we have introduced you to three of Jeppesen's popular products: FliteStar, JeppView, and FliteDeck. We have also shown you how to use them to enhance your flight experience. We hope that you have found this article informative and useful. If you want to learn more about Jeppesen's products and services, you can visit their website at www.jeppesen.com or contact their customer support at 1-800-621-5377.
-
FAQs
-
Here are some frequently asked questions about Jeppesen's products:
Q: How much do Jeppesen's products cost?
A: The cost of Jeppesen's products depends on various factors, such as the type of product, the coverage area, the subscription period, and the number of devices. You can check the prices and options for each product on Jeppesen's website or contact their customer support for more details.
Q: How can I update my Jeppesen products with the latest data and charts?
A: You can update your Jeppesen products with the latest data and charts by using the File > Update Charts menu or clicking on the update icon at the top right corner of the window in each product. You need to have an internet connection and a valid subscription for this step.
Q: How can I synchronize my Jeppesen products across multiple devices?
A: You can synchronize your Jeppesen products across multiple devices by using the Sync > Devices menu or clicking on the sync icon at the top right corner of the window in each product. You need to have an internet connection and a valid subscription for this step.
Q: How can I export or import my flight plans to other applications?
A: You can export or import your flight plans to other applications by using the File > Export Data or File > Import Data menu in FliteStar. You can choose from various formats, such as PDF, CSV, XML, or KML. You can also export or import your flight plans to other applications that support FliteStar's format, such as Garmin Pilot, ForeFlight, or Jeppesen FliteDeck.
Q: How can I contact Jeppesen's customer support if I have any questions or issues with their products?
A: You can contact Jeppesen's customer support by phone at 1-800-621-5377 or by email at captain@jeppesen.com. You can also visit their website at www.jeppesen.com for more information and resources.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2018 Win64 Keygen Serial Key How to Activate Revit for Free.md b/spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2018 Win64 Keygen Serial Key How to Activate Revit for Free.md
deleted file mode 100644
index c8d7dfa3d087c0ab359866723439f780e5cf83f6..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2018 Win64 Keygen Serial Key How to Activate Revit for Free.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Autodesk Revit 2018 Win64 Keygen Serial Key keygen
Cinema 4D R20: A Powerful 3D Software for Creative Professionals
-
-
Cinema 4D R20 is a 3D modeling, animation, simulation and rendering software solution that offers a fast, powerful, flexible and stable toolset for design, motion graphics, VFX, AR/MR/VR, game development and all types of visualization professionals[^2^]. Cinema 4D R20 introduces high-end features for VFX and motion graphics artists including node-based materials, volume modeling, robust CAD import and a dramatic evolution of the MoGraph toolset[^3^].
-
-
Node-Based Materials
-
-
Cinema 4D R20 introduces a new node-based material system that allows you to create complex and realistic materials with ease. You can use nodes to build shaders from scratch or use presets and assets from the online library. You can also convert any existing material to nodes and edit it as you wish. Node-based materials are fully compatible with standard materials and can be rendered with any engine[^3^].
-Volume Modeling
-
-
Cinema 4D R20 adds a new way of modeling with volumes. You can use splines, particles, noises or any other object to create volumetric shapes that can be sculpted, textured and animated. You can also combine volumes using boolean operations or smooth them with filters. Volume modeling opens up new possibilities for organic and abstract designs[^3^].
-
-
Robust CAD Import
-
-
Cinema 4D R20 improves the import of CAD files with support for Solidworks, STEP, Catia, JT and IGES formats. You can import CAD models with high accuracy and detail, preserving the original structure and hierarchy. You can also adjust the tessellation quality and optimize the geometry for rendering. Cinema 4D R20 makes it easy to integrate CAD data into your 3D workflow[^3^].
-
-
MoGraph Evolution
-
-
Cinema 4D R20 enhances the MoGraph toolset, a procedural modeling and animation system that gives motion designers the ability to quickly and easily create complex and abstract animations. MoGraph now features Fields, a new concept that allows you to control the strength of effects using falloffs, shapes, noises or shaders. You can also layer multiple fields and blend them with different modes. Fields offer infinite possibilities for creative animations[^3^].
-
-
Conclusion
-
-
Cinema 4D R20 is a break-through version of its iconic 3D software that delivers high-end features for VFX and motion graphics artists. Whether you are working on your own or in a team, Cinema 4D R20 produces stunning results with its fast, powerful, flexible and stable toolset. Cinema 4D R20 is widely recognized as one of the easiest and most accessible 3D packages to learn and use. To learn more about Cinema 4D R20, visit https://www.maxon.net/en/cinema-4d [^1^]
-
-
OpenVDB Integration
-
-
Cinema 4D R20 integrates OpenVDB, an open-sourced technology that allows you to manipulate 3D data in a volumetric way. OpenVDB is widely used in the VFX industry for creating realistic smoke, fire, clouds and liquids. Cinema 4D R20 provides a set of tools to create, edit and render OpenVDB volumes. You can also import and export OpenVDB files from other applications.
-
-
ProRender Enhancements
-
-
Cinema 4D R20 improves the ProRender engine, a GPU-based renderer that supports physically-based rendering and real-time viewport feedback. ProRender now supports sub-surface scattering, motion blur, multi-passes and more. You can also use ProRender with node-based materials and volume modeling. ProRender is a fast and easy way to achieve photorealistic results with Cinema 4D R20.
-
-
Other Improvements
-
-
Cinema 4D R20 also includes many other improvements and additions that make your 3D workflow more efficient and enjoyable. Some of these are:
-
-- A new Commander that lets you quickly access commands, tags, objects and presets
-- A new Capsules system that lets you create custom nodes and assets for scene nodes
-- A new Multi-Instances mode that lets you create millions of objects with low memory consumption
-- A new Sound Effector that lets you create animations based on audio files
-- A new Bevel Deformer that lets you apply non-destructive bevels to any object
-- A new UV Transform tool that lets you manipulate UV coordinates with ease
-- A new Viewport HUD that lets you customize the information displayed in the viewport
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Coreldraw X9.md b/spaces/1gistliPinn/ChatGPT4/Examples/Coreldraw X9.md
deleted file mode 100644
index 34b25e0b6e2173bcd33c2e31b0c2e1afe20e89a8..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Coreldraw X9.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Corel has released a new version of its flagship image editing app CorelDRAW, which is actually a bundle of multiple applications sold under ...
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Experience the Drama and Action of The VIP-2 Lalkar 2012 Movie in High Definition Download it in 1080p.md b/spaces/1gistliPinn/ChatGPT4/Examples/Experience the Drama and Action of The VIP-2 Lalkar 2012 Movie in High Definition Download it in 1080p.md
deleted file mode 100644
index f2279ede0381e2afc76ab4723e8928004d1e2c19..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Experience the Drama and Action of The VIP-2 Lalkar 2012 Movie in High Definition Download it in 1080p.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
diff --git a/spaces/1phancelerku/anime-remove-background/Aplikasi Live Bar Bar Mod Apk Terbaru 2023 No Banned dan No Sensor.md b/spaces/1phancelerku/anime-remove-background/Aplikasi Live Bar Bar Mod Apk Terbaru 2023 No Banned dan No Sensor.md
deleted file mode 100644
index bcfb1cc672d5f9e26705b7b60ee7e7ba37b68119..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Aplikasi Live Bar Bar Mod Apk Terbaru 2023 No Banned dan No Sensor.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Aplikasi Live Bar Bar Mod Apk: What You Need to Know
If you are looking for a new way to entertain yourself and connect with other people online, you might have heard of aplikasi live bar bar mod apk. This is a modified version of an original app that allows you to watch live streaming videos of various content creators, including those who offer adult content. Aplikasi live bar bar mod apk is popular because it gives you access to private rooms and premium features for free. However, it also comes with some risks, such as viruses, malware, and legal issues. In this article, we will tell you everything you need to know about aplikasi live bar bar mod apk, including how it works, where to download it, and how to use it safely.
Aplikasi live bar bar mod apk is a modified version of an original app that offers live streaming services. The original app can be any app that has live streaming features, such as Bigo Live, Uplive, MLiveU, Mango Live, and others. The mod apk version is created by third-party developers who hack the original app and modify its code to unlock some features that are normally paid or restricted. Some of these features include:
Access to private rooms where you can watch exclusive content from your favorite streamers.
Unlimited coins or diamonds that you can use to send gifts or tips to the streamers.
No ads or pop-ups that interrupt your viewing experience.
No verification or registration required to use the app.
To use aplikasi live bar bar mod apk, you need to download it from a reliable source. You can find many websites that offer links to download the app, but be careful because some of them might contain viruses or malware that can harm your device. You also need to enable unknown sources on your device settings to allow the installation of apps from outside the Google Play Store. After installing the app, you can open it and browse through the categories and rooms to find the content that suits your taste. You can also interact with the streamers and other viewers by sending messages or gifts.
Why Aplikasi Live Bar Bar Mod Apk Is Popular
How to Use Aplikasi Live Bar Bar Mod Apk Safely
To use aplikasi live bar bar mod apk safely, you need to enable unknown sources on your device settings and install the app from a reliable source. After installing the app, you can open it and browse through the categories and rooms to find the content that suits your taste. You can also interact with the streamers and other viewers by sending messages or gifts.
Q: What are some of the best streamers on aplikasi live bar bar mod apk?
A: Some of the best streamers on aplikasi live bar bar mod apk are those who offer high-quality content, engaging personality, and attractive appearance. Some examples are:
Luna Maya. She is a famous Indonesian actress, singer, and model who has a lot of fans on the app. She often streams her daily life, singing, dancing, and chatting with her viewers.
Raffi Ahmad. He is a popular Indonesian actor, presenter, and businessman who has a lot of followers on the app. He often streams his travels, hobbies, and family activities with his wife and children.
Sunny Leone. She is a well-known Indian-American actress, model, and former porn star who has a lot of admirers on the app. She often streams her sexy shows, workouts, and beauty tips with her viewers.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Bingo 75 and Play with Friends Online.md b/spaces/1phancelerku/anime-remove-background/Download Bingo 75 and Play with Friends Online.md
deleted file mode 100644
index cb68803f358940f913ed3b767df43e16cc522913..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Bingo 75 and Play with Friends Online.md
+++ /dev/null
@@ -1,170 +0,0 @@
-
-
Download Bingo 75: How to Play and Win at This Fun Game
-
Bingo is a classic game of chance that has been enjoyed by millions of people around the world for centuries. But did you know that there are different types of bingo games, each with its own rules, variations, and benefits? One of the most popular and exciting bingo games is Bingo 75, also known as American bingo or pattern bingo. In this article, we will tell you everything you need to know about Bingo 75, how to download the best bingo 75 apps and websites, and how to play and win at this fun game.
Bingo 75 is a bingo game that uses a card with a grid of 25 squares, arranged in five columns and five rows. Each square contains a number from 1 to 75, except for the center square, which is marked as a free space. The columns are labeled with the letters B, I, N, G, and O, corresponding to the range of numbers in each column. For example, the B column contains numbers from 1 to 15, the I column contains numbers from 16 to 30, and so on.
-
The goal of Bingo 75 is to cover a specific pattern of squares on your card, based on the numbers that are called out by a caller or a random number generator. The patterns can vary from game to game, but they usually include horizontal, vertical, or diagonal lines, as well as shapes like letters, numbers, or symbols. Some examples of common patterns are shown below:
-
-
[Pattern examples: X pattern, Four corners pattern, Blackout pattern]
-
You should try Bingo 75 because it is a fun and easy game that anyone can play. It is also a great way to socialize with other players, either online or in person. You can chat with your friends, make new ones, and join bingo communities that share your interests. Plus, you can win real money or prizes by playing bingo 75 online or in apps. Who doesn't love a good bingo jackpot?
-
-
The Rules of Bingo 75
-
The rules of Bingo 75 are simple and straightforward. Here are the basic steps to follow:
-
-
Get one or more bingo cards. You can buy them online or in apps, or print them yourself. You can play with as many cards as you want, as long as you can keep track of them.
-
Listen to the caller or watch the screen as the numbers are drawn. The caller or the screen will announce the pattern you need to cover for each game. For example, you might hear or see "Bingo 75, four corners".
-
Mark off the numbers on your card that match the ones that are called out. You can use a dauber, a pen, a mouse, or a finger, depending on how you are playing. You can also use the auto-daub feature in some apps or websites, which will mark the numbers for you automatically.
-
Check your card for the pattern. If you have covered all the squares that form the pattern, you have bingo! Shout "Bingo!" or click the bingo button to claim your win. Make sure you do this before the next number is called, or you might miss your chance.
-
Verify your win. The caller or the app will check your card to make sure you have marked the correct numbers and pattern. If you have, you will receive your prize. If not, the game will continue until someone else wins.
-
-
The Variations of Bingo 75
-
Bingo 75 is a versatile game that can be played in different ways. Here are some of the common variations of Bingo 75 that you might encounter:
-
-
Speed bingo: This is a fast-paced version of bingo 75, where the numbers are called out very quickly and you have to mark them as fast as you can. The game usually ends in a few minutes, so it is ideal for those who want a quick thrill.
-
Progressive bingo: This is a version of bingo 75 where the jackpot increases every time no one wins. The jackpot can grow to huge amounts, making it very attractive for players. However, the catch is that the pattern becomes harder to cover as the game progresses, so it is also more challenging.
-
Bonanza bingo: This is a version of bingo 75 where 43 numbers are pre-drawn before the game starts. The players then buy their cards and mark off the numbers that match the ones that are pre-drawn. The game then begins with the remaining 32 numbers being called out. The first player to cover all 25 squares on their card wins.
-
-
The Benefits of Playing Bingo 75 Online
-
Playing bingo 75 online or in apps has many benefits over playing it in person. Here are some of them:
-
-
Convenience: You can play bingo 75 anytime and anywhere, as long as you have an internet connection and a device. You don't have to travel to a bingo hall, wait in line, or deal with crowds. You can play in your pajamas, on your couch, or even on your bed.
-
Variety: You can choose from a wide range of bingo 75 apps and websites, each offering different features, themes, and bonuses. You can also play different variations of bingo 75, as well as other types of bingo games, such as bingo 90 or bingo 30.
-
Socialization: You can chat with other players online or in apps, and make new friends who share your passion for bingo. You can also join bingo clubs or communities, where you can exchange tips, stories, and jokes. You can also play with your existing friends and family members, and invite them to join you in a game.
-
Affordability: You can play bingo 75 online or in apps for free or for low stakes. You don't have to spend a lot of money to enjoy this game. You can also take advantage of free bonuses, promotions, and rewards that many apps and websites offer.
-
Fun: Playing bingo 75 online or in apps is simply fun. You can enjoy the thrill of marking off numbers, covering patterns, and winning prizes. You can also enjoy the colorful graphics, animations, and sounds that make the game more lively and exciting.
-
-
How to Download Bingo 75 Apps and Websites
-
If you want to play bingo 75 online or in apps, you need to download them first. Here are some tips on how to do that:
-
The Best Bingo 75 Apps for Android and iOS Devices
-
If you have an Android or iOS device, such as a smartphone or a tablet, you can download some of the best bingo 75 apps from the Google Play Store or the App Store. Some of these apps are:
-
-
Bingo Blitz: This is one of the most popular and highly rated bingo apps in the world. It offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat
with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can download Bingo Blitz from here.
-
Bingo Bash: This is another popular and highly rated bingo app that offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can download Bingo Bash from here.
-
Bingo Pop: This is a fun and colorful bingo app that offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can download Bingo Pop from here.
-
-
The Best Bingo 75 Websites for Desktop and Laptop Users
-
If you have a desktop or a laptop computer, you can play bingo 75 on some of the best bingo websites that are compatible with your browser. Some of these websites are:
-
-
Bingo.com: This is one of the most popular and trusted bingo websites in the world. It offers various types of bingo games, including bingo 75, as well as slots and other casino games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can access Bingo.com from here.
-
Bingo Hall: This is another popular and trusted bingo website that offers various types of bingo games, including bingo 75, as well as slots and other casino games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can access Bingo Hall from here.
-
Bingo Zone: This is a fun and free bingo website that offers various types of bingo games, including bingo 75, as well as slots and other mini-games. It also has a social aspect, where you can chat with other players, collect items, and join tournaments. You can also play with your Facebook friends and get free bonuses every day. You can access Bingo Zone from here.
-
-
How to Choose the Right Bingo 75 App or Website for You
-
With so many options available, how do you choose the right bingo 75 app or website for you? Here are some factors to consider:
-
-
Reputation: You want to play on an app or a website that is reliable, secure, and fair. You can check the reviews and ratings of other users, as well as the licenses and certifications of the app or the website.
-
Features: You want to play on an app or a website that offers the features that suit your preferences and needs. For example, you might want to play on an app or a website that has a variety of bingo games, slots, and mini-games; that has a user-friendly interface and design; that has a chat function and a social community; that has generous bonuses and promotions; that has customer support and help options; etc.
-
Compatibility: You want to play on an app or a website that is compatible with your device and browser. For example, you might want to play on an app or a website that works well on your Android or iOS device; that does not require downloading or installing anything; that does not have any glitches or bugs; etc.
-
-
How to Play and Win at Bingo 75
-
Now that you know how to download the best bingo 75 apps and websites, you might be wondering how to play and win at this game. Here are some tips, tricks, strategies, and mistakes to avoid:
-
Tips and Tricks for Playing Bingo 75
-
Here are some tips and tricks for playing bingo 75:
-
-
Play with multiple cards: The more cards you play with, the higher your chances of covering the pattern faster than others. However, make sure you can keep track of all your cards without getting confused or overwhelmed.
-
Play at off-peak times: The fewer players there are in a game, the higher your chances of winning.
Try to play at times when there are fewer players online, such as early mornings, late nights, or weekdays. You can also look for games that have a low number of participants or a high number of cards available.
-
Look for patterns that are easy to cover: Some patterns are easier to cover than others, depending on the distribution of numbers on your card. For example, a horizontal line might be easier to cover than a diagonal line, or a letter T might be easier to cover than a letter Z. Try to look for patterns that have more numbers in common or that are closer together.
-
Use the chat function: The chat function is not only a way to socialize with other players, but also a way to get useful information and tips. You can ask other players for advice, learn from their experiences, or even get hints on the next numbers to be called. You can also use the chat function to congratulate the winners, thank the caller, or express your emotions.
-
-
Strategies and Techniques for Winning at Bingo 75
-
Here are some strategies and techniques for winning at bingo 75:
-
-
Play with a budget: Before you start playing, set a limit on how much money you are willing to spend and stick to it. This will help you avoid overspending, losing more than you can afford, or getting addicted to the game. You can also set a limit on how much time you are willing to spend and take breaks regularly.
-
Play with a strategy: Before you start playing, decide on a strategy that suits your goals and preferences. For example, you might want to play with fewer cards but higher stakes, or with more cards but lower stakes. You might also want to play with different patterns or variations of bingo 75. You can also adjust your strategy depending on the situation and the outcome of the game.
-
Play with luck: Bingo 75 is a game of chance, so luck plays a big role in winning. You can try to boost your confidence by doing things that make you feel lucky, such as wearing lucky clothes, using lucky charms, crossing your fingers, or saying lucky phrases, and by avoiding things that are said to bring bad luck, such as walking under ladders or breaking mirrors. Of course, these are just superstitions and there is no guarantee that they will work, but they might make you feel more confident and optimistic.
-
-
Common Mistakes and Pitfalls to Avoid When Playing Bingo 75
-
Here are some common mistakes and pitfalls to avoid when playing bingo 75:
-
-
Playing without checking the rules: Different apps and websites might have different rules for bingo 75, such as the number of cards allowed, the cost of each card, the payout structure, the pattern required, etc. Make sure you read and understand the rules before you start playing, or you might end up wasting your money or missing your chance to win.
-
Playing without paying attention: Bingo 75 is a fast-paced game that requires your full attention and concentration. If you are distracted by other things, such as your phone, your TV, or your surroundings, you might miss some numbers or patterns that could have won you the game. Make sure you focus on the game and mark off your numbers as soon as they are called.
-
Playing without having fun: Bingo 75 is a game that is meant to be fun and enjoyable. If you are playing only for money or prizes, or if you are playing too seriously or competitively, you might lose sight of the fun aspect of the game. Remember that bingo 75 is a game of chance, not skill, and that winning is not guaranteed. Enjoy the game for what it is, and don't let it affect your mood or your relationships.
-
-
Conclusion
-
Bingo 75 is a fun and exciting game that anyone can play and win. It is also a great way to socialize with other players online or in apps. All you need to do is download one of the best bingo 75 apps or websites, follow the rules of the game, use some tips, tricks, and strategies, and avoid the common mistakes. You will have a blast playing and winning at this game. So what are you waiting for? Download bingo 75 today and join the fun!
-
Summary of the Main Points
-
Here are the main points of this article:
-
-
Bingo 75 is a bingo game that uses a card with a grid of 25 squares, each containing a number from 1 to 75, except for the center square, which is a free space.
-
The goal of bingo 75 is to cover a specific pattern of squares on your card, based on the numbers that are called out by a caller or a random number generator.
-
Bingo 75 is a fun and easy game that anyone can play. It is also a great way to socialize with other players, either online or in person.
-
You can download the best bingo 75 apps and websites from the Google Play Store or the App Store for Android and iOS devices, or from the internet for desktop and laptop users.
-
You can play and win at bingo 75 by following the rules of the game, using some tips, tricks, and strategies, and avoiding common mistakes.
-
-
Call to Action
-
If you enjoyed this article, please share it with your friends and family who might also be interested in playing bingo 75. You can also leave us a comment below and let us know what you think about this game. We would love to hear from you!
-
FAQs
-
Here are some frequently asked questions about bingo 75:
-
-
How many numbers are there in bingo 75?
-
There are 75 numbers in bingo 75, ranging from 1 to 75. Each column on the card corresponds to a range of numbers, as follows: B (1-15), I (16-30), N (31-45), G (46-60), O (61-75).
-
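For readers who like to see the mechanics spelled out, here is a minimal Python sketch of that column layout (purely illustrative: the function name and card representation are our own assumptions, not code from any bingo app):

```python
import random

# Column ranges on a 75-ball bingo card: B, I, N, G, O
COLUMN_RANGES = {"B": (1, 15), "I": (16, 30), "N": (31, 45), "G": (46, 60), "O": (61, 75)}

def make_card():
    """Build a 5x5 card as a dict of columns; the middle square of the N column is the free space."""
    card = {letter: random.sample(range(low, high + 1), 5) for letter, (low, high) in COLUMN_RANGES.items()}
    card["N"][2] = "FREE"  # the center of the card is always free
    return card

print(make_card())
```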
How many patterns are there in bingo 75?
-
There are many patterns in bingo 75, depending on the game and the app or website you are playing on. Some of the common patterns are horizontal, vertical, or diagonal lines; four corners; X; blackout; letters; numbers; symbols; etc.
-
How do I win at bingo 75?
-
You win at bingo 75 by covering the pattern that is required for each game on your card before anyone else does. You have to mark off the numbers on your card that match the ones that are called out by the caller or the random number generator. You have to shout "Bingo!" or click the bingo button to claim your win before the next number is called.
-
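To make that win condition concrete, here is a small hedged sketch (again an illustration only; real apps store cards and patterns in their own formats) that marks called numbers on a flat 25-square card and checks whether the required pattern is covered:

```python
def is_winner(card, called, pattern):
    """card: 25 numbers laid out row by row (index 12 is the free center square);
    called: set of numbers drawn so far; pattern: set of card indices that must be covered."""
    covered = {i for i, num in enumerate(card) if i == 12 or num in called}
    return pattern.issubset(covered)

top_row = {0, 1, 2, 3, 4}          # pattern: cover the whole top row
card = list(range(1, 26))          # toy card for illustration, not a valid B/I/N/G/O layout
print(is_winner(card, {1, 2, 3, 4, 5}, top_row))  # True: numbers 1-5 cover indices 0-4
```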
How much does it cost to play bingo 75?
-
The cost of playing bingo 75 varies depending on the app or website you are playing on, and the number of cards you are buying. Some apps or websites offer free games or bonuses, while others charge a fee per card or per game. The fee can range from a few cents to a few dollars.
-
What are the benefits of playing bingo 75 online?
-
The benefits of playing bingo 75 online are convenience, variety, socialization, affordability, and fun. You can play anytime and anywhere, choose from different apps and websites, chat with other players, play for free or low stakes, and enjoy the thrill of the game.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Nulls Brawl 38.111 APK with Hank and Maisie Skins.md b/spaces/1phancelerku/anime-remove-background/Download Nulls Brawl 38.111 APK with Hank and Maisie Skins.md
deleted file mode 100644
index ecedce8cc2d0d6c9f8358439388c797ef899ec8f..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Nulls Brawl 38.111 APK with Hank and Maisie Skins.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
Nulls Brawl 38.111 APK Indir: How to Download and Play the Latest Version of Brawl Stars on a Private Server
-
Brawl Stars is one of the most popular online action games at present, with millions of players around the world. But what if you want to mod it or play with cheats without risking a ban? That's where Nulls Brawl comes in. In this article, we will tell you everything you need to know about Nulls Brawl 38.111 APK indir, the latest version of the private server for Brawl Stars. You will learn what Nulls Brawl is, what features it offers, how to download and install it, why you should play it, and some tips and tricks for having fun with it.
-
What is Nulls Brawl?
-
Nulls Brawl is a private server for Brawl Stars that allows you to play the game with unlimited resources, unlocked brawlers, skins, pins, gears, and more. It is not affiliated with Supercell, the official developer of Brawl Stars, and it does not affect your progress or account on the original game. You can play Nulls Brawl on your Android device or on your PC using an emulator.
You can play with unlimited gems, coins, tickets, and star points, which you can use to buy anything you want in the game.
-
You can unlock and try all the brawlers, skins, pins, and gears without spending any money or waiting for them to be available.
-
You can access all the game modes, maps, and events without any restrictions or timers.
-
You can mod the game and customize it to your liking, such as changing the graphics, sounds, or gameplay.
-
You can play with other players who are also using Nulls Brawl and have fun together.
-
-
Cons of Nulls Brawl:
-
-
You cannot play with players who are using the official version of Brawl Stars, as they are on different servers.
-
You cannot sync your progress or account with the original game, as they are separate and independent.
-
You may encounter some bugs, glitches, or errors while playing Nulls Brawl, as it is not an official product and may not be updated regularly.
-
You may risk getting banned from the official game if you use the same device or account for both versions.
-
You may violate the terms of service of Supercell by using Nulls Brawl, as it is an unauthorized modification of their game.
-
-
Tips and tricks for playing Nulls Brawl
-
If you want to make the most out of Nulls Brawl, here are some tips and tricks that you can follow:
-
-
Experiment with different brawlers, skins, pins, and gears and find out which ones suit your playstyle and preferences.
-
Practice your skills and strategies in different game modes and maps and learn from your mistakes and successes.
-
Join or create a clan and chat with other players who are also using Nulls Brawl. You can share tips, ideas, feedback, or just have fun together.
-
Participate in clan wars and compete with other clans for glory and rewards. You can also challenge other players to friendly battles or duels.
-
Create your own maps using the map maker and share them with other players. You can also play on the maps created by others and rate them.
-
-
Conclusion
-
Nulls Brawl 38.111 APK indir is a private server for Brawl Stars that lets you play the game with unlimited resources, unlocked brawlers, skins, pins, gears, and more. It is a great way to enjoy Brawl Stars with more freedom and fun. However, it also has some drawbacks, such as being incompatible with the official game, having some bugs or errors, and violating the terms of service of Supercell. Therefore, you should use it at your own risk and discretion. If you want to try Nulls Brawl 38.111 APK indir, you can download it from [Null's website] and install it on your Android device or PC using an emulator.
-
Summary of the main points
-
In this article, we have covered the following points:
-
-
What is Nulls Brawl?
-
Features of Nulls Brawl 38.111
-
How to download and install Nulls Brawl 38.111 APK
-
Why play Nulls Brawl?
-
Pros and cons of Nulls Brawl
-
Tips and tricks for playing Nulls Brawl
-
-
FAQs
-
Here are some frequently asked questions about Nulls Brawl 38.111 APK indir:
-
-
Is Nulls Brawl safe to use?
-Nulls Brawl is not an official product of Supercell and it may contain some bugs or errors that could harm your device or data. Therefore, you should use it at your own risk and discretion. You should also back up your data before installing it and scan the APK for viruses or malware.
-
Is Nulls Brawl free to use?
-Yes, Nulls Brawl is free to use and you do not need to pay anything to download or play it. However, you may need to watch some ads or complete some surveys to access some features or content.
-
Can I play Nulls Brawl offline?
-No, Nulls Brawl requires an internet connection to work properly. You need to connect to the private server to play the game and access its features.
-
Can I update Nulls Brawl?
-Yes, you can update Nulls Brawl whenever a new version is released by the developers. You can check for updates on [Null's website] or on their [Telegram channel]. You can also enable the auto-update feature in the app settings. However, you may need to uninstall and reinstall the app to update it.
-
Can I play Nulls Brawl with my friends?
-Yes, you can play Nulls Brawl with your friends who are also using the same version of the private server. You can invite them to join your clan or team and chat with them in the game. You can also play against them in friendly battles or duels.
-
-
\ No newline at end of file
diff --git a/spaces/2023Liu2023/bingo/src/components/ui/tooltip.tsx b/spaces/2023Liu2023/bingo/src/components/ui/tooltip.tsx
deleted file mode 100644
index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000
--- a/spaces/2023Liu2023/bingo/src/components/ui/tooltip.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as TooltipPrimitive from '@radix-ui/react-tooltip'
-
-import { cn } from '@/lib/utils'
-
-const TooltipProvider = TooltipPrimitive.Provider
-
-const Tooltip = TooltipPrimitive.Root
-
-const TooltipTrigger = TooltipPrimitive.Trigger
-
-const TooltipContent = React.forwardRef<
-  React.ElementRef<typeof TooltipPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
->(({ className, sideOffset = 4, ...props }, ref) => (
-  // NOTE: the original styling classes were lost in extraction; only the caller's className is forwarded
-  <TooltipPrimitive.Content ref={ref} sideOffset={sideOffset} className={cn(className)} {...props} />
-))
-TooltipContent.displayName = TooltipPrimitive.Content.displayName
-
-export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
diff --git a/spaces/801artistry/RVC801/audioEffects.py b/spaces/801artistry/RVC801/audioEffects.py
deleted file mode 100644
index 1830b19e1a5e3ec1f431388d8444ef3a2c9ed91f..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/audioEffects.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from pedalboard import Pedalboard, Compressor, Reverb, NoiseGate
-from pedalboard.io import AudioFile
-import sys
-import os
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from i18n import I18nAuto
-i18n = I18nAuto()
-from pydub import AudioSegment
-import numpy as np
-import soundfile as sf
-from pydub.playback import play
-
-def process_audio(input_path, output_path, reverb_enabled, compressor_enabled, noise_gate_enabled, ):
- print(reverb_enabled)
- print(compressor_enabled)
- print(noise_gate_enabled)
- effects = []
- if reverb_enabled:
- effects.append(Reverb(room_size=0.01))
- if compressor_enabled:
- effects.append(Compressor(threshold_db=-10, ratio=25))
- if noise_gate_enabled:
- effects.append(NoiseGate(threshold_db=-16, ratio=1.5, release_ms=250))
-
- board = Pedalboard(effects)
-
- with AudioFile(input_path) as f:
- with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o:
- while f.tell() < f.frames:
- chunk = f.read(f.samplerate)
- effected = board(chunk, f.samplerate, reset=False)
- o.write(effected)
-
- result = i18n("Processed audio saved at: ") + output_path
- print(result)
- return output_path
\ No newline at end of file
diff --git a/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/Dockerfile b/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/Dockerfile
deleted file mode 100644
index a775cf6ba7c65afeeba9d084f8fc16b08cbe3d80..0000000000000000000000000000000000000000
--- a/spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/Dockerfile
+++ /dev/null
@@ -1,35 +0,0 @@
-# Grab a fresh copy of the Python image
-FROM python:3.10-slim
-
-# Install build and runtime dependencies
-RUN apt-get update && \
- apt-get install -y \
- libopenblas-dev \
- ninja-build \
- build-essential \
- pkg-config \
- curl
-
-RUN pip install -U pip setuptools wheel && \
- CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 pip install --verbose llama-cpp-python[server]
-
-# Download model
-RUN mkdir model && \
- curl -L https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardlm-13b-v1.2.Q4_0.gguf -o model/gguf-model.bin
-
-COPY ./start_server.sh ./
-COPY ./main.py ./
-COPY ./index.html ./
-
-# Make the server start script executable
-RUN chmod +x ./start_server.sh
-
-# Set environment variable for the host
-ENV HOST=0.0.0.0
-ENV PORT=7860
-
-# Expose a port for the server
-EXPOSE ${PORT}
-
-# Run the server start script
-CMD ["/bin/sh", "./start_server.sh"]
\ No newline at end of file
diff --git "a/spaces/AI4PD/hexviz/hexviz/\360\237\247\254Attention_Visualization.py" "b/spaces/AI4PD/hexviz/hexviz/\360\237\247\254Attention_Visualization.py"
deleted file mode 100644
index bc97bc7d1f34136213c33a973a3a9d201771f9c8..0000000000000000000000000000000000000000
--- "a/spaces/AI4PD/hexviz/hexviz/\360\237\247\254Attention_Visualization.py"
+++ /dev/null
@@ -1,306 +0,0 @@
-import re
-
-import numpy as np
-import pandas as pd
-import py3Dmol
-import stmol
-import streamlit as st
-from stmol import showmol
-
-from hexviz.attention import clean_and_validate_sequence, get_attention_pairs, get_chains
-from hexviz.config import URL
-from hexviz.ec_number import ECNumber
-from hexviz.models import Model, ModelType
-from hexviz.view import menu_items, select_model, select_pdb, select_protein
-
-st.set_page_config(layout="centered", menu_items=menu_items)
-st.title("Attention Visualization on proteins")
-
-
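-# Re-writing every key back into session_state is a Streamlit workaround: it keeps
-# widget values alive when the user navigates between the app's pages.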
-for k, v in st.session_state.items():
- st.session_state[k] = v
-
-models = [
- Model(name=ModelType.TAPE_BERT, layers=12, heads=12),
- Model(name=ModelType.ZymCTRL, layers=36, heads=16),
- Model(name=ModelType.PROT_BERT, layers=30, heads=16),
- Model(name=ModelType.PROT_T5, layers=24, heads=32),
-]
-
-with st.expander("Input a PDB id, upload a PDB file or input a sequence", expanded=True):
- pdb_id = select_pdb()
- uploaded_file = st.file_uploader("2.Upload PDB", type=["pdb"])
- input_sequence = st.text_area("3.Input sequence", "", key="input_sequence", max_chars=400)
- sequence, error = clean_and_validate_sequence(input_sequence)
- if error:
- st.error(error)
- pdb_str, structure, source = select_protein(pdb_id, uploaded_file, sequence)
- st.write(f"Visualizing: {source}")
-
-st.sidebar.markdown(
- """
- Configure visualization
- ---
- """
-)
-chains = get_chains(structure)
-
-if "selected_chains" not in st.session_state:
- st.session_state.selected_chains = chains
-selected_chains = st.sidebar.multiselect(
- label="Select Chain(s)", options=chains, key="selected_chains"
-)
-
-if "show_ligands" not in st.session_state:
- st.session_state.show_ligands = True
-show_ligands = st.sidebar.checkbox("Show ligands", key="show_ligands")
-if "color_protein" not in st.session_state:
- st.session_state.color_protein = False
-color_protein = st.sidebar.checkbox("Color protein", key="color_protein")
-
-
-st.sidebar.markdown(
- """
- Attention parameters
- ---
- """
-)
-min_attn = st.sidebar.slider("Minimum attention", min_value=0.0, max_value=0.4, value=0.1)
-n_highest_resis = st.sidebar.number_input(
- "Num highest attention resis to label", value=2, min_value=1, max_value=100
-)
-label_highest = st.sidebar.checkbox("Label highest attention residues", value=True)
-sidechain_highest = st.sidebar.checkbox("Show sidechains", value=True)
-
-
-with st.sidebar.expander("Label residues manually"):
- hl_chain = st.selectbox(label="Chain to label", options=selected_chains, index=0)
- hl_resi_list = st.multiselect(label="Selected Residues", options=list(range(1, 5000)))
-
- label_resi = st.checkbox(label="Label Residues", value=True)
-
-
-left, mid, right = st.columns(3)
-with left:
- selected_model = select_model(models)
-with mid:
- if "selected_layer" not in st.session_state:
- st.session_state["selected_layer"] = 5
- layer_one = (
- st.selectbox(
- "Layer",
- options=[i for i in range(1, selected_model.layers + 1)],
- key="selected_layer",
- )
- or 5
- )
- layer = layer_one - 1
-with right:
- if "selected_head" not in st.session_state:
- st.session_state["selected_head"] = 1
- head_one = st.selectbox(
- "Head",
- options=[i for i in range(1, selected_model.heads + 1)],
- key="selected_head",
- )
- head = head_one - 1
-
-ec_number = ""
-if selected_model.name == ModelType.ZymCTRL:
- st.sidebar.markdown(
- """
- ZymCTRL EC number
- ---
- """
- )
- try:
- ec_number = structure.header["compound"]["1"]["ec"]
- except KeyError:
- pass
- ec_number = st.sidebar.text_input("Enzyme Comission number (EC)", ec_number)
-
- # Validate EC number
- if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ec_number):
- st.sidebar.error(
- "Please enter a valid Enzyme Commission number in the format of 4 integers separated by periods (e.g., 1.2.3.21)"
- )
-
- if ec_number:
- if selected_chains:
- shown_chains = [ch for ch in structure.get_chains() if ch.id in selected_chains]
- else:
- shown_chains = list(structure.get_chains())
-
- EC_tags = []
- colors = ["blue", "green", "orange", "red"]
- radius = 1
- EC_numbers = ec_number.split(".")
- for ch in shown_chains:
- first_residues = []
- i = 1
- while len(first_residues) < 2:
- try:
- first_residues.append(ch[i]["CA"].coord.tolist())
- except KeyError:
- pass
- i += 1
- res_1, res_2 = first_residues
-
- # Calculate the vector from res_1 to res_2
- vector = [res_2[i] - res_1[i] for i in range(3)]
-
- # Reverse the vector
- reverse_vector = [-v for v in vector]
-
- # Normalize the reverse vector
- reverse_vector_normalized = np.array(reverse_vector) / np.linalg.norm(reverse_vector)
-            # Reverse coordinates so the first EC number is furthest from the start of the
-            # protein, like in the sequence, with the EC serial number next to the sequence
-            # and the EC main class at the very start, furthest from the sequence.
- coordinates = reversed(
- [
- [res_1[j] + i * 2 * radius * reverse_vector_normalized[j] for j in range(3)]
- for i in range(4)
- ]
- )
- EC_tag = [
- ECNumber(number=num, coordinate=coord, color=color, radius=radius)
- for num, coord, color in zip(EC_numbers, coordinates, colors)
- ]
- EC_tags.append(EC_tag)
-
- EC_colored = [f":{color}[{num}]" for num, color in zip(EC_numbers, colors)]
- st.sidebar.write("Visualized as colored spheres: " + ".".join(EC_colored))
-
-
-attention_pairs, top_residues = get_attention_pairs(
- pdb_str=pdb_str,
- chain_ids=selected_chains,
- layer=layer,
- head=head,
- threshold=min_attn,
- model_type=selected_model.name,
- top_n=n_highest_resis,
- ec_numbers=EC_tags if ec_number else None,
-)
-
-sorted_by_attention = sorted(attention_pairs, key=lambda x: x[0], reverse=True)
-
-
-def get_3dview(pdb):
- xyzview = py3Dmol.view(height=800, width=800) # TODO you can set the pixel dims here!
- xyzview.addModel(pdb_str, "pdb")
- xyzview.setStyle({"cartoon": {"color": "spectrum" if color_protein else "white"}})
- stmol.add_hover(xyzview, backgroundColor="black", fontColor="white")
-
- # Show all ligands as stick (heteroatoms)
- if show_ligands:
- xyzview.addStyle({"hetflag": True}, {"stick": {"radius": 0.2}})
-
- # If no chains are selected, show all chains
- if selected_chains:
- hidden_chains = [x for x in chains if x not in selected_chains]
- for chain in hidden_chains:
- xyzview.setStyle({"chain": chain}, {"cross": {"hidden": "true"}})
- # Hide ligands for chain too
- xyzview.addStyle({"chain": chain, "hetflag": True}, {"cross": {"hidden": "true"}})
-
- if len(selected_chains) == 1:
- xyzview.zoomTo({"chain": f"{selected_chains[0]}"})
- else:
- xyzview.zoomTo()
-
- for att_weight, first, second in attention_pairs:
- stmol.add_cylinder(
- xyzview,
- start=first,
- end=second,
- cylradius=att_weight,
- cylColor="red",
- dashed=False,
- )
-
- if selected_model.name == ModelType.ZymCTRL and ec_number:
- for EC_tag in EC_tags:
- for EC_num in EC_tag:
- stmol.add_sphere(
- xyzview,
- spcenter=EC_num.coordinate,
- radius=EC_num.radius,
- spColor=EC_num.color,
- )
-
- if label_resi:
- for hl_resi in hl_resi_list:
- xyzview.addResLabels(
- {"chain": hl_chain, "resi": hl_resi},
- {
- "backgroundColor": "lightgray",
- "fontColor": "black",
- "backgroundOpacity": 0.5,
- },
- )
-
- if label_highest:
- for _, chain, res in top_residues:
- one_indexed_res = res + 1
- xyzview.addResLabels(
- {"chain": chain, "resi": one_indexed_res},
- {
- "backgroundColor": "lightgray",
- "fontColor": "black",
- "backgroundOpacity": 0.5,
- },
- )
- if sidechain_highest:
- xyzview.addStyle({"chain": chain, "resi": res}, {"stick": {"radius": 0.2}})
- return xyzview
-
-
-xyzview = get_3dview(pdb_id)
-showmol(xyzview, height=800, width=800)
-
-st.markdown(
- f"""
-Pick a PDB ID, layer and head to visualize attention from the selected protein language model ({selected_model.name.value}).
-""",
- unsafe_allow_html=True,
-)
-
-chain_dict = {f"{chain.id}": list(chain.get_residues()) for chain in list(structure.get_chains())}
-data = []
-for fraction_of_total_attention, chain, resi in top_residues:
- try:
- res = chain_dict[chain][resi]
- except KeyError:
- continue
- pct_of_total_attention = round(fraction_of_total_attention * 100, 3)
- el = (pct_of_total_attention, f"{res.resname:3}{res.id[1]}({chain})")
- data.append(el)
-
-df = pd.DataFrame(data, columns=["% of total attention", "Residue"])
-df = df.style.format(
- {"% of total attention": "{:.3f}"} # Set 3 decimal places for "% of total attention"
-)
-st.markdown(
- f"The {n_highest_resis} residues (per chain) with the highest attention to them are labeled in the visualization and listed here:"
-)
-st.table(df)
-
-st.markdown(
- f"""
-### Check out the other pages
-🗺️Identify Interesting Heads plots attention heatmaps for many heads and
-layers from a model. This can help you pick what specific attention heads to look at for your protein.
-
-🦅Birds Eye View visualizes attention on structures in a large grid, see how attention patterns
-change through layers and varies accross several heads.
-
-📄Documentation has information on protein
-language models, attention analysis and hexviz.""",
- unsafe_allow_html=True,
-)
-
-"""
-The attention visualization is inspired by [provis](https://github.com/salesforce/provis#provis-attention-visualizer).
-"""
diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/version.py b/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/version.py
deleted file mode 100644
index a33fc87f61f528780e3319a5160769cc84512b1b..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/generate_human_motion/pyrender/pyrender/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = '0.1.45'
diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/image_degradation/bsrgan_light.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/image_degradation/bsrgan_light.py
deleted file mode 100644
index 9e1f823996bf559e9b015ea9aa2b3cd38dd13af1..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/image_degradation/bsrgan_light.py
+++ /dev/null
@@ -1,650 +0,0 @@
-# -*- coding: utf-8 -*-
-import numpy as np
-import cv2
-import torch
-
-from functools import partial
-import random
-from scipy import ndimage
-import scipy
-import scipy.stats as ss
-from scipy.interpolate import interp2d
-from scipy.linalg import orth
-import albumentations
-
-import ldm.modules.image_degradation.utils_image as util
-
-"""
-# --------------------------------------------
-# Super-Resolution
-# --------------------------------------------
-#
-# Kai Zhang (cskaizhang@gmail.com)
-# https://github.com/cszn
-# From 2019/03--2021/08
-# --------------------------------------------
-"""
-
-
-def modcrop_np(img, sf):
- '''
- Args:
- img: numpy image, WxH or WxHxC
- sf: scale factor
- Return:
- cropped image
- '''
- w, h = img.shape[:2]
- im = np.copy(img)
- return im[:w - w % sf, :h - h % sf, ...]
-
-
-"""
-# --------------------------------------------
-# anisotropic Gaussian kernels
-# --------------------------------------------
-"""
-
-
-def analytic_kernel(k):
- """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
- k_size = k.shape[0]
- # Calculate the big kernels size
- big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
- # Loop over the small kernel to fill the big one
- for r in range(k_size):
- for c in range(k_size):
- big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
- # Crop the edges of the big kernel to ignore very small values and increase run time of SR
- crop = k_size // 2
- cropped_big_k = big_k[crop:-crop, crop:-crop]
- # Normalize to 1
- return cropped_big_k / cropped_big_k.sum()
-
-
-def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
- """ generate an anisotropic Gaussian kernel
- Args:
- ksize : e.g., 15, kernel size
- theta : [0, pi], rotation angle range
- l1 : [0.1,50], scaling of eigenvalues
- l2 : [0.1,l1], scaling of eigenvalues
- If l1 = l2, will get an isotropic Gaussian kernel.
- Returns:
- k : kernel
- """
-
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
- D = np.array([[l1, 0], [0, l2]])
- Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
- k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
-
- return k
-
-
-def gm_blur_kernel(mean, cov, size=15):
- center = size / 2.0 + 0.5
- k = np.zeros([size, size])
- for y in range(size):
- for x in range(size):
- cy = y - center + 1
- cx = x - center + 1
- k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
-
- k = k / np.sum(k)
- return k
-
-
-def shift_pixel(x, sf, upper_left=True):
- """shift pixel for super-resolution with different scale factors
- Args:
- x: WxHxC or WxH
- sf: scale factor
- upper_left: shift direction
- """
- h, w = x.shape[:2]
- shift = (sf - 1) * 0.5
- xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
- if upper_left:
- x1 = xv + shift
- y1 = yv + shift
- else:
- x1 = xv - shift
- y1 = yv - shift
-
- x1 = np.clip(x1, 0, w - 1)
- y1 = np.clip(y1, 0, h - 1)
-
- if x.ndim == 2:
- x = interp2d(xv, yv, x)(x1, y1)
- if x.ndim == 3:
- for i in range(x.shape[-1]):
- x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
-
- return x
-
-
-def blur(x, k):
- '''
- x: image, NxcxHxW
- k: kernel, Nx1xhxw
- '''
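-    # Apply each sample's kernel as one grouped convolution: repeat the kernel across that
-    # image's channels, flatten batch and channels into groups, convolve with groups=n*c,
-    # then reshape the result back to NxCxHxW.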
- n, c = x.shape[:2]
- p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
- x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
- k = k.repeat(1, c, 1, 1)
- k = k.view(-1, 1, k.shape[2], k.shape[3])
- x = x.view(1, -1, x.shape[2], x.shape[3])
- x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
- x = x.view(n, c, x.shape[2], x.shape[3])
-
- return x
-
-
-def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
- """"
- # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
- # Kai Zhang
- # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
- # max_var = 2.5 * sf
- """
- # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
- lambda_1 = min_var + np.random.rand() * (max_var - min_var)
- lambda_2 = min_var + np.random.rand() * (max_var - min_var)
- theta = np.random.rand() * np.pi # random theta
- noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
-
- # Set COV matrix using Lambdas and Theta
- LAMBDA = np.diag([lambda_1, lambda_2])
- Q = np.array([[np.cos(theta), -np.sin(theta)],
- [np.sin(theta), np.cos(theta)]])
- SIGMA = Q @ LAMBDA @ Q.T
- INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
-
- # Set expectation position (shifting kernel for aligned image)
- MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
- MU = MU[None, None, :, None]
-
- # Create meshgrid for Gaussian
- [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
- Z = np.stack([X, Y], 2)[:, :, :, None]
-
-    # Calculate Gaussian for every pixel of the kernel
- ZZ = Z - MU
- ZZ_t = ZZ.transpose(0, 1, 3, 2)
- raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
-
- # shift the kernel so it will be centered
- # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
-
- # Normalize the kernel and return
- # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
- kernel = raw_kernel / np.sum(raw_kernel)
- return kernel
-
-
-def fspecial_gaussian(hsize, sigma):
- hsize = [hsize, hsize]
- siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
- std = sigma
- [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
- arg = -(x * x + y * y) / (2 * std * std)
- h = np.exp(arg)
-    h[h < np.finfo(float).eps * h.max()] = 0
- sumh = h.sum()
- if sumh != 0:
- h = h / sumh
- return h
-
-
-def fspecial_laplacian(alpha):
- alpha = max([0, min([alpha, 1])])
- h1 = alpha / (alpha + 1)
- h2 = (1 - alpha) / (alpha + 1)
- h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
- h = np.array(h)
- return h
-
-
-def fspecial(filter_type, *args, **kwargs):
- '''
- python code from:
- https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
- '''
- if filter_type == 'gaussian':
- return fspecial_gaussian(*args, **kwargs)
- if filter_type == 'laplacian':
- return fspecial_laplacian(*args, **kwargs)
-
-
-"""
-# --------------------------------------------
-# degradation models
-# --------------------------------------------
-"""
-
-
-def bicubic_degradation(x, sf=3):
- '''
- Args:
- x: HxWxC image, [0, 1]
- sf: down-scale factor
- Return:
- bicubicly downsampled LR image
- '''
- x = util.imresize_np(x, scale=1 / sf)
- return x
-
-
-def srmd_degradation(x, k, sf=3):
- ''' blur + bicubic downsampling
- Args:
- x: HxWxC image, [0, 1]
- k: hxw, double
- sf: down-scale factor
- Return:
- downsampled LR image
- Reference:
- @inproceedings{zhang2018learning,
- title={Learning a single convolutional super-resolution network for multiple degradations},
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
- pages={3262--3271},
- year={2018}
- }
- '''
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
- x = bicubic_degradation(x, sf=sf)
- return x
-
-
-def dpsr_degradation(x, k, sf=3):
- ''' bicubic downsampling + blur
- Args:
- x: HxWxC image, [0, 1]
- k: hxw, double
- sf: down-scale factor
- Return:
- downsampled LR image
- Reference:
- @inproceedings{zhang2019deep,
- title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
- pages={1671--1681},
- year={2019}
- }
- '''
- x = bicubic_degradation(x, sf=sf)
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
- return x
-
-
-def classical_degradation(x, k, sf=3):
- ''' blur + downsampling
- Args:
- x: HxWxC image, [0, 1]/[0, 255]
- k: hxw, double
- sf: down-scale factor
- Return:
- downsampled LR image
- '''
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
- # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
- st = 0
- return x[st::sf, st::sf, ...]
-
-
-def add_sharpening(img, weight=0.5, radius=50, threshold=10):
- """USM sharpening. borrowed from real-ESRGAN
- Input image: I; Blurry image: B.
- 1. K = I + weight * (I - B)
- 2. Mask = 1 if abs(I - B) > threshold, else: 0
- 3. Blur mask:
- 4. Out = Mask * K + (1 - Mask) * I
- Args:
- img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
- weight (float): Sharp weight. Default: 1.
- radius (float): Kernel size of Gaussian blur. Default: 50.
- threshold (int):
- """
- if radius % 2 == 0:
- radius += 1
- blur = cv2.GaussianBlur(img, (radius, radius), 0)
- residual = img - blur
- mask = np.abs(residual) * 255 > threshold
- mask = mask.astype('float32')
- soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
-
- K = img + weight * residual
- K = np.clip(K, 0, 1)
- return soft_mask * K + (1 - soft_mask) * img
-
-
-def add_blur(img, sf=4):
- wd2 = 4.0 + sf
- wd = 2.0 + 0.2 * sf
-
- wd2 = wd2/4
- wd = wd/4
-
- if random.random() < 0.5:
- l1 = wd2 * random.random()
- l2 = wd2 * random.random()
- k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
- else:
- k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
- img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
-
- return img
-
-
-def add_resize(img, sf=4):
- rnum = np.random.rand()
- if rnum > 0.8: # up
- sf1 = random.uniform(1, 2)
- elif rnum < 0.7: # down
- sf1 = random.uniform(0.5 / sf, 1)
- else:
- sf1 = 1.0
- img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
- img = np.clip(img, 0.0, 1.0)
-
- return img
-
-
-# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
-# noise_level = random.randint(noise_level1, noise_level2)
-# rnum = np.random.rand()
-# if rnum > 0.6: # add color Gaussian noise
-# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-# elif rnum < 0.4: # add grayscale Gaussian noise
-# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-# else: # add noise
-# L = noise_level2 / 255.
-# D = np.diag(np.random.rand(3))
-# U = orth(np.random.rand(3, 3))
-# conv = np.dot(np.dot(np.transpose(U), D), U)
-# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-# img = np.clip(img, 0.0, 1.0)
-# return img
-
-def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
- noise_level = random.randint(noise_level1, noise_level2)
- rnum = np.random.rand()
- if rnum > 0.6: # add color Gaussian noise
- img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
- elif rnum < 0.4: # add grayscale Gaussian noise
- img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
- else: # add noise
- L = noise_level2 / 255.
- D = np.diag(np.random.rand(3))
- U = orth(np.random.rand(3, 3))
- conv = np.dot(np.dot(np.transpose(U), D), U)
- img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
- img = np.clip(img, 0.0, 1.0)
- return img
-
-
-def add_speckle_noise(img, noise_level1=2, noise_level2=25):
- noise_level = random.randint(noise_level1, noise_level2)
- img = np.clip(img, 0.0, 1.0)
- rnum = random.random()
- if rnum > 0.6:
- img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
- elif rnum < 0.4:
- img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
- else:
- L = noise_level2 / 255.
- D = np.diag(np.random.rand(3))
- U = orth(np.random.rand(3, 3))
- conv = np.dot(np.dot(np.transpose(U), D), U)
- img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
- img = np.clip(img, 0.0, 1.0)
- return img
-
-
-def add_Poisson_noise(img):
- img = np.clip((img * 255.0).round(), 0, 255) / 255.
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
- if random.random() < 0.5:
- img = np.random.poisson(img * vals).astype(np.float32) / vals
- else:
- img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
- img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
- img += noise_gray[:, :, np.newaxis]
- img = np.clip(img, 0.0, 1.0)
- return img
-
-
-def add_JPEG_noise(img):
- quality_factor = random.randint(80, 95)
- img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
- result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
- img = cv2.imdecode(encimg, 1)
- img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
- return img
-
-
-def random_crop(lq, hq, sf=4, lq_patchsize=64):
- h, w = lq.shape[:2]
- rnd_h = random.randint(0, h - lq_patchsize)
- rnd_w = random.randint(0, w - lq_patchsize)
- lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
-
- rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
- hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
- return lq, hq
-
-
-def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
- """
- This is the degradation model of BSRGAN from the paper
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
- ----------
-    img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf)
- sf: scale factor
- isp_model: camera ISP model
- Returns
- -------
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
- """
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
- sf_ori = sf
-
- h1, w1 = img.shape[:2]
- img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
- h, w = img.shape[:2]
-
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
-
- hq = img.copy()
-
- if sf == 4 and random.random() < scale2_prob: # downsample1
- if np.random.rand() < 0.5:
- img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- img = util.imresize_np(img, 1 / 2, True)
- img = np.clip(img, 0.0, 1.0)
- sf = 2
-
- shuffle_order = random.sample(range(7), 7)
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
- if idx1 > idx2: # keep downsample3 last
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
- for i in shuffle_order:
-
- if i == 0:
- img = add_blur(img, sf=sf)
-
- elif i == 1:
- img = add_blur(img, sf=sf)
-
- elif i == 2:
- a, b = img.shape[1], img.shape[0]
- # downsample2
- if random.random() < 0.75:
- sf1 = random.uniform(1, 2 * sf)
- img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
- k_shifted = shift_pixel(k, sf)
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
- img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
- img = img[0::sf, 0::sf, ...] # nearest downsampling
- img = np.clip(img, 0.0, 1.0)
-
- elif i == 3:
- # downsample3
- img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
- img = np.clip(img, 0.0, 1.0)
-
- elif i == 4:
- # add Gaussian noise
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
-
- elif i == 5:
- # add JPEG noise
- if random.random() < jpeg_prob:
- img = add_JPEG_noise(img)
-
- elif i == 6:
- # add processed camera sensor noise
- if random.random() < isp_prob and isp_model is not None:
- with torch.no_grad():
- img, hq = isp_model.forward(img.copy(), hq)
-
- # add final JPEG compression noise
- img = add_JPEG_noise(img)
-
- # random crop
- img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
-
- return img, hq
-
-
-# todo no isp_model?
-def degradation_bsrgan_variant(image, sf=4, isp_model=None):
- """
- This is the degradation model of BSRGAN from the paper
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
- ----------
- sf: scale factor
- isp_model: camera ISP model
- Returns
- -------
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
- """
- image = util.uint2single(image)
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
- sf_ori = sf
-
- h1, w1 = image.shape[:2]
- image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
- h, w = image.shape[:2]
-
- hq = image.copy()
-
- if sf == 4 and random.random() < scale2_prob: # downsample1
- if np.random.rand() < 0.5:
- image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- image = util.imresize_np(image, 1 / 2, True)
- image = np.clip(image, 0.0, 1.0)
- sf = 2
-
- shuffle_order = random.sample(range(7), 7)
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
- if idx1 > idx2: # keep downsample3 last
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
- for i in shuffle_order:
-
- if i == 0:
- image = add_blur(image, sf=sf)
-
- # elif i == 1:
- # image = add_blur(image, sf=sf)
-
- if i == 0:
- pass
-
- elif i == 2:
- a, b = image.shape[1], image.shape[0]
- # downsample2
- if random.random() < 0.8:
- sf1 = random.uniform(1, 2 * sf)
- image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
- k_shifted = shift_pixel(k, sf)
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
- image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
- image = image[0::sf, 0::sf, ...] # nearest downsampling
-
- image = np.clip(image, 0.0, 1.0)
-
- elif i == 3:
- # downsample3
- image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
- image = np.clip(image, 0.0, 1.0)
-
- elif i == 4:
- # add Gaussian noise
- image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
-
- elif i == 5:
- # add JPEG noise
- if random.random() < jpeg_prob:
- image = add_JPEG_noise(image)
- #
- # elif i == 6:
- # # add processed camera sensor noise
- # if random.random() < isp_prob and isp_model is not None:
- # with torch.no_grad():
- # img, hq = isp_model.forward(img.copy(), hq)
-
- # add final JPEG compression noise
- image = add_JPEG_noise(image)
- image = util.single2uint(image)
- example = {"image": image}
- return example
-
-
-
-
-if __name__ == '__main__':
- print("hey")
- img = util.imread_uint('utils/test.png', 3)
- img = img[:448, :448]
- h = img.shape[0] // 4
- print("resizing to", h)
- sf = 4
- deg_fn = partial(degradation_bsrgan_variant, sf=sf)
- for i in range(20):
- print(i)
- img_hq = img
- img_lq = deg_fn(img)["image"]
- img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
- print(img_lq)
- img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
- print(img_lq.shape)
- print("bicubic", img_lq_bicubic.shape)
- print(img_hq.shape)
- lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
- interpolation=0)
- lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
- (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
- interpolation=0)
- img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
- util.imsave(img_concat, str(i) + '.png')
diff --git a/spaces/AIZerotoHero-Health4All/03-BiomedNER-1117-Gradio/app.py b/spaces/AIZerotoHero-Health4All/03-BiomedNER-1117-Gradio/app.py
deleted file mode 100644
index 02c37db97a9bdb34d6da850963d156ef27384571..0000000000000000000000000000000000000000
--- a/spaces/AIZerotoHero-Health4All/03-BiomedNER-1117-Gradio/app.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import gradio as gr
-import pandas as pd
-import json
-from collections import defaultdict
-
-# Create tokenizer for biomed model
-from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification
-tokenizer = AutoTokenizer.from_pretrained("d4data/biomedical-ner-all")
-model = AutoModelForTokenClassification.from_pretrained("d4data/biomedical-ner-all")
-pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
-
-# Matplotlib for entity graph
-import matplotlib.pyplot as plt
-plt.switch_backend("Agg")
-
-# Load examples from JSON
-EXAMPLES = {}
-with open("examples.json", "r") as f:
- example_json = json.load(f)
- EXAMPLES = {x["text"]: x["label"] for x in example_json}
-
-def group_by_entity(raw):
- out = defaultdict(int)
- for ent in raw:
- out[ent["entity_group"]] += 1
- # out["total"] = sum(out.values())
- return out
-
-
-def plot_to_figure(grouped):
- fig = plt.figure()
- plt.bar(x=list(grouped.keys()), height=list(grouped.values()))
- plt.margins(0.2)
- plt.subplots_adjust(bottom=0.4)
- plt.xticks(rotation=90)
- return fig
-
-
-def ner(text):
- raw = pipe(text)
- ner_content = {
- "text": text,
- "entities": [
- {
- "entity": x["entity_group"],
- "word": x["word"],
- "score": x["score"],
- "start": x["start"],
- "end": x["end"],
- }
- for x in raw
- ],
- }
-
- grouped = group_by_entity(raw)
- figure = plot_to_figure(grouped)
- label = EXAMPLES.get(text, "Unknown")
-
- meta = {
- "entity_counts": grouped,
- "entities": len(set(grouped.keys())),
- "counts": sum(grouped.values()),
- }
-
- return (ner_content, meta, label, figure)
-
-
-interface = gr.Interface(
- ner,
- inputs=gr.Textbox(label="Note text", value=""),
- outputs=[
- gr.HighlightedText(label="NER", combine_adjacent=True),
- gr.JSON(label="Entity Counts"),
- gr.Label(label="Rating"),
- gr.Plot(label="Bar"),
- ],
- examples=list(EXAMPLES.keys()),
- allow_flagging="never",
-)
-
-interface.launch()
\ No newline at end of file
diff --git a/spaces/ASJMO/freegpt/server/backend.py b/spaces/ASJMO/freegpt/server/backend.py
deleted file mode 100644
index fd45b94d916512059e4d1f7850b63de6f9da6320..0000000000000000000000000000000000000000
--- a/spaces/ASJMO/freegpt/server/backend.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import re
-from datetime import datetime
-from g4f import ChatCompletion
-from flask import request, Response, stream_with_context
-from requests import get
-from server.config import special_instructions
-
-
-class Backend_Api:
- def __init__(self, bp, config: dict) -> None:
- """
- Initialize the Backend_Api class.
- :param app: Flask application instance
- :param config: Configuration dictionary
- """
- self.bp = bp
- self.routes = {
- '/backend-api/v2/conversation': {
- 'function': self._conversation,
- 'methods': ['POST']
- }
- }
-
- def _conversation(self):
- """
- Handles the conversation route.
-
- :return: Response object containing the generated conversation stream
- """
- conversation_id = request.json['conversation_id']
-
- try:
- jailbreak = request.json['jailbreak']
- model = request.json['model']
- messages = build_messages(jailbreak)
-
- # Generate response
- response = ChatCompletion.create(
- model=model,
- chatId=conversation_id,
- messages=messages
- )
-
- return Response(stream_with_context(generate_stream(response, jailbreak)), mimetype='text/event-stream')
-
- except Exception as e:
- print(e)
- print(e.__traceback__.tb_next)
-
- return {
- '_action': '_ask',
- 'success': False,
- "error": f"an error occurred {str(e)}"
- }, 400
-
-
-def build_messages(jailbreak):
- """
- Build the messages for the conversation.
-
- :param jailbreak: Jailbreak instruction string
- :return: List of messages for the conversation
- """
- _conversation = request.json['meta']['content']['conversation']
- internet_access = request.json['meta']['content']['internet_access']
- prompt = request.json['meta']['content']['parts'][0]
-
- # Add the existing conversation
- conversation = _conversation
-
- # Add web results if enabled
- if internet_access:
- current_date = datetime.now().strftime("%Y-%m-%d")
- query = f'Current date: {current_date}. ' + prompt["content"]
- search_results = fetch_search_results(query)
- conversation.extend(search_results)
-
- # Add jailbreak instructions if enabled
- if jailbreak_instructions := getJailbreak(jailbreak):
- conversation.extend(jailbreak_instructions)
-
- # Add the prompt
- conversation.append(prompt)
-
- # Reduce conversation size to avoid API Token quantity error
- if len(conversation) > 3:
- conversation = conversation[-4:]
-
- return conversation
-
-
-def fetch_search_results(query):
- """
- Fetch search results for a given query.
-
- :param query: Search query string
- :return: List of search results
- """
- search = get('https://ddg-api.herokuapp.com/search',
- params={
- 'query': query,
- 'limit': 3,
- })
-
- snippets = ""
- for index, result in enumerate(search.json()):
- snippet = f'[{index + 1}] "{result["snippet"]}" URL:{result["link"]}.'
- snippets += snippet
-
- response = "Here are some updated web searches. Use this to improve user response:"
- response += snippets
-
- return [{'role': 'system', 'content': response}]
-
-
-def generate_stream(response, jailbreak):
- """
- Generate the conversation stream.
-
- :param response: Response object from ChatCompletion.create
- :param jailbreak: Jailbreak instruction string
- :return: Generator object yielding messages in the conversation
- """
- if getJailbreak(jailbreak):
- response_jailbreak = ''
- jailbroken_checked = False
- for message in response:
- response_jailbreak += message
- if jailbroken_checked:
- yield message
- else:
- if response_jailbroken_success(response_jailbreak):
- jailbroken_checked = True
- if response_jailbroken_failed(response_jailbreak):
- yield response_jailbreak
- jailbroken_checked = True
- else:
- yield from response
-
-
-def response_jailbroken_success(response: str) -> bool:
- """Check if the response has been jailbroken.
-
- :param response: Response string
- :return: Boolean indicating if the response has been jailbroken
- """
- act_match = re.search(r'ACT:', response, flags=re.DOTALL)
- return bool(act_match)
-
-
-def response_jailbroken_failed(response):
- """
- Check if the response has not been jailbroken.
-
- :param response: Response string
- :return: Boolean indicating if the response has not been jailbroken
- """
- return False if len(response) < 4 else not (response.startswith("GPT:") or response.startswith("ACT:"))
-
-
-def getJailbreak(jailbreak):
- """
- Check if jailbreak instructions are provided.
-
- :param jailbreak: Jailbreak instruction string
- :return: Jailbreak instructions if provided, otherwise None
- """
-    if jailbreak != "default":
-        # Guard the lookup so an unknown jailbreak name cannot raise a KeyError
-        if jailbreak in special_instructions:
-            special_instructions[jailbreak][0]['content'] += special_instructions['two_responses_instruction']
-            return special_instructions[jailbreak]
-        else:
-            return None
-    else:
-        return None
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py
deleted file mode 100644
index 2c585ceb92e9bfb1984b49ce02f86f4d3cd4532d..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py
+++ /dev/null
@@ -1,42 +0,0 @@
-_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
-
-# ========================modified parameters======================
-data_root = 'data/balloon/'
-# Path of train annotation file
-train_ann_file = 'train.json'
-train_data_prefix = 'train/' # Prefix of train image path
-# Path of val annotation file
-val_ann_file = 'val.json'
-val_data_prefix = 'val/' # Prefix of val image path
-metainfo = {
- 'classes': ('balloon', ),
- 'palette': [
- (220, 20, 60),
- ]
-}
-num_classes = 1
-
-train_batch_size_per_gpu = 4
-train_num_workers = 2
-log_interval = 1
-
-# =======================Unmodified in most cases==================
-train_dataloader = dict(
- batch_size=train_batch_size_per_gpu,
- num_workers=train_num_workers,
- dataset=dict(
- data_root=data_root,
- metainfo=metainfo,
- data_prefix=dict(img=train_data_prefix),
- ann_file=train_ann_file))
-val_dataloader = dict(
- dataset=dict(
- data_root=data_root,
- metainfo=metainfo,
- data_prefix=dict(img=val_data_prefix),
- ann_file=val_ann_file))
-test_dataloader = val_dataloader
-val_evaluator = dict(ann_file=data_root + val_ann_file)
-test_evaluator = val_evaluator
-model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))
-default_hooks = dict(logger=dict(interval=log_interval))
diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/concatUint8Arrays.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/concatUint8Arrays.ts
deleted file mode 100644
index e53396eca7e3dee20a543fb6ac28ecf48c7e3965..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/concatUint8Arrays.ts
+++ /dev/null
@@ -1,12 +0,0 @@
-import { sum } from "./sum";
-
-export function concatUint8Arrays(arrays: Uint8Array[]): Uint8Array {
- const totalLength = sum(arrays.map((a) => a.length));
- const result = new Uint8Array(totalLength);
- let offset = 0;
- for (const array of arrays) {
- result.set(array, offset);
- offset += array.length;
- }
- return result;
-}
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.js
deleted file mode 100644
index 4cca5202408539a32448bfa201fe33e5f060858a..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.js
+++ /dev/null
@@ -1,13 +0,0 @@
-import ScrollBar from './ScrollBar.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('scrollBar', function (config) {
- var gameObject = new ScrollBar(this.scene, config);
- this.scene.add.existing(gameObject);
- return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.ScrollBar', ScrollBar);
-
-export default ScrollBar;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Factory.js
deleted file mode 100644
index dd1ae0f70ffca1b31352920098ccb6150b8cc20e..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/skew/Factory.js
+++ /dev/null
@@ -1,11 +0,0 @@
-import Skew from './Skew.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('skew', function (gameObject, config) {
- return new Skew(gameObject, config);
-});
-
-SetValue(window, 'RexPlugins.UI.Skew', Skew);
-
-export default Skew;
\ No newline at end of file
diff --git a/spaces/AlexZou/Deploy_Restoration/Underwater.py b/spaces/AlexZou/Deploy_Restoration/Underwater.py
deleted file mode 100644
index 8e282f31cd4a8c98d53f4b9dc11e712e85481c21..0000000000000000000000000000000000000000
--- a/spaces/AlexZou/Deploy_Restoration/Underwater.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os
-import torch
-import numpy as np
-from torchvision import transforms
-from PIL import Image
-import time
-import torchvision
-import cv2
-import torchvision.utils as tvu
-import torch.nn.functional as F
-import argparse
-from net.Ushape_Trans import *
-
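-# Run the generator network (from net.Ushape_Trans) on a single image and return the restored output together with the inference time.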
-def inference_img(img_path,Net):
-
- low_image = Image.open(img_path).convert('RGB')
- enhance_transforms = transforms.Compose([
- transforms.Resize((256,256)),
- transforms.ToTensor()
- ])
-
- with torch.no_grad():
- low_image = enhance_transforms(low_image)
- low_image = low_image.unsqueeze(0)
- start = time.time()
- restored2 = Net(low_image)
- end = time.time()
-
-
- return restored2,end-start
-
-if __name__ == '__main__':
- parser=argparse.ArgumentParser()
- parser.add_argument('--test_path',type=str,required=True,help='Path to test')
- parser.add_argument('--save_path',type=str,required=True,help='Path to save')
- parser.add_argument('--pk_path',type=str,default='model_zoo/underwater.pth',help='Path of the checkpoint')
- opt = parser.parse_args()
- if not os.path.isdir(opt.save_path):
- os.mkdir(opt.save_path)
- Net = Generator()
- Net.load_state_dict(torch.load(opt.pk_path, map_location=torch.device('cpu')))
- Net = Net.eval()
- image = opt.test_path
- print(image)
- restored2,time_num = inference_img(image,Net)
- torchvision.utils.save_image(restored2, os.path.join(opt.save_path, 'output.png'))
diff --git a/spaces/Ameaou/academic-chatgpt3.1/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/Ameaou/academic-chatgpt3.1/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index e46a4c01e804aa4b649bd40af6c13d5981c873d4..0000000000000000000000000000000000000000
--- a/spaces/Ameaou/academic-chatgpt3.1/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/conv2d_gradfix.py b/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/conv2d_gradfix.py
deleted file mode 100644
index e95e10d0b1d0315a63a76446fd4c5c293c8bbc6d..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/conv2d_gradfix.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Custom replacement for `torch.nn.functional.conv2d` that supports
-arbitrarily high order gradients with zero performance penalty."""
-
-import warnings
-import contextlib
-import torch
-
-# pylint: disable=redefined-builtin
-# pylint: disable=arguments-differ
-# pylint: disable=protected-access
-
-#----------------------------------------------------------------------------
-
-enabled = False # Enable the custom op by setting this to true.
-weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.
-
-@contextlib.contextmanager
-def no_weight_gradients():
- global weight_gradients_disabled
- old = weight_gradients_disabled
- weight_gradients_disabled = True
- yield
- weight_gradients_disabled = old
-
-#----------------------------------------------------------------------------
-
-def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
- if _should_use_custom_op(input):
- return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
- return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
-
-def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
- if _should_use_custom_op(input):
- return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias)
- return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
-
-#----------------------------------------------------------------------------
-
-def _should_use_custom_op(input):
- assert isinstance(input, torch.Tensor)
- if (not enabled) or (not torch.backends.cudnn.enabled):
- return False
- if input.device.type != 'cuda':
- return False
- if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']):
- return True
- warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
- return False
-
-def _tuple_of_ints(xs, ndim):
- xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
- assert len(xs) == ndim
- assert all(isinstance(x, int) for x in xs)
- return xs
-
-#----------------------------------------------------------------------------
-
-_conv2d_gradfix_cache = dict()
-
-def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
- # Parse arguments.
- ndim = 2
- weight_shape = tuple(weight_shape)
- stride = _tuple_of_ints(stride, ndim)
- padding = _tuple_of_ints(padding, ndim)
- output_padding = _tuple_of_ints(output_padding, ndim)
- dilation = _tuple_of_ints(dilation, ndim)
-
- # Lookup from cache.
- key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
- if key in _conv2d_gradfix_cache:
- return _conv2d_gradfix_cache[key]
-
- # Validate arguments.
- assert groups >= 1
- assert len(weight_shape) == ndim + 2
- assert all(stride[i] >= 1 for i in range(ndim))
- assert all(padding[i] >= 0 for i in range(ndim))
- assert all(dilation[i] >= 0 for i in range(ndim))
- if not transpose:
- assert all(output_padding[i] == 0 for i in range(ndim))
- else: # transpose
- assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))
-
- # Helpers.
- common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
- def calc_output_padding(input_shape, output_shape):
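- # Output padding needed so the convolution used to compute grad_input reproduces the original input's spatial size.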
- if transpose:
- return [0, 0]
- return [
- input_shape[i + 2]
- - (output_shape[i + 2] - 1) * stride[i]
- - (1 - 2 * padding[i])
- - dilation[i] * (weight_shape[i + 2] - 1)
- for i in range(ndim)
- ]
-
- # Forward & backward.
- class Conv2d(torch.autograd.Function):
- @staticmethod
- def forward(ctx, input, weight, bias):
- assert weight.shape == weight_shape
- if not transpose:
- output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
- else: # transpose
- output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
- ctx.save_for_backward(input, weight)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input, weight = ctx.saved_tensors
- grad_input = None
- grad_weight = None
- grad_bias = None
-
- if ctx.needs_input_grad[0]:
- p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
- grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
- assert grad_input.shape == input.shape
-
- if ctx.needs_input_grad[1] and not weight_gradients_disabled:
- grad_weight = Conv2dGradWeight.apply(grad_output, input)
- assert grad_weight.shape == weight_shape
-
- if ctx.needs_input_grad[2]:
- grad_bias = grad_output.sum([0, 2, 3])
-
- return grad_input, grad_weight, grad_bias
-
- # Gradient with respect to the weights.
- class Conv2dGradWeight(torch.autograd.Function):
- @staticmethod
- def forward(ctx, grad_output, input):
- op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
- flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
- grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
- assert grad_weight.shape == weight_shape
- ctx.save_for_backward(grad_output, input)
- return grad_weight
-
- @staticmethod
- def backward(ctx, grad2_grad_weight):
- grad_output, input = ctx.saved_tensors
- grad2_grad_output = None
- grad2_input = None
-
- if ctx.needs_input_grad[0]:
- grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
- assert grad2_grad_output.shape == grad_output.shape
-
- if ctx.needs_input_grad[1]:
- p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
- grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
- assert grad2_input.shape == input.shape
-
- return grad2_grad_output, grad2_input
-
- _conv2d_gradfix_cache[key] = Conv2d
- return Conv2d
-
-#----------------------------------------------------------------------------
diff --git a/spaces/Amrrs/gradio-sentiment-analyzer/app.py b/spaces/Amrrs/gradio-sentiment-analyzer/app.py
deleted file mode 100644
index 0d8868de0bd15a079dc34bd069b3655ab86c3efe..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/gradio-sentiment-analyzer/app.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import gradio as gr
-
-from transformers import pipeline
-
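-# Create the default Hugging Face sentiment-analysis pipeline.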
-sentiment = pipeline("sentiment-analysis")
-
-def get_sentiment(input_text):
- return sentiment(input_text)
-
-iface = gr.Interface(fn = get_sentiment,
- inputs = "text",
- outputs = ['text'],
- title = 'Sentiment Analysis',
- description="Get Sentiment Negative/Positive for the given input")
-
-iface.launch(inline = False)
\ No newline at end of file
diff --git a/spaces/Amrrs/image-to-text-app/README.md b/spaces/Amrrs/image-to-text-app/README.md
deleted file mode 100644
index e7c2ca19d5f01a070e583998b2cbcb3e81246cc5..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/image-to-text-app/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Image To Text App
-emoji: 📹
-colorFrom: blue
-colorTo: red
-sdk: streamlit
-app_file: app.py
-pinned: false
----
-
-# image2textapp
-Demo of 🤗 Spaces deployment of a Streamlit Python app
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py
deleted file mode 100644
index a3eaba014cf6c6a41b46f169868af3edafb521c3..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py
+++ /dev/null
@@ -1,812 +0,0 @@
-import argparse
-import hashlib
-import itertools
-import math
-import os
-import random
-from pathlib import Path
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import ProjectConfiguration, set_seed
-from huggingface_hub import create_repo, upload_folder
-from PIL import Image, ImageDraw
-from torch.utils.data import Dataset
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-from diffusers import (
- AutoencoderKL,
- DDPMScheduler,
- StableDiffusionInpaintPipeline,
- StableDiffusionPipeline,
- UNet2DConditionModel,
-)
-from diffusers.optimization import get_scheduler
-from diffusers.utils import check_min_version
-
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
-check_min_version("0.13.0.dev0")
-
-logger = get_logger(__name__)
-
-
-def prepare_mask_and_masked_image(image, mask):
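- # Scale the RGB image to [-1, 1] in NCHW layout, binarize the mask at 0.5, and zero out the masked pixels to build masked_image.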
- image = np.array(image.convert("RGB"))
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
- mask = np.array(mask.convert("L"))
- mask = mask.astype(np.float32) / 255.0
- mask = mask[None, None]
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
- mask = torch.from_numpy(mask)
-
- masked_image = image * (mask < 0.5)
-
- return mask, masked_image
-
-
-# generate random masks
-def random_mask(im_shape, ratio=1, mask_full_image=False):
- mask = Image.new("L", im_shape, 0)
- draw = ImageDraw.Draw(mask)
- size = (random.randint(0, int(im_shape[0] * ratio)), random.randint(0, int(im_shape[1] * ratio)))
- # use this to always mask the whole image
- if mask_full_image:
- size = (int(im_shape[0] * ratio), int(im_shape[1] * ratio))
- limits = (im_shape[0] - size[0] // 2, im_shape[1] - size[1] // 2)
- center = (random.randint(size[0] // 2, limits[0]), random.randint(size[1] // 2, limits[1]))
- draw_type = random.randint(0, 1)
- if draw_type == 0 or mask_full_image:
- draw.rectangle(
- (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2),
- fill=255,
- )
- else:
- draw.ellipse(
- (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2),
- fill=255,
- )
-
- return mask
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- required=True,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default=None,
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
- "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
- " sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="text-inversion-model",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop",
- default=False,
- action="store_true",
- help=(
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
- " cropped. The images will be resized to the resolution first before cropping."
- ),
- )
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default="no",
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose"
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
- "and an Nvidia Ampere GPU."
- ),
- )
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
- parser.add_argument(
- "--checkpointing_steps",
- type=int,
- default=500,
- help=(
- "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
- " checkpoints in case they are better than the last checkpoint and are suitable for resuming training"
- " using `--resume_from_checkpoint`."
- ),
- )
- parser.add_argument(
- "--checkpoints_total_limit",
- type=int,
- default=None,
- help=(
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
- " for more docs"
- ),
- )
- parser.add_argument(
- "--resume_from_checkpoint",
- type=str,
- default=None,
- help=(
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
- ),
- )
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- if args.instance_data_dir is None:
- raise ValueError("You must specify a train data directory.")
-
- if args.with_prior_preservation:
- if args.class_data_dir is None:
- raise ValueError("You must specify a data directory for class images.")
- if args.class_prompt is None:
- raise ValueError("You must specify prompt for class images.")
-
- return args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
- It pre-processes the images and tokenizes the prompts.
- """
-
- def __init__(
- self,
- instance_data_root,
- instance_prompt,
- tokenizer,
- class_data_root=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
-
- self.instance_data_root = Path(instance_data_root)
- if not self.instance_data_root.exists():
- raise ValueError("Instance images root doesn't exists.")
-
- self.instance_images_path = list(Path(instance_data_root).iterdir())
- self.num_instance_images = len(self.instance_images_path)
- self.instance_prompt = instance_prompt
- self._length = self.num_instance_images
-
- if class_data_root is not None:
- self.class_data_root = Path(class_data_root)
- self.class_data_root.mkdir(parents=True, exist_ok=True)
- self.class_images_path = list(self.class_data_root.iterdir())
- self.num_class_images = len(self.class_images_path)
- self._length = max(self.num_class_images, self.num_instance_images)
- self.class_prompt = class_prompt
- else:
- self.class_data_root = None
-
- self.image_transforms_resize_and_crop = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- ]
- )
-
- self.image_transforms = transforms.Compose(
- [
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
- instance_image = self.image_transforms_resize_and_crop(instance_image)
-
- example["PIL_images"] = instance_image
- example["instance_images"] = self.image_transforms(instance_image)
-
- example["instance_prompt_ids"] = self.tokenizer(
- self.instance_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- if self.class_data_root:
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- class_image = self.image_transforms_resize_and_crop(class_image)
- example["class_images"] = self.image_transforms(class_image)
- example["class_PIL_images"] = class_image
- example["class_prompt_ids"] = self.tokenizer(
- self.class_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- return example
-
-
-class PromptDataset(Dataset):
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
-
-
-def main():
- args = parse_args()
- logging_dir = Path(args.output_dir, args.logging_dir)
-
- project_config = ProjectConfiguration(
- total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
- )
-
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with="tensorboard",
- project_config=project_config,
- )
-
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
- raise ValueError(
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
- )
-
- if args.seed is not None:
- set_seed(args.seed)
-
- if args.with_prior_preservation:
- class_images_dir = Path(args.class_data_dir)
- if not class_images_dir.exists():
- class_images_dir.mkdir(parents=True)
- cur_class_images = len(list(class_images_dir.iterdir()))
-
- if cur_class_images < args.num_class_images:
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
- pipeline = StableDiffusionInpaintPipeline.from_pretrained(
- args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None
- )
- pipeline.set_progress_bar_config(disable=True)
-
- num_new_images = args.num_class_images - cur_class_images
- logger.info(f"Number of class images to sample: {num_new_images}.")
-
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
- sample_dataloader = torch.utils.data.DataLoader(
- sample_dataset, batch_size=args.sample_batch_size, num_workers=1
- )
-
- sample_dataloader = accelerator.prepare(sample_dataloader)
- pipeline.to(accelerator.device)
- transform_to_pil = transforms.ToPILImage()
- for example in tqdm(
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
- ):
- bsz = len(example["prompt"])
- fake_images = torch.rand((3, args.resolution, args.resolution))
- transform_to_pil = transforms.ToPILImage()
- fake_pil_images = transform_to_pil(fake_images)
-
- fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True)
-
- images = pipeline(prompt=example["prompt"], mask_image=fake_mask, image=fake_pil_images).images
-
- for i, image in enumerate(images):
- hash_image = hashlib.sha1(image.tobytes()).hexdigest()
- image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
- image.save(image_filename)
-
- del pipeline
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- if args.push_to_hub:
- repo_id = create_repo(
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
- ).repo_id
-
- # Load the tokenizer
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
- # Load models and create wrapper for stable diffusion
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
- unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
-
- vae.requires_grad_(False)
- if not args.train_text_encoder:
- text_encoder.requires_grad_(False)
-
- if args.gradient_checkpointing:
- unet.enable_gradient_checkpointing()
- if args.train_text_encoder:
- text_encoder.gradient_checkpointing_enable()
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
- # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- params_to_optimize = (
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
- )
- optimizer = optimizer_class(
- params_to_optimize,
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
-
- train_dataset = DreamBoothDataset(
- instance_data_root=args.instance_data_dir,
- instance_prompt=args.instance_prompt,
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
- class_prompt=args.class_prompt,
- tokenizer=tokenizer,
- size=args.resolution,
- center_crop=args.center_crop,
- )
-
- def collate_fn(examples):
- input_ids = [example["instance_prompt_ids"] for example in examples]
- pixel_values = [example["instance_images"] for example in examples]
-
- # Concat class and instance examples for prior preservation.
- # We do this to avoid doing two forward passes.
- if args.with_prior_preservation:
- input_ids += [example["class_prompt_ids"] for example in examples]
- pixel_values += [example["class_images"] for example in examples]
- prior_pil = [example["class_PIL_images"] for example in examples]
-
- masks = []
- masked_images = []
- for example in examples:
- pil_image = example["PIL_images"]
- # generate a random mask
- mask = random_mask(pil_image.size, 1, False)
- # prepare mask and masked image
- mask, masked_image = prepare_mask_and_masked_image(pil_image, mask)
-
- masks.append(mask)
- masked_images.append(masked_image)
-
- if args.with_prior_preservation:
- for pil_image in prior_pil:
- # generate a random mask
- mask = random_mask(pil_image.size, 1, False)
- # prepare mask and masked image
- mask, masked_image = prepare_mask_and_masked_image(pil_image, mask)
-
- masks.append(mask)
- masked_images.append(masked_image)
-
- pixel_values = torch.stack(pixel_values)
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
- masks = torch.stack(masks)
- masked_images = torch.stack(masked_images)
- batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images}
- return batch
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
- num_training_steps=args.max_train_steps * accelerator.num_processes,
- )
-
- if args.train_text_encoder:
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
- )
- else:
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, optimizer, train_dataloader, lr_scheduler
- )
- accelerator.register_for_checkpointing(lr_scheduler)
-
- weight_dtype = torch.float32
- if args.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif args.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
- # Move text_encoder and vae to gpu.
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
- # as these models are only used for inference, keeping weights in full precision is not required.
- vae.to(accelerator.device, dtype=weight_dtype)
- if not args.train_text_encoder:
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
- # The trackers initialize automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("dreambooth", config=vars(args))
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- global_step = 0
- first_epoch = 0
-
- if args.resume_from_checkpoint:
- if args.resume_from_checkpoint != "latest":
- path = os.path.basename(args.resume_from_checkpoint)
- else:
- # Get the most recent checkpoint
- dirs = os.listdir(args.output_dir)
- dirs = [d for d in dirs if d.startswith("checkpoint")]
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
- path = dirs[-1] if len(dirs) > 0 else None
-
- if path is None:
- accelerator.print(
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
- )
- args.resume_from_checkpoint = None
- else:
- accelerator.print(f"Resuming from checkpoint {path}")
- accelerator.load_state(os.path.join(args.output_dir, path))
- global_step = int(path.split("-")[1])
-
- resume_global_step = global_step * args.gradient_accumulation_steps
- first_epoch = global_step // num_update_steps_per_epoch
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
-
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
- progress_bar.set_description("Steps")
-
- for epoch in range(first_epoch, args.num_train_epochs):
- unet.train()
- for step, batch in enumerate(train_dataloader):
- # Skip steps until we reach the resumed step
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
- if step % args.gradient_accumulation_steps == 0:
- progress_bar.update(1)
- continue
-
- with accelerator.accumulate(unet):
- # Convert images to latent space
-
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
- latents = latents * vae.config.scaling_factor
-
- # Convert masked images to latent space
- masked_latents = vae.encode(
- batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype)
- ).latent_dist.sample()
- masked_latents = masked_latents * vae.config.scaling_factor
-
- masks = batch["masks"]
- # resize the mask to latents shape as we concatenate the mask to the latents
- mask = torch.stack(
- [
- torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8))
- for mask in masks
- ]
- )
- mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8)
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # concatenate the noised latents with the mask and the masked latents
- latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1)
-
- # Get the text embedding for conditioning
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- if args.with_prior_preservation:
- # Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
- noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
- target, target_prior = torch.chunk(target, 2, dim=0)
-
- # Compute instance loss
- loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
-
- # Compute prior loss
- prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean")
-
- # Add the prior loss to the instance loss.
- loss = loss + args.prior_loss_weight * prior_loss
- else:
- loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = (
- itertools.chain(unet.parameters(), text_encoder.parameters())
- if args.train_text_encoder
- else unet.parameters()
- )
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- if global_step % args.checkpointing_steps == 0:
- if accelerator.is_main_process:
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
- accelerator.save_state(save_path)
- logger.info(f"Saved state to {save_path}")
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- accelerator.wait_for_everyone()
-
- # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(args.output_dir)
-
- if args.push_to_hub:
- upload_folder(
- repo_id=repo_id,
- folder_path=args.output_dir,
- commit_message="End of training",
- ignore_patterns=["step_*", "epoch_*"],
- )
-
- accelerator.end_training()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_layers_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_layers_utils.py
deleted file mode 100644
index 40627cc93caaea882ea5759a6bb3d7c9b9fb4ef3..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_layers_utils.py
+++ /dev/null
@@ -1,530 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import unittest
-
-import numpy as np
-import torch
-from torch import nn
-
-from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU
-from diffusers.models.embeddings import get_timestep_embedding
-from diffusers.models.lora import LoRACompatibleLinear
-from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
-from diffusers.models.transformer_2d import Transformer2DModel
-from diffusers.utils import torch_device
-
-
-class EmbeddingsTests(unittest.TestCase):
- def test_timestep_embeddings(self):
- embedding_dim = 256
- timesteps = torch.arange(16)
-
- t1 = get_timestep_embedding(timesteps, embedding_dim)
-
- # first vector should always be composed only of 0's and 1's
- assert (t1[0, : embedding_dim // 2] - 0).abs().sum() < 1e-5
- assert (t1[0, embedding_dim // 2 :] - 1).abs().sum() < 1e-5
-
- # last element of each vector should be one
- assert (t1[:, -1] - 1).abs().sum() < 1e-5
-
- # For large embeddings (e.g. 128) the frequency of every vector is higher
- # than the previous one which means that the gradients of later vectors are
- # ALWAYS higher than the previous ones
- grad_mean = np.abs(np.gradient(t1, axis=-1)).mean(axis=1)
-
- prev_grad = 0.0
- for grad in grad_mean:
- assert grad > prev_grad
- prev_grad = grad
-
- def test_timestep_defaults(self):
- embedding_dim = 16
- timesteps = torch.arange(10)
-
- t1 = get_timestep_embedding(timesteps, embedding_dim)
- t2 = get_timestep_embedding(
- timesteps, embedding_dim, flip_sin_to_cos=False, downscale_freq_shift=1, max_period=10_000
- )
-
- assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3)
-
- def test_timestep_flip_sin_cos(self):
- embedding_dim = 16
- timesteps = torch.arange(10)
-
- t1 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=True)
- t1 = torch.cat([t1[:, embedding_dim // 2 :], t1[:, : embedding_dim // 2]], dim=-1)
-
- t2 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=False)
-
- assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3)
-
- def test_timestep_downscale_freq_shift(self):
- embedding_dim = 16
- timesteps = torch.arange(10)
-
- t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0)
- t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1)
-
- # get cosine half (vectors that are wrapped into cosine)
- cosine_half = (t1 - t2)[:, embedding_dim // 2 :]
-
- # cosine needs to be negative
- assert (np.abs((cosine_half <= 0).numpy()) - 1).sum() < 1e-5
-
- def test_sinoid_embeddings_hardcoded(self):
- embedding_dim = 64
- timesteps = torch.arange(128)
-
- # standard unet, score_vde
- t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1, flip_sin_to_cos=False)
- # glide, ldm
- t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0, flip_sin_to_cos=True)
- # grad-tts
- t3 = get_timestep_embedding(timesteps, embedding_dim, scale=1000)
-
- assert torch.allclose(
- t1[23:26, 47:50].flatten().cpu(),
- torch.tensor([0.9646, 0.9804, 0.9892, 0.9615, 0.9787, 0.9882, 0.9582, 0.9769, 0.9872]),
- 1e-3,
- )
- assert torch.allclose(
- t2[23:26, 47:50].flatten().cpu(),
- torch.tensor([0.3019, 0.2280, 0.1716, 0.3146, 0.2377, 0.1790, 0.3272, 0.2474, 0.1864]),
- 1e-3,
- )
- assert torch.allclose(
- t3[23:26, 47:50].flatten().cpu(),
- torch.tensor([-0.9801, -0.9464, -0.9349, -0.3952, 0.8887, -0.9709, 0.5299, -0.2853, -0.9927]),
- 1e-3,
- )
-
-
-class Upsample2DBlockTests(unittest.TestCase):
- def test_upsample_default(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 32, 32)
- upsample = Upsample2D(channels=32, use_conv=False)
- with torch.no_grad():
- upsampled = upsample(sample)
-
- assert upsampled.shape == (1, 32, 64, 64)
- output_slice = upsampled[0, -1, -3:, -3:]
- expected_slice = torch.tensor([-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254])
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_upsample_with_conv(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 32, 32)
- upsample = Upsample2D(channels=32, use_conv=True)
- with torch.no_grad():
- upsampled = upsample(sample)
-
- assert upsampled.shape == (1, 32, 64, 64)
- output_slice = upsampled[0, -1, -3:, -3:]
- expected_slice = torch.tensor([0.7145, 1.3773, 0.3492, 0.8448, 1.0839, -0.3341, 0.5956, 0.1250, -0.4841])
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_upsample_with_conv_out_dim(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 32, 32)
- upsample = Upsample2D(channels=32, use_conv=True, out_channels=64)
- with torch.no_grad():
- upsampled = upsample(sample)
-
- assert upsampled.shape == (1, 64, 64, 64)
- output_slice = upsampled[0, -1, -3:, -3:]
- expected_slice = torch.tensor([0.2703, 0.1656, -0.2538, -0.0553, -0.2984, 0.1044, 0.1155, 0.2579, 0.7755])
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_upsample_with_transpose(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 32, 32)
- upsample = Upsample2D(channels=32, use_conv=False, use_conv_transpose=True)
- with torch.no_grad():
- upsampled = upsample(sample)
-
- assert upsampled.shape == (1, 32, 64, 64)
- output_slice = upsampled[0, -1, -3:, -3:]
- expected_slice = torch.tensor([-0.3028, -0.1582, 0.0071, 0.0350, -0.4799, -0.1139, 0.1056, -0.1153, -0.1046])
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-
-class Downsample2DBlockTests(unittest.TestCase):
- def test_downsample_default(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64)
- downsample = Downsample2D(channels=32, use_conv=False)
- with torch.no_grad():
- downsampled = downsample(sample)
-
- assert downsampled.shape == (1, 32, 32, 32)
- output_slice = downsampled[0, -1, -3:, -3:]
- expected_slice = torch.tensor([-0.0513, -0.3889, 0.0640, 0.0836, -0.5460, -0.0341, -0.0169, -0.6967, 0.1179])
- max_diff = (output_slice.flatten() - expected_slice).abs().sum().item()
- assert max_diff <= 1e-3
- # assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1)
-
- def test_downsample_with_conv(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64)
- downsample = Downsample2D(channels=32, use_conv=True)
- with torch.no_grad():
- downsampled = downsample(sample)
-
- assert downsampled.shape == (1, 32, 32, 32)
- output_slice = downsampled[0, -1, -3:, -3:]
-
- expected_slice = torch.tensor(
- [0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913],
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_downsample_with_conv_pad1(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64)
- downsample = Downsample2D(channels=32, use_conv=True, padding=1)
- with torch.no_grad():
- downsampled = downsample(sample)
-
- assert downsampled.shape == (1, 32, 32, 32)
- output_slice = downsampled[0, -1, -3:, -3:]
- expected_slice = torch.tensor([0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913])
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_downsample_with_conv_out_dim(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64)
- downsample = Downsample2D(channels=32, use_conv=True, out_channels=16)
- with torch.no_grad():
- downsampled = downsample(sample)
-
- assert downsampled.shape == (1, 16, 32, 32)
- output_slice = downsampled[0, -1, -3:, -3:]
- expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522])
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-
-class ResnetBlock2DTests(unittest.TestCase):
- def test_resnet_default(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- temb = torch.randn(1, 128).to(torch_device)
- resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128).to(torch_device)
- with torch.no_grad():
- output_tensor = resnet_block(sample, temb)
-
- assert output_tensor.shape == (1, 32, 64, 64)
- output_slice = output_tensor[0, -1, -3:, -3:]
- expected_slice = torch.tensor(
- [-1.9010, -0.2974, -0.8245, -1.3533, 0.8742, -0.9645, -2.0584, 1.3387, -0.4746], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_restnet_with_use_in_shortcut(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- temb = torch.randn(1, 128).to(torch_device)
- resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, use_in_shortcut=True).to(torch_device)
- with torch.no_grad():
- output_tensor = resnet_block(sample, temb)
-
- assert output_tensor.shape == (1, 32, 64, 64)
- output_slice = output_tensor[0, -1, -3:, -3:]
- expected_slice = torch.tensor(
- [0.2226, -1.0791, -0.1629, 0.3659, -0.2889, -1.2376, 0.0582, 0.9206, 0.0044], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_resnet_up(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- temb = torch.randn(1, 128).to(torch_device)
- resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, up=True).to(torch_device)
- with torch.no_grad():
- output_tensor = resnet_block(sample, temb)
-
- assert output_tensor.shape == (1, 32, 128, 128)
- output_slice = output_tensor[0, -1, -3:, -3:]
- expected_slice = torch.tensor(
- [1.2130, -0.8753, -0.9027, 1.5783, -0.5362, -0.5001, 1.0726, -0.7732, -0.4182], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_resnet_down(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- temb = torch.randn(1, 128).to(torch_device)
- resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, down=True).to(torch_device)
- with torch.no_grad():
- output_tensor = resnet_block(sample, temb)
-
- assert output_tensor.shape == (1, 32, 32, 32)
- output_slice = output_tensor[0, -1, -3:, -3:]
- expected_slice = torch.tensor(
- [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_restnet_with_kernel_fir(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- temb = torch.randn(1, 128).to(torch_device)
- resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="fir", down=True).to(torch_device)
- with torch.no_grad():
- output_tensor = resnet_block(sample, temb)
-
- assert output_tensor.shape == (1, 32, 32, 32)
- output_slice = output_tensor[0, -1, -3:, -3:]
- expected_slice = torch.tensor(
- [-0.0934, -0.5729, 0.0909, -0.2710, -0.5044, 0.0243, -0.0665, -0.5267, -0.3136], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_restnet_with_kernel_sde_vp(self):
- torch.manual_seed(0)
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- temb = torch.randn(1, 128).to(torch_device)
- resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="sde_vp", down=True).to(torch_device)
- with torch.no_grad():
- output_tensor = resnet_block(sample, temb)
-
- assert output_tensor.shape == (1, 32, 32, 32)
- output_slice = output_tensor[0, -1, -3:, -3:]
- expected_slice = torch.tensor(
- [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-
-class Transformer2DModelTests(unittest.TestCase):
- def test_spatial_transformer_default(self):
- torch.manual_seed(0)
- if torch.cuda.is_available():
- torch.cuda.manual_seed_all(0)
-
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- spatial_transformer_block = Transformer2DModel(
- in_channels=32,
- num_attention_heads=1,
- attention_head_dim=32,
- dropout=0.0,
- cross_attention_dim=None,
- ).to(torch_device)
- with torch.no_grad():
- attention_scores = spatial_transformer_block(sample).sample
-
- assert attention_scores.shape == (1, 32, 64, 64)
- output_slice = attention_scores[0, -1, -3:, -3:]
-
- expected_slice = torch.tensor(
- [-1.9455, -0.0066, -1.3933, -1.5878, 0.5325, -0.6486, -1.8648, 0.7515, -0.9689], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_spatial_transformer_cross_attention_dim(self):
- torch.manual_seed(0)
- if torch.cuda.is_available():
- torch.cuda.manual_seed_all(0)
-
- sample = torch.randn(1, 64, 64, 64).to(torch_device)
- spatial_transformer_block = Transformer2DModel(
- in_channels=64,
- num_attention_heads=2,
- attention_head_dim=32,
- dropout=0.0,
- cross_attention_dim=64,
- ).to(torch_device)
- with torch.no_grad():
- context = torch.randn(1, 4, 64).to(torch_device)
- attention_scores = spatial_transformer_block(sample, context).sample
-
- assert attention_scores.shape == (1, 64, 64, 64)
- output_slice = attention_scores[0, -1, -3:, -3:]
- expected_slice = torch.tensor(
- [0.0143, -0.6909, -2.1547, -1.8893, 1.4097, 0.1359, -0.2521, -1.3359, 0.2598], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_spatial_transformer_timestep(self):
- torch.manual_seed(0)
- if torch.cuda.is_available():
- torch.cuda.manual_seed_all(0)
-
- num_embeds_ada_norm = 5
-
- sample = torch.randn(1, 64, 64, 64).to(torch_device)
- spatial_transformer_block = Transformer2DModel(
- in_channels=64,
- num_attention_heads=2,
- attention_head_dim=32,
- dropout=0.0,
- cross_attention_dim=64,
- num_embeds_ada_norm=num_embeds_ada_norm,
- ).to(torch_device)
- with torch.no_grad():
- timestep_1 = torch.tensor(1, dtype=torch.long).to(torch_device)
- timestep_2 = torch.tensor(2, dtype=torch.long).to(torch_device)
- attention_scores_1 = spatial_transformer_block(sample, timestep=timestep_1).sample
- attention_scores_2 = spatial_transformer_block(sample, timestep=timestep_2).sample
-
- assert attention_scores_1.shape == (1, 64, 64, 64)
- assert attention_scores_2.shape == (1, 64, 64, 64)
-
- output_slice_1 = attention_scores_1[0, -1, -3:, -3:]
- output_slice_2 = attention_scores_2[0, -1, -3:, -3:]
-
- expected_slice = torch.tensor(
- [-0.3923, -1.0923, -1.7144, -1.5570, 1.4154, 0.1738, -0.1157, -1.2998, -0.1703], device=torch_device
- )
- expected_slice_2 = torch.tensor(
- [-0.4311, -1.1376, -1.7732, -1.5997, 1.3450, 0.0964, -0.1569, -1.3590, -0.2348], device=torch_device
- )
-
- assert torch.allclose(output_slice_1.flatten(), expected_slice, atol=1e-3)
- assert torch.allclose(output_slice_2.flatten(), expected_slice_2, atol=1e-3)
-
- def test_spatial_transformer_dropout(self):
- torch.manual_seed(0)
- if torch.cuda.is_available():
- torch.cuda.manual_seed_all(0)
-
- sample = torch.randn(1, 32, 64, 64).to(torch_device)
- spatial_transformer_block = (
- Transformer2DModel(
- in_channels=32,
- num_attention_heads=2,
- attention_head_dim=16,
- dropout=0.3,
- cross_attention_dim=None,
- )
- .to(torch_device)
- .eval()
- )
- with torch.no_grad():
- attention_scores = spatial_transformer_block(sample).sample
-
- assert attention_scores.shape == (1, 32, 64, 64)
- output_slice = attention_scores[0, -1, -3:, -3:]
-
- expected_slice = torch.tensor(
- [-1.9380, -0.0083, -1.3771, -1.5819, 0.5209, -0.6441, -1.8545, 0.7563, -0.9615], device=torch_device
- )
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- @unittest.skipIf(torch_device == "mps", "MPS does not support float64")
- def test_spatial_transformer_discrete(self):
- torch.manual_seed(0)
- if torch.cuda.is_available():
- torch.cuda.manual_seed_all(0)
-
- num_embed = 5
-
- sample = torch.randint(0, num_embed, (1, 32)).to(torch_device)
- spatial_transformer_block = (
- Transformer2DModel(
- num_attention_heads=1,
- attention_head_dim=32,
- num_vector_embeds=num_embed,
- sample_size=16,
- )
- .to(torch_device)
- .eval()
- )
-
- with torch.no_grad():
- attention_scores = spatial_transformer_block(sample).sample
-
- assert attention_scores.shape == (1, num_embed - 1, 32)
-
- output_slice = attention_scores[0, -2:, -3:]
-
- expected_slice = torch.tensor([-1.7648, -1.0241, -2.0985, -1.8035, -1.6404, -1.2098], device=torch_device)
- assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
- def test_spatial_transformer_default_norm_layers(self):
- spatial_transformer_block = Transformer2DModel(num_attention_heads=1, attention_head_dim=32, in_channels=32)
-
- assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == nn.LayerNorm
- assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm
-
- def test_spatial_transformer_ada_norm_layers(self):
- spatial_transformer_block = Transformer2DModel(
- num_attention_heads=1,
- attention_head_dim=32,
- in_channels=32,
- num_embeds_ada_norm=5,
- )
-
- assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == AdaLayerNorm
- assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm
-
- def test_spatial_transformer_default_ff_layers(self):
- spatial_transformer_block = Transformer2DModel(
- num_attention_heads=1,
- attention_head_dim=32,
- in_channels=32,
- )
-
- assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == GEGLU
- assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout
- assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear
-
- dim = 32
- inner_dim = 128
-
- # First dimension change
- assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim
- # NOTE: inner_dim * 2 because GEGLU
- assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim * 2
-
- # Second dimension change
- assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim
- assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim
-
- def test_spatial_transformer_geglu_approx_ff_layers(self):
- spatial_transformer_block = Transformer2DModel(
- num_attention_heads=1,
- attention_head_dim=32,
- in_channels=32,
- activation_fn="geglu-approximate",
- )
-
- assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == ApproximateGELU
- assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout
- assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear
-
- dim = 32
- inner_dim = 128
-
- # First dimension change
- assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim
- assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim
-
- # Second dimension change
- assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim
- assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim
-
- def test_spatial_transformer_attention_bias(self):
- spatial_transformer_block = Transformer2DModel(
- num_attention_heads=1, attention_head_dim=32, in_channels=32, attention_bias=True
- )
-
- assert spatial_transformer_block.transformer_blocks[0].attn1.to_q.bias is not None
- assert spatial_transformer_block.transformer_blocks[0].attn1.to_k.bias is not None
- assert spatial_transformer_block.transformer_blocks[0].attn1.to_v.bias is not None
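The deleted tests above exercise diffusers' `Transformer2DModel` in its continuous, ada-norm, dropout, and discrete configurations. Purely as a hedged illustration of the constructor arguments those tests rely on, here is a minimal sketch; it assumes a diffusers version whose `Transformer2DModel` still accepts these keyword arguments and the same positional call pattern as the tests above, which may not hold for current releases:

```python
import torch
from diffusers import Transformer2DModel  # assumes a diffusers version matching the tests above

torch.manual_seed(0)
# (batch, channels, height, width), matching in_channels below
sample = torch.randn(1, 64, 64, 64)

block = Transformer2DModel(
    in_channels=64,
    num_attention_heads=2,
    attention_head_dim=32,
    dropout=0.0,
    cross_attention_dim=64,
).eval()

with torch.no_grad():
    context = torch.randn(1, 4, 64)      # cross-attention conditioning
    out = block(sample, context).sample  # same call pattern as the deleted test

print(out.shape)  # torch.Size([1, 64, 64, 64])
```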
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/README.md b/spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/README.md
deleted file mode 100644
index f730242b7768390c28ea984718cae9aa56811bbc..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# PASCAL VOC Dataset
-
-[DATASET]
-
-```
-@Article{Everingham10,
- author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.",
- title = "The Pascal Visual Object Classes (VOC) Challenge",
- journal = "International Journal of Computer Vision",
- volume = "88",
- year = "2010",
- number = "2",
- month = jun,
- pages = "303--338",
-}
-```
-
-## Results and Models
-
-| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
-|:------------:|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
-| Faster R-CNN | R-50 | pytorch | 1x | 2.6 | - | 79.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/20200623_015208.log.json) |
-| Retinanet | R-50 | pytorch | 1x | 2.1 | - | 77.3 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200617-47cbdd0e.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200616_014642.log.json) |
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/schedules/schedule_80k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/schedules/schedule_80k.py
deleted file mode 100644
index c190cee6bdc7922b688ea75dc8f152fa15c24617..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/schedules/schedule_80k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=80000)
-checkpoint_config = dict(by_epoch=False, interval=8000)
-evaluation = dict(interval=8000, metric='mIoU')
diff --git a/spaces/Ank0X0/text-to-3d-shap-e-webui/README.md b/spaces/Ank0X0/text-to-3d-shap-e-webui/README.md
deleted file mode 100644
index 8e6e958dddc403fe3879a675b920c3f788a1eab0..0000000000000000000000000000000000000000
--- a/spaces/Ank0X0/text-to-3d-shap-e-webui/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Text To 3d Shap E Webui
-emoji: 🏢
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.32.0
-app_file: app.py
-pinned: false
-license: cc0-1.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Apex-X/ROOPOK/roop/typing.py b/spaces/Apex-X/ROOPOK/roop/typing.py
deleted file mode 100644
index 1cff7440616e20bfe7b8bc287f86d11bf1b0f083..0000000000000000000000000000000000000000
--- a/spaces/Apex-X/ROOPOK/roop/typing.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from typing import Any
-
-from insightface.app.common import Face
-import numpy
-
-Face = Face
-Frame = numpy.ndarray[Any, Any]
diff --git a/spaces/Armored-Atom/Image-To-Motion/README.md b/spaces/Armored-Atom/Image-To-Motion/README.md
deleted file mode 100644
index 9af54dca9f1956d33877bf7df09b34c2d6ddeeaf..0000000000000000000000000000000000000000
--- a/spaces/Armored-Atom/Image-To-Motion/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Image Animation Using Thin Plate Spline Motion Model
-emoji: 👁
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.0.19
-app_file: app.py
-pinned: false
-duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Asahi402/White-box-Cartoonization/wbc/guided_filter.py b/spaces/Asahi402/White-box-Cartoonization/wbc/guided_filter.py
deleted file mode 100644
index fd019d145efc7f308cd96de90f4e7b648f6820b4..0000000000000000000000000000000000000000
--- a/spaces/Asahi402/White-box-Cartoonization/wbc/guided_filter.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import tensorflow as tf
-import numpy as np
-
-
-
-
-def tf_box_filter(x, r):
- k_size = int(2*r+1)
- ch = x.get_shape().as_list()[-1]
- weight = 1/(k_size**2)
- box_kernel = weight*np.ones((k_size, k_size, ch, 1))
- box_kernel = np.array(box_kernel).astype(np.float32)
- output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME')
- return output
-
-
-
-def guided_filter(x, y, r, eps=1e-2):
-
- x_shape = tf.shape(x)
- #y_shape = tf.shape(y)
-
- N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r)
-
- mean_x = tf_box_filter(x, r) / N
- mean_y = tf_box_filter(y, r) / N
- cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y
- var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x
-
- A = cov_xy / (var_x + eps)
- b = mean_y - A * mean_x
-
- mean_A = tf_box_filter(A, r) / N
- mean_b = tf_box_filter(b, r) / N
-
- output = mean_A * x + mean_b
-
- return output
-
-
-
-def fast_guided_filter(lr_x, lr_y, hr_x, r=1, eps=1e-8):
-
- #assert lr_x.shape.ndims == 4 and lr_y.shape.ndims == 4 and hr_x.shape.ndims == 4
-
- lr_x_shape = tf.shape(lr_x)
- #lr_y_shape = tf.shape(lr_y)
- hr_x_shape = tf.shape(hr_x)
-
- N = tf_box_filter(tf.ones((1, lr_x_shape[1], lr_x_shape[2], 1), dtype=lr_x.dtype), r)
-
- mean_x = tf_box_filter(lr_x, r) / N
- mean_y = tf_box_filter(lr_y, r) / N
- cov_xy = tf_box_filter(lr_x * lr_y, r) / N - mean_x * mean_y
- var_x = tf_box_filter(lr_x * lr_x, r) / N - mean_x * mean_x
-
- A = cov_xy / (var_x + eps)
- b = mean_y - A * mean_x
-
- mean_A = tf.image.resize_images(A, hr_x_shape[1: 3])
- mean_b = tf.image.resize_images(b, hr_x_shape[1: 3])
-
- output = mean_A * hr_x + mean_b
-
- return output
-
-
-if __name__ == '__main__':
- import cv2
- from tqdm import tqdm
-
- input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
- #input_superpixel = tf.placeholder(tf.float32, [16, 256, 256, 3])
- output = guided_filter(input_photo, input_photo, 5, eps=1)
- image = cv2.imread('output_figure1/cartoon2.jpg')
- image = image/127.5 - 1
- image = np.expand_dims(image, axis=0)
-
- config = tf.ConfigProto()
- config.gpu_options.allow_growth = True
- sess = tf.Session(config=config)
- sess.run(tf.global_variables_initializer())
-
- out = sess.run(output, feed_dict={input_photo: image})
- out = (np.squeeze(out)+1)*127.5
- out = np.clip(out, 0, 255).astype(np.uint8)
- cv2.imwrite('output_figure1/cartoon2_filter.jpg', out)
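The guided_filter above is a TensorFlow 1.x implementation of the classic guided image filter: box-filtered means yield per-pixel linear coefficients A = cov_xy / (var_x + eps) and b = mean_y - A * mean_x, which are smoothed again and applied to the guide image. As a minimal sketch of the same arithmetic in NumPy (using scipy.ndimage.uniform_filter as a stand-in for the box filter, which is an assumption and not part of the original code):

```python
import numpy as np
from scipy.ndimage import uniform_filter  # stand-in for tf_box_filter

def guided_filter_np(x, y, r, eps=1e-2):
    """Single-channel guided filter: x is the guide image, y the input, r the radius."""
    size = 2 * r + 1
    mean_x = uniform_filter(x, size)
    mean_y = uniform_filter(y, size)
    cov_xy = uniform_filter(x * y, size) - mean_x * mean_y
    var_x = uniform_filter(x * x, size) - mean_x * mean_x

    A = cov_xy / (var_x + eps)        # per-pixel linear coefficient
    b = mean_y - A * mean_x           # per-pixel offset

    mean_A = uniform_filter(A, size)  # smooth the coefficients before applying them
    mean_b = uniform_filter(b, size)
    return mean_A * x + mean_b

# Self-guided smoothing, analogous to guided_filter(input_photo, input_photo, 5, eps=1) above
img = np.random.rand(64, 64).astype(np.float32)
smoothed = guided_filter_np(img, img, r=5, eps=1.0)
print(smoothed.shape)  # (64, 64)
```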
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py
deleted file mode 100644
index c5ca2d85d5176c65a2e90000b0d67390573120a6..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/constructors.py
+++ /dev/null
@@ -1,506 +0,0 @@
-"""Backing implementation for InstallRequirement's various constructors
-
-The idea here is that these formed a major chunk of InstallRequirement's size,
-so moving them and the support code dedicated to them outside of that class
-helps make the rest of the code easier to understand.
-
-These are meant to be used elsewhere within pip to create instances of
-InstallRequirement.
-"""
-
-import logging
-import os
-import re
-from typing import Dict, List, Optional, Set, Tuple, Union
-
-from pip._vendor.packaging.markers import Marker
-from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
-from pip._vendor.packaging.specifiers import Specifier
-
-from pip._internal.exceptions import InstallationError
-from pip._internal.models.index import PyPI, TestPyPI
-from pip._internal.models.link import Link
-from pip._internal.models.wheel import Wheel
-from pip._internal.req.req_file import ParsedRequirement
-from pip._internal.req.req_install import InstallRequirement
-from pip._internal.utils.filetypes import is_archive_file
-from pip._internal.utils.misc import is_installable_dir
-from pip._internal.utils.packaging import get_requirement
-from pip._internal.utils.urls import path_to_url
-from pip._internal.vcs import is_url, vcs
-
-__all__ = [
- "install_req_from_editable",
- "install_req_from_line",
- "parse_editable",
-]
-
-logger = logging.getLogger(__name__)
-operators = Specifier._operators.keys()
-
-
-def _strip_extras(path: str) -> Tuple[str, Optional[str]]:
- m = re.match(r"^(.+)(\[[^\]]+\])$", path)
- extras = None
- if m:
- path_no_extras = m.group(1)
- extras = m.group(2)
- else:
- path_no_extras = path
-
- return path_no_extras, extras
-
-
-def convert_extras(extras: Optional[str]) -> Set[str]:
- if not extras:
- return set()
- return get_requirement("placeholder" + extras.lower()).extras
-
-
-def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]:
- """Parses an editable requirement into:
- - a requirement name
-    - a URL
- - extras
- - editable options
- Accepted requirements:
- svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
- .[some_extra]
- """
-
- url = editable_req
-
- # If a file path is specified with extras, strip off the extras.
- url_no_extras, extras = _strip_extras(url)
-
- if os.path.isdir(url_no_extras):
- # Treating it as code that has already been checked out
- url_no_extras = path_to_url(url_no_extras)
-
- if url_no_extras.lower().startswith("file:"):
- package_name = Link(url_no_extras).egg_fragment
- if extras:
- return (
- package_name,
- url_no_extras,
- get_requirement("placeholder" + extras.lower()).extras,
- )
- else:
- return package_name, url_no_extras, set()
-
- for version_control in vcs:
- if url.lower().startswith(f"{version_control}:"):
- url = f"{version_control}+{url}"
- break
-
- link = Link(url)
-
- if not link.is_vcs:
- backends = ", ".join(vcs.all_schemes)
- raise InstallationError(
- f"{editable_req} is not a valid editable requirement. "
- f"It should either be a path to a local project or a VCS URL "
- f"(beginning with {backends})."
- )
-
- package_name = link.egg_fragment
- if not package_name:
- raise InstallationError(
- "Could not detect requirement name for '{}', please specify one "
- "with #egg=your_package_name".format(editable_req)
- )
- return package_name, url, set()
-
-
-def check_first_requirement_in_file(filename: str) -> None:
- """Check if file is parsable as a requirements file.
-
- This is heavily based on ``pkg_resources.parse_requirements``, but
- simplified to just check the first meaningful line.
-
- :raises InvalidRequirement: If the first meaningful line cannot be parsed
-        as a requirement.
- """
- with open(filename, encoding="utf-8", errors="ignore") as f:
- # Create a steppable iterator, so we can handle \-continuations.
- lines = (
- line
- for line in (line.strip() for line in f)
- if line and not line.startswith("#") # Skip blank lines/comments.
- )
-
- for line in lines:
- # Drop comments -- a hash without a space may be in a URL.
- if " #" in line:
- line = line[: line.find(" #")]
- # If there is a line continuation, drop it, and append the next line.
- if line.endswith("\\"):
- line = line[:-2].strip() + next(lines, "")
- Requirement(line)
- return
-
-
-def deduce_helpful_msg(req: str) -> str:
- """Returns helpful msg in case requirements file does not exist,
- or cannot be parsed.
-
-    :param req: Requirements file path
- """
- if not os.path.exists(req):
- return f" File '{req}' does not exist."
- msg = " The path does exist. "
- # Try to parse and check if it is a requirements file.
- try:
- check_first_requirement_in_file(req)
- except InvalidRequirement:
- logger.debug("Cannot parse '%s' as requirements file", req)
- else:
- msg += (
- f"The argument you provided "
- f"({req}) appears to be a"
- f" requirements file. If that is the"
- f" case, use the '-r' flag to install"
- f" the packages specified within it."
- )
- return msg
-
-
-class RequirementParts:
- def __init__(
- self,
- requirement: Optional[Requirement],
- link: Optional[Link],
- markers: Optional[Marker],
- extras: Set[str],
- ):
- self.requirement = requirement
- self.link = link
- self.markers = markers
- self.extras = extras
-
-
-def parse_req_from_editable(editable_req: str) -> RequirementParts:
- name, url, extras_override = parse_editable(editable_req)
-
- if name is not None:
- try:
- req: Optional[Requirement] = Requirement(name)
- except InvalidRequirement:
- raise InstallationError(f"Invalid requirement: '{name}'")
- else:
- req = None
-
- link = Link(url)
-
- return RequirementParts(req, link, None, extras_override)
-
-
-# ---- The actual constructors follow ----
-
-
-def install_req_from_editable(
- editable_req: str,
- comes_from: Optional[Union[InstallRequirement, str]] = None,
- *,
- use_pep517: Optional[bool] = None,
- isolated: bool = False,
- global_options: Optional[List[str]] = None,
- hash_options: Optional[Dict[str, List[str]]] = None,
- constraint: bool = False,
- user_supplied: bool = False,
- permit_editable_wheels: bool = False,
- config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-) -> InstallRequirement:
- parts = parse_req_from_editable(editable_req)
-
- return InstallRequirement(
- parts.requirement,
- comes_from=comes_from,
- user_supplied=user_supplied,
- editable=True,
- permit_editable_wheels=permit_editable_wheels,
- link=parts.link,
- constraint=constraint,
- use_pep517=use_pep517,
- isolated=isolated,
- global_options=global_options,
- hash_options=hash_options,
- config_settings=config_settings,
- extras=parts.extras,
- )
-
-
-def _looks_like_path(name: str) -> bool:
- """Checks whether the string "looks like" a path on the filesystem.
-
-    This does not check whether the target actually exists; it only judges
-    from the appearance.
-
- Returns true if any of the following conditions is true:
- * a path separator is found (either os.path.sep or os.path.altsep);
- * a dot is found (which represents the current directory).
- """
- if os.path.sep in name:
- return True
- if os.path.altsep is not None and os.path.altsep in name:
- return True
- if name.startswith("."):
- return True
- return False
-
-
-def _get_url_from_path(path: str, name: str) -> Optional[str]:
- """
-    First, check whether the provided path is an installable directory. If it
-    is, return its URL.
-
-    Otherwise, if the path is not an archive file (such as a .whl), return
-    None. If the path is an existing file, return its URL. If the name
-    contains an '@' and the part before it does not look like a path, return
-    None so the string can be treated as a PEP 440 URL requirement instead.
- """
- if _looks_like_path(name) and os.path.isdir(path):
- if is_installable_dir(path):
- return path_to_url(path)
- # TODO: The is_installable_dir test here might not be necessary
- # now that it is done in load_pyproject_toml too.
- raise InstallationError(
- f"Directory {name!r} is not installable. Neither 'setup.py' "
- "nor 'pyproject.toml' found."
- )
- if not is_archive_file(path):
- return None
- if os.path.isfile(path):
- return path_to_url(path)
- urlreq_parts = name.split("@", 1)
- if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
- # If the path contains '@' and the part before it does not look
- # like a path, try to treat it as a PEP 440 URL req instead.
- return None
- logger.warning(
- "Requirement %r looks like a filename, but the file does not exist",
- name,
- )
- return path_to_url(path)
-
-
-def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts:
- if is_url(name):
- marker_sep = "; "
- else:
- marker_sep = ";"
- if marker_sep in name:
- name, markers_as_string = name.split(marker_sep, 1)
- markers_as_string = markers_as_string.strip()
- if not markers_as_string:
- markers = None
- else:
- markers = Marker(markers_as_string)
- else:
- markers = None
- name = name.strip()
- req_as_string = None
- path = os.path.normpath(os.path.abspath(name))
- link = None
- extras_as_string = None
-
- if is_url(name):
- link = Link(name)
- else:
- p, extras_as_string = _strip_extras(path)
- url = _get_url_from_path(p, name)
- if url is not None:
- link = Link(url)
-
- # it's a local file, dir, or url
- if link:
- # Handle relative file URLs
- if link.scheme == "file" and re.search(r"\.\./", link.url):
- link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path))))
- # wheel file
- if link.is_wheel:
- wheel = Wheel(link.filename) # can raise InvalidWheelFilename
- req_as_string = f"{wheel.name}=={wheel.version}"
- else:
- # set the req to the egg fragment. when it's not there, this
- # will become an 'unnamed' requirement
- req_as_string = link.egg_fragment
-
- # a requirement specifier
- else:
- req_as_string = name
-
- extras = convert_extras(extras_as_string)
-
- def with_source(text: str) -> str:
- if not line_source:
- return text
- return f"{text} (from {line_source})"
-
- def _parse_req_string(req_as_string: str) -> Requirement:
- try:
- req = get_requirement(req_as_string)
- except InvalidRequirement:
- if os.path.sep in req_as_string:
- add_msg = "It looks like a path."
- add_msg += deduce_helpful_msg(req_as_string)
- elif "=" in req_as_string and not any(
- op in req_as_string for op in operators
- ):
- add_msg = "= is not a valid operator. Did you mean == ?"
- else:
- add_msg = ""
- msg = with_source(f"Invalid requirement: {req_as_string!r}")
- if add_msg:
- msg += f"\nHint: {add_msg}"
- raise InstallationError(msg)
- else:
- # Deprecate extras after specifiers: "name>=1.0[extras]"
- # This currently works by accident because _strip_extras() parses
- # any extras in the end of the string and those are saved in
- # RequirementParts
- for spec in req.specifier:
- spec_str = str(spec)
- if spec_str.endswith("]"):
- msg = f"Extras after version '{spec_str}'."
- raise InstallationError(msg)
- return req
-
- if req_as_string is not None:
- req: Optional[Requirement] = _parse_req_string(req_as_string)
- else:
- req = None
-
- return RequirementParts(req, link, markers, extras)
-
-
-def install_req_from_line(
- name: str,
- comes_from: Optional[Union[str, InstallRequirement]] = None,
- *,
- use_pep517: Optional[bool] = None,
- isolated: bool = False,
- global_options: Optional[List[str]] = None,
- hash_options: Optional[Dict[str, List[str]]] = None,
- constraint: bool = False,
- line_source: Optional[str] = None,
- user_supplied: bool = False,
- config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-) -> InstallRequirement:
- """Creates an InstallRequirement from a name, which might be a
- requirement, directory containing 'setup.py', filename, or URL.
-
- :param line_source: An optional string describing where the line is from,
- for logging purposes in case of an error.
- """
- parts = parse_req_from_line(name, line_source)
-
- return InstallRequirement(
- parts.requirement,
- comes_from,
- link=parts.link,
- markers=parts.markers,
- use_pep517=use_pep517,
- isolated=isolated,
- global_options=global_options,
- hash_options=hash_options,
- config_settings=config_settings,
- constraint=constraint,
- extras=parts.extras,
- user_supplied=user_supplied,
- )
-
-
-def install_req_from_req_string(
- req_string: str,
- comes_from: Optional[InstallRequirement] = None,
- isolated: bool = False,
- use_pep517: Optional[bool] = None,
- user_supplied: bool = False,
-) -> InstallRequirement:
- try:
- req = get_requirement(req_string)
- except InvalidRequirement:
- raise InstallationError(f"Invalid requirement: '{req_string}'")
-
- domains_not_allowed = [
- PyPI.file_storage_domain,
- TestPyPI.file_storage_domain,
- ]
- if (
- req.url
- and comes_from
- and comes_from.link
- and comes_from.link.netloc in domains_not_allowed
- ):
- # Explicitly disallow pypi packages that depend on external urls
- raise InstallationError(
- "Packages installed from PyPI cannot depend on packages "
- "which are not also hosted on PyPI.\n"
- "{} depends on {} ".format(comes_from.name, req)
- )
-
- return InstallRequirement(
- req,
- comes_from,
- isolated=isolated,
- use_pep517=use_pep517,
- user_supplied=user_supplied,
- )
-
-
-def install_req_from_parsed_requirement(
- parsed_req: ParsedRequirement,
- isolated: bool = False,
- use_pep517: Optional[bool] = None,
- user_supplied: bool = False,
- config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
-) -> InstallRequirement:
- if parsed_req.is_editable:
- req = install_req_from_editable(
- parsed_req.requirement,
- comes_from=parsed_req.comes_from,
- use_pep517=use_pep517,
- constraint=parsed_req.constraint,
- isolated=isolated,
- user_supplied=user_supplied,
- config_settings=config_settings,
- )
-
- else:
- req = install_req_from_line(
- parsed_req.requirement,
- comes_from=parsed_req.comes_from,
- use_pep517=use_pep517,
- isolated=isolated,
- global_options=(
- parsed_req.options.get("global_options", [])
- if parsed_req.options
- else []
- ),
- hash_options=(
- parsed_req.options.get("hashes", {}) if parsed_req.options else {}
- ),
- constraint=parsed_req.constraint,
- line_source=parsed_req.line_source,
- user_supplied=user_supplied,
- config_settings=config_settings,
- )
- return req
-
-
-def install_req_from_link_and_ireq(
- link: Link, ireq: InstallRequirement
-) -> InstallRequirement:
- return InstallRequirement(
- req=ireq.req,
- comes_from=ireq.comes_from,
- editable=ireq.editable,
- link=link,
- markers=ireq.markers,
- use_pep517=ireq.use_pep517,
- isolated=ireq.isolated,
- global_options=ireq.global_options,
- hash_options=ireq.hash_options,
- config_settings=ireq.config_settings,
- user_supplied=ireq.user_supplied,
- )
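The functions above live in pip's private `pip._internal` package, so they are not a supported API and their signatures can change between pip releases. Purely as an illustration of what these constructors return, a small hedged sketch (assuming the same pip version as the vendored file above):

```python
# Illustration only: pip._internal is not a public API, and these imports assume
# the same pip version as the file above.
from pip._internal.req.constructors import install_req_from_line, parse_editable

# A plain specifier becomes an InstallRequirement carrying a parsed Requirement.
ireq = install_req_from_line("requests>=2.0", line_source="example")
print(ireq.name, ireq.specifier)  # requests >=2.0

# parse_editable() splits an editable spec into (name, url, extras).
name, url, extras = parse_editable("git+https://github.com/pypa/pip.git#egg=pip")
print(name, url, extras)  # pip git+https://github.com/pypa/pip.git#egg=pip set()
```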
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/resultdict.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/resultdict.py
deleted file mode 100644
index 7d36e64c467ca8d9cadc88ab03da71faf1aa8abb..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/resultdict.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from typing import TYPE_CHECKING, Optional
-
-if TYPE_CHECKING:
- # TypedDict was introduced in Python 3.8.
- #
- # TODO: Remove the else block and TYPE_CHECKING check when dropping support
- # for Python 3.7.
- from typing import TypedDict
-
- class ResultDict(TypedDict):
- encoding: Optional[str]
- confidence: float
- language: Optional[str]
-
-else:
- ResultDict = dict
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/utf1632prober.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/utf1632prober.py
deleted file mode 100644
index 6bdec63d6867928bf73a7e513f60cee8f49ca050..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/utf1632prober.py
+++ /dev/null
@@ -1,225 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-#
-# Contributor(s):
-# Jason Zavaglia
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301 USA
-######################### END LICENSE BLOCK #########################
-from typing import List, Union
-
-from .charsetprober import CharSetProber
-from .enums import ProbingState
-
-
-class UTF1632Prober(CharSetProber):
- """
- This class simply looks for occurrences of zero bytes, and infers
-    whether the file is UTF16 or UTF32 (little-endian or big-endian).
- For instance, files looking like ( \0 \0 \0 [nonzero] )+
- have a good probability to be UTF32BE. Files looking like ( \0 [nonzero] )+
- may be guessed to be UTF16BE, and inversely for little-endian varieties.
- """
-
- # how many logical characters to scan before feeling confident of prediction
- MIN_CHARS_FOR_DETECTION = 20
- # a fixed constant ratio of expected zeros or non-zeros in modulo-position.
- EXPECTED_RATIO = 0.94
-
- def __init__(self) -> None:
- super().__init__()
- self.position = 0
- self.zeros_at_mod = [0] * 4
- self.nonzeros_at_mod = [0] * 4
- self._state = ProbingState.DETECTING
- self.quad = [0, 0, 0, 0]
- self.invalid_utf16be = False
- self.invalid_utf16le = False
- self.invalid_utf32be = False
- self.invalid_utf32le = False
- self.first_half_surrogate_pair_detected_16be = False
- self.first_half_surrogate_pair_detected_16le = False
- self.reset()
-
- def reset(self) -> None:
- super().reset()
- self.position = 0
- self.zeros_at_mod = [0] * 4
- self.nonzeros_at_mod = [0] * 4
- self._state = ProbingState.DETECTING
- self.invalid_utf16be = False
- self.invalid_utf16le = False
- self.invalid_utf32be = False
- self.invalid_utf32le = False
- self.first_half_surrogate_pair_detected_16be = False
- self.first_half_surrogate_pair_detected_16le = False
- self.quad = [0, 0, 0, 0]
-
- @property
- def charset_name(self) -> str:
- if self.is_likely_utf32be():
- return "utf-32be"
- if self.is_likely_utf32le():
- return "utf-32le"
- if self.is_likely_utf16be():
- return "utf-16be"
- if self.is_likely_utf16le():
- return "utf-16le"
- # default to something valid
- return "utf-16"
-
- @property
- def language(self) -> str:
- return ""
-
- def approx_32bit_chars(self) -> float:
- return max(1.0, self.position / 4.0)
-
- def approx_16bit_chars(self) -> float:
- return max(1.0, self.position / 2.0)
-
- def is_likely_utf32be(self) -> bool:
- approx_chars = self.approx_32bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
- and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
- and not self.invalid_utf32be
- )
-
- def is_likely_utf32le(self) -> bool:
- approx_chars = self.approx_32bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
- and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
- and not self.invalid_utf32le
- )
-
- def is_likely_utf16be(self) -> bool:
- approx_chars = self.approx_16bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- (self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
- > self.EXPECTED_RATIO
- and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
- > self.EXPECTED_RATIO
- and not self.invalid_utf16be
- )
-
- def is_likely_utf16le(self) -> bool:
- approx_chars = self.approx_16bit_chars()
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
- (self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
- > self.EXPECTED_RATIO
- and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
- > self.EXPECTED_RATIO
- and not self.invalid_utf16le
- )
-
- def validate_utf32_characters(self, quad: List[int]) -> None:
- """
- Validate if the quad of bytes is valid UTF-32.
-
- UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
- excluding 0x0000D800 - 0x0000DFFF
-
- https://en.wikipedia.org/wiki/UTF-32
- """
- if (
- quad[0] != 0
- or quad[1] > 0x10
- or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
- ):
- self.invalid_utf32be = True
- if (
- quad[3] != 0
- or quad[2] > 0x10
- or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
- ):
- self.invalid_utf32le = True
-
- def validate_utf16_characters(self, pair: List[int]) -> None:
- """
- Validate if the pair of bytes is valid UTF-16.
-
-        UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xDFFF,
- with an exception for surrogate pairs, which must be in the range
- 0xD800-0xDBFF followed by 0xDC00-0xDFFF
-
- https://en.wikipedia.org/wiki/UTF-16
- """
- if not self.first_half_surrogate_pair_detected_16be:
- if 0xD8 <= pair[0] <= 0xDB:
- self.first_half_surrogate_pair_detected_16be = True
- elif 0xDC <= pair[0] <= 0xDF:
- self.invalid_utf16be = True
- else:
- if 0xDC <= pair[0] <= 0xDF:
- self.first_half_surrogate_pair_detected_16be = False
- else:
- self.invalid_utf16be = True
-
- if not self.first_half_surrogate_pair_detected_16le:
- if 0xD8 <= pair[1] <= 0xDB:
- self.first_half_surrogate_pair_detected_16le = True
- elif 0xDC <= pair[1] <= 0xDF:
- self.invalid_utf16le = True
- else:
- if 0xDC <= pair[1] <= 0xDF:
- self.first_half_surrogate_pair_detected_16le = False
- else:
- self.invalid_utf16le = True
-
- def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
- for c in byte_str:
- mod4 = self.position % 4
- self.quad[mod4] = c
- if mod4 == 3:
- self.validate_utf32_characters(self.quad)
- self.validate_utf16_characters(self.quad[0:2])
- self.validate_utf16_characters(self.quad[2:4])
- if c == 0:
- self.zeros_at_mod[mod4] += 1
- else:
- self.nonzeros_at_mod[mod4] += 1
- self.position += 1
- return self.state
-
- @property
- def state(self) -> ProbingState:
- if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
- # terminal, decided states
- return self._state
- if self.get_confidence() > 0.80:
- self._state = ProbingState.FOUND_IT
- elif self.position > 4 * 1024:
- # if we get to 4kb into the file, and we can't conclude it's UTF,
- # let's give up
- self._state = ProbingState.NOT_ME
- return self._state
-
- def get_confidence(self) -> float:
- return (
- 0.85
- if (
- self.is_likely_utf16le()
- or self.is_likely_utf16be()
- or self.is_likely_utf32le()
- or self.is_likely_utf32be()
- )
- else 0.00
- )
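UTF1632Prober above decides between UTF-16/UTF-32 byte orders purely from where zero bytes fall modulo 2 or 4. A small self-contained sketch of that heuristic for the UTF-32BE case (a simplification of is_likely_utf32be for illustration, not part of the chardet API):

```python
EXPECTED_RATIO = 0.94          # same constants as the prober above
MIN_CHARS_FOR_DETECTION = 20

def looks_like_utf32be(data: bytes) -> bool:
    """Count zero / non-zero bytes at each offset mod 4, as UTF1632Prober.feed() does."""
    zeros, nonzeros = [0] * 4, [0] * 4
    for pos, byte in enumerate(data):
        (zeros if byte == 0 else nonzeros)[pos % 4] += 1
    chars = max(1.0, len(data) / 4.0)
    if chars < MIN_CHARS_FOR_DETECTION:
        return False
    # ASCII text encoded as UTF-32BE looks like 00 00 00 xx repeated.
    return (
        zeros[0] / chars > EXPECTED_RATIO
        and zeros[1] / chars > EXPECTED_RATIO
        and zeros[2] / chars > EXPECTED_RATIO
        and nonzeros[3] / chars > EXPECTED_RATIO
    )

print(looks_like_utf32be("hello utf32 detection!".encode("utf-32-be")))  # True
print(looks_like_utf32be("hello".encode("utf-8")))                       # False
```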
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/helpers.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/helpers.py
deleted file mode 100644
index 9588b3b780159a2a2d23c7f84a4404ec350e2b65..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/helpers.py
+++ /dev/null
@@ -1,1088 +0,0 @@
-# helpers.py
-import html.entities
-import re
-import typing
-
-from . import __diag__
-from .core import *
-from .util import _bslash, _flatten, _escape_regex_range_chars
-
-
-#
-# global helpers
-#
-def delimited_list(
- expr: Union[str, ParserElement],
- delim: Union[str, ParserElement] = ",",
- combine: bool = False,
- min: typing.Optional[int] = None,
- max: typing.Optional[int] = None,
- *,
- allow_trailing_delim: bool = False,
-) -> ParserElement:
- """Helper to define a delimited list of expressions - the delimiter
- defaults to ','. By default, the list elements and delimiters can
- have intervening whitespace, and comments, but this can be
- overridden by passing ``combine=True`` in the constructor. If
- ``combine`` is set to ``True``, the matching tokens are
- returned as a single token string, with the delimiters included;
- otherwise, the matching tokens are returned as a list of tokens,
- with the delimiters suppressed.
-
- If ``allow_trailing_delim`` is set to True, then the list may end with
- a delimiter.
-
- Example::
-
- delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
- delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
- """
- if isinstance(expr, str_type):
- expr = ParserElement._literalStringClass(expr)
-
- dlName = "{expr} [{delim} {expr}]...{end}".format(
- expr=str(expr.copy().streamline()),
- delim=str(delim),
- end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
- )
-
- if not combine:
- delim = Suppress(delim)
-
- if min is not None:
- if min < 1:
- raise ValueError("min must be greater than 0")
- min -= 1
- if max is not None:
- if min is not None and max <= min:
- raise ValueError("max must be greater than, or equal to min")
- max -= 1
- delimited_list_expr = expr + (delim + expr)[min, max]
-
- if allow_trailing_delim:
- delimited_list_expr += Opt(delim)
-
- if combine:
- return Combine(delimited_list_expr).set_name(dlName)
- else:
- return delimited_list_expr.set_name(dlName)
-
-
-def counted_array(
- expr: ParserElement,
- int_expr: typing.Optional[ParserElement] = None,
- *,
- intExpr: typing.Optional[ParserElement] = None,
-) -> ParserElement:
- """Helper to define a counted list of expressions.
-
- This helper defines a pattern of the form::
-
- integer expr expr expr...
-
- where the leading integer tells how many expr expressions follow.
- The matched tokens returns the array of expr tokens as a list - the
- leading count token is suppressed.
-
- If ``int_expr`` is specified, it should be a pyparsing expression
- that produces an integer value.
-
- Example::
-
- counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
-
- # in this parser, the leading integer value is given in binary,
- # '10' indicating that 2 values are in the array
- binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
- counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
-
- # if other fields must be parsed after the count but before the
- # list items, give the fields results names and they will
- # be preserved in the returned ParseResults:
- count_with_metadata = integer + Word(alphas)("type")
- typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
- result = typed_array.parse_string("3 bool True True False")
- print(result.dump())
-
- # prints
- # ['True', 'True', 'False']
- # - items: ['True', 'True', 'False']
- # - type: 'bool'
- """
- intExpr = intExpr or int_expr
- array_expr = Forward()
-
- def count_field_parse_action(s, l, t):
- nonlocal array_expr
- n = t[0]
- array_expr <<= (expr * n) if n else Empty()
- # clear list contents, but keep any named results
- del t[:]
-
- if intExpr is None:
- intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
- else:
- intExpr = intExpr.copy()
- intExpr.set_name("arrayLen")
- intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
- return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
-
-
-def match_previous_literal(expr: ParserElement) -> ParserElement:
- """Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks for
- a 'repeat' of a previous expression. For example::
-
- first = Word(nums)
- second = match_previous_literal(first)
- match_expr = first + ":" + second
-
- will match ``"1:1"``, but not ``"1:2"``. Because this
- matches a previous literal, will also match the leading
- ``"1:1"`` in ``"1:10"``. If this is not desired, use
- :class:`match_previous_expr`. Do *not* use with packrat parsing
- enabled.
- """
- rep = Forward()
-
- def copy_token_to_repeater(s, l, t):
- if t:
- if len(t) == 1:
- rep << t[0]
- else:
- # flatten t tokens
- tflat = _flatten(t.as_list())
- rep << And(Literal(tt) for tt in tflat)
- else:
- rep << Empty()
-
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
- rep.set_name("(prev) " + str(expr))
- return rep
-
-
-def match_previous_expr(expr: ParserElement) -> ParserElement:
- """Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks for
- a 'repeat' of a previous expression. For example::
-
- first = Word(nums)
- second = match_previous_expr(first)
- match_expr = first + ":" + second
-
- will match ``"1:1"``, but not ``"1:2"``. Because this
- matches by expressions, will *not* match the leading ``"1:1"``
- in ``"1:10"``; the expressions are evaluated first, and then
- compared, so ``"1"`` is compared with ``"10"``. Do *not* use
- with packrat parsing enabled.
- """
- rep = Forward()
- e2 = expr.copy()
- rep <<= e2
-
- def copy_token_to_repeater(s, l, t):
- matchTokens = _flatten(t.as_list())
-
- def must_match_these_tokens(s, l, t):
- theseTokens = _flatten(t.as_list())
- if theseTokens != matchTokens:
- raise ParseException(
- s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
- )
-
- rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
-
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
- rep.set_name("(prev) " + str(expr))
- return rep
-
-
-def one_of(
- strs: Union[typing.Iterable[str], str],
- caseless: bool = False,
- use_regex: bool = True,
- as_keyword: bool = False,
- *,
- useRegex: bool = True,
- asKeyword: bool = False,
-) -> ParserElement:
- """Helper to quickly define a set of alternative :class:`Literal` s,
- and makes sure to do longest-first testing when there is a conflict,
- regardless of the input order, but returns
- a :class:`MatchFirst` for best performance.
-
- Parameters:
-
- - ``strs`` - a string of space-delimited literals, or a collection of
- string literals
- - ``caseless`` - treat all literals as caseless - (default= ``False``)
- - ``use_regex`` - as an optimization, will
- generate a :class:`Regex` object; otherwise, will generate
- a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
- creating a :class:`Regex` raises an exception) - (default= ``True``)
- - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
- generated expressions - (default= ``False``)
- - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
- but will be removed in a future release
-
- Example::
-
- comp_oper = one_of("< = > <= >= !=")
- var = Word(alphas)
- number = Word(nums)
- term = var | number
- comparison_expr = term + comp_oper + term
- print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
-
- prints::
-
- [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
- """
- asKeyword = asKeyword or as_keyword
- useRegex = useRegex and use_regex
-
- if (
- isinstance(caseless, str_type)
- and __diag__.warn_on_multiple_string_args_to_oneof
- ):
- warnings.warn(
- "More than one string argument passed to one_of, pass"
- " choices as a list or space-delimited string",
- stacklevel=2,
- )
-
- if caseless:
- isequal = lambda a, b: a.upper() == b.upper()
- masks = lambda a, b: b.upper().startswith(a.upper())
- parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
- else:
- isequal = lambda a, b: a == b
- masks = lambda a, b: b.startswith(a)
- parseElementClass = Keyword if asKeyword else Literal
-
- symbols: List[str] = []
- if isinstance(strs, str_type):
- symbols = strs.split()
- elif isinstance(strs, Iterable):
- symbols = list(strs)
- else:
- raise TypeError("Invalid argument to one_of, expected string or iterable")
- if not symbols:
- return NoMatch()
-
- # reorder given symbols to take care to avoid masking longer choices with shorter ones
- # (but only if the given symbols are not just single characters)
- if any(len(sym) > 1 for sym in symbols):
- i = 0
- while i < len(symbols) - 1:
- cur = symbols[i]
- for j, other in enumerate(symbols[i + 1 :]):
- if isequal(other, cur):
- del symbols[i + j + 1]
- break
- elif masks(cur, other):
- del symbols[i + j + 1]
- symbols.insert(i, other)
- break
- else:
- i += 1
-
- if useRegex:
- re_flags: int = re.IGNORECASE if caseless else 0
-
- try:
- if all(len(sym) == 1 for sym in symbols):
- # symbols are just single characters, create range regex pattern
- patt = "[{}]".format(
- "".join(_escape_regex_range_chars(sym) for sym in symbols)
- )
- else:
- patt = "|".join(re.escape(sym) for sym in symbols)
-
- # wrap with \b word break markers if defining as keywords
- if asKeyword:
- patt = r"\b(?:{})\b".format(patt)
-
- ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
-
- if caseless:
- # add parse action to return symbols as specified, not in random
- # casing as found in input string
- symbol_map = {sym.lower(): sym for sym in symbols}
- ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
-
- return ret
-
- except re.error:
- warnings.warn(
- "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
- )
-
- # last resort, just use MatchFirst
- return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
- " | ".join(symbols)
- )
-
-
-def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
- """Helper to easily and clearly define a dictionary by specifying
- the respective patterns for the key and value. Takes care of
- defining the :class:`Dict`, :class:`ZeroOrMore`, and
- :class:`Group` tokens in the proper order. The key pattern
- can include delimiting markers or punctuation, as long as they are
- suppressed, thereby leaving the significant key text. The value
- pattern can include named results, so that the :class:`Dict` results
- can include named token fields.
-
- Example::
-
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
- print(attr_expr[1, ...].parse_string(text).dump())
-
- attr_label = label
- attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
-
- # similar to Dict, but simpler call format
- result = dict_of(attr_label, attr_value).parse_string(text)
- print(result.dump())
- print(result['shape'])
- print(result.shape) # object attribute access works too
- print(result.as_dict())
-
- prints::
-
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: 'light blue'
- - posn: 'upper left'
- - shape: 'SQUARE'
- - texture: 'burlap'
- SQUARE
- SQUARE
- {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
- """
- return Dict(OneOrMore(Group(key + value)))
-
-
-def original_text_for(
- expr: ParserElement, as_string: bool = True, *, asString: bool = True
-) -> ParserElement:
- """Helper to return the original, untokenized text for a given
- expression. Useful to restore the parsed fields of an HTML start
- tag into the raw tag text itself, or to revert separate tokens with
- intervening whitespace back to the original matching input text. By
-    default, returns a string containing the original parsed text.
-
- If the optional ``as_string`` argument is passed as
- ``False``, then the return value is
- a :class:`ParseResults` containing any results names that
- were originally matched, and a single token containing the original
- matched text from the input string. So if the expression passed to
- :class:`original_text_for` contains expressions with defined
- results names, you must set ``as_string`` to ``False`` if you
- want to preserve those results name values.
-
- The ``asString`` pre-PEP8 argument is retained for compatibility,
- but will be removed in a future release.
-
- Example::
-
-        src = "this is test <b> bold <i>text</i> </b> normal text "
- for tag in ("b", "i"):
- opener, closer = make_html_tags(tag)
- patt = original_text_for(opener + SkipTo(closer) + closer)
- print(patt.search_string(src)[0])
-
- prints::
-
-        ['<b> bold <i>text</i> </b>']
-        ['<i>text</i>']
- """
- asString = asString and as_string
-
- locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
- endlocMarker = locMarker.copy()
- endlocMarker.callPreparse = False
- matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
- if asString:
- extractText = lambda s, l, t: s[t._original_start : t._original_end]
- else:
-
- def extractText(s, l, t):
- t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
-
- matchExpr.set_parse_action(extractText)
- matchExpr.ignoreExprs = expr.ignoreExprs
- matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
- return matchExpr
-
-
-def ungroup(expr: ParserElement) -> ParserElement:
- """Helper to undo pyparsing's default grouping of And expressions,
- even if all but one are non-empty.
- """
- return TokenConverter(expr).add_parse_action(lambda t: t[0])
-
-
-def locatedExpr(expr: ParserElement) -> ParserElement:
- """
- (DEPRECATED - future code should use the Located class)
- Helper to decorate a returned token with its starting and ending
- locations in the input string.
-
- This helper adds the following results names:
-
- - ``locn_start`` - location where matched expression begins
- - ``locn_end`` - location where matched expression ends
- - ``value`` - the actual parsed results
-
-    Be careful if the input text contains ``<TAB>`` characters, you
- may want to call :class:`ParserElement.parseWithTabs`
-
- Example::
-
- wd = Word(alphas)
- for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
- print(match)
-
- prints::
-
- [[0, 'ljsdf', 5]]
- [[8, 'lksdjjf', 15]]
- [[18, 'lkkjj', 23]]
- """
- locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
- return Group(
- locator("locn_start")
- + expr("value")
- + locator.copy().leaveWhitespace()("locn_end")
- )
-
-
-def nested_expr(
- opener: Union[str, ParserElement] = "(",
- closer: Union[str, ParserElement] = ")",
- content: typing.Optional[ParserElement] = None,
- ignore_expr: ParserElement = quoted_string(),
- *,
- ignoreExpr: ParserElement = quoted_string(),
-) -> ParserElement:
- """Helper method for defining nested lists enclosed in opening and
- closing delimiters (``"("`` and ``")"`` are the default).
-
- Parameters:
- - ``opener`` - opening character for a nested list
- (default= ``"("``); can also be a pyparsing expression
- - ``closer`` - closing character for a nested list
- (default= ``")"``); can also be a pyparsing expression
- - ``content`` - expression for items within the nested lists
- (default= ``None``)
- - ``ignore_expr`` - expression for ignoring opening and closing delimiters
- (default= :class:`quoted_string`)
- - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
- but will be removed in a future release
-
- If an expression is not provided for the content argument, the
- nested expression will capture all whitespace-delimited content
- between delimiters as a list of separate values.
-
- Use the ``ignore_expr`` argument to define expressions that may
- contain opening or closing characters that should not be treated as
- opening or closing characters for nesting, such as quoted_string or
- a comment expression. Specify multiple expressions using an
- :class:`Or` or :class:`MatchFirst`. The default is
- :class:`quoted_string`, but if no expressions are to be ignored, then
- pass ``None`` for this argument.
-
- Example::
-
- data_type = one_of("void int short long char float double")
- decl_data_type = Combine(data_type + Opt(Word('*')))
- ident = Word(alphas+'_', alphanums+'_')
- number = pyparsing_common.number
- arg = Group(decl_data_type + ident)
- LPAR, RPAR = map(Suppress, "()")
-
- code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
-
- c_function = (decl_data_type("type")
- + ident("name")
- + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
- + code_body("body"))
- c_function.ignore(c_style_comment)
-
- source_code = '''
- int is_odd(int x) {
- return (x%2);
- }
-
- int dec_to_hex(char hchar) {
- if (hchar >= '0' && hchar <= '9') {
- return (ord(hchar)-ord('0'));
- } else {
- return (10+ord(hchar)-ord('A'));
- }
- }
- '''
- for func in c_function.search_string(source_code):
- print("%(name)s (%(type)s) args: %(args)s" % func)
-
-
- prints::
-
- is_odd (int) args: [['int', 'x']]
- dec_to_hex (int) args: [['char', 'hchar']]
- """
- if ignoreExpr != ignore_expr:
- ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
- if opener == closer:
- raise ValueError("opening and closing strings cannot be the same")
- if content is None:
- if isinstance(opener, str_type) and isinstance(closer, str_type):
- if len(opener) == 1 and len(closer) == 1:
- if ignoreExpr is not None:
- content = Combine(
- OneOrMore(
- ~ignoreExpr
- + CharsNotIn(
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
- exact=1,
- )
- )
- ).set_parse_action(lambda t: t[0].strip())
- else:
- content = empty.copy() + CharsNotIn(
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS
- ).set_parse_action(lambda t: t[0].strip())
- else:
- if ignoreExpr is not None:
- content = Combine(
- OneOrMore(
- ~ignoreExpr
- + ~Literal(opener)
- + ~Literal(closer)
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
- )
- ).set_parse_action(lambda t: t[0].strip())
- else:
- content = Combine(
- OneOrMore(
- ~Literal(opener)
- + ~Literal(closer)
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
- )
- ).set_parse_action(lambda t: t[0].strip())
- else:
- raise ValueError(
- "opening and closing arguments must be strings if no content expression is given"
- )
- ret = Forward()
- if ignoreExpr is not None:
- ret <<= Group(
- Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
- )
- else:
- ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
- ret.set_name("nested %s%s expression" % (opener, closer))
- return ret
-
-
-def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
- """Internal helper to construct opening and closing tag expressions, given a tag name"""
- if isinstance(tagStr, str_type):
- resname = tagStr
- tagStr = Keyword(tagStr, caseless=not xml)
- else:
- resname = tagStr.name
-
- tagAttrName = Word(alphas, alphanums + "_-:")
- if xml:
- tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
- openTag = (
- suppress_LT
- + tagStr("tag")
- + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
- + Opt("/", default=[False])("empty").set_parse_action(
- lambda s, l, t: t[0] == "/"
- )
- + suppress_GT
- )
- else:
- tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
- printables, exclude_chars=">"
- )
- openTag = (
- suppress_LT
- + tagStr("tag")
- + Dict(
- ZeroOrMore(
- Group(
- tagAttrName.set_parse_action(lambda t: t[0].lower())
- + Opt(Suppress("=") + tagAttrValue)
- )
- )
- )
- + Opt("/", default=[False])("empty").set_parse_action(
- lambda s, l, t: t[0] == "/"
- )
- + suppress_GT
- )
-    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
-
- openTag.set_name("<%s>" % resname)
- # add start results name in parse action now that ungrouped names are not reported at two levels
- openTag.add_parse_action(
- lambda t: t.__setitem__(
- "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
- )
- )
- closeTag = closeTag(
- "end" + "".join(resname.replace(":", " ").title().split())
-    ).set_name("</%s>" % resname)
- openTag.tag = resname
- closeTag.tag = resname
- openTag.tag_body = SkipTo(closeTag())
- return openTag, closeTag
-
-
-def make_html_tags(
- tag_str: Union[str, ParserElement]
-) -> Tuple[ParserElement, ParserElement]:
- """Helper to construct opening and closing tag expressions for HTML,
- given a tag name. Matches tags in either upper or lower case,
- attributes with namespaces and with quoted or unquoted values.
-
- Example::
-
-        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
ARK: Survival Evolved is a game released in 2017 by Studio Wildcard. It is a sandbox survival game that lets you create your own character and explore a massive island called ARK, where you can find more than 80 different species of dinosaurs and other prehistoric creatures. You can tame, breed, and ride some of them, or use them as allies or enemies in your quest for survival. You can also craft weapons, tools, clothing, and structures from the resources you gather or loot from other players. You can play solo or join a tribe with other players, and cooperate or compete with them in various game modes. You can also customize the game's settings and mods to suit your preferences.
-
Why play online without downloading?
-
-
That is why some players prefer to play online without downloading. This way, they can save space and time, and enjoy the game on any device with an Internet connection. They can also avoid the hassle of updating the game or installing mods manually. Playing online without downloading can also improve your performance and game quality, since you do not depend on your device's hardware but on the cloud server.
-
How to play online without downloading
-
Option 1: Use a cloud gaming service
-
What is cloud gaming?
-
Cloud gaming is a technology that lets you stream games from a remote server to your device over the Internet. You do not have to download or install anything on your device, since the game runs on the server and sends the video and audio output to your device. You only need a compatible device (such as a PC, laptop, tablet, smartphone, or smart TV), an Internet connection (preferably fast and stable), and a subscription or account with the cloud gaming service.
-
How to use Playkey.net to play ARK: Survival Evolved online
-
One of the cloud gaming services you can use to play ARK: Survival Evolved online without downloading is Playkey.net. Playkey.net is a platform that gives you access to a library of more than 200 games, including ARK: Survival Evolved, and streams them to your device. You can play on any HTML5-compatible device, whether Windows, Mac, Linux, Android, or iOS. You can also use any controller or keyboard and mouse you prefer. These are the steps to use Playkey.net to play ARK: Survival Evolved online:
-
-
-
Vaya a WizCase y descargar la extensión para su navegador (Chrome o Firefox).
-
Instale la extensión y cree una cuenta o inicie sesión con su cuenta existente.
-
Una vez que haya instalado la extensión, vaya a la sección de juegos y encuentre ARK: Survival Evolved. Haga clic en el botón de descarga y elija una fuente de la lista.
-
Espera a que termine la descarga y luego ejecuta el instalador del juego en tu dispositivo.
-
Disfruta jugando ARK: Survival Evolved gratis.
-
-
Pros y contras de jugar online sin descargar
-
Pros
-
Jugar en línea sin descargar tiene algunas ventajas sobre jugar sin conexión después de descargar. Algunos de ellos son:
-
-
-
Puedes jugar en cualquier dispositivo con conexión a Internet. Usted no tiene que pegarse a un dispositivo o comprar uno nuevo si el actual no cumple con los requisitos del juego.
-
Puede disfrutar de un mejor rendimiento y calidad de juego. No tienes que sufrir retrasos, caídas o baja calidad gráfica debido a las limitaciones de hardware de tu dispositivo.
-
Puedes acceder a las últimas actualizaciones y mods automáticamente. No tienes que actualizar manualmente el juego o instalar mods tú mismo.
-
-
Contras
-
Jugar en línea sin descargar también tiene algunas desventajas sobre jugar sin conexión después de descargar. Algunos de ellos son:
-
-
Necesitas una conexión a Internet confiable. Si tu Internet es lento, inestable o no está disponible, no podrás jugar en línea sin descargar.
-
Necesita una suscripción o cuenta con un servicio de juegos en la nube o una extensión de navegador. Si no tienes uno, no podrás jugar online sin descargar.
-
Es posible que experimente alguna latencia o retraso debido a problemas de red o sobrecarga del servidor. Si su velocidad de Internet o la capacidad del servidor no es suficiente, puede enfrentar algunos retrasos o interrupciones mientras juega en línea sin descargar.
-
Es posible que pierda algún control o opciones de personalización sobre la configuración del juego o mods. Si juegas en línea sin descargar, es posible que tengas que seguir la configuración predeterminada o los mods proporcionados por el servicio de juegos en la nube o la extensión del navegador. Es posible que no pueda cambiarlos según sus preferencias.
-
-
Conclusión
-
Resumen de los puntos principales
-
-
Llamada a la acción
-
Ahora que sabes cómo jugar a ARK: Survival Evolved online sin necesidad de descargarlo, ¿por qué no intentarlo? Puedes elegir la opción que más te convenga y disfrutar del juego en cualquier dispositivo con conexión a Internet. También puede compartir su experiencia con nosotros en la sección de comentarios a continuación. ¡Nos encantaría saber de usted!
-
Preguntas frecuentes
-
¿Cuáles son los requisitos mínimos para jugar a ARK: Survival Evolved online?
-
Para jugar ARK: Survival Evolved en línea sin descargar, necesita un dispositivo compatible (como un PC, portátil, tableta, teléfono inteligente o TV inteligente), una conexión a Internet (preferiblemente de alta velocidad y estable), y una suscripción o cuenta con un servicio de juegos en la nube o una extensión de navegador.
-
¿Cuánto cuesta jugar a ARK: Survival Evolved online sin descargar?
-
El coste de jugar a ARK: Survival Evolved online sin descargar depende de la opción que elijas. Si utiliza un servicio de juegos en la nube, debe pagar un plan de suscripción que oscila entre $ 9.99 y $ 49.99 por mes, dependiendo del servicio y el plan. Si utiliza una extensión de navegador, es posible que pueda descargar el juego de forma gratuita o por un precio reducido, dependiendo de la extensión y la fuente.
-
¿Es seguro jugar ARK: Survival Evolved online sin descargar?
-
Por lo general, es seguro jugar ARK: Survival Evolved en línea sin necesidad de descargar, siempre y cuando utilice un servicio de juegos en la nube de buena reputación y fiable o una extensión del navegador. Sin embargo, siempre debe tener cuidado con su privacidad y seguridad al usar cualquier servicio o extensión en línea. Debe leer los términos y condiciones, la política de privacidad y los comentarios de los usuarios antes de registrarse o instalar nada. También debe usar una VPN o software antivirus para proteger su dispositivo y los datos de hackers o malware.
-
¿Puedo jugar ARK: Survival Evolved online sin descargar con mis amigos?
-
-
¿Puedo jugar ARK: Survival Evolved sin conexión después de la descarga?
-
Sí, puedes jugar a ARK: Survival Evolved sin conexión después de descargarlo, si prefieres hacerlo. Puedes descargar el juego desde Steam u otras plataformas e instalarlo en tu dispositivo. A continuación, puede jugar sin conexión en el modo de un solo jugador, o en línea en el modo multijugador si tiene una conexión a Internet.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Beach Buggy Racing 2 Beta Apk.md b/spaces/Benson/text-generation/Examples/Beach Buggy Racing 2 Beta Apk.md
deleted file mode 100644
index 11e6496a0b5461b76d9e8e16ba417145359ab0f1..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Beach Buggy Racing 2 Beta Apk.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-Beach Buggy Racing 2 Beta APK: Everything You Need to Know
-
-If you are a fan of kart racing games, you may have heard of Beach Buggy Racing, a popular mobile game that introduced more than 100 million players to console-style kart racing with a fun offroad twist. Now the sequel, Beach Buggy Racing 2, is here to take you on another thrilling ride through exotic locations, with new cars, drivers, powerups, and game modes.
-
-Beach Buggy Racing 2 is available for Android, iOS, and consoles, but if you want a glimpse of what the game has to offer before its official release, you can try the beta APK for Android devices. In this article, we will tell you everything you need to know about the Beach Buggy Racing 2 beta APK, including its features, how to download and install it, tips and tricks for playing it, and our review of the game.
-Beach Buggy Racing 2 is a 3D offroad kart racing game with amazing physics, detailed cars and characters, and spectacular weapons. It is like a console game in the palm of your hand! These are some of the features that make this game so fun and addictive:
-
-Spectacular kart racing action with amazing physics and graphics: Race through Egyptian pyramids, dragon-infested castles, pirate shipwrecks, and experimental alien bio-labs. Feel the speed and the thrill as you drift, jump, and smash your way to victory.
-
-More than 45 powerups to discover and upgrade: With Beach Buggy Racing 2, you can build your own custom powerup deck with out-of-this-world abilities like "Chain Lightning", "Donut Tires", "Boost Juice", and "Killer Bees". Upgrade your powerups to make them more powerful and effective.
-
-Collect more than 55 cars of different types and styles: Collect a garage full of beach buggies, monster trucks, muscle cars, classic pickups, and formula supercars. All the classic Beach Buggy cars return -- plus dozens of new cars to discover!
-
-Play against the world in online competitions and tournaments: Join the Beach Buggy Racing League and compete against drivers and cars from around the world. Race shoulder to shoulder with up to 8 players on mobile, or 4 players on console. Test your skills in 6 different game modes on 15 imaginative 3D race tracks, against a pack of tropical-loving rivals with a serious case of road rage!
-
-Customize your ride with paints, decals, and more: Make your car stand out with more than 1500 different combinations of paint and decals. You can also change your driver's look with cool outfits and accessories.
-
-Impressive new game modes to enjoy: Besides the classic race mode, you can also try the new Adventure mode, where you explore a huge open world of challenges and secrets. Or you can play the new Arena mode, where you face other players in a free-for-all frenzy of powerups and chaos.
-
-How to download and install the Beach Buggy Racing 2 Beta APK
-
-If you are eager to try Beach Buggy Racing 2 before its official release, you can download and install the beta APK for Android devices. These are the steps you need to follow:
-
-Find a reliable source for the beta APK file: You can search online for websites that offer the beta APK file for Beach Buggy Racing 2. Make sure you choose a reliable and safe source, since some websites may contain malware or viruses. You can also check the reviews and ratings of other users who have downloaded the file.
-
-Download and install the beta APK file: Once you have found a reliable source and enabled unknown sources, you can download the beta APK file to your device. When the download is complete, find the file in your device's storage and tap it to install it. Follow the on-screen instructions to finish the installation.
-
-Launch the game and enjoy: After the installation is done, you can launch the game from the app drawer or the home screen. You can now enjoy the Beach Buggy Racing 2 beta APK and experience its features before anyone else.
-
-Tips and tricks for Beach Buggy Racing 2
-
-Beach Buggy Racing 2 is a fun and challenging game that requires skill, strategy, and luck. Here are some tips and tricks that can help you improve your performance and win more races:
-
-Master the drift and powerslide techniques: Drifting and powersliding are essential skills for any kart racer. They let you take sharp turns without losing speed, and they also fill your boost meter faster. To drift, tap the brake button while turning. To powerslide, tap the brake button twice while turning. You can also adjust the drift sensitivity in the settings menu.
-
-Use your driver's ability at the right time: Each driver has a unique special ability that can give you an edge in the race. For example, Rez can teleport ahead of other racers, McSkelly can summon a horde of skeletons to slow down opponents, and Roxie can blast everyone with her guitar. However, these abilities have a cooldown, so use them wisely and strategically.
-
-Build the best deck of crazy powerups for each race: Before each race, you can choose which powerups you want to use from your collection. You can have up to six powerups in your deck, but you can only use one at a time during the race. It is therefore important to choose wisely and balance your deck according to your strategy and preference. For example, if you want to be more aggressive, you can pick powerups that damage or disrupt other racers, such as "Fireball", "Oil Slick", or "Tiki Seekers". If you want to be more defensive, you can pick powerups that protect or heal you, such as "Shield", "Repair Kit", or "Boost Juice". You can also mix and match different powerups to create combos and synergies.
-
-Grab those speed bubbles for an extra burst of speed: During the race you will see blue bubbles floating around the track. These are speed bubbles that give you a temporary speed boost when you drive through them. They can help you catch up with other racers, escape danger, or reach shortcuts. Try to grab as many speed bubbles as you can, but be careful not to crash into obstacles or other racers while doing so.
-
-Beach Buggy Racing 2 review
-
-Beach Buggy Racing 2 is a fun and exciting kart racing game that will keep you entertained for hours. It has plenty of features and content that make it worth playing, but it also has some drawbacks that may affect your enjoyment. These are some of the pros and cons of Beach Buggy Racing 2:
-
-Pros:
-
-Fun, colorful, and addictive gameplay
-
-Variety of cars, drivers, powerups, and tracks
-
-Great graphics and sound effects
-
-Online multiplayer and customization options
-
-Low price for the console version
-
-Cons:
-
-Repetitive at times
-
-No online multiplayer for the mobile version
-
-Some powerups are too frustrating
-
-Some bugs and glitches in the beta version
-
-Conclusion
-
-Beach Buggy Racing 2 is a kart racing game that offers plenty of fun and excitement for players of all ages and preferences. It has many features and lots of content that make it stand out from other kart racing games, such as spectacular physics and graphics, more than 45 powerups to discover and upgrade, more than 55 cars to collect and customize, online competitions and tournaments, new game modes, and more. It also has some drawbacks that may affect your enjoyment, such as repetitiveness, the lack of online multiplayer in the mobile version, some frustrating powerups, and some bugs and glitches in the beta version.
-
-If you are looking for a kart racing game that is fun, colorful, and addictive, Beach Buggy Racing 2 is a great choice for you. You can try the beta APK for Android devices before its official release, or buy the console version for a low price. Either way, you will have a blast racing through exotic locations with crazy powerups and characters.
-
-So what are you waiting for? Download the Beach Buggy Racing 2 beta APK now and join the Beach Buggy Racing League!
-
-Here are some of the most common questions people ask about Beach Buggy Racing 2:
-
-Is Beach Buggy Racing 2 free?: Beach Buggy Racing 2 is free to download and play on Android and iOS devices. However, it contains in-app purchases that let you buy coins, gems, cars, drivers, powerups, and other items with real money. You can also watch ads to earn free coins or gems. The console version of Beach Buggy Racing 2 costs $9.99 on PlayStation 4, Xbox One, and Nintendo Switch.
-
-Is Beach Buggy Racing 2 online?: Beach Buggy Racing 2 has online multiplayer features in both the mobile and console versions. On mobile, you can play against other players from around the world in online competitions and tournaments. You can also join a club and chat with other members. On console, you can play with up to 4 players on the same screen or online.
-
-Can Beach Buggy Racing 2 be played offline?: Beach Buggy Racing 2 can also be played offline in both the mobile and console versions. On mobile, you can play the Adventure mode, where you explore a huge open world of challenges and secrets. You can also play the Arena mode, where you face other players in a free-for-all frenzy of powerups and chaos. On console, you can play the Race mode, where you compete against AI opponents on 15 different tracks.
-
-How do you unlock new cars and drivers in Beach Buggy Racing 2?: There are more than 55 cars and more than 25 drivers to unlock in Beach Buggy Racing 2. You can unlock them by playing the game, completing challenges, or buying them with coins or gems. Some cars and drivers are exclusive to certain game modes or platforms, so you may have to play different modes or versions to get them all.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/_distutils.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/_distutils.py
deleted file mode 100644
index 92bd93179c5cd3cb377c8b9f1e9d22d13fd7d003..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/_distutils.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""Locations where we look for configs, install stuff, etc"""
-
-# The following comment should be removed at some point in the future.
-# mypy: strict-optional=False
-
-# If pip's going to use distutils, it should not be using the copy that setuptools
-# might have injected into the environment. This is done by removing the injected
-# shim, if it's injected.
-#
-# See https://github.com/pypa/pip/issues/8761 for the original discussion and
-# rationale for why this is done within pip.
-try:
- __import__("_distutils_hack").remove_shim()
-except (ImportError, AttributeError):
- pass
-
-import logging
-import os
-import sys
-from distutils.cmd import Command as DistutilsCommand
-from distutils.command.install import SCHEME_KEYS
-from distutils.command.install import install as distutils_install_command
-from distutils.sysconfig import get_python_lib
-from typing import Dict, List, Optional, Union, cast
-
-from pip._internal.models.scheme import Scheme
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.virtualenv import running_under_virtualenv
-
-from .base import get_major_minor_version
-
-logger = logging.getLogger(__name__)
-
-
-def distutils_scheme(
- dist_name: str,
- user: bool = False,
- home: Optional[str] = None,
- root: Optional[str] = None,
- isolated: bool = False,
- prefix: Optional[str] = None,
- *,
- ignore_config_files: bool = False,
-) -> Dict[str, str]:
- """
- Return a distutils install scheme
- """
- from distutils.dist import Distribution
-
- dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name}
- if isolated:
- dist_args["script_args"] = ["--no-user-cfg"]
-
- d = Distribution(dist_args)
- if not ignore_config_files:
- try:
- d.parse_config_files()
- except UnicodeDecodeError:
- # Typeshed does not include find_config_files() for some reason.
- paths = d.find_config_files() # type: ignore
- logger.warning(
- "Ignore distutils configs in %s due to encoding errors.",
- ", ".join(os.path.basename(p) for p in paths),
- )
- obj: Optional[DistutilsCommand] = None
- obj = d.get_command_obj("install", create=True)
- assert obj is not None
- i = cast(distutils_install_command, obj)
- # NOTE: setting user or home has the side-effect of creating the home dir
- # or user base for installations during finalize_options()
- # ideally, we'd prefer a scheme class that has no side-effects.
- assert not (user and prefix), f"user={user} prefix={prefix}"
- assert not (home and prefix), f"home={home} prefix={prefix}"
- i.user = user or i.user
- if user or home:
- i.prefix = ""
- i.prefix = prefix or i.prefix
- i.home = home or i.home
- i.root = root or i.root
- i.finalize_options()
-
- scheme = {}
- for key in SCHEME_KEYS:
- scheme[key] = getattr(i, "install_" + key)
-
- # install_lib specified in setup.cfg should install *everything*
- # into there (i.e. it takes precedence over both purelib and
- # platlib). Note, i.install_lib is *always* set after
- # finalize_options(); we only want to override here if the user
- # has explicitly requested it hence going back to the config
- if "install_lib" in d.get_option_dict("install"):
- scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
-
- if running_under_virtualenv():
- if home:
- prefix = home
- elif user:
- prefix = i.install_userbase
- else:
- prefix = i.prefix
- scheme["headers"] = os.path.join(
- prefix,
- "include",
- "site",
- f"python{get_major_minor_version()}",
- dist_name,
- )
-
- if root is not None:
- path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1]
- scheme["headers"] = os.path.join(root, path_no_drive[1:])
-
- return scheme
-
-
-def get_scheme(
- dist_name: str,
- user: bool = False,
- home: Optional[str] = None,
- root: Optional[str] = None,
- isolated: bool = False,
- prefix: Optional[str] = None,
-) -> Scheme:
- """
- Get the "scheme" corresponding to the input parameters. The distutils
- documentation provides the context for the available schemes:
- https://docs.python.org/3/install/index.html#alternate-installation
-
- :param dist_name: the name of the package to retrieve the scheme for, used
- in the headers scheme path
- :param user: indicates to use the "user" scheme
- :param home: indicates to use the "home" scheme and provides the base
- directory for the same
- :param root: root under which other directories are re-based
- :param isolated: equivalent to --no-user-cfg, i.e. do not consider
- ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for
- scheme paths
- :param prefix: indicates to use the "prefix" scheme and provides the
- base directory for the same
- """
- scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix)
- return Scheme(
- platlib=scheme["platlib"],
- purelib=scheme["purelib"],
- headers=scheme["headers"],
- scripts=scheme["scripts"],
- data=scheme["data"],
- )
-
-
-def get_bin_prefix() -> str:
- # XXX: In old virtualenv versions, sys.prefix can contain '..' components,
- # so we need to call normpath to eliminate them.
- prefix = os.path.normpath(sys.prefix)
- if WINDOWS:
- bin_py = os.path.join(prefix, "Scripts")
- # buildout uses 'bin' on Windows too?
- if not os.path.exists(bin_py):
- bin_py = os.path.join(prefix, "bin")
- return bin_py
- # Forcing to use /usr/local/bin for standard macOS framework installs
- # Also log to ~/Library/Logs/ for use with the Console.app log viewer
- if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/":
- return "/usr/local/bin"
- return os.path.join(prefix, "bin")
-
-
-def get_purelib() -> str:
- return get_python_lib(plat_specific=False)
-
-
-def get_platlib() -> str:
- return get_python_lib(plat_specific=True)
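
The `get_scheme()` helper removed above wraps `distutils_scheme()` and repackages the distutils install locations as a `Scheme`. As a rough illustration of how pip's internals consume it; this is a hedged sketch against a private pip API, and "example-dist" is a placeholder name, not something from the diff:

```python
# Hedged sketch only: pip._internal is a private API and may change between
# pip releases. The distribution name below is a placeholder.
from pip._internal.locations._distutils import get_scheme

scheme = get_scheme("example-dist", user=False, isolated=True)
print(scheme.purelib)  # target directory for pure-Python modules
print(scheme.scripts)  # target directory for console scripts
print(scheme.headers)  # per-distribution header directory built above
```
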
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py
deleted file mode 100644
index a12e2c75d132c73b556702159d535d15ed9abfd2..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import os
-import pathlib
-import tempfile
-import functools
-import contextlib
-import types
-import importlib
-
-from typing import Union, Optional
-from .abc import ResourceReader, Traversable
-
-from ._compat import wrap_spec
-
-Package = Union[types.ModuleType, str]
-
-
-def files(package):
- # type: (Package) -> Traversable
- """
- Get a Traversable resource from a package
- """
- return from_package(get_package(package))
-
-
-def get_resource_reader(package):
- # type: (types.ModuleType) -> Optional[ResourceReader]
- """
- Return the package's loader if it's a ResourceReader.
- """
- # We can't use
- # a issubclass() check here because apparently abc.'s __subclasscheck__()
- # hook wants to create a weak reference to the object, but
- # zipimport.zipimporter does not support weak references, resulting in a
- # TypeError. That seems terrible.
- spec = package.__spec__
- reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
- if reader is None:
- return None
- return reader(spec.name) # type: ignore
-
-
-def resolve(cand):
- # type: (Package) -> types.ModuleType
- return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
-
-
-def get_package(package):
- # type: (Package) -> types.ModuleType
- """Take a package name or module object and return the module.
-
- Raise an exception if the resolved module is not a package.
- """
- resolved = resolve(package)
- if wrap_spec(resolved).submodule_search_locations is None:
- raise TypeError(f'{package!r} is not a package')
- return resolved
-
-
-def from_package(package):
- """
- Return a Traversable object for the given package.
-
- """
- spec = wrap_spec(package)
- reader = spec.loader.get_resource_reader(spec.name)
- return reader.files()
-
-
-@contextlib.contextmanager
-def _tempfile(reader, suffix=''):
- # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
- # blocks due to the need to close the temporary file to work on Windows
- # properly.
- fd, raw_path = tempfile.mkstemp(suffix=suffix)
- try:
- try:
- os.write(fd, reader())
- finally:
- os.close(fd)
- del reader
- yield pathlib.Path(raw_path)
- finally:
- try:
- os.remove(raw_path)
- except FileNotFoundError:
- pass
-
-
-@functools.singledispatch
-def as_file(path):
- """
- Given a Traversable object, return that object as a
- path on the local file system in a context manager.
- """
- return _tempfile(path.read_bytes, suffix=path.name)
-
-
-@as_file.register(pathlib.Path)
-@contextlib.contextmanager
-def _(path):
- """
- Degenerate behavior for pathlib.Path objects.
- """
- yield path
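
The `files()` / `as_file()` pair deleted above mirrors the `importlib.resources` API in the standard library (Python 3.9+). A hedged usage sketch, with "mypkg" and "data.txt" as placeholder names:

```python
# Hedged sketch: files() yields a Traversable; as_file() materializes it as a
# real filesystem path for the duration of the "with" block. Package and
# resource names here are placeholders, not taken from the diff.
from importlib.resources import files, as_file

resource = files("mypkg") / "data.txt"
text = resource.read_text(encoding="utf-8")

with as_file(resource) as path:
    print(path)  # a pathlib.Path that is valid inside this block
```
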
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py
deleted file mode 100644
index cce05582ffc6fe6d72027194f4ccc44ee42f1fcd..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from itertools import filterfalse
-
-from typing import (
- Callable,
- Iterable,
- Iterator,
- Optional,
- Set,
- TypeVar,
- Union,
-)
-
-# Type and type variable definitions
-_T = TypeVar('_T')
-_U = TypeVar('_U')
-
-
-def unique_everseen(
- iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
-) -> Iterator[_T]:
- "List unique elements, preserving order. Remember all elements ever seen."
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D
- # unique_everseen('ABBCcAD', str.lower) --> A B C D
- seen: Set[Union[_T, _U]] = set()
- seen_add = seen.add
- if key is None:
- for element in filterfalse(seen.__contains__, iterable):
- seen_add(element)
- yield element
- else:
- for element in iterable:
- k = key(element)
- if k not in seen:
- seen_add(k)
- yield element
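
The `unique_everseen()` recipe above behaves like the classic itertools recipe; a quick check of the two docstring examples, assuming the function is imported from this vendored module or copied into scope:

```python
# Quick check of the docstring examples above (assumes unique_everseen is in scope).
print(list(unique_everseen("AAAABBBCCDAABBB")))         # ['A', 'B', 'C', 'D']
print(list(unique_everseen("ABBCcAD", key=str.lower)))  # ['A', 'B', 'C', 'D']
```
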
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/__init__.py
deleted file mode 100644
index 3c50c5dcfeeda2efed282200a5c5cc8c5f7542f7..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from .__about__ import (
- __author__,
- __copyright__,
- __email__,
- __license__,
- __summary__,
- __title__,
- __uri__,
- __version__,
-)
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
diff --git a/spaces/BramVanroy/llama-2-13b-chat-dutch-space/style.css b/spaces/BramVanroy/llama-2-13b-chat-dutch-space/style.css
deleted file mode 100644
index 1d4e6a49d38e319dc5f856d3f1cf820580c977d4..0000000000000000000000000000000000000000
--- a/spaces/BramVanroy/llama-2-13b-chat-dutch-space/style.css
+++ /dev/null
@@ -1,9 +0,0 @@
-h1 {
- text-align: center;
-}
-
-#component-0 {
- max-width: 900px;
- margin: auto;
- padding-top: 1.5rem;
-}
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/merge.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/merge.h
deleted file mode 100644
index 6cd314dc7f323b84cdd9fab46587dca3d6c6f460..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/merge.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file merge.h
- * \brief Sequential implementation of merge algorithms.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/sequential/execution_policy.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace sequential
-{
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputIterator, typename StrictWeakOrdering>
-__host__ __device__
-OutputIterator merge(sequential::execution_policy<DerivedPolicy> &exec,
- InputIterator1 first1,
- InputIterator1 last1,
- InputIterator2 first2,
- InputIterator2 last2,
- OutputIterator result,
- StrictWeakOrdering comp);
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename InputIterator3, typename InputIterator4, typename OutputIterator1, typename OutputIterator2, typename StrictWeakOrdering>
-__host__ __device__
-thrust::pair<OutputIterator1,OutputIterator2>
- merge_by_key(sequential::execution_policy<DerivedPolicy> &exec,
- InputIterator1 keys_first1,
- InputIterator1 keys_last1,
- InputIterator2 keys_first2,
- InputIterator2 keys_last2,
- InputIterator3 values_first1,
- InputIterator4 values_first2,
- OutputIterator1 keys_result,
- OutputIterator2 values_result,
- StrictWeakOrdering comp);
-
-
-} // end namespace sequential
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/sequential/merge.inl>
-
diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/score_hlr_sampler.py b/spaces/CVPR/WALT/mmdet/core/bbox/samplers/score_hlr_sampler.py
deleted file mode 100644
index 11d46b97705db60fb6a4eb5fa7da10ac78acb8bc..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/score_hlr_sampler.py
+++ /dev/null
@@ -1,264 +0,0 @@
-import torch
-from mmcv.ops import nms_match
-
-from ..builder import BBOX_SAMPLERS
-from ..transforms import bbox2roi
-from .base_sampler import BaseSampler
-from .sampling_result import SamplingResult
-
-
-@BBOX_SAMPLERS.register_module()
-class ScoreHLRSampler(BaseSampler):
- r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
- Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
-
- Score hierarchical local rank (HLR) differentiates with RandomSampler in
- negative part. It firstly computes Score-HLR in a two-step way,
- then linearly maps score hlr to the loss weights.
-
- Args:
- num (int): Total number of sampled RoIs.
- pos_fraction (float): Fraction of positive samples.
- context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
- neg_pos_ub (int): Upper bound of the ratio of num negative to num
- positive, -1 means no upper bound.
- add_gt_as_proposals (bool): Whether to add ground truth as proposals.
- k (float): Power of the non-linear mapping.
- bias (float): Shift of the non-linear mapping.
- score_thr (float): Minimum score that a negative sample is to be
- considered as valid bbox.
- """
-
- def __init__(self,
- num,
- pos_fraction,
- context,
- neg_pos_ub=-1,
- add_gt_as_proposals=True,
- k=0.5,
- bias=0,
- score_thr=0.05,
- iou_thr=0.5,
- **kwargs):
- super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
- self.k = k
- self.bias = bias
- self.score_thr = score_thr
- self.iou_thr = iou_thr
- self.context = context
- # context of cascade detectors is a list, so distinguish them here.
- if not hasattr(context, 'num_stages'):
- self.bbox_roi_extractor = context.bbox_roi_extractor
- self.bbox_head = context.bbox_head
- self.with_shared_head = context.with_shared_head
- if self.with_shared_head:
- self.shared_head = context.shared_head
- else:
- self.bbox_roi_extractor = context.bbox_roi_extractor[
- context.current_stage]
- self.bbox_head = context.bbox_head[context.current_stage]
-
- @staticmethod
- def random_choice(gallery, num):
- """Randomly select some elements from the gallery.
-
- If `gallery` is a Tensor, the returned indices will be a Tensor;
- If `gallery` is a ndarray or list, the returned indices will be a
- ndarray.
-
- Args:
- gallery (Tensor | ndarray | list): indices pool.
- num (int): expected sample num.
-
- Returns:
- Tensor or ndarray: sampled indices.
- """
- assert len(gallery) >= num
-
- is_tensor = isinstance(gallery, torch.Tensor)
- if not is_tensor:
- if torch.cuda.is_available():
- device = torch.cuda.current_device()
- else:
- device = 'cpu'
- gallery = torch.tensor(gallery, dtype=torch.long, device=device)
- perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
- rand_inds = gallery[perm]
- if not is_tensor:
- rand_inds = rand_inds.cpu().numpy()
- return rand_inds
-
- def _sample_pos(self, assign_result, num_expected, **kwargs):
- """Randomly sample some positive samples."""
- pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
- if pos_inds.numel() <= num_expected:
- return pos_inds
- else:
- return self.random_choice(pos_inds, num_expected)
-
- def _sample_neg(self,
- assign_result,
- num_expected,
- bboxes,
- feats=None,
- img_meta=None,
- **kwargs):
- """Sample negative samples.
-
- Score-HLR sampler is done in the following steps:
- 1. Take the maximum positive score prediction of each negative samples
- as s_i.
- 2. Filter out negative samples whose s_i <= score_thr; the remaining samples
- are called valid samples.
- 3. Use NMS-Match to divide valid samples into different groups,
- samples in the same group will greatly overlap with each other
- 4. Rank the matched samples in two-steps to get Score-HLR.
- (1) In the same group, rank samples with their scores.
- (2) In the same score rank across different groups,
- rank samples with their scores again.
- 5. Linearly map Score-HLR to the final label weights.
-
- Args:
- assign_result (:obj:`AssignResult`): result of assigner.
- num_expected (int): Expected number of samples.
- bboxes (Tensor): bbox to be sampled.
- feats (Tensor): Features come from FPN.
- img_meta (dict): Meta information dictionary.
- """
- neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
- num_neg = neg_inds.size(0)
- if num_neg == 0:
- return neg_inds, None
- with torch.no_grad():
- neg_bboxes = bboxes[neg_inds]
- neg_rois = bbox2roi([neg_bboxes])
- bbox_result = self.context._bbox_forward(feats, neg_rois)
- cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
- 'bbox_pred']
-
- ori_loss = self.bbox_head.loss(
- cls_score=cls_score,
- bbox_pred=None,
- rois=None,
- labels=neg_inds.new_full((num_neg, ),
- self.bbox_head.num_classes),
- label_weights=cls_score.new_ones(num_neg),
- bbox_targets=None,
- bbox_weights=None,
- reduction_override='none')['loss_cls']
-
- # filter out samples with the max score lower than score_thr
- max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
- valid_inds = (max_score > self.score_thr).nonzero().view(-1)
- invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
- num_valid = valid_inds.size(0)
- num_invalid = invalid_inds.size(0)
-
- num_expected = min(num_neg, num_expected)
- num_hlr = min(num_valid, num_expected)
- num_rand = num_expected - num_hlr
- if num_valid > 0:
- valid_rois = neg_rois[valid_inds]
- valid_max_score = max_score[valid_inds]
- valid_argmax_score = argmax_score[valid_inds]
- valid_bbox_pred = bbox_pred[valid_inds]
-
- # valid_bbox_pred shape: [num_valid, #num_classes, 4]
- valid_bbox_pred = valid_bbox_pred.view(
- valid_bbox_pred.size(0), -1, 4)
- selected_bbox_pred = valid_bbox_pred[range(num_valid),
- valid_argmax_score]
- pred_bboxes = self.bbox_head.bbox_coder.decode(
- valid_rois[:, 1:], selected_bbox_pred)
- pred_bboxes_with_score = torch.cat(
- [pred_bboxes, valid_max_score[:, None]], -1)
- group = nms_match(pred_bboxes_with_score, self.iou_thr)
-
- # imp: importance
- imp = cls_score.new_zeros(num_valid)
- for g in group:
- g_score = valid_max_score[g]
- # g_score has already sorted
- rank = g_score.new_tensor(range(g_score.size(0)))
- imp[g] = num_valid - rank + g_score
- _, imp_rank_inds = imp.sort(descending=True)
- _, imp_rank = imp_rank_inds.sort()
- hlr_inds = imp_rank_inds[:num_expected]
-
- if num_rand > 0:
- rand_inds = torch.randperm(num_invalid)[:num_rand]
- select_inds = torch.cat(
- [valid_inds[hlr_inds], invalid_inds[rand_inds]])
- else:
- select_inds = valid_inds[hlr_inds]
-
- neg_label_weights = cls_score.new_ones(num_expected)
-
- up_bound = max(num_expected, num_valid)
- imp_weights = (up_bound -
- imp_rank[hlr_inds].float()) / up_bound
- neg_label_weights[:num_hlr] = imp_weights
- neg_label_weights[num_hlr:] = imp_weights.min()
- neg_label_weights = (self.bias +
- (1 - self.bias) * neg_label_weights).pow(
- self.k)
- ori_selected_loss = ori_loss[select_inds]
- new_loss = ori_selected_loss * neg_label_weights
- norm_ratio = ori_selected_loss.sum() / new_loss.sum()
- neg_label_weights *= norm_ratio
- else:
- neg_label_weights = cls_score.new_ones(num_expected)
- select_inds = torch.randperm(num_neg)[:num_expected]
-
- return neg_inds[select_inds], neg_label_weights
-
- def sample(self,
- assign_result,
- bboxes,
- gt_bboxes,
- gt_labels=None,
- img_meta=None,
- **kwargs):
- """Sample positive and negative bboxes.
-
- This is a simple implementation of bbox sampling given candidates,
- assigning results and ground truth bboxes.
-
- Args:
- assign_result (:obj:`AssignResult`): Bbox assigning results.
- bboxes (Tensor): Boxes to be sampled from.
- gt_bboxes (Tensor): Ground truth bboxes.
- gt_labels (Tensor, optional): Class labels of ground truth bboxes.
-
- Returns:
- tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative
- label weights.
- """
- bboxes = bboxes[:, :4]
-
- gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
- if self.add_gt_as_proposals:
- bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
- assign_result.add_gt_(gt_labels)
- gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
- gt_flags = torch.cat([gt_ones, gt_flags])
-
- num_expected_pos = int(self.num * self.pos_fraction)
- pos_inds = self.pos_sampler._sample_pos(
- assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
- num_sampled_pos = pos_inds.numel()
- num_expected_neg = self.num - num_sampled_pos
- if self.neg_pos_ub >= 0:
- _pos = max(1, num_sampled_pos)
- neg_upper_bound = int(self.neg_pos_ub * _pos)
- if num_expected_neg > neg_upper_bound:
- num_expected_neg = neg_upper_bound
- neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
- assign_result,
- num_expected_neg,
- bboxes,
- img_meta=img_meta,
- **kwargs)
-
- return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
- assign_result, gt_flags), neg_label_weights
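
In MMDetection, a sampler like the `ScoreHLRSampler` deleted above is normally selected through the registry from the training config, and the RoI head passes itself in as `context` when it builds the sampler. A hedged sketch of what such a config fragment might look like; the field values are illustrative, not taken from the diff:

```python
# Hedged illustration only: keys mirror the constructor arguments of the
# sampler above; the numeric values are examples, not the repository's settings.
train_cfg = dict(
    rcnn=dict(
        sampler=dict(
            type='ScoreHLRSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True,
            k=0.5,
            bias=0.0,
            score_thr=0.05,
            iou_thr=0.5,
        ),
    ),
)
```
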
diff --git a/spaces/CVPR/WALT/mmdet/models/backbones/resnext.py b/spaces/CVPR/WALT/mmdet/models/backbones/resnext.py
deleted file mode 100644
index 6dbcbd516fd308b1d703eecb83ab275f6b159516..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/backbones/resnext.py
+++ /dev/null
@@ -1,153 +0,0 @@
-import math
-
-from mmcv.cnn import build_conv_layer, build_norm_layer
-
-from ..builder import BACKBONES
-from ..utils import ResLayer
-from .resnet import Bottleneck as _Bottleneck
-from .resnet import ResNet
-
-
-class Bottleneck(_Bottleneck):
- expansion = 4
-
- def __init__(self,
- inplanes,
- planes,
- groups=1,
- base_width=4,
- base_channels=64,
- **kwargs):
- """Bottleneck block for ResNeXt.
-
- If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
- it is "caffe", the stride-two layer is the first 1x1 conv layer.
- """
- super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
-
- if groups == 1:
- width = self.planes
- else:
- width = math.floor(self.planes *
- (base_width / base_channels)) * groups
-
- self.norm1_name, norm1 = build_norm_layer(
- self.norm_cfg, width, postfix=1)
- self.norm2_name, norm2 = build_norm_layer(
- self.norm_cfg, width, postfix=2)
- self.norm3_name, norm3 = build_norm_layer(
- self.norm_cfg, self.planes * self.expansion, postfix=3)
-
- self.conv1 = build_conv_layer(
- self.conv_cfg,
- self.inplanes,
- width,
- kernel_size=1,
- stride=self.conv1_stride,
- bias=False)
- self.add_module(self.norm1_name, norm1)
- fallback_on_stride = False
- self.with_modulated_dcn = False
- if self.with_dcn:
- fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
- if not self.with_dcn or fallback_on_stride:
- self.conv2 = build_conv_layer(
- self.conv_cfg,
- width,
- width,
- kernel_size=3,
- stride=self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- groups=groups,
- bias=False)
- else:
- assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
- self.conv2 = build_conv_layer(
- self.dcn,
- width,
- width,
- kernel_size=3,
- stride=self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- groups=groups,
- bias=False)
-
- self.add_module(self.norm2_name, norm2)
- self.conv3 = build_conv_layer(
- self.conv_cfg,
- width,
- self.planes * self.expansion,
- kernel_size=1,
- bias=False)
- self.add_module(self.norm3_name, norm3)
-
- if self.with_plugins:
- self._del_block_plugins(self.after_conv1_plugin_names +
- self.after_conv2_plugin_names +
- self.after_conv3_plugin_names)
- self.after_conv1_plugin_names = self.make_block_plugins(
- width, self.after_conv1_plugins)
- self.after_conv2_plugin_names = self.make_block_plugins(
- width, self.after_conv2_plugins)
- self.after_conv3_plugin_names = self.make_block_plugins(
- self.planes * self.expansion, self.after_conv3_plugins)
-
- def _del_block_plugins(self, plugin_names):
- """delete plugins for block if exist.
-
- Args:
- plugin_names (list[str]): List of plugins name to delete.
- """
- assert isinstance(plugin_names, list)
- for plugin_name in plugin_names:
- del self._modules[plugin_name]
-
-
-@BACKBONES.register_module()
-class ResNeXt(ResNet):
- """ResNeXt backbone.
-
- Args:
- depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
- in_channels (int): Number of input image channels. Default: 3.
- num_stages (int): Resnet stages. Default: 4.
- groups (int): Group of resnext.
- base_width (int): Base width of resnext.
- strides (Sequence[int]): Strides of the first block of each stage.
- dilations (Sequence[int]): Dilation of each stage.
- out_indices (Sequence[int]): Output from which stages.
- style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
- layer is the 3x3 conv layer, otherwise the stride-two layer is
- the first 1x1 conv layer.
- frozen_stages (int): Stages to be frozen (all param fixed). -1 means
- not freezing any parameters.
- norm_cfg (dict): dictionary to construct and config norm layer.
- norm_eval (bool): Whether to set norm layers to eval mode, namely,
- freeze running stats (mean and var). Note: Effect on Batch Norm
- and its variants only.
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
- memory while slowing down the training speed.
- zero_init_residual (bool): whether to use zero init for last norm layer
- in resblocks to let them behave as identity.
- """
-
- arch_settings = {
- 50: (Bottleneck, (3, 4, 6, 3)),
- 101: (Bottleneck, (3, 4, 23, 3)),
- 152: (Bottleneck, (3, 8, 36, 3))
- }
-
- def __init__(self, groups=1, base_width=4, **kwargs):
- self.groups = groups
- self.base_width = base_width
- super(ResNeXt, self).__init__(**kwargs)
-
- def make_res_layer(self, **kwargs):
- """Pack all blocks in a stage into a ``ResLayer``"""
- return ResLayer(
- groups=self.groups,
- base_width=self.base_width,
- base_channels=self.base_channels,
- **kwargs)
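
The grouped-convolution width in `Bottleneck.__init__` above comes from `floor(planes * base_width / base_channels) * groups`. A small worked example with the common ResNeXt-50 32x4d settings; the values are chosen for illustration:

```python
# Worked example of the width formula used in the Bottleneck above.
import math

planes, groups, base_width, base_channels = 64, 32, 4, 64
width = math.floor(planes * (base_width / base_channels)) * groups
print(width)  # 128: the 3x3 grouped conv in the first stage runs on 128 channels
```
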
diff --git a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_text.py b/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_text.py
deleted file mode 100644
index fea5ebfc05d466c7cb5711b5ac10e2ea102ddc45..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_text.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Generated by CodiumAI
-
-import requests
-
-from autogpt.commands.web_requests import scrape_text
-
-"""
-Code Analysis
-
-Objective:
-The objective of the "scrape_text" function is to scrape the text content from
-a given URL and return it as a string, after removing any unwanted HTML tags and scripts.
-
-Inputs:
-- url: a string representing the URL of the webpage to be scraped.
-
-Flow:
-1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
-2. Check if the response contains an HTTP error. If it does, return an error message.
-3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags.
-4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup.
-5. Split the text into lines and then into chunks, removing any extra whitespace.
-6. Join the chunks into a single string with newline characters between them.
-7. Return the cleaned text.
-
-Outputs:
-- A string representing the cleaned text content of the webpage.
-
-Additional aspects:
-- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively.
-- The function removes script and style tags from the HTML to avoid including unwanted content in the text output.
-- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
-"""
-
-
-class TestScrapeText:
- # Tests that scrape_text() returns the expected text when given a valid URL.
- def test_scrape_text_with_valid_url(self, mocker):
- # Mock the requests.get() method to return a response with expected text
- expected_text = "This is some sample text"
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = f"
{expected_text}
"
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a valid URL and assert that it returns the expected text
- url = "http://www.example.com"
- assert scrape_text(url) == expected_text
-
- # Tests that the function returns an error message when an invalid or unreachable url is provided.
- def test_invalid_url(self, mocker):
- # Mock the requests.get() method to raise an exception
- mocker.patch(
- "requests.Session.get", side_effect=requests.exceptions.RequestException
- )
-
- # Call the function with an invalid URL and assert that it returns an error message
- url = "http://www.invalidurl.com"
- error_message = scrape_text(url)
- assert "Error:" in error_message
-
- # Tests that the function returns an empty string when the html page contains no text to be scraped.
- def test_no_text(self, mocker):
- # Mock the requests.get() method to return a response with no text
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = ""
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a valid URL and assert that it returns an empty string
- url = "http://www.example.com"
- assert scrape_text(url) == ""
-
- # Tests that the function returns an error message when the response status code is an http error (>=400).
- def test_http_error(self, mocker):
- # Mock the requests.get() method to return a response with a 404 status code
- mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))
-
- # Call the function with a URL
- result = scrape_text("https://www.example.com")
-
- # Check that the function returns an error message
- assert result == "Error: HTTP 404 error"
-
- # Tests that scrape_text() properly handles HTML tags.
- def test_scrape_text_with_html_tags(self, mocker):
- # Create a mock response object with HTML containing tags
- html = "
This is bold text.
"
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = html
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a URL
- result = scrape_text("https://www.example.com")
-
- # Check that the function properly handles HTML tags
- assert result == "This is bold text."
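
The "Code Analysis" block at the top of the deleted test file describes the flow of `scrape_text()`: fetch the page, strip `<script>` and `<style>` tags, then collapse whitespace. A hedged sketch of that flow; it is not AutoGPT's actual implementation, and the session and user-agent handling is simplified:

```python
# Hedged sketch of the described flow; simplified relative to the real function.
import requests
from bs4 import BeautifulSoup

def scrape_text_sketch(url: str) -> str:
    response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=30)
    if response.status_code >= 400:
        return f"Error: HTTP {response.status_code} error"
    soup = BeautifulSoup(response.text, "html.parser")
    for tag in soup(["script", "style"]):
        tag.extract()
    lines = (line.strip() for line in soup.get_text().splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    return "\n".join(chunk for chunk in chunks if chunk)
```
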
diff --git a/spaces/CikeyQI/QQsign/README.md b/spaces/CikeyQI/QQsign/README.md
deleted file mode 100644
index bd56881a2a7709591343e2f15af9a6a8133e115b..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/QQsign/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: QQsign
-emoji: 🦀
-colorFrom: blue
-colorTo: purple
-sdk: docker
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/CofAI/CurrencyConverter/index.html b/spaces/CofAI/CurrencyConverter/index.html
deleted file mode 100644
index 7706c1ff4b3734e10eae58f3e5224946c4f023b3..0000000000000000000000000000000000000000
--- a/spaces/CofAI/CurrencyConverter/index.html
+++ /dev/null
@@ -1,74 +0,0 @@
- Currency Converter 💱 Currency Converter
-by ☕ CofAI
-
-
-
♾️ Result:
-
-
-
-
\ No newline at end of file
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/__init__.py
deleted file mode 100644
index 2bb8f6d7f10e23ca93e96386d282c2c650669a42..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/__init__.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""Pillow (Fork of the Python Imaging Library)
-
-Pillow is the friendly PIL fork by Jeffrey A. Clark (Alex) and contributors.
- https://github.com/python-pillow/Pillow/
-
-Pillow is forked from PIL 1.1.7.
-
-PIL is the Python Imaging Library by Fredrik Lundh and contributors.
-Copyright (c) 1999 by Secret Labs AB.
-
-Use PIL.__version__ for this Pillow version.
-
-;-)
-"""
-
-from . import _version
-
-# VERSION was removed in Pillow 6.0.0.
-# PILLOW_VERSION was removed in Pillow 9.0.0.
-# Use __version__ instead.
-__version__ = _version.__version__
-del _version
-
-
-_plugins = [
- "BlpImagePlugin",
- "BmpImagePlugin",
- "BufrStubImagePlugin",
- "CurImagePlugin",
- "DcxImagePlugin",
- "DdsImagePlugin",
- "EpsImagePlugin",
- "FitsImagePlugin",
- "FliImagePlugin",
- "FpxImagePlugin",
- "FtexImagePlugin",
- "GbrImagePlugin",
- "GifImagePlugin",
- "GribStubImagePlugin",
- "Hdf5StubImagePlugin",
- "IcnsImagePlugin",
- "IcoImagePlugin",
- "ImImagePlugin",
- "ImtImagePlugin",
- "IptcImagePlugin",
- "JpegImagePlugin",
- "Jpeg2KImagePlugin",
- "McIdasImagePlugin",
- "MicImagePlugin",
- "MpegImagePlugin",
- "MpoImagePlugin",
- "MspImagePlugin",
- "PalmImagePlugin",
- "PcdImagePlugin",
- "PcxImagePlugin",
- "PdfImagePlugin",
- "PixarImagePlugin",
- "PngImagePlugin",
- "PpmImagePlugin",
- "PsdImagePlugin",
- "QoiImagePlugin",
- "SgiImagePlugin",
- "SpiderImagePlugin",
- "SunImagePlugin",
- "TgaImagePlugin",
- "TiffImagePlugin",
- "WebPImagePlugin",
- "WmfImagePlugin",
- "XbmImagePlugin",
- "XpmImagePlugin",
- "XVThumbImagePlugin",
-]
-
-
-class UnidentifiedImageError(OSError):
- """
- Raised in :py:meth:`PIL.Image.open` if an image cannot be opened and identified.
-
- If a PNG image raises this error, setting :data:`.ImageFile.LOAD_TRUNCATED_IMAGES`
- to true may allow the image to be opened after all. The setting will ignore missing
- data and checksum failures.
- """
-
- pass
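
`UnidentifiedImageError` defined above is raised by `PIL.Image.open()` when a file cannot be identified as an image. A hedged usage example; the path is a placeholder:

```python
# Hedged example; "maybe_image.bin" is a placeholder path.
from PIL import Image, UnidentifiedImageError

try:
    with Image.open("maybe_image.bin") as im:
        im.verify()  # cheap integrity check without fully decoding the image
except UnidentifiedImageError:
    print("Not a recognised image format")
except FileNotFoundError:
    print("File does not exist")
```
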
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/legacy.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/legacy.py
deleted file mode 100644
index 43aad21a9dd1c08c8d31e38908485d46b14efbd2..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/legacy.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from typing import Any, Dict, Optional, Union
-from warnings import warn
-
-from .api import from_bytes
-from .constant import CHARDET_CORRESPONDENCE
-
-
-def detect(
- byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
-) -> Dict[str, Optional[Union[str, float]]]:
- """
- chardet legacy method
- Detect the encoding of the given byte string. It should be mostly backward-compatible.
- Encoding name will match Chardet own writing whenever possible. (Not on encoding name unsupported by it)
- This function is deprecated and should be used to migrate your project easily, consult the documentation for
- further information. Not planned for removal.
-
- :param byte_str: The byte sequence to examine.
- :param should_rename_legacy: Should we rename legacy encodings
- to their more modern equivalents?
- """
- if len(kwargs):
- warn(
- f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()"
- )
-
- if not isinstance(byte_str, (bytearray, bytes)):
- raise TypeError( # pragma: nocover
- "Expected object of type bytes or bytearray, got: "
- "{0}".format(type(byte_str))
- )
-
- if isinstance(byte_str, bytearray):
- byte_str = bytes(byte_str)
-
- r = from_bytes(byte_str).best()
-
- encoding = r.encoding if r is not None else None
- language = r.language if r is not None and r.language != "Unknown" else ""
- confidence = 1.0 - r.chaos if r is not None else None
-
- # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process
- # but chardet does return 'utf-8-sig' and it is a valid codec name.
- if r is not None and encoding == "utf_8" and r.bom:
- encoding += "_sig"
-
- if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
- encoding = CHARDET_CORRESPONDENCE[encoding]
-
- return {
- "encoding": encoding,
- "language": language,
- "confidence": confidence,
- }
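
The `detect()` wrapper above is the chardet-compatible entry point of charset_normalizer and returns a dict with `encoding`, `language`, and `confidence` keys. A short usage sketch:

```python
# Usage sketch of the chardet-compatible wrapper defined above.
from charset_normalizer import detect

result = detect("Comment ça va ?".encode("utf_8"))
print(result["encoding"], result["confidence"])  # e.g. an encoding name and a float
```
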
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/exceptions.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/exceptions.py
deleted file mode 100644
index fe68a3613f74e5e82da4e3eedc7d9451977838dd..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/exceptions.py
+++ /dev/null
@@ -1,288 +0,0 @@
-import typing as t
-from gettext import gettext as _
-from gettext import ngettext
-
-from ._compat import get_text_stderr
-from .utils import echo
-from .utils import format_filename
-
-if t.TYPE_CHECKING:
- from .core import Command
- from .core import Context
- from .core import Parameter
-
-
-def _join_param_hints(
- param_hint: t.Optional[t.Union[t.Sequence[str], str]]
-) -> t.Optional[str]:
- if param_hint is not None and not isinstance(param_hint, str):
- return " / ".join(repr(x) for x in param_hint)
-
- return param_hint
-
-
-class ClickException(Exception):
- """An exception that Click can handle and show to the user."""
-
- #: The exit code for this exception.
- exit_code = 1
-
- def __init__(self, message: str) -> None:
- super().__init__(message)
- self.message = message
-
- def format_message(self) -> str:
- return self.message
-
- def __str__(self) -> str:
- return self.message
-
- def show(self, file: t.Optional[t.IO[t.Any]] = None) -> None:
- if file is None:
- file = get_text_stderr()
-
- echo(_("Error: {message}").format(message=self.format_message()), file=file)
-
-
-class UsageError(ClickException):
- """An internal exception that signals a usage error. This typically
- aborts any further handling.
-
- :param message: the error message to display.
- :param ctx: optionally the context that caused this error. Click will
- fill in the context automatically in some situations.
- """
-
- exit_code = 2
-
- def __init__(self, message: str, ctx: t.Optional["Context"] = None) -> None:
- super().__init__(message)
- self.ctx = ctx
- self.cmd: t.Optional["Command"] = self.ctx.command if self.ctx else None
-
- def show(self, file: t.Optional[t.IO[t.Any]] = None) -> None:
- if file is None:
- file = get_text_stderr()
- color = None
- hint = ""
- if (
- self.ctx is not None
- and self.ctx.command.get_help_option(self.ctx) is not None
- ):
- hint = _("Try '{command} {option}' for help.").format(
- command=self.ctx.command_path, option=self.ctx.help_option_names[0]
- )
- hint = f"{hint}\n"
- if self.ctx is not None:
- color = self.ctx.color
- echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
- echo(
- _("Error: {message}").format(message=self.format_message()),
- file=file,
- color=color,
- )
-
-
-class BadParameter(UsageError):
- """An exception that formats out a standardized error message for a
- bad parameter. This is useful when thrown from a callback or type as
- Click will attach contextual information to it (for instance, which
- parameter it is).
-
- .. versionadded:: 2.0
-
- :param param: the parameter object that caused this error. This can
- be left out, and Click will attach this info itself
- if possible.
- :param param_hint: a string that shows up as parameter name. This
- can be used as alternative to `param` in cases
- where custom validation should happen. If it is
- a string it's used as such, if it's a list then
- each item is quoted and separated.
- """
-
- def __init__(
- self,
- message: str,
- ctx: t.Optional["Context"] = None,
- param: t.Optional["Parameter"] = None,
- param_hint: t.Optional[str] = None,
- ) -> None:
- super().__init__(message, ctx)
- self.param = param
- self.param_hint = param_hint
-
- def format_message(self) -> str:
- if self.param_hint is not None:
- param_hint = self.param_hint
- elif self.param is not None:
- param_hint = self.param.get_error_hint(self.ctx) # type: ignore
- else:
- return _("Invalid value: {message}").format(message=self.message)
-
- return _("Invalid value for {param_hint}: {message}").format(
- param_hint=_join_param_hints(param_hint), message=self.message
- )
-
-
-class MissingParameter(BadParameter):
- """Raised if click required an option or argument but it was not
- provided when invoking the script.
-
- .. versionadded:: 4.0
-
- :param param_type: a string that indicates the type of the parameter.
- The default is to inherit the parameter type from
- the given `param`. Valid values are ``'parameter'``,
- ``'option'`` or ``'argument'``.
- """
-
- def __init__(
- self,
- message: t.Optional[str] = None,
- ctx: t.Optional["Context"] = None,
- param: t.Optional["Parameter"] = None,
- param_hint: t.Optional[str] = None,
- param_type: t.Optional[str] = None,
- ) -> None:
- super().__init__(message or "", ctx, param, param_hint)
- self.param_type = param_type
-
- def format_message(self) -> str:
- if self.param_hint is not None:
- param_hint: t.Optional[str] = self.param_hint
- elif self.param is not None:
- param_hint = self.param.get_error_hint(self.ctx) # type: ignore
- else:
- param_hint = None
-
- param_hint = _join_param_hints(param_hint)
- param_hint = f" {param_hint}" if param_hint else ""
-
- param_type = self.param_type
- if param_type is None and self.param is not None:
- param_type = self.param.param_type_name
-
- msg = self.message
- if self.param is not None:
- msg_extra = self.param.type.get_missing_message(self.param)
- if msg_extra:
- if msg:
- msg += f". {msg_extra}"
- else:
- msg = msg_extra
-
- msg = f" {msg}" if msg else ""
-
- # Translate param_type for known types.
- if param_type == "argument":
- missing = _("Missing argument")
- elif param_type == "option":
- missing = _("Missing option")
- elif param_type == "parameter":
- missing = _("Missing parameter")
- else:
- missing = _("Missing {param_type}").format(param_type=param_type)
-
- return f"{missing}{param_hint}.{msg}"
-
- def __str__(self) -> str:
- if not self.message:
- param_name = self.param.name if self.param else None
- return _("Missing parameter: {param_name}").format(param_name=param_name)
- else:
- return self.message
-
-
-class NoSuchOption(UsageError):
- """Raised if click attempted to handle an option that does not
- exist.
-
- .. versionadded:: 4.0
- """
-
- def __init__(
- self,
- option_name: str,
- message: t.Optional[str] = None,
- possibilities: t.Optional[t.Sequence[str]] = None,
- ctx: t.Optional["Context"] = None,
- ) -> None:
- if message is None:
- message = _("No such option: {name}").format(name=option_name)
-
- super().__init__(message, ctx)
- self.option_name = option_name
- self.possibilities = possibilities
-
- def format_message(self) -> str:
- if not self.possibilities:
- return self.message
-
- possibility_str = ", ".join(sorted(self.possibilities))
- suggest = ngettext(
- "Did you mean {possibility}?",
- "(Possible options: {possibilities})",
- len(self.possibilities),
- ).format(possibility=possibility_str, possibilities=possibility_str)
- return f"{self.message} {suggest}"
-
-
-class BadOptionUsage(UsageError):
- """Raised if an option is generally supplied but the use of the option
- was incorrect. This is for instance raised if the number of arguments
- for an option is not correct.
-
- .. versionadded:: 4.0
-
- :param option_name: the name of the option being used incorrectly.
- """
-
- def __init__(
- self, option_name: str, message: str, ctx: t.Optional["Context"] = None
- ) -> None:
- super().__init__(message, ctx)
- self.option_name = option_name
-
-
-class BadArgumentUsage(UsageError):
- """Raised if an argument is generally supplied but the use of the argument
- was incorrect. This is for instance raised if the number of values
- for an argument is not correct.
-
- .. versionadded:: 6.0
- """
-
-
-class FileError(ClickException):
- """Raised if a file cannot be opened."""
-
- def __init__(self, filename: str, hint: t.Optional[str] = None) -> None:
- if hint is None:
- hint = _("unknown error")
-
- super().__init__(hint)
- self.ui_filename: str = format_filename(filename)
- self.filename = filename
-
- def format_message(self) -> str:
- return _("Could not open file {filename!r}: {message}").format(
- filename=self.ui_filename, message=self.message
- )
-
-
-class Abort(RuntimeError):
- """An internal signalling exception that signals Click to abort."""
-
-
-class Exit(RuntimeError):
- """An exception that indicates that the application should exit with some
- status code.
-
- :param code: the status code to exit with.
- """
-
- __slots__ = ("exit_code",)
-
- def __init__(self, code: int = 0) -> None:
- self.exit_code: int = code
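-
-
-# A minimal sketch of how these exceptions surface in practice, assuming the full
-# ``click`` package is installed (they are normally raised from option callbacks):
-#
-#     import click
-#
-#     @click.command()
-#     @click.option("--count", type=int, default=1)
-#     def repeat(count):
-#         if count < 1:
-#             raise click.BadParameter("count must be >= 1", param_hint="'--count'")
-#         click.echo("ok " * count)
-#
-# Invoking ``repeat --count 0`` exits with code 2 (UsageError.exit_code) and prints
-# "Invalid value for '--count': count must be >= 1" via UsageError.show().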
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/mapping.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/mapping.py
deleted file mode 100644
index 74cc7b9f2fe118fac02379db4181c53d11fbbbea..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/mapping.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import array
-import posixpath
-import warnings
-from collections.abc import MutableMapping
-
-from .core import url_to_fs
-
-
-class FSMap(MutableMapping):
- """Wrap a FileSystem instance as a mutable wrapping.
-
- The keys of the mapping become files under the given root, and the
- values (which must be bytes) the contents of those files.
-
- Parameters
- ----------
- root: string
- prefix for all the files
- fs: FileSystem instance
- check: bool (=True)
- performs a touch at the location, to check for write access.
-
- Examples
- --------
- >>> fs = FileSystem(**parameters) # doctest: +SKIP
- >>> d = FSMap('my-data/path/', fs) # doctest: +SKIP
- or, more likely
- >>> d = fs.get_mapper('my-data/path/')
-
- >>> d['loc1'] = b'Hello World' # doctest: +SKIP
- >>> list(d.keys()) # doctest: +SKIP
- ['loc1']
- >>> d['loc1'] # doctest: +SKIP
- b'Hello World'
- """
-
- def __init__(self, root, fs, check=False, create=False, missing_exceptions=None):
- self.fs = fs
- self.root = fs._strip_protocol(root).rstrip("/")
- self._root_key_to_str = fs._strip_protocol(posixpath.join(root, "x"))[:-1]
- if missing_exceptions is None:
- missing_exceptions = (
- FileNotFoundError,
- IsADirectoryError,
- NotADirectoryError,
- )
- self.missing_exceptions = missing_exceptions
- self.check = check
- self.create = create
- if create:
- if not self.fs.exists(root):
- self.fs.mkdir(root)
- if check:
- if not self.fs.exists(root):
- raise ValueError(
- "Path %s does not exist. Create "
- " with the ``create=True`` keyword" % root
- )
- self.fs.touch(root + "/a")
- self.fs.rm(root + "/a")
-
- def clear(self):
- """Remove all keys below root - empties out mapping"""
- try:
- self.fs.rm(self.root, True)
- self.fs.mkdir(self.root)
- except: # noqa: E722
- pass
-
- def getitems(self, keys, on_error="raise"):
- """Fetch multiple items from the store
-
- If the backend is async-able, this might proceed concurrently
-
- Parameters
- ----------
- keys: list(str)
-            The keys to be fetched
- on_error : "raise", "omit", "return"
- If raise, an underlying exception will be raised (converted to KeyError
- if the type is in self.missing_exceptions); if omit, keys with exception
- will simply not be included in the output; if "return", all keys are
- included in the output, but the value will be bytes or an exception
- instance.
-
- Returns
- -------
- dict(key, bytes|exception)
- """
- keys2 = [self._key_to_str(k) for k in keys]
- oe = on_error if on_error == "raise" else "return"
- try:
- out = self.fs.cat(keys2, on_error=oe)
- if isinstance(out, bytes):
- out = {keys2[0]: out}
- except self.missing_exceptions as e:
- raise KeyError from e
- out = {
- k: (KeyError() if isinstance(v, self.missing_exceptions) else v)
- for k, v in out.items()
- }
- return {
- key: out[k2]
- for key, k2 in zip(keys, keys2)
- if on_error == "return" or not isinstance(out[k2], BaseException)
- }
-
- def setitems(self, values_dict):
- """Set the values of multiple items in the store
-
- Parameters
- ----------
- values_dict: dict(str, bytes)
- """
- values = {self._key_to_str(k): maybe_convert(v) for k, v in values_dict.items()}
- self.fs.pipe(values)
-
- def delitems(self, keys):
- """Remove multiple keys from the store"""
- self.fs.rm([self._key_to_str(k) for k in keys])
-
- def _key_to_str(self, key):
- """Generate full path for the key"""
- if not isinstance(key, str):
- # raise TypeError("key must be of type `str`, got `{type(key).__name__}`"
- warnings.warn(
- "from fsspec 2023.5 onward FSMap non-str keys will raise TypeError",
- DeprecationWarning,
- )
- if isinstance(key, list):
- key = tuple(key)
- key = str(key)
- return f"{self._root_key_to_str}{key}"
-
- def _str_to_key(self, s):
- """Strip path of to leave key name"""
- return s[len(self.root) :].lstrip("/")
-
- def __getitem__(self, key, default=None):
- """Retrieve data"""
- k = self._key_to_str(key)
- try:
- result = self.fs.cat(k)
- except self.missing_exceptions:
- if default is not None:
- return default
- raise KeyError(key)
- return result
-
- def pop(self, key, default=None):
- """Pop data"""
- result = self.__getitem__(key, default)
- try:
- del self[key]
- except KeyError:
- pass
- return result
-
- def __setitem__(self, key, value):
- """Store value in key"""
- key = self._key_to_str(key)
- self.fs.mkdirs(self.fs._parent(key), exist_ok=True)
- self.fs.pipe_file(key, maybe_convert(value))
-
- def __iter__(self):
- return (self._str_to_key(x) for x in self.fs.find(self.root))
-
- def __len__(self):
- return len(self.fs.find(self.root))
-
- def __delitem__(self, key):
- """Remove key"""
- try:
- self.fs.rm(self._key_to_str(key))
- except: # noqa: E722
- raise KeyError
-
- def __contains__(self, key):
- """Does key exist in mapping?"""
- path = self._key_to_str(key)
- return self.fs.exists(path) and self.fs.isfile(path)
-
- def __reduce__(self):
- return FSMap, (self.root, self.fs, False, False, self.missing_exceptions)
-
-
-def maybe_convert(value):
- if isinstance(value, array.array) or hasattr(value, "__array__"):
- # bytes-like things
- if hasattr(value, "dtype") and value.dtype.kind in "Mm":
-            # The buffer interface doesn't support datetime64/timedelta64 numpy
- # arrays
- value = value.view("int64")
- value = bytes(memoryview(value))
- return value
-
-
-def get_mapper(
- url="",
- check=False,
- create=False,
- missing_exceptions=None,
- alternate_root=None,
- **kwargs,
-):
- """Create key-value interface for given URL and options
-
- The URL will be of the form "protocol://location" and point to the root
- of the mapper required. All keys will be file-names below this location,
- and their values the contents of each key.
-
- Also accepts compound URLs like zip::s3://bucket/file.zip , see ``fsspec.open``.
-
- Parameters
- ----------
- url: str
- Root URL of mapping
- check: bool
- Whether to attempt to read from the location before instantiation, to
- check that the mapping does exist
- create: bool
- Whether to make the directory corresponding to the root before
- instantiating
- missing_exceptions: None or tuple
- If given, these exception types will be regarded as missing keys and
- return KeyError when trying to read data. By default, you get
- (FileNotFoundError, IsADirectoryError, NotADirectoryError)
- alternate_root: None or str
- In cases of complex URLs, the parser may fail to pick the correct part
- for the mapper root, so this arg can override
-
- Returns
- -------
- ``FSMap`` instance, the dict-like key-value store.
- """
- # Removing protocol here - could defer to each open() on the backend
- fs, urlpath = url_to_fs(url, **kwargs)
- root = alternate_root if alternate_root is not None else urlpath
- return FSMap(root, fs, check, create, missing_exceptions=missing_exceptions)
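-
-
-# A minimal usage sketch, assuming the in-memory filesystem that ships with fsspec:
-#
-#     import fsspec
-#
-#     m = fsspec.get_mapper("memory://demo")
-#     m["a/b"] = b"hello"           # stored as memory://demo/a/b
-#     assert list(m) == ["a/b"]
-#     assert m["a/b"] == b"hello"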
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_backends/base.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_backends/base.py
deleted file mode 100644
index 6cadedb5f9367536c8355b583127c4a904c3b8fa..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_backends/base.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import ssl
-import time
-import typing
-
-SOCKET_OPTION = typing.Union[
- typing.Tuple[int, int, int],
- typing.Tuple[int, int, typing.Union[bytes, bytearray]],
- typing.Tuple[int, int, None, int],
-]
-
-
-class NetworkStream:
- def read(self, max_bytes: int, timeout: typing.Optional[float] = None) -> bytes:
- raise NotImplementedError() # pragma: nocover
-
- def write(self, buffer: bytes, timeout: typing.Optional[float] = None) -> None:
- raise NotImplementedError() # pragma: nocover
-
- def close(self) -> None:
- raise NotImplementedError() # pragma: nocover
-
- def start_tls(
- self,
- ssl_context: ssl.SSLContext,
- server_hostname: typing.Optional[str] = None,
- timeout: typing.Optional[float] = None,
- ) -> "NetworkStream":
- raise NotImplementedError() # pragma: nocover
-
- def get_extra_info(self, info: str) -> typing.Any:
- return None # pragma: nocover
-
-
-class NetworkBackend:
- def connect_tcp(
- self,
- host: str,
- port: int,
- timeout: typing.Optional[float] = None,
- local_address: typing.Optional[str] = None,
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
- ) -> NetworkStream:
- raise NotImplementedError() # pragma: nocover
-
- def connect_unix_socket(
- self,
- path: str,
- timeout: typing.Optional[float] = None,
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
- ) -> NetworkStream:
- raise NotImplementedError() # pragma: nocover
-
- def sleep(self, seconds: float) -> None:
- time.sleep(seconds) # pragma: nocover
-
-
-class AsyncNetworkStream:
- async def read(
- self, max_bytes: int, timeout: typing.Optional[float] = None
- ) -> bytes:
- raise NotImplementedError() # pragma: nocover
-
- async def write(
- self, buffer: bytes, timeout: typing.Optional[float] = None
- ) -> None:
- raise NotImplementedError() # pragma: nocover
-
- async def aclose(self) -> None:
- raise NotImplementedError() # pragma: nocover
-
- async def start_tls(
- self,
- ssl_context: ssl.SSLContext,
- server_hostname: typing.Optional[str] = None,
- timeout: typing.Optional[float] = None,
- ) -> "AsyncNetworkStream":
- raise NotImplementedError() # pragma: nocover
-
- def get_extra_info(self, info: str) -> typing.Any:
- return None # pragma: nocover
-
-
-class AsyncNetworkBackend:
- async def connect_tcp(
- self,
- host: str,
- port: int,
- timeout: typing.Optional[float] = None,
- local_address: typing.Optional[str] = None,
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
- ) -> AsyncNetworkStream:
- raise NotImplementedError() # pragma: nocover
-
- async def connect_unix_socket(
- self,
- path: str,
- timeout: typing.Optional[float] = None,
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
- ) -> AsyncNetworkStream:
- raise NotImplementedError() # pragma: nocover
-
- async def sleep(self, seconds: float) -> None:
- raise NotImplementedError() # pragma: nocover
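-
-
-# A minimal sketch of implementing the synchronous interface, assuming a test
-# double that replays canned bytes instead of opening a real socket:
-#
-#     class RecordedStream(NetworkStream):
-#         def __init__(self, chunks):
-#             self._chunks = list(chunks)
-#
-#         def read(self, max_bytes, timeout=None):
-#             return self._chunks.pop(0) if self._chunks else b""
-#
-#         def write(self, buffer, timeout=None):
-#             pass
-#
-#         def close(self):
-#             pass
-#
-#     class RecordedBackend(NetworkBackend):
-#         def connect_tcp(self, host, port, timeout=None, local_address=None,
-#                         socket_options=None):
-#             return RecordedStream([b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"])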
diff --git a/spaces/DQChoi/image_sticker/app.py b/spaces/DQChoi/image_sticker/app.py
deleted file mode 100644
index 9560a6f53f4fdedb736e9dc94b1270ace5977c34..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/image_sticker/app.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import gradio as gr
-import openai
-from dotenv import load_dotenv
-import os
-import requests
-
-from transformers import BlipProcessor, BlipForConditionalGeneration
-from PIL import Image
-
-
-load_dotenv()
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-model_id = "gpt-3.5-turbo"
-
-
-def gpt_image(prompt, n) -> str:
- response = openai.Image.create(
- prompt=prompt
- + " sticker style, with templated shapes like circle, background color should be transparent",
- n=n,
- size="256x256",
- )
- return_list = []
- for res in response["data"]:
- image_url = res["url"]
- return_list.append(
- Image.open(requests.get(image_url, stream=True).raw).convert("RGB")
- )
- return return_list
-
-
-# test embed url = https://webplayer.momenti.tv/?project_id=1380564943&moment_info_id=1844984252
-
-
-def display_giv(url):
- # for GIV
- # url = f"https://api.momenti.tv/v4/media/moment_infos/{moment_info_id}/thumbnail"
-
- response = requests.get(url)
-
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
- model = BlipForConditionalGeneration.from_pretrained(
- "Salesforce/blip-image-captioning-large"
- )
-
- raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
-
- # unconditional image captioning
- inputs = processor(raw_image, return_tensors="pt")
-
- out = model.generate(**inputs)
- unconditional_caption = processor.decode(out[0], skip_special_tokens=True)
-
- sticker_image_list = gpt_image(unconditional_caption, 5)
-
- gallery_list = []
- for sticker in sticker_image_list:
- dup_image = raw_image.copy().convert("RGBA")
- # Resize the smaller image to fit within the bigger image
- small_image_resized = sticker.resize((256, 256))
-
- # Convert the image to RGBA mode (adding an alpha channel for transparency)
- image_rgba = small_image_resized.convert("RGBA")
-
- # Create a new image with a transparent background
- transparent_image = Image.new("RGBA", image_rgba.size, (0, 0, 0, 0))
-
- # Identify the background color from the non-object part of the image
- border_color = image_rgba.getpixel((1, 1))[:3]
-
- # Iterate through each pixel in the image and set the background color to transparent
- data = []
- for pixel in image_rgba.getdata():
- if (
- pixel[:3] == border_color
- ): # Check if the pixel matches the background color
- data.append(
- (pixel[0], pixel[1], pixel[2], 0)
- ) # Set alpha to 0 (transparent)
- else:
- data.append(pixel) # Keep non-background pixels unchanged
-
- # Put the modified pixel data into the transparent image
- transparent_image.putdata(data)
-
- # Get the dimensions of the bigger image
- big_width, big_height = raw_image.size
-
- # Calculate the position for placing the smaller image at the bottom
- x_position = (big_width - 256) // 2 - 128
- y_position = big_height - 256 - 128
-
- # Paste the smaller image onto the bigger image
- dup_image.paste(transparent_image, (x_position, y_position), transparent_image)
- gallery_list.append(dup_image)
-
- return gallery_list
-
-
-if __name__ == "__main__":
- url = "https://api.momenti.tv/v4/media/moment_infos/1844984252/thumbnail"
- demo = gr.Interface(
- display_giv,
- "text",
- [
- gr.Image(image_mode="RGBA"),
- gr.Image(image_mode="RGBA"),
- gr.Image(image_mode="RGBA"),
- gr.Image(image_mode="RGBA"),
- gr.Image(image_mode="RGBA"),
- ],
- examples=[[url]],
- )
- demo.launch()
diff --git a/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/imagenet.py b/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/imagenet.py
deleted file mode 100644
index 9b6d78e51f1b0c7d6e1fba2869a72a6f383e81b2..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/DescriptionGPT/detic/data/datasets/imagenet.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.data.datasets.lvis import get_lvis_instances_meta
-from .lvis_v1 import custom_load_lvis_json, get_lvis_22k_meta
-def custom_register_imagenet_instances(name, metadata, json_file, image_root):
- """
- """
- DatasetCatalog.register(name, lambda: custom_load_lvis_json(
- json_file, image_root, name))
- MetadataCatalog.get(name).set(
- json_file=json_file, image_root=image_root,
- evaluator_type="imagenet", **metadata
- )
-
-_CUSTOM_SPLITS_IMAGENET = {
- "imagenet_lvis_v1": ("imagenet/ImageNet-LVIS/", "imagenet/annotations/imagenet_lvis_image_info.json"),
-}
-
-for key, (image_root, json_file) in _CUSTOM_SPLITS_IMAGENET.items():
- custom_register_imagenet_instances(
- key,
- get_lvis_instances_meta('lvis_v1'),
- os.path.join("datasets", json_file) if "://" not in json_file else json_file,
- os.path.join("datasets", image_root),
- )
-
-
-_CUSTOM_SPLITS_IMAGENET_22K = {
- "imagenet_lvis-22k": ("imagenet/ImageNet-LVIS/", "imagenet/annotations/imagenet-22k_image_info_lvis-22k.json"),
-}
-
-for key, (image_root, json_file) in _CUSTOM_SPLITS_IMAGENET_22K.items():
- custom_register_imagenet_instances(
- key,
- get_lvis_22k_meta(),
- os.path.join("datasets", json_file) if "://" not in json_file else json_file,
- os.path.join("datasets", image_root),
- )
\ No newline at end of file
diff --git a/spaces/Diego-0121/ImaText/README.md b/spaces/Diego-0121/ImaText/README.md
deleted file mode 100644
index 183ce378a03d1b79c27817c2bdc0b20a5254c222..0000000000000000000000000000000000000000
--- a/spaces/Diego-0121/ImaText/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ImaText
-emoji: ⚡
-colorFrom: purple
-colorTo: blue
-sdk: gradio
-sdk_version: 3.47.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Dimalker/Faceswapper/roop/processors/frame/core.py b/spaces/Dimalker/Faceswapper/roop/processors/frame/core.py
deleted file mode 100644
index c225f9de483a2914a98392ce9de5bd03f2013a2d..0000000000000000000000000000000000000000
--- a/spaces/Dimalker/Faceswapper/roop/processors/frame/core.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import os
-import importlib
-import psutil
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from queue import Queue
-from types import ModuleType
-from typing import Any, List, Callable
-from tqdm import tqdm
-
-import roop
-
-FRAME_PROCESSORS_MODULES: List[ModuleType] = []
-FRAME_PROCESSORS_INTERFACE = [
- 'pre_check',
- 'pre_start',
- 'process_frame',
- 'process_frames',
- 'process_image',
- 'process_video',
- 'post_process'
-]
-
-
-def load_frame_processor_module(frame_processor: str) -> Any:
- try:
- frame_processor_module = importlib.import_module(f'roop.processors.frame.{frame_processor}')
- for method_name in FRAME_PROCESSORS_INTERFACE:
- if not hasattr(frame_processor_module, method_name):
- raise NotImplementedError
- except (ImportError, NotImplementedError):
- quit(f'Frame processor {frame_processor} crashed.')
- return frame_processor_module
-
-
-def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
- global FRAME_PROCESSORS_MODULES
-
- if not FRAME_PROCESSORS_MODULES:
- for frame_processor in frame_processors:
- frame_processor_module = load_frame_processor_module(frame_processor)
- FRAME_PROCESSORS_MODULES.append(frame_processor_module)
- return FRAME_PROCESSORS_MODULES
-
-
-def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
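-    # Split the frame paths into roughly equal chunks, one per worker thread, and
-    # let each future run the supplied process_frames callback on its chunk.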
- with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
- futures = []
- queue = create_queue(temp_frame_paths)
- queue_per_future = len(temp_frame_paths) // roop.globals.execution_threads
- while not queue.empty():
- future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
- futures.append(future)
- for future in as_completed(futures):
- future.result()
-
-
-def create_queue(temp_frame_paths: List[str]) -> Queue[str]:
- queue: Queue[str] = Queue()
- for frame_path in temp_frame_paths:
- queue.put(frame_path)
- return queue
-
-
-def pick_queue(queue: Queue[str], queue_per_future: int) -> List[str]:
- queues = []
- for _ in range(queue_per_future):
- if not queue.empty():
- queues.append(queue.get())
- return queues
-
-
-def process_video(source_path: str, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
- progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
- total = len(frame_paths)
- with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
- multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))
-
-
-def update_progress(progress: Any = None) -> None:
- process = psutil.Process(os.getpid())
- memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
- progress.set_postfix({
- 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
- 'execution_providers': roop.globals.execution_providers,
- 'execution_threads': roop.globals.execution_threads
- })
- progress.refresh()
- progress.update(1)
diff --git a/spaces/ECCV2022/bytetrack/tutorials/ctracker/eval_motchallenge.py b/spaces/ECCV2022/bytetrack/tutorials/ctracker/eval_motchallenge.py
deleted file mode 100644
index a2b51388a77bd76bfc16a0ac2740e6fcd3d86aac..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/tutorials/ctracker/eval_motchallenge.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.
-Christoph Heindl, 2017
-https://github.com/cheind/py-motmetrics
-Modified by Rufeng Zhang
-"""
-
-import argparse
-import glob
-import os
-import logging
-import motmetrics as mm
-import pandas as pd
-from collections import OrderedDict
-from pathlib import Path
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="""
-Compute metrics for trackers using MOTChallenge ground-truth data.
-Files
------
-All file content, ground truth and test files, have to comply with the
-format described in
-Milan, Anton, et al.
-"Mot16: A benchmark for multi-object tracking."
-arXiv preprint arXiv:1603.00831 (2016).
-https://motchallenge.net/
-Structure
----------
-Layout for ground truth data
-    <GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
-    <GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
-    ...
-Layout for test data
-    <TEST_ROOT>/<SEQUENCE_1>.txt
-    <TEST_ROOT>/<SEQUENCE_2>.txt
-    ...
-Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
-string.""", formatter_class=argparse.RawTextHelpFormatter)
-
- parser.add_argument('--groundtruths', type=str, help='Directory containing ground truth files.')
- parser.add_argument('--tests', type=str, help='Directory containing tracker result files')
- parser.add_argument('--score_threshold', type=float, help='Score threshold',default=0.5)
- parser.add_argument('--gt_type', type=str, default='')
- parser.add_argument('--eval_official', action='store_true')
- parser.add_argument('--loglevel', type=str, help='Log level', default='info')
- parser.add_argument('--fmt', type=str, help='Data format', default='mot15-2D')
- parser.add_argument('--solver', type=str, help='LAP solver to use')
- return parser.parse_args()
-
-
-def compare_dataframes(gts, ts):
- accs = []
- names = []
- for k, tsacc in ts.items():
- if k in gts:
- logging.info('Comparing {}...'.format(k))
- accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))
- names.append(k)
- else:
- logging.warning('No ground truth for {}, skipping.'.format(k))
-
- return accs, names
-
-
-if __name__ == '__main__':
-
- args = parse_args()
-
- loglevel = getattr(logging, args.loglevel.upper(), None)
- if not isinstance(loglevel, int):
- raise ValueError('Invalid log level: {} '.format(args.loglevel))
- logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')
-
- if args.solver:
- mm.lap.default_solver = args.solver
-
- gt_type = args.gt_type
- print('gt_type', gt_type)
- gtfiles = glob.glob(
- os.path.join(args.groundtruths, '*/gt/gt_{}.txt'.format(gt_type)))
- print('gt_files', gtfiles)
- tsfiles = [f for f in glob.glob(os.path.join(args.tests, '*.txt')) if not os.path.basename(f).startswith('eval')]
-
- logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
- logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
- logging.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
- logging.info('Loading files.')
-
- gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt=args.fmt, min_confidence=1)) for f in gtfiles])
- ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=args.fmt, min_confidence=args.score_threshold)) for f in tsfiles])
-# ts = gt
-
- mh = mm.metrics.create()
- accs, names = compare_dataframes(gt, ts)
-
- logging.info('Running metrics')
- metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked',
- 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses',
- 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']
- summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
- # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
- # print(mm.io.render_summary(
- # summary, formatters=mh.formatters,
- # namemap=mm.io.motchallenge_metric_names))
- div_dict = {
- 'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'],
- 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}
- for divisor in div_dict:
- for divided in div_dict[divisor]:
- summary[divided] = (summary[divided] / summary[divisor])
- fmt = mh.formatters
- change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked',
- 'partially_tracked', 'mostly_lost']
- for k in change_fmt_list:
- fmt[k] = fmt['mota']
- print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))
- if args.eval_official:
- metrics = mm.metrics.motchallenge_metrics + ['num_objects']
- summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
- print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
- logging.info('Completed')
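-
-# Example invocation (paths are placeholders), assuming an MOT-style layout of
-# <GT_ROOT>/<SEQ>/gt/gt_<type>.txt ground truth and <TEST_ROOT>/<SEQ>.txt results:
-#
-#     python eval_motchallenge.py --groundtruths <GT_ROOT> --tests <TEST_ROOT> \
-#         --gt_type val_half --score_threshold 0.5 --eval_official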
diff --git a/spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/README.md b/spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/README.md
deleted file mode 100644
index fb45a36b5909585aa964f2033762ee59b55526b0..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# External Colab Code
-Code used to make Google Colab work correctly
-- Repo link: https://github.com/IAHispano/Applio-RVC-Fork/
-
-Thanks to https://github.com/kalomaze/externalcolabcode
-
diff --git a/spaces/EinsteinCoder/sf-voicebot/Dockerfile b/spaces/EinsteinCoder/sf-voicebot/Dockerfile
deleted file mode 100644
index 3e50f740b3f983dfec742dd5741ccb0f97ea2a8a..0000000000000000000000000000000000000000
--- a/spaces/EinsteinCoder/sf-voicebot/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-COPY . .
-
-CMD ["flask", "run", "--host", "0.0.0.0", "--port", "5050"]
\ No newline at end of file
diff --git a/spaces/EsoCode/text-generation-webui/docs/llama.cpp-models.md b/spaces/EsoCode/text-generation-webui/docs/llama.cpp-models.md
deleted file mode 100644
index bcf3c046acc7f38dba16d275a8ecbee1bb579a05..0000000000000000000000000000000000000000
--- a/spaces/EsoCode/text-generation-webui/docs/llama.cpp-models.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Using llama.cpp in the web UI
-
-## Setting up the models
-
-#### Pre-converted
-
-Place the model in the `models` folder, making sure that its name contains `ggml` somewhere and ends in `.bin`.
-
-#### Convert LLaMA yourself
-
-Follow the instructions in the llama.cpp README to generate the `ggml-model.bin` file: https://github.com/ggerganov/llama.cpp#usage
-
-## GPU acceleration
-
-Enabled with the `--n-gpu-layers` parameter.
-
-* If you have enough VRAM, use a high number like `--n-gpu-layers 200000` to offload all layers to the GPU.
-* Otherwise, start with a low number like `--n-gpu-layers 10` and then gradually increase it until you run out of memory.
-
-To use this feature, you need to manually compile and install `llama-cpp-python` with GPU support.
-
-#### Linux
-
-```
-pip uninstall -y llama-cpp-python
-CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
-```
-
-#### Windows
-
-```
-pip uninstall -y llama-cpp-python
-set CMAKE_ARGS="-DLLAMA_CUBLAS=on"
-set FORCE_CMAKE=1
-pip install llama-cpp-python --no-cache-dir
-```
-
-#### macOS
-
-```
-pip uninstall -y llama-cpp-python
-CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
-```
-
-Here you can find the different compilation options for OpenBLAS / cuBLAS / CLBlast: https://pypi.org/project/llama-cpp-python/
-
-## Performance
-
-This was the performance of llama-7b int4 on my i5-12400F (cpu only):
-
-> Output generated in 33.07 seconds (6.05 tokens/s, 200 tokens, context 17)
-
-You can change the number of threads with `--threads N`.
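-
-A minimal `llama-cpp-python` sketch (assuming the package was compiled as above and a GGML model sits at `models/ggml-model.bin`) to confirm GPU offloading outside the web UI:
-
-```
-from llama_cpp import Llama
-
-llm = Llama(model_path="models/ggml-model.bin", n_gpu_layers=32, n_threads=8)
-output = llm("Q: Name the planets in the solar system. A:", max_tokens=64)
-print(output["choices"][0]["text"])
-```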
diff --git a/spaces/Flux9665/IMS-Toucan/Models/HiFiGAN_combined/__init__.py b/spaces/Flux9665/IMS-Toucan/Models/HiFiGAN_combined/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Francesco/FairytaleDJ/app.py b/spaces/Francesco/FairytaleDJ/app.py
deleted file mode 100644
index 458ec32ff6d1874891ffb343aca68c38d12b6175..0000000000000000000000000000000000000000
--- a/spaces/Francesco/FairytaleDJ/app.py
+++ /dev/null
@@ -1,182 +0,0 @@
-from pathlib import Path
-
-import streamlit as st
-from dotenv import load_dotenv
-from langchain.chains import LLMChain
-from langchain.prompts import PromptTemplate
-
-load_dotenv()
-import os
-from typing import List, Tuple
-
-import numpy as np
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.schema import Document
-
-from data import load_db
-from names import DATASET_ID, MODEL_ID
-from storage import RedisStorage, UserInput
-from utils import weighted_random_sample
-
-
-class RetrievalType:
- FIRST_MATCH = "first-match"
- POOL_MATCHES = "pool-matches"
-
-
-Matches = List[Tuple[Document, float]]
-USE_STORAGE = os.environ.get("USE_STORAGE", "True").lower() in ("true", "t", "1")
-
-print("USE_STORAGE", USE_STORAGE)
-
-
-@st.cache_resource
-def init():
- embeddings = OpenAIEmbeddings(model=MODEL_ID)
- dataset_path = f"hub://{os.environ['ACTIVELOOP_ORG_ID']}/{DATASET_ID}"
-
- db = load_db(
- dataset_path,
- embedding_function=embeddings,
- token=os.environ["ACTIVELOOP_TOKEN"],
- # org_id=os.environ["ACTIVELOOP_ORG_ID"],
- read_only=True,
- )
-
- storage = RedisStorage(
- host=os.environ["UPSTASH_URL"], password=os.environ["UPSTASH_PASSWORD"]
- )
- prompt = PromptTemplate(
- input_variables=["user_input"],
- template=Path("prompts/bot.prompt").read_text(),
- )
-
- llm = ChatOpenAI(temperature=0.3)
-
- chain = LLMChain(llm=llm, prompt=prompt)
-
- return db, storage, chain
-
-
-# Don't show the setting sidebar
-if "sidebar_state" not in st.session_state:
- st.session_state.sidebar_state = "collapsed"
-
-st.set_page_config(initial_sidebar_state=st.session_state.sidebar_state)
-
-
-db, storage, chain = init()
-
-st.title("FairytaleDJ 🎵🏰🔮")
-st.markdown(
- """
-*Made with [DeepLake](https://www.deeplake.ai/) 🚀 and [LangChain](https://python.langchain.com/en/latest/index.html) 🦜⛓️*
-
-💫 Unleash the magic within you with our enchanting app, turning your sentiments into a Disney soundtrack! 🌈 Just express your emotions, and embark on a whimsical journey as we tailor a Disney melody to match your mood. 👑💖""",
- unsafe_allow_html=True,
-)
-how_it_works = st.expander(label="How it works")
-
-text_input = st.text_input(
- label="How are you feeling today?",
- placeholder="I am ready to rock and rool!",
-)
-
-run_btn = st.button("Make me sing! 🎶")
-with how_it_works:
- st.markdown(
- """
-The application follows a sequence of steps to deliver Disney songs matching the user's emotions:
-- **User Input**: The application starts by collecting user's emotional state through a text input.
-- **Emotion Encoding**: The user-provided emotions are then fed to a Language Model (LLM). The LLM interprets and encodes these emotions.
-- **Similarity Search**: These encoded emotions are utilized to perform a similarity search within our [vector database](https://www.deeplake.ai/). This database houses Disney songs, each represented as emotional embeddings.
-- **Song Selection**: From the pool of top matching songs, the application randomly selects one. The selection is weighted, giving preference to songs with higher similarity scores.
-- **Song Retrieval**: The selected song's embedded player is displayed on the webpage for the user. Additionally, the LLM interpreted emotional state associated with the chosen song is displayed.
-"""
- )
-
-
-placeholder_emotions = st.empty()
-placeholder = st.empty()
-
-
-with st.sidebar:
- st.text("App settings")
- filter_threshold = st.slider(
- "Threshold used to filter out low scoring songs",
- min_value=0.0,
- max_value=1.0,
- value=0.8,
- )
- max_number_of_songs = st.slider(
- "Max number of songs we will retrieve from the db",
- min_value=5,
- max_value=50,
- value=20,
- step=1,
- )
- number_of_displayed_songs = st.slider(
- "Number of displayed songs", min_value=1, max_value=4, value=2, step=1
- )
-
-
-def filter_scores(matches: Matches, th: float = 0.8) -> Matches:
- return [(doc, score) for (doc, score) in matches if score > th]
-
-
-def normalize_scores_by_sum(matches: Matches) -> Matches:
- scores = [score for _, score in matches]
- tot = sum(scores)
- return [(doc, (score / tot)) for doc, score in matches]
-
-
-def get_song(user_input: str, k: int = 20):
- emotions = chain.run(user_input=user_input)
- matches = db.similarity_search_with_score(emotions, distance_metric="cos", k=k)
- # [print(doc.metadata['name'], score) for doc, score in matches]
- docs, scores = zip(
- *normalize_scores_by_sum(filter_scores(matches, filter_threshold))
- )
- choosen_docs = weighted_random_sample(
- np.array(docs), np.array(scores), n=number_of_displayed_songs
- ).tolist()
- return choosen_docs, emotions
-
-
-def set_song(user_input):
- if user_input == "":
- return
- # take first 120 chars
- user_input = user_input[:120]
- docs, emotions = get_song(user_input, k=max_number_of_songs)
- print(docs)
- songs = []
- with placeholder_emotions:
- st.markdown("Your emotions: `" + emotions + "`")
- with placeholder:
- iframes_html = ""
- for doc in docs:
- name = doc.metadata["name"]
- print(f"song = {name}")
- songs.append(name)
- embed_url = doc.metadata["embed_url"]
-            # NOTE: the embed markup below is reconstructed; the original HTML
-            # tags were stripped from this file, so the iframe attributes are
-            # illustrative rather than the original values.
-            iframes_html += (
-                f'<iframe src="{embed_url}" width="100%" height="380" frameBorder="0"></iframe>'
-            )
-
-        st.markdown(
-            f"<div>{iframes_html}</div>",
-            unsafe_allow_html=True,
-        )
-
- if USE_STORAGE:
- success_storage = storage.store(
- UserInput(text=user_input, emotions=emotions, songs=songs)
- )
- if not success_storage:
- print("[ERROR] was not able to store user_input")
-
-
-if run_btn:
- set_song(text_input)
diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/gaussian_diffusion.py b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/gaussian_diffusion.py
deleted file mode 100644
index 1c0f97783e7a336390324516f2ba8e89d1dcfaf1..0000000000000000000000000000000000000000
--- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/gaussian_diffusion.py
+++ /dev/null
@@ -1,639 +0,0 @@
-"""
-Simplified from https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/gaussian_diffusion.py.
-"""
-
-import math
-
-import numpy as np
-import torch as th
-
-
-def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
- betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
- warmup_time = int(num_diffusion_timesteps * warmup_frac)
- betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
- return betas
-
-
-def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
- """
- This is the deprecated API for creating beta schedules.
-
- See get_named_beta_schedule() for the new library of schedules.
- """
- if beta_schedule == "quad":
- betas = (
- np.linspace(
- beta_start ** 0.5,
- beta_end ** 0.5,
- num_diffusion_timesteps,
- dtype=np.float64,
- )
- ** 2
- )
- elif beta_schedule == "linear":
- betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
- elif beta_schedule == "warmup10":
- betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
- elif beta_schedule == "warmup50":
- betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
- elif beta_schedule == "const":
- betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
- elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1
- betas = 1.0 / np.linspace(
- num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
- )
- else:
- raise NotImplementedError(beta_schedule)
- assert betas.shape == (num_diffusion_timesteps,)
- return betas
-
-
-def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
- """
- Get a pre-defined beta schedule for the given name.
-
- The beta schedule library consists of beta schedules which remain similar
- in the limit of num_diffusion_timesteps.
- Beta schedules may be added, but should not be removed or changed once
- they are committed to maintain backwards compatibility.
- """
- if schedule_name == "linear":
- # Linear schedule from Ho et al, extended to work for any number of
- # diffusion steps.
- scale = 1000 / num_diffusion_timesteps
- return get_beta_schedule(
- "linear",
- beta_start=scale * 0.0001,
- beta_end=scale * 0.02,
- num_diffusion_timesteps=num_diffusion_timesteps,
- )
- elif schedule_name == "squaredcos_cap_v2":
- return betas_for_alpha_bar(
- num_diffusion_timesteps,
- lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
- )
- else:
- raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function,
- which defines the cumulative product of (1-beta) over time from t = [0,1].
-
- :param num_diffusion_timesteps: the number of betas to produce.
- :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
- produces the cumulative product of (1-beta) up to that
- part of the diffusion process.
- :param max_beta: the maximum beta to use; use values lower than 1 to
- prevent singularities.
- """
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
-
-
-class GaussianDiffusion:
- """
- Utilities for training and sampling diffusion models.
-
- Original ported from this codebase:
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
-
- :param betas: a 1-D numpy array of betas for each diffusion timestep,
- starting at T and going to 1.
- """
-
- def __init__(
- self,
- *,
- betas,
- ):
- # Use float64 for accuracy.
- betas = np.array(betas, dtype=np.float64)
- self.betas = betas
- assert len(betas.shape) == 1, "betas must be 1-D"
- assert (betas > 0).all() and (betas <= 1).all()
-
- self.num_timesteps = int(betas.shape[0])
-
- alphas = 1.0 - betas
- self.alphas_cumprod = np.cumprod(alphas, axis=0)
- self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
- self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
- assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
- self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
- self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
- self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
- self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
-
- # calculations for posterior q(x_{t-1} | x_t, x_0)
- self.posterior_variance = (
- betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
- self.posterior_log_variance_clipped = np.log(
- np.append(self.posterior_variance[1], self.posterior_variance[1:])
- )
- self.posterior_mean_coef1 = (
- betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- self.posterior_mean_coef2 = (
- (1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
- )
-
- def q_mean_variance(self, x_start, t):
- """
- Get the distribution q(x_t | x_0).
-
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
- """
- mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
- log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
- return mean, variance, log_variance
-
- def q_sample(self, x_start, t, noise=None):
- """
- Diffuse the data for a given number of diffusion steps.
-
- In other words, sample from q(x_t | x_0).
-
- :param x_start: the initial data batch.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :param noise: if specified, the split-out normal noise.
- :return: A noisy version of x_start.
- """
- if noise is None:
- noise = th.randn_like(x_start)
- assert noise.shape == x_start.shape
- return (
- _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
- )
-
- def q_posterior_mean_variance(self, x_start, x_t, t):
- """
- Compute the mean and variance of the diffusion posterior:
-
- q(x_{t-1} | x_t, x_0)
-
- """
- assert x_start.shape == x_t.shape
- posterior_mean = (
- _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
- + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
- )
- posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
- posterior_log_variance_clipped = _extract_into_tensor(
- self.posterior_log_variance_clipped, t, x_t.shape
- )
- assert (
- posterior_mean.shape[0]
- == posterior_variance.shape[0]
- == posterior_log_variance_clipped.shape[0]
- == x_start.shape[0]
- )
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
- def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
- """
- Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
- the initial x, x_0.
-
- :param model: the model, which takes a signal and a batch of timesteps
- as input.
- :param x: the [N x C x ...] tensor at time t.
- :param t: a 1-D Tensor of timesteps.
- :param clip_denoised: if True, clip the denoised signal into [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample. Applies before
- clip_denoised.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict with the following keys:
- - 'mean': the model mean output.
- - 'variance': the model variance output.
- - 'log_variance': the log of 'variance'.
- - 'pred_xstart': the prediction for x_0.
- """
- if model_kwargs is None:
- model_kwargs = {}
-
- B, C = x.shape[:2]
- assert t.shape == (B,)
- model_output = model(x, t, **model_kwargs)
- if isinstance(model_output, tuple):
- model_output, extra = model_output
- else:
- extra = None
-
- assert model_output.shape == (B, C * 2, *x.shape[2:])
- model_output, model_var_values = th.split(model_output, C, dim=1)
- min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
- max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
- # The model_var_values is [-1, 1] for [min_var, max_var].
- frac = (model_var_values + 1) / 2
- model_log_variance = frac * max_log + (1 - frac) * min_log
- model_variance = th.exp(model_log_variance)
-
- def process_xstart(x):
- if denoised_fn is not None:
- x = denoised_fn(x)
- if clip_denoised:
- return x.clamp(-1, 1)
- return x
-
- pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
- model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
-
- assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
- return {
- "mean": model_mean,
- "variance": model_variance,
- "log_variance": model_log_variance,
- "pred_xstart": pred_xstart,
- "extra": extra,
- }
-
- def _predict_xstart_from_eps(self, x_t, t, eps):
- assert x_t.shape == eps.shape
- return (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
- )
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- return (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
- def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
- """
- Compute the mean for the previous step, given a function cond_fn that
- computes the gradient of a conditional log probability with respect to
- x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
- condition on y.
-
- This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
- """
- gradient = cond_fn(x, t, **model_kwargs)
- new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
- return new_mean
-
- def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
- """
- Compute what the p_mean_variance output would have been, should the
- model's score function be conditioned by cond_fn.
-
- See condition_mean() for details on cond_fn.
-
- Unlike condition_mean(), this instead uses the conditioning strategy
- from Song et al (2020).
- """
- alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
-
- eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
- eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)
-
- out = p_mean_var.copy()
- out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
- out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
- return out
-
- def p_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- ):
- """
- Sample x_{t-1} from the model at the given timestep.
-
- :param model: the model to sample from.
- :param x: the current tensor at x_{t-1}.
- :param t: the value of t, starting at 0 for the first diffusion step.
- :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict containing the following keys:
- - 'sample': a random sample from the model.
- - 'pred_xstart': a prediction of x_0.
- """
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- noise = th.randn_like(x)
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
- ) # no noise when t == 0
- if cond_fn is not None:
- out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
- sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
- return {"sample": sample, "pred_xstart": out["pred_xstart"]}
-
- def p_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- """
- Generate samples from the model.
-
- :param model: the model module.
- :param shape: the shape of the samples, (N, C, H, W).
- :param noise: if specified, the noise from the encoder to sample.
- Should be of the same shape as `shape`.
- :param clip_denoised: if True, clip x_start predictions to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :param device: if specified, the device to create the samples on.
- If not specified, use a model parameter's device.
- :param progress: if True, show a tqdm progress bar.
- :return: a non-differentiable batch of samples.
- """
- final = None
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"]
-
- def p_sample_loop_progressive(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- """
- Generate samples from the model and yield intermediate samples from
- each timestep of diffusion.
-
- Arguments are the same as p_sample_loop().
- Returns a generator over dicts, where each dict is the return value of
- p_sample().
- """
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- img = th.randn(*shape, device=device)
- indices = list(range(self.num_timesteps))[::-1]
-
- if progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
-
- indices = tqdm(indices)
-
- for i in indices:
- t = th.tensor([i] * shape[0], device=device)
- with th.no_grad():
- out = self.p_sample(
- model,
- img,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- )
- yield out
- img = out["sample"]
-
- def ddim_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- eta=0.0,
- ):
- """
- Sample x_{t-1} from the model using DDIM.
-
- Same usage as p_sample().
- """
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- if cond_fn is not None:
- out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
-
- # Usually our model outputs epsilon, but we re-derive it
- # in case we used x_start or x_prev prediction.
- eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
-
- alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
- alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
- sigma = (
- eta
- * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
- * th.sqrt(1 - alpha_bar / alpha_bar_prev)
- )
- # Equation 12.
- noise = th.randn_like(x)
- mean_pred = (
- out["pred_xstart"] * th.sqrt(alpha_bar_prev)
- + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
- )
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
- ) # no noise when t == 0
- sample = mean_pred + nonzero_mask * sigma * noise
- return {"sample": sample, "pred_xstart": out["pred_xstart"]}
-
- def ddim_reverse_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- eta=0.0,
- ):
- """
- Sample x_{t+1} from the model using DDIM reverse ODE.
- """
- assert eta == 0.0, "Reverse ODE only for deterministic path"
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- if cond_fn is not None:
- out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
- # Usually our model outputs epsilon, but we re-derive it
- # in case we used x_start or x_prev prediction.
- eps = (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- - out["pred_xstart"]
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
- alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
-
-        # Equation 12, reversed
- mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
-
- return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
-
- def ddim_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- ):
- """
- Generate samples from the model using DDIM.
-
- Same usage as p_sample_loop().
- """
- final = None
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- eta=eta,
- ):
- final = sample
- return final["sample"]
-
- def ddim_sample_loop_progressive(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- ):
- """
- Use DDIM to sample from the model and yield intermediate samples from
- each timestep of DDIM.
-
- Same usage as p_sample_loop_progressive().
- """
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- img = th.randn(*shape, device=device)
- indices = list(range(self.num_timesteps))[::-1]
-
- if progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
-
- indices = tqdm(indices)
-
- for i in indices:
- t = th.tensor([i] * shape[0], device=device)
- with th.no_grad():
- out = self.ddim_sample(
- model,
- img,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- eta=eta,
- )
- yield out
- img = out["sample"]
-
-
-def _extract_into_tensor(arr, timesteps, broadcast_shape):
- """
- Extract values from a 1-D numpy array for a batch of indices.
-
- :param arr: the 1-D numpy array.
- :param timesteps: a tensor of indices into the array to extract.
- :param broadcast_shape: a larger shape of K dimensions with the batch
- dimension equal to the length of timesteps.
- :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
- """
- res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
- while len(res.shape) < len(broadcast_shape):
- res = res[..., None]
- return res + th.zeros(broadcast_shape, device=timesteps.device)
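
A minimal usage sketch of the sampling entry points defined above. `diffusion` and `model` are placeholder names for a constructed GaussianDiffusion object and a trained denoising network; neither is defined in this file.

```python
import torch as th

# Assumed objects: `diffusion` is the GaussianDiffusion instance whose methods are
# shown above, and `model` is a trained denoising network over 64x64 RGB images.
shape = (4, 3, 64, 64)  # (N, C, H, W)

with th.no_grad():
    # Ancestral sampling: one stochastic p_sample() step per diffusion timestep.
    samples = diffusion.p_sample_loop(model, shape, clip_denoised=True, progress=True)

    # DDIM sampling: deterministic when eta=0.0, using the update the comments
    # above refer to as "Equation 12".
    ddim_samples = diffusion.ddim_sample_loop(model, shape, eta=0.0, progress=True)

# Both calls return a tensor of the requested shape, roughly in [-1, 1]
# when clip_denoised=True.
```
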
diff --git a/spaces/Frorozcol/financIA/README.md b/spaces/Frorozcol/financIA/README.md
deleted file mode 100644
index 57a50382137ec031dc9de50f1f14d6c6dad03e18..0000000000000000000000000000000000000000
--- a/spaces/Frorozcol/financIA/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: FinancIA
-emoji: 🏢
-colorFrom: blue
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/tasks/packing_boxes_pairs.py b/spaces/Gen-Sim/Gen-Sim/cliport/tasks/packing_boxes_pairs.py
deleted file mode 100644
index 508c2eb81cb8fe5de3e7890d0c9087655c7219a7..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/tasks/packing_boxes_pairs.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import os
-
-import numpy as np
-from cliport.tasks.task import Task
-from cliport.utils import utils
-
-import pybullet as p
-
-
-class PackingBoxesPairs(Task):
- """Tightly pack all the boxes of two specified colors inside the brown box."""
-
- def __init__(self):
- super().__init__()
- self.max_steps = 20
- self.lang_template = "pack all the {colors} blocks into the brown box" # should have called it boxes :(
- self.task_completed_desc = "done packing blocks."
-
- # Tight z-bound (0.0525) to discourage stuffing everything into the brown box
- self.zone_bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.0525]])
- self.additional_reset()
-
- def reset(self, env):
- super().reset(env)
-
- # Add container box.
- zone_size = self.get_random_size(0.05, 0.3, 0.05, 0.3, 0.05, 0.05)
- zone_pose = self.get_random_pose(env, zone_size)
- container_template = 'container/container-template.urdf'
- replace = {'DIM': zone_size, 'HALF': (zone_size[0] / 2, zone_size[1] / 2, zone_size[2] / 2)}
- container_urdf = self.fill_template(container_template, replace)
- env.add_object(container_urdf, zone_pose, 'fixed')
-
- margin = 0.01
- min_object_dim = 0.05
- bboxes = []
-
- # Split container space with KD trees.
- stack_size = np.array(zone_size)
- stack_size[0] -= 0.01
- stack_size[1] -= 0.01
- root_size = (0.01, 0.01, 0) + tuple(stack_size)
- root = utils.TreeNode(None, [], bbox=np.array(root_size))
- utils.KDTree(root, min_object_dim, margin, bboxes)
-
- # select colors
- all_colors, all_color_names = utils.get_colors(mode=self.mode)
- selected_idx = np.random.choice(range(len(all_colors)), 2, replace=False)
-
-        relevant_color_names = [c for idx, c in enumerate(all_color_names) if idx in selected_idx]
-        distractor_color_names = [c for idx, c in enumerate(all_color_names) if idx not in selected_idx]  # kept for symmetry; only the RGB values below are used
-
-        pack_colors = [c for idx, c in enumerate(all_colors) if idx in selected_idx]
-        distractor_colors = [c for idx, c in enumerate(all_colors) if idx not in selected_idx]
-
- # Add objects in container.
- object_ids = []
- bboxes = np.array(bboxes)
- object_template = 'box/box-template.urdf'
- for bbox in bboxes:
- size = bbox[3:] - bbox[:3]
- position = size / 2. + bbox[:3]
- position[0] += -zone_size[0] / 2
- position[1] += -zone_size[1] / 2
- pose = (position, (0, 0, 0, 1))
- pose = utils.multiply(zone_pose, pose)
- urdf = self.fill_template(object_template, {'DIM': size})
- box_id = env.add_object(urdf, pose)
-
- object_ids.append(box_id)
- icolor = np.random.choice(range(len(pack_colors)), 1).squeeze()
- p.changeVisualShape(box_id, -1, rgbaColor=pack_colors[icolor] + [1])
-
- # Randomly select object in box and save ground truth pose.
- object_volumes = []
- true_poses = []
- for object_id in object_ids:
- true_pose = p.getBasePositionAndOrientation(object_id)
- object_size = p.getVisualShapeData(object_id)[0][3]
- object_volumes.append(np.prod(np.array(object_size) * 100))
- pose = self.get_random_pose(env, object_size)
- p.resetBasePositionAndOrientation(object_id, pose[0], pose[1])
- true_poses.append(true_pose)
-
- # Add distractor objects
- num_distractor_objects = 4
- distractor_bbox_idxs = np.random.choice(len(bboxes), num_distractor_objects)
- for bbox_idx in distractor_bbox_idxs:
- bbox = bboxes[bbox_idx]
- size = bbox[3:] - bbox[:3]
- position = size / 2. + bbox[:3]
- position[0] += -zone_size[0] / 2
- position[1] += -zone_size[1] / 2
-
- pose = self.get_random_pose(env, size)
- urdf = self.fill_template(object_template, {'DIM': size})
- box_id = env.add_object(urdf, pose)
-
- icolor = np.random.choice(range(len(distractor_colors)), 1).squeeze()
- if box_id:
- p.changeVisualShape(box_id, -1, rgbaColor=distractor_colors[icolor] + [1])
-
- # Some scenes might contain just one relevant block that fits in the box.
- if len(relevant_color_names) > 1:
- relevant_desc = f'{relevant_color_names[0]} and {relevant_color_names[1]}'
- else:
- relevant_desc = f'{relevant_color_names[0]}'
-
-        # IMPORTANT: for the `zone` metric, pass the target region as params=[(zone_pose, zone_size)].
- language_goal = self.lang_template.format(colors=relevant_desc)
- self.add_goal(objs=object_ids, matches=np.eye(len(object_ids)), targ_poses=true_poses, replace=False,
- rotations=True, metric='zone', params=[(zone_pose, zone_size)], step_max_reward=1, language_goal=language_goal)
\ No newline at end of file
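
The color bookkeeping in `reset()` above is the part most prone to confusion: two colors are sampled for the instruction, the rest become distractors, and the sampled color names are rendered into the language goal. A standalone, illustrative sketch of that logic (`utils.get_colors` is stubbed here with hand-picked values):

```python
import numpy as np

# Stand-in for utils.get_colors(mode=...): RGB values plus their names.
all_colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]]
all_color_names = ["red", "green", "blue", "yellow"]

selected_idx = np.random.choice(len(all_colors), 2, replace=False)
relevant_color_names = [c for i, c in enumerate(all_color_names) if i in selected_idx]
pack_colors = [c for i, c in enumerate(all_colors) if i in selected_idx]
distractor_colors = [c for i, c in enumerate(all_colors) if i not in selected_idx]

# Render the instruction exactly as the task's lang_template does.
lang_template = "pack all the {colors} blocks into the brown box"
if len(relevant_color_names) > 1:
    relevant_desc = f"{relevant_color_names[0]} and {relevant_color_names[1]}"
else:
    relevant_desc = f"{relevant_color_names[0]}"
print(lang_template.format(colors=relevant_desc))
# e.g. "pack all the red and blue blocks into the brown box"
```
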
diff --git a/spaces/GordenGhost/Gorden/README.md b/spaces/GordenGhost/Gorden/README.md
deleted file mode 100644
index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000
--- a/spaces/GordenGhost/Gorden/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: bingo
-emoji: 😊
-colorFrom: red
-colorTo: red
-sdk: docker
-license: mit
-duplicated_from: hf4all/bingo
----
-
-
-
-# Bingo
-
-Bingo, a New Bing that lets you breathe easy.
-
-A faithful recreation of the main features of the New Bing web UI, usable from mainland China, compatible with most Microsoft Bing AI capabilities, and deployable on your own server.
-
-
-
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://github.com/weaigc/bingo/blob/main/license)
-
-For bug reports and feedback, please visit https://github.com/weaigc/bingo/issues
-
-
-
diff --git a/spaces/Gradio-Blocks/are-you-wearing-a-mask/model_weights/README.md b/spaces/Gradio-Blocks/are-you-wearing-a-mask/model_weights/README.md
deleted file mode 100644
index de168a6b6e7a60cf1c310ab64d300f4d41fcd6b0..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/are-you-wearing-a-mask/model_weights/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# Trained model weights go here.
\ No newline at end of file
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py
deleted file mode 100644
index 1491e3b8247c9d163d6016caf2fcd8043a053b7e..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py
+++ /dev/null
@@ -1,6 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
-]
-model = dict(
- decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
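
This config only overrides the class counts; everything else comes from the `_base_` files it inherits. A hedged sketch of inspecting the merged result, assuming the standard mmcv/mmsegmentation tooling is available alongside these configs:

```python
# Assumes mmcv < 2.0 style Config; run from the repo root containing `configs/`.
from mmcv import Config

cfg = Config.fromfile(
    "configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py"
)
# The _base_ files are merged recursively; the override above leaves both heads
# with 150 classes for ADE20K.
print(cfg.model.decode_head.num_classes, cfg.model.auxiliary_head.num_classes)  # 150 150
```
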
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py
deleted file mode 100644
index 0627e2b5a76dead859212d4cab116c160df21404..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './nonlocal_r50-d8_769x769_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/dist.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/dist.py
deleted file mode 100644
index 65f084aa9988c7c08f4a35688f8895f28b285d1d..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/dist.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# --------------------------------------------------------
-# Based on BEiT, timm, DINO and DeiT code bases
-# https://github.com/microsoft/unilm/tree/master/beit
-# https://github.com/rwightman/pytorch-image-models/tree/master/timm
-# https://github.com/facebookresearch/deit
-# https://github.com/facebookresearch/dino
-# --------------------------------------------------------
-
-import os
-import pickle
-import shutil
-import tempfile
-
-import torch
-import torch.distributed as dist
-
-
-def setup_for_distributed(is_master):
- """
-    This function disables printing when not in the master process.
- """
- import builtins as __builtin__
- builtin_print = __builtin__.print
-
- def print(*args, **kwargs):
- force = kwargs.pop('force', False)
- if is_master or force:
- builtin_print(*args, **kwargs)
-
- __builtin__.print = print
-
-
-def is_dist_avail_and_initialized():
- if not dist.is_available():
- return False
- if not dist.is_initialized():
- return False
- return True
-
-
-def get_world_size():
- if not is_dist_avail_and_initialized():
- return 1
- return dist.get_world_size()
-
-
-def get_rank():
- if not is_dist_avail_and_initialized():
- return 0
- return dist.get_rank()
-
-
-def is_main_process():
- return get_rank() == 0
-
-
-def save_on_master(*args, **kwargs):
- if is_main_process():
- torch.save(*args, **kwargs)
-
-
-def init_distributed_mode(args):
- if args.dist_on_itp:
- args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
- args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
- args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
- args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
- os.environ['LOCAL_RANK'] = str(args.gpu)
- os.environ['RANK'] = str(args.rank)
- os.environ['WORLD_SIZE'] = str(args.world_size)
- # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
- elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
- args.rank = int(os.environ["RANK"])
- args.world_size = int(os.environ['WORLD_SIZE'])
- args.gpu = int(os.environ['LOCAL_RANK'])
- elif 'SLURM_PROCID' in os.environ:
- args.rank = int(os.environ['SLURM_PROCID'])
- args.gpu = args.rank % torch.cuda.device_count()
- else:
- print('Not using distributed mode')
- args.distributed = False
- return
-
- args.distributed = True
-
- torch.cuda.set_device(args.gpu)
- args.dist_backend = 'nccl'
- print('| distributed init (rank {}): {}, gpu {}'.format(
- args.rank, args.dist_url, args.gpu), flush=True)
- torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
- world_size=args.world_size, rank=args.rank)
- torch.distributed.barrier()
- setup_for_distributed(args.rank == 0)
-
-# From MMCV
-def collect_results_cpu(result_part, size, tmpdir=None):
- """Collect results under cpu mode.
-
- On cpu mode, this function will save the results on different gpus to
- ``tmpdir`` and collect them by the rank 0 worker.
-
- Args:
- result_part (list): Result list containing result parts
- to be collected.
- size (int): Size of the results, commonly equal to length of
- the results.
-        tmpdir (str | None): temporary directory where collected results are
-            stored. If set to None, a random temporary directory will be
-            created for them.
-
- Returns:
- list: The collected results.
- """
- rank = get_rank()
- world_size = get_world_size()
- # create a tmp dir if it is not specified
- if tmpdir is None:
- MAX_LEN = 512
-        # 32 is the ASCII code for the space character
- dir_tensor = torch.full((MAX_LEN, ),
- 32,
- dtype=torch.uint8,
- device='cuda')
- if rank == 0:
- os.makedirs('/tmp/dist_test', exist_ok=True)
- tmpdir = tempfile.mkdtemp(dir='/tmp/dist_test')
- tmpdir = torch.tensor(
- bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
- dir_tensor[:len(tmpdir)] = tmpdir
- dist.broadcast(dir_tensor, 0)
- tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
- else:
- os.makedirs(tmpdir, exist_ok=True)
- # dump the part result to the dir
- tmp_file = os.path.join(tmpdir, f'part_{rank}.pkl')
- pickle.dump(result_part, open(str(tmp_file), "wb"))
- dist.barrier()
- # collect all parts
- if rank != 0:
- return None
- else:
- # load results of all parts from tmp dir
- part_list = []
- for i in range(world_size):
- part_file = os.path.join(tmpdir, f'part_{i}.pkl')
- part_result = pickle.load(open(str(part_file), "rb"))
- # When data is severely insufficient, an empty part_result
-            # on a certain gpu could make the overall outputs empty.
- if part_result:
- part_list.append(part_result)
- # sort the results
- ordered_results = []
- for res in zip(*part_list):
- ordered_results.extend(list(res))
- # the dataloader may pad some samples
- ordered_results = ordered_results[:size]
- # remove tmp dir
- shutil.rmtree(tmpdir)
- return ordered_results
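
A hedged sketch of how these helpers are typically wired together in a script launched with `torchrun`. The import path `utils.dist` follows the repo layout above but is an assumption, and the snippet needs at least one CUDA device per process (NCCL backend, `collect_results_cpu` broadcasts via a CUDA tensor):

```python
import argparse
# Assumed import path, mirroring the file location above.
from utils.dist import (init_distributed_mode, get_rank, get_world_size,
                        collect_results_cpu, is_main_process)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dist_on_itp", action="store_true")
    parser.add_argument("--dist_url", default="env://")
    args = parser.parse_args()

    # Under `torchrun --nproc_per_node=2 script.py`, RANK/WORLD_SIZE/LOCAL_RANK
    # are set in the environment, so init_distributed_mode takes its env branch.
    init_distributed_mode(args)

    # Each rank evaluates its own slice of a toy "dataset" of 100 items...
    rank, world = get_rank(), get_world_size()
    part = [i * i for i in range(100) if i % world == rank]

    # ...and rank 0 gathers the shards via the temp-dir handshake above.
    results = collect_results_cpu(part, size=100, tmpdir=None)
    if is_main_process():
        print(len(results), results[:5])

if __name__ == "__main__":
    main()
```
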
diff --git a/spaces/Hallucinate/demo/ldm/modules/image_degradation/utils_image.py b/spaces/Hallucinate/demo/ldm/modules/image_degradation/utils_image.py
deleted file mode 100644
index 0175f155ad900ae33c3c46ed87f49b352e3faf98..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/ldm/modules/image_degradation/utils_image.py
+++ /dev/null
@@ -1,916 +0,0 @@
-import os
-import math
-import random
-import numpy as np
-import torch
-import cv2
-from torchvision.utils import make_grid
-from datetime import datetime
-import matplotlib.pyplot as plt  # needed by imshow() and surf() below; TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
-
-
-os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
-
-
-'''
-# --------------------------------------------
-# Kai Zhang (github: https://github.com/cszn)
-# 03/Mar/2019
-# --------------------------------------------
-# https://github.com/twhui/SRGAN-pyTorch
-# https://github.com/xinntao/BasicSR
-# --------------------------------------------
-'''
-
-
-IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
-
-
-def is_image_file(filename):
- return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def get_timestamp():
- return datetime.now().strftime('%y%m%d-%H%M%S')
-
-
-def imshow(x, title=None, cbar=False, figsize=None):
- plt.figure(figsize=figsize)
- plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
- if title:
- plt.title(title)
- if cbar:
- plt.colorbar()
- plt.show()
-
-
-def surf(Z, cmap='rainbow', figsize=None):
- plt.figure(figsize=figsize)
- ax3 = plt.axes(projection='3d')
-
- w, h = Z.shape[:2]
- xx = np.arange(0,w,1)
- yy = np.arange(0,h,1)
- X, Y = np.meshgrid(xx, yy)
- ax3.plot_surface(X,Y,Z,cmap=cmap)
- #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
- plt.show()
-
-
-'''
-# --------------------------------------------
-# get image paths
-# --------------------------------------------
-'''
-
-
-def get_image_paths(dataroot):
- paths = None # return None if dataroot is None
- if dataroot is not None:
- paths = sorted(_get_paths_from_images(dataroot))
- return paths
-
-
-def _get_paths_from_images(path):
- assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
- images = []
- for dirpath, _, fnames in sorted(os.walk(path)):
- for fname in sorted(fnames):
- if is_image_file(fname):
- img_path = os.path.join(dirpath, fname)
- images.append(img_path)
- assert images, '{:s} has no valid image file'.format(path)
- return images
-
-
-'''
-# --------------------------------------------
-# split large images into small images
-# --------------------------------------------
-'''
-
-
-def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
- w, h = img.shape[:2]
- patches = []
- if w > p_max and h > p_max:
-        w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=int))  # np.int is removed in NumPy >= 1.24
-        h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=int))
- w1.append(w-p_size)
- h1.append(h-p_size)
-# print(w1)
-# print(h1)
- for i in w1:
- for j in h1:
- patches.append(img[i:i+p_size, j:j+p_size,:])
- else:
- patches.append(img)
-
- return patches
-
-
-def imssave(imgs, img_path):
- """
- imgs: list, N images of size WxHxC
- """
- img_name, ext = os.path.splitext(os.path.basename(img_path))
-
- for i, img in enumerate(imgs):
- if img.ndim == 3:
- img = img[:, :, [2, 1, 0]]
- new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
- cv2.imwrite(new_path, img)
-
-
-def split_imageset(original_dataroot, target_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
-    """
-    Split the large images from original_dataroot into small overlapping patches of size (p_size)x(p_size)
-    and save them into target_dataroot; only images larger than (p_max)x(p_max) are split.
-    Args:
-        original_dataroot: directory with the source images
-        target_dataroot: directory where the patches are saved
-        p_size: size of the small images (patches)
-        p_overlap: overlap between adjacent patches; the patch size used in training is a good choice
-        p_max: images smaller than (p_max)x(p_max) are kept unchanged.
-    """
-    paths = get_image_paths(original_dataroot)
-    for img_path in paths:
-        # img_name, ext = os.path.splitext(os.path.basename(img_path))
-        img = imread_uint(img_path, n_channels=n_channels)
-        patches = patches_from_image(img, p_size, p_overlap, p_max)
-        imssave(patches, os.path.join(target_dataroot, os.path.basename(img_path)))
-        #if original_dataroot == target_dataroot:
-        #del img_path
-
-'''
-# --------------------------------------------
-# makedir
-# --------------------------------------------
-'''
-
-
-def mkdir(path):
- if not os.path.exists(path):
- os.makedirs(path)
-
-
-def mkdirs(paths):
- if isinstance(paths, str):
- mkdir(paths)
- else:
- for path in paths:
- mkdir(path)
-
-
-def mkdir_and_rename(path):
- if os.path.exists(path):
- new_name = path + '_archived_' + get_timestamp()
- print('Path already exists. Rename it to [{:s}]'.format(new_name))
- os.rename(path, new_name)
- os.makedirs(path)
-
-
-'''
-# --------------------------------------------
-# read image from path
-# opencv is fast, but read BGR numpy image
-# --------------------------------------------
-'''
-
-
-# --------------------------------------------
-# get uint8 image of size HxWxn_channels (RGB)
-# --------------------------------------------
-def imread_uint(path, n_channels=3):
- # input: path
- # output: HxWx3(RGB or GGG), or HxWx1 (G)
- if n_channels == 1:
- img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE
- img = np.expand_dims(img, axis=2) # HxWx1
- elif n_channels == 3:
- img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G
- if img.ndim == 2:
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG
- else:
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB
- return img
-
-
-# --------------------------------------------
-# matlab's imwrite
-# --------------------------------------------
-def imsave(img, img_path):
- img = np.squeeze(img)
- if img.ndim == 3:
- img = img[:, :, [2, 1, 0]]
- cv2.imwrite(img_path, img)
-
-def imwrite(img, img_path):
- img = np.squeeze(img)
- if img.ndim == 3:
- img = img[:, :, [2, 1, 0]]
- cv2.imwrite(img_path, img)
-
-
-
-# --------------------------------------------
-# get single image of size HxWxn_channels (BGR)
-# --------------------------------------------
-def read_img(path):
- # read image by cv2
- # return: Numpy float32, HWC, BGR, [0,1]
- img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE
- img = img.astype(np.float32) / 255.
- if img.ndim == 2:
- img = np.expand_dims(img, axis=2)
- # some images have 4 channels
- if img.shape[2] > 3:
- img = img[:, :, :3]
- return img
-
-
-'''
-# --------------------------------------------
-# image format conversion
-# --------------------------------------------
-# numpy(single) <---> numpy(uint)
-# numpy(single) <---> tensor
-# numpy(uint) <---> tensor
-# --------------------------------------------
-'''
-
-
-# --------------------------------------------
-# numpy(single) [0, 1] <---> numpy(uint)
-# --------------------------------------------
-
-
-def uint2single(img):
-
- return np.float32(img/255.)
-
-
-def single2uint(img):
-
- return np.uint8((img.clip(0, 1)*255.).round())
-
-
-def uint162single(img):
-
- return np.float32(img/65535.)
-
-
-def single2uint16(img):
-
- return np.uint16((img.clip(0, 1)*65535.).round())
-
-
-# --------------------------------------------
-# numpy(uint) (HxWxC or HxW) <---> tensor
-# --------------------------------------------
-
-
-# convert uint to 4-dimensional torch tensor
-def uint2tensor4(img):
- if img.ndim == 2:
- img = np.expand_dims(img, axis=2)
- return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
-
-
-# convert uint to 3-dimensional torch tensor
-def uint2tensor3(img):
- if img.ndim == 2:
- img = np.expand_dims(img, axis=2)
- return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
-
-
-# convert 2/3/4-dimensional torch tensor to uint
-def tensor2uint(img):
- img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
- if img.ndim == 3:
- img = np.transpose(img, (1, 2, 0))
- return np.uint8((img*255.0).round())
-
-
-# --------------------------------------------
-# numpy(single) (HxWxC) <---> tensor
-# --------------------------------------------
-
-
-# convert single (HxWxC) to 3-dimensional torch tensor
-def single2tensor3(img):
- return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
-
-
-# convert single (HxWxC) to 4-dimensional torch tensor
-def single2tensor4(img):
- return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
-
-
-# convert torch tensor to single
-def tensor2single(img):
- img = img.data.squeeze().float().cpu().numpy()
- if img.ndim == 3:
- img = np.transpose(img, (1, 2, 0))
-
- return img
-
-# convert torch tensor to single
-def tensor2single3(img):
- img = img.data.squeeze().float().cpu().numpy()
- if img.ndim == 3:
- img = np.transpose(img, (1, 2, 0))
- elif img.ndim == 2:
- img = np.expand_dims(img, axis=2)
- return img
-
-
-def single2tensor5(img):
- return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
-
-
-def single32tensor5(img):
- return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
-
-
-def single42tensor4(img):
- return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
-
-
-# from skimage.io import imread, imsave
-def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
- '''
- Converts a torch Tensor into an image Numpy array of BGR channel order
- Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
- Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
- '''
- tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp
- tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
- n_dim = tensor.dim()
- if n_dim == 4:
- n_img = len(tensor)
- img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
- img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
- elif n_dim == 3:
- img_np = tensor.numpy()
- img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
- elif n_dim == 2:
- img_np = tensor.numpy()
- else:
- raise TypeError(
- 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
- if out_type == np.uint8:
- img_np = (img_np * 255.0).round()
-        # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
- return img_np.astype(out_type)
-
-
-'''
-# --------------------------------------------
-# Augmentation, flip and/or rotate
-# --------------------------------------------
-# The following two are enough.
-# (1) augment_img: numpy image of WxHxC or WxH
-# (2) augment_img_tensor4: tensor image 1xCxWxH
-# --------------------------------------------
-'''
-
-
-def augment_img(img, mode=0):
- '''Kai Zhang (github: https://github.com/cszn)
- '''
- if mode == 0:
- return img
- elif mode == 1:
- return np.flipud(np.rot90(img))
- elif mode == 2:
- return np.flipud(img)
- elif mode == 3:
- return np.rot90(img, k=3)
- elif mode == 4:
- return np.flipud(np.rot90(img, k=2))
- elif mode == 5:
- return np.rot90(img)
- elif mode == 6:
- return np.rot90(img, k=2)
- elif mode == 7:
- return np.flipud(np.rot90(img, k=3))
-
-
-def augment_img_tensor4(img, mode=0):
- '''Kai Zhang (github: https://github.com/cszn)
- '''
- if mode == 0:
- return img
- elif mode == 1:
- return img.rot90(1, [2, 3]).flip([2])
- elif mode == 2:
- return img.flip([2])
- elif mode == 3:
- return img.rot90(3, [2, 3])
- elif mode == 4:
- return img.rot90(2, [2, 3]).flip([2])
- elif mode == 5:
- return img.rot90(1, [2, 3])
- elif mode == 6:
- return img.rot90(2, [2, 3])
- elif mode == 7:
- return img.rot90(3, [2, 3]).flip([2])
-
-
-def augment_img_tensor(img, mode=0):
- '''Kai Zhang (github: https://github.com/cszn)
- '''
- img_size = img.size()
- img_np = img.data.cpu().numpy()
- if len(img_size) == 3:
- img_np = np.transpose(img_np, (1, 2, 0))
- elif len(img_size) == 4:
- img_np = np.transpose(img_np, (2, 3, 1, 0))
- img_np = augment_img(img_np, mode=mode)
- img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
- if len(img_size) == 3:
- img_tensor = img_tensor.permute(2, 0, 1)
- elif len(img_size) == 4:
- img_tensor = img_tensor.permute(3, 2, 0, 1)
-
- return img_tensor.type_as(img)
-
-
-def augment_img_np3(img, mode=0):
- if mode == 0:
- return img
- elif mode == 1:
- return img.transpose(1, 0, 2)
- elif mode == 2:
- return img[::-1, :, :]
- elif mode == 3:
- img = img[::-1, :, :]
- img = img.transpose(1, 0, 2)
- return img
- elif mode == 4:
- return img[:, ::-1, :]
- elif mode == 5:
- img = img[:, ::-1, :]
- img = img.transpose(1, 0, 2)
- return img
- elif mode == 6:
- img = img[:, ::-1, :]
- img = img[::-1, :, :]
- return img
- elif mode == 7:
- img = img[:, ::-1, :]
- img = img[::-1, :, :]
- img = img.transpose(1, 0, 2)
- return img
-
-
-def augment_imgs(img_list, hflip=True, rot=True):
- # horizontal flip OR rotate
- hflip = hflip and random.random() < 0.5
- vflip = rot and random.random() < 0.5
- rot90 = rot and random.random() < 0.5
-
- def _augment(img):
- if hflip:
- img = img[:, ::-1, :]
- if vflip:
- img = img[::-1, :, :]
- if rot90:
- img = img.transpose(1, 0, 2)
- return img
-
- return [_augment(img) for img in img_list]
-
-
-'''
-# --------------------------------------------
-# modcrop and shave
-# --------------------------------------------
-'''
-
-
-def modcrop(img_in, scale):
- # img_in: Numpy, HWC or HW
- img = np.copy(img_in)
- if img.ndim == 2:
- H, W = img.shape
- H_r, W_r = H % scale, W % scale
- img = img[:H - H_r, :W - W_r]
- elif img.ndim == 3:
- H, W, C = img.shape
- H_r, W_r = H % scale, W % scale
- img = img[:H - H_r, :W - W_r, :]
- else:
- raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
- return img
-
-
-def shave(img_in, border=0):
- # img_in: Numpy, HWC or HW
- img = np.copy(img_in)
- h, w = img.shape[:2]
- img = img[border:h-border, border:w-border]
- return img
-
-
-'''
-# --------------------------------------------
-# image processing process on numpy image
-# channel_convert(in_c, tar_type, img_list):
-# rgb2ycbcr(img, only_y=True):
-# bgr2ycbcr(img, only_y=True):
-# ycbcr2rgb(img):
-# --------------------------------------------
-'''
-
-
-def rgb2ycbcr(img, only_y=True):
- '''same as matlab rgb2ycbcr
- only_y: only return Y channel
- Input:
- uint8, [0, 255]
- float, [0, 1]
- '''
- in_img_type = img.dtype
- img.astype(np.float32)
- if in_img_type != np.uint8:
- img *= 255.
- # convert
- if only_y:
- rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
- else:
- rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
- [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
- if in_img_type == np.uint8:
- rlt = rlt.round()
- else:
- rlt /= 255.
- return rlt.astype(in_img_type)
-
-
-def ycbcr2rgb(img):
- '''same as matlab ycbcr2rgb
- Input:
- uint8, [0, 255]
- float, [0, 1]
- '''
- in_img_type = img.dtype
- img.astype(np.float32)
- if in_img_type != np.uint8:
- img *= 255.
- # convert
- rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
- [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
- if in_img_type == np.uint8:
- rlt = rlt.round()
- else:
- rlt /= 255.
- return rlt.astype(in_img_type)
-
-
-def bgr2ycbcr(img, only_y=True):
- '''bgr version of rgb2ycbcr
- only_y: only return Y channel
- Input:
- uint8, [0, 255]
- float, [0, 1]
- '''
- in_img_type = img.dtype
- img.astype(np.float32)
- if in_img_type != np.uint8:
- img *= 255.
- # convert
- if only_y:
- rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
- else:
- rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
- [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
- if in_img_type == np.uint8:
- rlt = rlt.round()
- else:
- rlt /= 255.
- return rlt.astype(in_img_type)
-
-
-def channel_convert(in_c, tar_type, img_list):
- # conversion among BGR, gray and y
- if in_c == 3 and tar_type == 'gray': # BGR to gray
- gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
- return [np.expand_dims(img, axis=2) for img in gray_list]
- elif in_c == 3 and tar_type == 'y': # BGR to y
- y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
- return [np.expand_dims(img, axis=2) for img in y_list]
- elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
- return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
- else:
- return img_list
-
-
-'''
-# --------------------------------------------
-# metric, PSNR and SSIM
-# --------------------------------------------
-'''
-
-
-# --------------------------------------------
-# PSNR
-# --------------------------------------------
-def calculate_psnr(img1, img2, border=0):
- # img1 and img2 have range [0, 255]
- #img1 = img1.squeeze()
- #img2 = img2.squeeze()
- if not img1.shape == img2.shape:
- raise ValueError('Input images must have the same dimensions.')
- h, w = img1.shape[:2]
- img1 = img1[border:h-border, border:w-border]
- img2 = img2[border:h-border, border:w-border]
-
- img1 = img1.astype(np.float64)
- img2 = img2.astype(np.float64)
- mse = np.mean((img1 - img2)**2)
- if mse == 0:
- return float('inf')
- return 20 * math.log10(255.0 / math.sqrt(mse))
-
-
-# --------------------------------------------
-# SSIM
-# --------------------------------------------
-def calculate_ssim(img1, img2, border=0):
- '''calculate SSIM
- the same outputs as MATLAB's
- img1, img2: [0, 255]
- '''
- #img1 = img1.squeeze()
- #img2 = img2.squeeze()
- if not img1.shape == img2.shape:
- raise ValueError('Input images must have the same dimensions.')
- h, w = img1.shape[:2]
- img1 = img1[border:h-border, border:w-border]
- img2 = img2[border:h-border, border:w-border]
-
- if img1.ndim == 2:
- return ssim(img1, img2)
- elif img1.ndim == 3:
- if img1.shape[2] == 3:
- ssims = []
- for i in range(3):
- ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
- return np.array(ssims).mean()
- elif img1.shape[2] == 1:
- return ssim(np.squeeze(img1), np.squeeze(img2))
- else:
- raise ValueError('Wrong input image dimensions.')
-
-
-def ssim(img1, img2):
- C1 = (0.01 * 255)**2
- C2 = (0.03 * 255)**2
-
- img1 = img1.astype(np.float64)
- img2 = img2.astype(np.float64)
- kernel = cv2.getGaussianKernel(11, 1.5)
- window = np.outer(kernel, kernel.transpose())
-
- mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
- mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
- mu1_sq = mu1**2
- mu2_sq = mu2**2
- mu1_mu2 = mu1 * mu2
- sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
- sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
- sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
-
- ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
- (sigma1_sq + sigma2_sq + C2))
- return ssim_map.mean()
-
-
-'''
-# --------------------------------------------
-# matlab's bicubic imresize (numpy and torch) [0, 1]
-# --------------------------------------------
-'''
-
-
-# matlab 'imresize' function, now only support 'bicubic'
-def cubic(x):
- absx = torch.abs(x)
- absx2 = absx**2
- absx3 = absx**3
- return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
- (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
-
-
-def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
- if (scale < 1) and (antialiasing):
-        # Use a modified kernel to simultaneously interpolate and antialias (larger kernel width)
- kernel_width = kernel_width / scale
-
- # Output-space coordinates
- x = torch.linspace(1, out_length, out_length)
-
- # Input-space coordinates. Calculate the inverse mapping such that 0.5
- # in output space maps to 0.5 in input space, and 0.5+scale in output
- # space maps to 1.5 in input space.
- u = x / scale + 0.5 * (1 - 1 / scale)
-
- # What is the left-most pixel that can be involved in the computation?
- left = torch.floor(u - kernel_width / 2)
-
- # What is the maximum number of pixels that can be involved in the
- # computation? Note: it's OK to use an extra pixel here; if the
- # corresponding weights are all zero, it will be eliminated at the end
- # of this function.
- P = math.ceil(kernel_width) + 2
-
- # The indices of the input pixels involved in computing the k-th output
- # pixel are in row k of the indices matrix.
- indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
- 1, P).expand(out_length, P)
-
- # The weights used to compute the k-th output pixel are in row k of the
- # weights matrix.
- distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
- # apply cubic kernel
- if (scale < 1) and (antialiasing):
- weights = scale * cubic(distance_to_center * scale)
- else:
- weights = cubic(distance_to_center)
- # Normalize the weights matrix so that each row sums to 1.
- weights_sum = torch.sum(weights, 1).view(out_length, 1)
- weights = weights / weights_sum.expand(out_length, P)
-
- # If a column in weights is all zero, get rid of it. only consider the first and last column.
- weights_zero_tmp = torch.sum((weights == 0), 0)
- if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
- indices = indices.narrow(1, 1, P - 2)
- weights = weights.narrow(1, 1, P - 2)
- if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
- indices = indices.narrow(1, 0, P - 2)
- weights = weights.narrow(1, 0, P - 2)
- weights = weights.contiguous()
- indices = indices.contiguous()
- sym_len_s = -indices.min() + 1
- sym_len_e = indices.max() - in_length
- indices = indices + sym_len_s - 1
- return weights, indices, int(sym_len_s), int(sym_len_e)
-
-
-# --------------------------------------------
-# imresize for tensor image [0, 1]
-# --------------------------------------------
-def imresize(img, scale, antialiasing=True):
- # Now the scale should be the same for H and W
- # input: img: pytorch tensor, CHW or HW [0,1]
- # output: CHW or HW [0,1] w/o round
- need_squeeze = True if img.dim() == 2 else False
- if need_squeeze:
- img.unsqueeze_(0)
- in_C, in_H, in_W = img.size()
- out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
- kernel_width = 4
- kernel = 'cubic'
-
-    # MATLAB's imresize picks the dimension order for the resize so that the
-    # dimension with the smallest scale factor is processed first.
-    # That optimization is not supported here; H is always processed before W.
-
- # get weights and indices
- weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
- in_H, out_H, scale, kernel, kernel_width, antialiasing)
- weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
- in_W, out_W, scale, kernel, kernel_width, antialiasing)
- # process H dimension
- # symmetric copying
- img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
- img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
-
- sym_patch = img[:, :sym_len_Hs, :]
- inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(1, inv_idx)
- img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
-
- sym_patch = img[:, -sym_len_He:, :]
- inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(1, inv_idx)
- img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
-
- out_1 = torch.FloatTensor(in_C, out_H, in_W)
- kernel_width = weights_H.size(1)
- for i in range(out_H):
- idx = int(indices_H[i][0])
- for j in range(out_C):
- out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
-
- # process W dimension
- # symmetric copying
- out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
- out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
-
- sym_patch = out_1[:, :, :sym_len_Ws]
- inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(2, inv_idx)
- out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
-
- sym_patch = out_1[:, :, -sym_len_We:]
- inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(2, inv_idx)
- out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
-
- out_2 = torch.FloatTensor(in_C, out_H, out_W)
- kernel_width = weights_W.size(1)
- for i in range(out_W):
- idx = int(indices_W[i][0])
- for j in range(out_C):
- out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
- if need_squeeze:
- out_2.squeeze_()
- return out_2
-
-
-# --------------------------------------------
-# imresize for numpy image [0, 1]
-# --------------------------------------------
-def imresize_np(img, scale, antialiasing=True):
- # Now the scale should be the same for H and W
- # input: img: Numpy, HWC or HW [0,1]
- # output: HWC or HW [0,1] w/o round
- img = torch.from_numpy(img)
- need_squeeze = True if img.dim() == 2 else False
- if need_squeeze:
- img.unsqueeze_(2)
-
- in_H, in_W, in_C = img.size()
- out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
- kernel_width = 4
- kernel = 'cubic'
-
-    # MATLAB's imresize picks the dimension order for the resize so that the
-    # dimension with the smallest scale factor is processed first.
-    # That optimization is not supported here; H is always processed before W.
-
- # get weights and indices
- weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
- in_H, out_H, scale, kernel, kernel_width, antialiasing)
- weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
- in_W, out_W, scale, kernel, kernel_width, antialiasing)
- # process H dimension
- # symmetric copying
- img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
- img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
-
- sym_patch = img[:sym_len_Hs, :, :]
- inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(0, inv_idx)
- img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
-
- sym_patch = img[-sym_len_He:, :, :]
- inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(0, inv_idx)
- img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
-
- out_1 = torch.FloatTensor(out_H, in_W, in_C)
- kernel_width = weights_H.size(1)
- for i in range(out_H):
- idx = int(indices_H[i][0])
- for j in range(out_C):
- out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
-
- # process W dimension
- # symmetric copying
- out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
- out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
-
- sym_patch = out_1[:, :sym_len_Ws, :]
- inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(1, inv_idx)
- out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
-
- sym_patch = out_1[:, -sym_len_We:, :]
- inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
- sym_patch_inv = sym_patch.index_select(1, inv_idx)
- out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
-
- out_2 = torch.FloatTensor(out_H, out_W, in_C)
- kernel_width = weights_W.size(1)
- for i in range(out_W):
- idx = int(indices_W[i][0])
- for j in range(out_C):
- out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
- if need_squeeze:
- out_2.squeeze_()
-
- return out_2.numpy()
-
-
-if __name__ == '__main__':
- print('---')
-# img = imread_uint('test.bmp', 3)
-# img = uint2single(img)
-# img_bicubic = imresize_np(img, 1/4)
\ No newline at end of file
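
A small end-to-end sketch of the conversion, resize, and metric helpers above: load an image, degrade it with the MATLAB-style bicubic resize, score the result with PSNR/SSIM, and round-trip it through the tensor representation. The import path and the file name are assumptions (the module sits under `ldm/modules/image_degradation/` in this repo):

```python
# Assumed import path for the module above; "test.png" is a placeholder file.
from ldm.modules.image_degradation import utils_image as util

img_uint = util.imread_uint("test.png", n_channels=3)        # HxWx3 uint8, RGB
img = util.modcrop(util.uint2single(img_uint), 4)             # float32 [0,1], H and W divisible by 4

# MATLAB-compatible bicubic: downscale by 4x, then upscale back.
img_lr = util.imresize_np(img, 1 / 4, antialiasing=True)
img_sr = util.imresize_np(img_lr, 4, antialiasing=True)

# Metrics operate on [0, 255] arrays of identical shape.
psnr = util.calculate_psnr(util.single2uint(img), util.single2uint(img_sr))
ssim = util.calculate_ssim(util.single2uint(img), util.single2uint(img_sr))
print(f"PSNR: {psnr:.2f} dB, SSIM: {ssim:.4f}")

# Tensor round trip used throughout the degradation pipeline: HxWxC uint8 -> 1xCxHxW float -> back.
t = util.uint2tensor4(img_uint)
assert util.tensor2uint(t).shape == img_uint.shape
```
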
diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/models.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/models.py
deleted file mode 100644
index be51fa51407e6ce1daaee5e8d090f6acdbee0db9..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/models.py
+++ /dev/null
@@ -1,403 +0,0 @@
-import torch
-import torch.nn.functional as F
-import torch.nn as nn
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from utils import init_weights, get_padding
-
-LRELU_SLOPE = 0.1
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.h = h
- self.convs1 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2]),
- )
- ),
- ]
- )
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- ]
- )
- self.convs2.apply(init_weights)
-
- def forward(self, x):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- xt = c2(xt)
- x = xt + x
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.h = h
- self.convs = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- ]
- )
- self.convs.apply(init_weights)
-
- def forward(self, x):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- xt = c(xt)
- x = xt + x
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Generator(torch.nn.Module):
- def __init__(self, h):
- super(Generator, self).__init__()
- self.h = h
- self.num_kernels = len(h.resblock_kernel_sizes)
- self.num_upsamples = len(h.upsample_rates)
- self.conv_pre = weight_norm(
- Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3)
- )
- resblock = ResBlock1 if h.resblock == "1" else ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- h.upsample_initial_channel // (2 ** i),
- h.upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = h.upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(h, ch, k, d))
-
- self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
- self.ups.apply(init_weights)
- self.conv_post.apply(init_weights)
-
- def forward(self, x):
- x = self.conv_pre(x)
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print("Removing weight norm...")
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
- remove_weight_norm(self.conv_pre)
- remove_weight_norm(self.conv_post)
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(5, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(5, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(5, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(5, 1), 0),
- )
- ),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self):
- super(MultiPeriodDiscriminator, self).__init__()
- self.discriminators = nn.ModuleList(
- [
- DiscriminatorP(2),
- DiscriminatorP(3),
- DiscriminatorP(5),
- DiscriminatorP(7),
- DiscriminatorP(11),
- ]
- )
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- fmap_rs.append(fmap_r)
- y_d_gs.append(y_d_g)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 128, 15, 1, padding=7)),
- norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
- norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
- norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiScaleDiscriminator(torch.nn.Module):
- def __init__(self):
- super(MultiScaleDiscriminator, self).__init__()
- self.discriminators = nn.ModuleList(
- [
- DiscriminatorS(use_spectral_norm=True),
- DiscriminatorS(),
- DiscriminatorS(),
- ]
- )
- self.meanpools = nn.ModuleList(
- [AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)]
- )
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- if i != 0:
- y = self.meanpools[i - 1](y)
- y_hat = self.meanpools[i - 1](y_hat)
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- fmap_rs.append(fmap_r)
- y_d_gs.append(y_d_g)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-def feature_loss(fmap_r, fmap_g):
- loss = 0
- for dr, dg in zip(fmap_r, fmap_g):
- for rl, gl in zip(dr, dg):
- loss += torch.mean(torch.abs(rl - gl))
-
- return loss * 2
-
-
-def discriminator_loss(disc_real_outputs, disc_generated_outputs):
- loss = 0
- r_losses = []
- g_losses = []
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
- r_loss = torch.mean((1 - dr) ** 2)
- g_loss = torch.mean(dg ** 2)
- loss += r_loss + g_loss
- r_losses.append(r_loss.item())
- g_losses.append(g_loss.item())
-
- return loss, r_losses, g_losses
-
-
-def generator_loss(disc_outputs):
- loss = 0
- gen_losses = []
- for dg in disc_outputs:
- l = torch.mean((1 - dg) ** 2)
- gen_losses.append(l)
- loss += l
-
- return loss, gen_losses
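
A minimal inference sketch for the Generator above, assuming it can be imported from this module (which in turn needs the repo's `utils.init_weights` / `utils.get_padding` on the path). The hyperparameters follow the common HiFi-GAN v1 configuration; they are assumptions here, not values read from this repo's config file:

```python
import torch
from types import SimpleNamespace

# Assumed v1-style hyperparameters; the real values live in the training config JSON.
h = SimpleNamespace(
    resblock="1",
    upsample_rates=[8, 8, 2, 2],
    upsample_kernel_sizes=[16, 16, 4, 4],
    upsample_initial_channel=512,
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
)

generator = Generator(h).eval()
generator.remove_weight_norm()      # as done before inference/export

mel = torch.randn(1, 80, 100)       # (batch, n_mels=80, frames)
with torch.no_grad():
    audio = generator(mel)          # (1, 1, 100 * 8*8*2*2) = (1, 1, 25600)
print(audio.shape)
```

Each ConvTranspose1d upsamples the time axis by exactly its stride, so the output length is the number of mel frames times the product of `upsample_rates`.
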
diff --git a/spaces/Hmjz100/YouTube-to-MT3/README.md b/spaces/Hmjz100/YouTube-to-MT3/README.md
deleted file mode 100644
index 80e6586bdbf91509810b892ee8352fe960bf33d3..0000000000000000000000000000000000000000
--- a/spaces/Hmjz100/YouTube-to-MT3/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: YouTube To MT3
-emoji: 🎼
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 3.4.1
-app_file: app.py
-duplicated_from: mdnestor/YouTube-to-MT3
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/evaluation/eval_sp.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/evaluation/eval_sp.py
deleted file mode 100644
index 702c4980389624f788abc0b42cdf54757a52512f..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/evaluation/eval_sp.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-"""
-Signal processing-based evaluation using waveforms
-"""
-
-import csv
-import numpy as np
-import os.path as op
-
-import torch
-import tqdm
-from tabulate import tabulate
-import torchaudio
-
-from examples.speech_synthesis.utils import batch_mel_spectral_distortion
-from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
-
-
-def load_eval_spec(path):
- with open(path) as f:
- reader = csv.DictReader(f, delimiter='\t')
- samples = list(reader)
- return samples
-
-
-def eval_distortion(samples, distortion_fn, device="cuda"):
- nmiss = 0
- results = []
- for sample in tqdm.tqdm(samples):
- if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
- nmiss += 1
- results.append(None)
- continue
- # assume single channel
- yref, sr = torchaudio.load(sample["ref"])
- ysyn, _sr = torchaudio.load(sample["syn"])
- yref, ysyn = yref[0].to(device), ysyn[0].to(device)
- assert sr == _sr, f"{sr} != {_sr}"
-
- distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0]
- _, _, _, _, _, pathmap = extra
- nins = torch.sum(pathmap.sum(dim=1) - 1) # extra frames in syn
- ndel = torch.sum(pathmap.sum(dim=0) - 1) # missing frames from syn
- results.append(
- (distortion.item(), # path distortion
- pathmap.size(0), # yref num frames
- pathmap.size(1), # ysyn num frames
- pathmap.sum().item(), # path length
- nins.item(), # insertion
- ndel.item(), # deletion
- )
- )
- return results
-
-
-def eval_mel_cepstral_distortion(samples, device="cuda"):
- return eval_distortion(samples, batch_mel_cepstral_distortion, device)
-
-
-def eval_mel_spectral_distortion(samples, device="cuda"):
- return eval_distortion(samples, batch_mel_spectral_distortion, device)
-
-
-def print_results(results, show_bin):
- results = np.array(list(filter(lambda x: x is not None, results)))
-
- np.set_printoptions(precision=3)
-
- def _print_result(results):
- dist, dur_ref, dur_syn, dur_ali, nins, ndel = results.sum(axis=0)
- res = {
- "nutt": len(results),
- "dist": dist,
- "dur_ref": int(dur_ref),
- "dur_syn": int(dur_syn),
- "dur_ali": int(dur_ali),
- "dist_per_ref_frm": dist/dur_ref,
- "dist_per_syn_frm": dist/dur_syn,
- "dist_per_ali_frm": dist/dur_ali,
- "ins": nins/dur_ref,
- "del": ndel/dur_ref,
- }
- print(tabulate(
- [res.values()],
- res.keys(),
- floatfmt=".4f"
- ))
-
- print(">>>> ALL")
- _print_result(results)
-
- if show_bin:
- edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
- for i in range(1, len(edges)):
- mask = np.logical_and(results[:, 1] >= edges[i-1],
- results[:, 1] < edges[i])
- if not mask.any():
- continue
- bin_results = results[mask]
- print(f">>>> ({edges[i-1]}, {edges[i]})")
- _print_result(bin_results)
-
-
-def main(eval_spec, mcd, msd, show_bin):
- samples = load_eval_spec(eval_spec)
- device = "cpu"
- if mcd:
- print("===== Evaluate Mel Cepstral Distortion =====")
- results = eval_mel_cepstral_distortion(samples, device)
- print_results(results, show_bin)
- if msd:
- print("===== Evaluate Mel Spectral Distortion =====")
- results = eval_mel_spectral_distortion(samples, device)
- print_results(results, show_bin)
-
-
-if __name__ == "__main__":
- import argparse
- parser = argparse.ArgumentParser()
- parser.add_argument("eval_spec")
- parser.add_argument("--mcd", action="store_true")
- parser.add_argument("--msd", action="store_true")
- parser.add_argument("--show-bin", action="store_true")
- args = parser.parse_args()
-
- main(args.eval_spec, args.mcd, args.msd, args.show_bin)
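
The insertion/deletion bookkeeping in `eval_distortion` above falls straight out of the DTW alignment matrix: a reference frame matched to several synthesized frames contributes insertions, and a synthesized frame shared by several reference frames contributes deletions. A minimal editor-added sketch of that counting on a hand-made pathmap (not part of the deleted file):

```python
import torch

# rows = reference frames, cols = synthesized frames; 1 marks aligned pairs
pathmap = torch.tensor([
    [1, 1, 0],   # ref frame 0 matched to two syn frames -> one insertion
    [0, 1, 0],
    [0, 0, 1],
])
nins = torch.sum(pathmap.sum(dim=1) - 1)  # extra frames in the synthesis
ndel = torch.sum(pathmap.sum(dim=0) - 1)  # ref frames without a dedicated syn frame
print(int(nins), int(ndel))               # 1 1
```
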
diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/resample.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/resample.py
deleted file mode 100644
index 1222addc424d4f898d602009e4032907241aadfe..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/resample.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-# author: adefossez
-
-import math
-
-import torch as th
-from torch.nn import functional as F
-
-
-def sinc(t):
- """sinc.
-
- :param t: the input tensor
- """
- return th.where(t == 0, th.tensor(1., device=t.device, dtype=t.dtype),
- th.sin(t) / t)
-
-
-def kernel_upsample2(zeros=56):
- """kernel_upsample2.
-
- """
- win = th.hann_window(4 * zeros + 1, periodic=False)
- winodd = win[1::2]
- t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
- t *= math.pi
- kernel = (sinc(t) * winodd).view(1, 1, -1)
- return kernel
-
-
-def upsample2(x, zeros=56):
- """
- Upsampling the input by 2 using sinc interpolation.
- Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
- ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
- Vol. 9. IEEE, 1984.
- """
- *other, time = x.shape
- kernel = kernel_upsample2(zeros).to(x)
- out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(
- *other, time
- )
- y = th.stack([x, out], dim=-1)
- return y.view(*other, -1)
-
-
-def kernel_downsample2(zeros=56):
- """kernel_downsample2.
-
- """
- win = th.hann_window(4 * zeros + 1, periodic=False)
- winodd = win[1::2]
- t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
- t.mul_(math.pi)
- kernel = (sinc(t) * winodd).view(1, 1, -1)
- return kernel
-
-
-def downsample2(x, zeros=56):
- """
- Downsampling the input by 2 using sinc interpolation.
- Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
- ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
- Vol. 9. IEEE, 1984.
- """
- if x.shape[-1] % 2 != 0:
- x = F.pad(x, (0, 1))
- xeven = x[..., ::2]
- xodd = x[..., 1::2]
- *other, time = xodd.shape
- kernel = kernel_downsample2(zeros).to(x)
- out = xeven + F.conv1d(
- xodd.view(-1, 1, time), kernel, padding=zeros
- )[..., :-1].view(*other, time)
- return out.view(*other, -1).mul(0.5)
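
As a quick sanity check of the two kernels above (an editor-added sketch, assuming `upsample2` and `downsample2` from this file are in scope), upsampling by 2 and downsampling again should approximately reproduce the input:

```python
import torch as th

x = th.randn(4, 1, 16000)        # (batch, channels, time)
y = upsample2(x)                 # time doubles to 32000 samples
z = downsample2(y)               # back to 16000 samples
print(y.shape, z.shape, (x - z).abs().max().item())  # reconstruction error is small
```
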
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/encoders/gpt2_bpe.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/encoders/gpt2_bpe.py
deleted file mode 100644
index b7426b249bbbabd8e20bbe8ca5449809efdf85fc..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/data/encoders/gpt2_bpe.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass, field
-
-from fairseq import file_utils
-from fairseq.data.encoders import register_bpe
-from fairseq.dataclass import FairseqDataclass
-
-from .gpt2_bpe_utils import get_encoder
-
-
-DEFAULT_ENCODER_JSON = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json"
-DEFAULT_VOCAB_BPE = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe"
-
-
-@dataclass
-class GPT2BPEConfig(FairseqDataclass):
- gpt2_encoder_json: str = field(
- default=DEFAULT_ENCODER_JSON, metadata={"help": "path to encoder.json"}
- )
- gpt2_vocab_bpe: str = field(
- default=DEFAULT_VOCAB_BPE, metadata={"help": "path to vocab.bpe"}
- )
-
-
-@register_bpe("gpt2", dataclass=GPT2BPEConfig)
-class GPT2BPE(object):
- def __init__(self, cfg):
- encoder_json = file_utils.cached_path(cfg.gpt2_encoder_json)
- vocab_bpe = file_utils.cached_path(cfg.gpt2_vocab_bpe)
- self.bpe = get_encoder(encoder_json, vocab_bpe)
-
- def encode(self, x: str) -> str:
- return " ".join(map(str, self.bpe.encode(x)))
-
- def decode(self, x: str) -> str:
- return self.bpe.decode(
- [int(tok) if tok not in {"<unk>", "<mask>"} and not tok.startswith('<') else tok for tok in x.split()]
- )
-
- def is_beginning_of_word(self, x: str) -> bool:
- return self.decode(x).startswith(" ")
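
A hedged usage sketch (editor-added; it assumes a working fairseq install and network access to the default `encoder.json`/`vocab.bpe` URLs above): `encode` maps text to space-separated GPT-2 token ids and `decode` inverts it, passing special `<...>` tokens through untouched.

```python
from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig

bpe = GPT2BPE(GPT2BPEConfig())      # downloads encoder.json / vocab.bpe on first use
ids = bpe.encode("Hello world")     # e.g. "15496 995" (space-separated token ids)
print(ids, "->", bpe.decode(ids))   # round-trips back to "Hello world"
```
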
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/em.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/em.py
deleted file mode 100644
index 6f15c3e46bd052b1e00929e7ece9355fb03846c7..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/em.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-import random
-from collections import Counter
-
-import torch
-
-
-class EM:
- """
- EM algorithm used to quantize the columns of W to minimize
-
- ||W - W_hat||^2
-
- Args:
- - W: weight matrix of size (in_features x out_features)
- - n_iter: number of k-means iterations
- - n_centroids: number of centroids (size of codebook)
- - eps: for cluster reassignment when an empty cluster is found
- max_tentatives: maximum number of attempts at cluster reassignment when an empty cluster is found
- - verbose: print error after each iteration
-
- Remarks:
- - If one cluster is empty, the most populated cluster is split into
- two clusters
- - All the relevant dimensions are specified in the code
- """
-
- def __init__(
- self, W, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True
- ):
- self.W = W
- self.n_centroids = n_centroids
- self.n_iter = n_iter
- self.eps = eps
- self.max_tentatives = max_tentatives
- self.verbose = verbose
- self.centroids = torch.Tensor()
- self.assignments = torch.Tensor()
- self.objective = []
-
- def initialize_centroids(self):
- """
- Initializes the centroids by sampling random columns from W.
- """
-
- in_features, out_features = self.W.size()
- indices = torch.randint(
- low=0, high=out_features, size=(self.n_centroids,)
- ).long()
- self.centroids = self.W[:, indices].t() # (n_centroids x in_features)
-
- def step(self, i):
- """
- There are two standard steps for each iteration: expectation (E) and
- minimization (M). The E-step (assignment) is performed with an exhaustive
- search and the M-step (centroid computation) is performed with
- the exact solution.
-
- Args:
- - i: step number
-
- Remarks:
- - The E-step heavily uses PyTorch broadcasting to speed up computations
- and reduce the memory overhead
- """
-
- # assignments (E-step)
- distances = self.compute_distances() # (n_centroids x out_features)
- self.assignments = torch.argmin(distances, dim=0) # (out_features)
- n_empty_clusters = self.resolve_empty_clusters()
-
- # centroids (M-step)
- for k in range(self.n_centroids):
- W_k = self.W[:, self.assignments == k] # (in_features x size_of_cluster_k)
- self.centroids[k] = W_k.mean(dim=1) # (in_features)
-
- # book-keeping
- obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item()
- self.objective.append(obj)
- if self.verbose:
- logging.info(
- f"Iteration: {i},\t"
- f"objective: {obj:.6f},\t"
- f"resolved empty clusters: {n_empty_clusters}"
- )
-
- def resolve_empty_clusters(self):
- """
- If one cluster is empty, the most populated cluster is split into
- two clusters by shifting the respective centroids. This is done
- iteratively for a fixed number of tentatives.
- """
-
- # empty clusters
- counts = Counter(map(lambda x: x.item(), self.assignments))
- empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
- n_empty_clusters = len(empty_clusters)
-
- tentatives = 0
- while len(empty_clusters) > 0:
- # given an empty cluster, find most populated cluster and split it into two
- k = random.choice(list(empty_clusters))
- m = counts.most_common(1)[0][0]
- e = torch.randn_like(self.centroids[m]) * self.eps
- self.centroids[k] = self.centroids[m].clone()
- self.centroids[k] += e
- self.centroids[m] -= e
-
- # recompute assignments
- distances = self.compute_distances() # (n_centroids x out_features)
- self.assignments = torch.argmin(distances, dim=0) # (out_features)
-
- # check for empty clusters
- counts = Counter(map(lambda x: x.item(), self.assignments))
- empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
-
- # increment tentatives
- if tentatives == self.max_tentatives:
- logging.info(
- f"Could not resolve all empty clusters, {len(empty_clusters)} remaining"
- )
- raise EmptyClusterResolveError
- tentatives += 1
-
- return n_empty_clusters
-
- def compute_distances(self):
- """
- For every centroid m, computes the distance
-
- ||W[:, j] - m||_2 for every column j of W
-
- Remarks:
- - We rely on PyTorch's broadcasting to speed up computations
- and reduce the memory overhead
- - Without chunking, the sizes in the broadcasting are modified as:
- (n_centroids x n_samples x out_features) -> (n_centroids x out_features)
- - The broadcasting computation is automatically chunked so that
- the tensors fit into the memory of the GPU
- """
-
- nb_centroids_chunks = 1
-
- while True:
- try:
- return torch.cat(
- [
- (self.W[None, :, :] - centroids_c[:, :, None]).norm(p=2, dim=1)
- for centroids_c in self.centroids.chunk(
- nb_centroids_chunks, dim=0
- )
- ],
- dim=0,
- )
- except RuntimeError:
- nb_centroids_chunks *= 2
-
- def assign(self):
- """
- Assigns each column of W to its closest centroid, thus essentially
- performing the E-step in train().
-
- Remarks:
- - The function must be called after train() or after loading
- centroids using self.load(), otherwise it will return empty tensors
- """
-
- distances = self.compute_distances() # (n_centroids x out_features)
- self.assignments = torch.argmin(distances, dim=0) # (out_features)
-
- def save(self, path, layer):
- """
- Saves centroids and assignments.
-
- Args:
- - path: folder used to save centroids and assignments
- """
-
- torch.save(self.centroids, os.path.join(path, "{}_centroids.pth".format(layer)))
- torch.save(
- self.assignments, os.path.join(path, "{}_assignments.pth".format(layer))
- )
- torch.save(self.objective, os.path.join(path, "{}_objective.pth".format(layer)))
-
- def load(self, path, layer):
- """
- Loads centroids and assignments from a given path
-
- Args:
- - path: folder use to load centroids and assignments
- """
-
- self.centroids = torch.load(
- os.path.join(path, "{}_centroids.pth".format(layer))
- )
- self.assignments = torch.load(
- os.path.join(path, "{}_assignments.pth".format(layer))
- )
- self.objective = torch.load(
- os.path.join(path, "{}_objective.pth".format(layer))
- )
-
-
-class EmptyClusterResolveError(Exception):
- pass
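
For reference, a minimal end-to-end use of the `EM` class above (editor-added sketch; only names defined in this file are used): quantize the columns of a random weight matrix and rebuild its approximation from the learned codebook.

```python
import torch

W = torch.randn(64, 512)                      # (in_features x out_features)
em = EM(W, n_centroids=16, n_iter=5, verbose=False)
em.initialize_centroids()
for i in range(em.n_iter):
    em.step(i)                                # E-step (assign) + M-step (recompute centroids)
em.assign()
W_hat = em.centroids[em.assignments].t()      # quantized reconstruction of W
print(W_hat.shape, em.objective[-1])          # torch.Size([64, 512]), final ||W - W_hat||
```
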
diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/utils/download_util.py b/spaces/Iceclear/StableSR/StableSR/basicsr/utils/download_util.py
deleted file mode 100644
index f73abd0e1831b8cab6277d780331a5103785b9ec..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/basicsr/utils/download_util.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import math
-import os
-import requests
-from torch.hub import download_url_to_file, get_dir
-from tqdm import tqdm
-from urllib.parse import urlparse
-
-from .misc import sizeof_fmt
-
-
-def download_file_from_google_drive(file_id, save_path):
- """Download files from google drive.
-
- Reference: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive
-
- Args:
- file_id (str): File id.
- save_path (str): Save path.
- """
-
- session = requests.Session()
- URL = 'https://docs.google.com/uc?export=download'
- params = {'id': file_id}
-
- response = session.get(URL, params=params, stream=True)
- token = get_confirm_token(response)
- if token:
- params['confirm'] = token
- response = session.get(URL, params=params, stream=True)
-
- # get file size
- response_file_size = session.get(URL, params=params, stream=True, headers={'Range': 'bytes=0-2'})
- if 'Content-Range' in response_file_size.headers:
- file_size = int(response_file_size.headers['Content-Range'].split('/')[1])
- else:
- file_size = None
-
- save_response_content(response, save_path, file_size)
-
-
-def get_confirm_token(response):
- for key, value in response.cookies.items():
- if key.startswith('download_warning'):
- return value
- return None
-
-
-def save_response_content(response, destination, file_size=None, chunk_size=32768):
- if file_size is not None:
- pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk')
-
- readable_file_size = sizeof_fmt(file_size)
- else:
- pbar = None
-
- with open(destination, 'wb') as f:
- downloaded_size = 0
- for chunk in response.iter_content(chunk_size):
- downloaded_size += len(chunk)
- if pbar is not None:
- pbar.update(1)
- pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} / {readable_file_size}')
- if chunk: # filter out keep-alive new chunks
- f.write(chunk)
- if pbar is not None:
- pbar.close()
-
-
-def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
- """Load file from an HTTP URL, downloading the model if necessary.
-
- Reference: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
-
- Args:
- url (str): URL to be downloaded.
- model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir.
- Default: None.
- progress (bool): Whether to show the download progress. Default: True.
- file_name (str): The downloaded file name. If None, use the file name in the url. Default: None.
-
- Returns:
- str: The path to the downloaded file.
- """
- if model_dir is None: # use the pytorch hub_dir
- hub_dir = get_dir()
- model_dir = os.path.join(hub_dir, 'checkpoints')
-
- os.makedirs(model_dir, exist_ok=True)
-
- parts = urlparse(url)
- filename = os.path.basename(parts.path)
- if file_name is not None:
- filename = file_name
- cached_file = os.path.abspath(os.path.join(model_dir, filename))
- if not os.path.exists(cached_file):
- print(f'Downloading: "{url}" to {cached_file}\n')
- download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
- return cached_file
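
Typical usage of `load_file_from_url` above (editor-added sketch; the URL is a placeholder, not a real model): the file is downloaded once into `model_dir` and the cached absolute path is returned on subsequent calls.

```python
ckpt_path = load_file_from_url(
    url='https://example.com/models/weights.pth',  # hypothetical URL
    model_dir='./weights',
    progress=True,
)
print(ckpt_path)  # absolute path to ./weights/weights.pth
```
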
diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/modules/ddsp.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/modules/ddsp.py
deleted file mode 100644
index b09ac5c5c19d165e75e1780877a857be8c104ed7..0000000000000000000000000000000000000000
--- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/modules/ddsp.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.nn import functional as F
-import torch.fft as fft
-import numpy as np
-import librosa as li
-import math
-from scipy.signal import get_window
-import crepe  # required by extract_pitch below
-
-
-def safe_log(x):
- return torch.log(x + 1e-7)
-
-
-@torch.no_grad()
-def mean_std_loudness(dataset):
- mean = 0
- std = 0
- n = 0
- for _, _, l in dataset:
- n += 1
- mean += (l.mean().item() - mean) / n
- std += (l.std().item() - std) / n
- return mean, std
-
-
-def multiscale_fft(signal, scales, overlap):
- stfts = []
- for s in scales:
- S = torch.stft(
- signal,
- s,
- int(s * (1 - overlap)),
- s,
- torch.hann_window(s).to(signal),
- True,
- normalized=True,
- return_complex=True,
- ).abs()
- stfts.append(S)
- return stfts
-
-
-def resample(x, factor: int):
- batch, frame, channel = x.shape
- x = x.permute(0, 2, 1).reshape(batch * channel, 1, frame)
-
- window = torch.hann_window(
- factor * 2,
- dtype=x.dtype,
- device=x.device,
- ).reshape(1, 1, -1)
- y = torch.zeros(x.shape[0], x.shape[1], factor * x.shape[2]).to(x)
- y[..., ::factor] = x
- y[..., -1:] = x[..., -1:]
- y = torch.nn.functional.pad(y, [factor, factor])
- y = torch.nn.functional.conv1d(y, window)[..., :-1]
-
- y = y.reshape(batch, channel, factor * frame).permute(0, 2, 1)
-
- return y
-
-
-def upsample(signal, factor):
- signal = signal.permute(0, 2, 1)
- signal = nn.functional.interpolate(signal, size=signal.shape[-1] * factor)
- return signal.permute(0, 2, 1)
-
-
-def remove_above_nyquist(amplitudes, pitch, sampling_rate):
- n_harm = amplitudes.shape[-1]
- pitches = pitch * torch.arange(1, n_harm + 1).to(pitch)
- aa = (pitches < sampling_rate / 2).float() + 1e-4
- return amplitudes * aa
-
-
-def scale_function(x):
- return 2 * torch.sigmoid(x) ** (math.log(10)) + 1e-7
-
-
-def extract_loudness(signal, sampling_rate, block_size, n_fft=2048):
- S = li.stft(
- signal,
- n_fft=n_fft,
- hop_length=block_size,
- win_length=n_fft,
- center=True,
- )
- S = np.log(abs(S) + 1e-7)
- f = li.fft_frequencies(sampling_rate, n_fft)
- a_weight = li.A_weighting(f)
-
- S = S + a_weight.reshape(-1, 1)
-
- S = np.mean(S, 0)[..., :-1]
-
- return S
-
-
-def extract_pitch(signal, sampling_rate, block_size):
- length = signal.shape[-1] // block_size
- f0 = crepe.predict(
- signal,
- sampling_rate,
- step_size=int(1000 * block_size / sampling_rate),
- verbose=1,
- center=True,
- viterbi=True,
- )
- f0 = f0[1].reshape(-1)[:-1]
-
- if f0.shape[-1] != length:
- f0 = np.interp(
- np.linspace(0, 1, length, endpoint=False),
- np.linspace(0, 1, f0.shape[-1], endpoint=False),
- f0,
- )
-
- return f0
-
-
-def mlp(in_size, hidden_size, n_layers):
- channels = [in_size] + (n_layers) * [hidden_size]
- net = []
- for i in range(n_layers):
- net.append(nn.Linear(channels[i], channels[i + 1]))
- net.append(nn.LayerNorm(channels[i + 1]))
- net.append(nn.LeakyReLU())
- return nn.Sequential(*net)
-
-
-def gru(n_input, hidden_size):
- return nn.GRU(n_input * hidden_size, hidden_size, batch_first=True)
-
-
-def harmonic_synth(pitch, amplitudes, sampling_rate):
- n_harmonic = amplitudes.shape[-1]
- omega = torch.cumsum(2 * math.pi * pitch / sampling_rate, 1)
- omegas = omega * torch.arange(1, n_harmonic + 1).to(omega)
- signal = (torch.sin(omegas) * amplitudes).sum(-1, keepdim=True)
- return signal
-
-
-def amp_to_impulse_response(amp, target_size):
- amp = torch.stack([amp, torch.zeros_like(amp)], -1)
- amp = torch.view_as_complex(amp)
- amp = fft.irfft(amp)
-
- filter_size = amp.shape[-1]
-
- amp = torch.roll(amp, filter_size // 2, -1)
- win = torch.hann_window(filter_size, dtype=amp.dtype, device=amp.device)
-
- amp = amp * win
-
- amp = nn.functional.pad(amp, (0, int(target_size) - int(filter_size)))
- amp = torch.roll(amp, -filter_size // 2, -1)
-
- return amp
-
-
-def fft_convolve(signal, kernel):
- signal = nn.functional.pad(signal, (0, signal.shape[-1]))
- kernel = nn.functional.pad(kernel, (kernel.shape[-1], 0))
-
- output = fft.irfft(fft.rfft(signal) * fft.rfft(kernel))
- output = output[..., output.shape[-1] // 2:]
-
- return output
-
-
-def init_kernels(win_len, win_inc, fft_len, win_type=None, invers=False):
- if win_type == 'None' or win_type is None:
- window = np.ones(win_len)
- else:
- window = get_window(win_type, win_len, fftbins=True) # **0.5
-
- N = fft_len
- fourier_basis = np.fft.rfft(np.eye(N))[:win_len]
- real_kernel = np.real(fourier_basis)
- imag_kernel = np.imag(fourier_basis)
- kernel = np.concatenate([real_kernel, imag_kernel], 1).T
-
- if invers:
- kernel = np.linalg.pinv(kernel).T
-
- kernel = kernel * window
- kernel = kernel[:, None, :]
- return torch.from_numpy(kernel.astype(np.float32)), torch.from_numpy(window[None, :, None].astype(np.float32))
-
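
To illustrate the harmonic synthesizer defined above (editor-added sketch; shapes follow the batch x frames x harmonics convention used in this file, with one frame per sample for simplicity):

```python
import torch

sr = 16000
pitch = torch.full((1, sr, 1), 220.0)                          # constant 220 Hz f0
amps = torch.tensor([1.0, 0.5, 0.25, 0.125]).repeat(1, sr, 1)  # four harmonic amplitudes
amps = remove_above_nyquist(amps, pitch, sr)                   # mute harmonics above sr/2
audio = harmonic_synth(pitch, amps, sr)                        # (1, sr, 1) waveform
print(audio.shape)
```
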
diff --git a/spaces/Illumotion/Koboldcpp/otherarch/ggml_v2-cuda-legacy.h b/spaces/Illumotion/Koboldcpp/otherarch/ggml_v2-cuda-legacy.h
deleted file mode 100644
index fbee9eff309699c79dd4c60e0b8b03c5e403690d..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/otherarch/ggml_v2-cuda-legacy.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#include "ggml_v2.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void ggml_v2_init_cublas_legacy(void);
-
-void ggml_v2_cuda_mul_mat_legacy(const struct ggml_v2_tensor * src0, const struct ggml_v2_tensor * src1, struct ggml_v2_tensor * dst, void * wdata, size_t wsize);
-
-
-#ifdef __cplusplus
-}
-#endif
\ No newline at end of file
diff --git a/spaces/InpaintAI/Inpaint-Anything/fill_anything.py b/spaces/InpaintAI/Inpaint-Anything/fill_anything.py
deleted file mode 100644
index cc601fb87931cc02a7d81c4f698438d5ece4d85b..0000000000000000000000000000000000000000
--- a/spaces/InpaintAI/Inpaint-Anything/fill_anything.py
+++ /dev/null
@@ -1,128 +0,0 @@
-import cv2
-import sys
-import argparse
-import numpy as np
-import torch
-from pathlib import Path
-from matplotlib import pyplot as plt
-from typing import Any, Dict, List
-
-from sam_segment import predict_masks_with_sam
-from stable_diffusion_inpaint import fill_img_with_sd
-from utils import load_img_to_array, save_array_to_img, dilate_mask, \
- show_mask, show_points
-
-
-def setup_args(parser):
- parser.add_argument(
- "--input_img", type=str, required=True,
- help="Path to a single input img",
- )
- parser.add_argument(
- "--point_coords", type=float, nargs='+', required=True,
- help="The coordinate of the point prompt, [coord_W coord_H].",
- )
- parser.add_argument(
- "--point_labels", type=int, nargs='+', required=True,
- help="The labels of the point prompt, 1 or 0.",
- )
- parser.add_argument(
- "--text_prompt", type=str, required=True,
- help="Text prompt",
- )
- parser.add_argument(
- "--dilate_kernel_size", type=int, default=None,
- help="Dilate kernel size. Default: None",
- )
- parser.add_argument(
- "--output_dir", type=str, required=True,
- help="Output path to the directory with results.",
- )
- parser.add_argument(
- "--sam_model_type", type=str,
- default="vit_h", choices=['vit_h', 'vit_l', 'vit_b'],
- help="The type of SAM model to load. Default: 'vit_h'."
- )
- parser.add_argument(
- "--sam_ckpt", type=str, required=True,
- help="The path to the SAM checkpoint to use for mask generation.",
- )
- parser.add_argument(
- "--seed", type=int,
- help="Specify seed for reproducibility.",
- )
- parser.add_argument(
- "--deterministic", action="store_true",
- help="Use deterministic algorithms for reproducibility.",
- )
-
-
-
-if __name__ == "__main__":
- """Example usage:
- python fill_anything.py \
- --input_img FA_demo/FA1_dog.png \
- --point_coords 750 500 \
- --point_labels 1 \
- --text_prompt "a teddy bear on a bench" \
- --dilate_kernel_size 15 \
- --output_dir ./results \
- --sam_model_type "vit_h" \
- --sam_ckpt sam_vit_h_4b8939.pth
- """
- parser = argparse.ArgumentParser()
- setup_args(parser)
- args = parser.parse_args(sys.argv[1:])
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- img = load_img_to_array(args.input_img)
-
- masks, _, _ = predict_masks_with_sam(
- img,
- [args.point_coords],
- args.point_labels,
- model_type=args.sam_model_type,
- ckpt_p=args.sam_ckpt,
- device=device,
- )
- masks = masks.astype(np.uint8) * 255
-
- # dilate mask to avoid unmasked edge effect
- if args.dilate_kernel_size is not None:
- masks = [dilate_mask(mask, args.dilate_kernel_size) for mask in masks]
-
- # visualize the segmentation results
- img_stem = Path(args.input_img).stem
- out_dir = Path(args.output_dir) / img_stem
- out_dir.mkdir(parents=True, exist_ok=True)
- for idx, mask in enumerate(masks):
- # path to the results
- mask_p = out_dir / f"mask_{idx}.png"
- img_points_p = out_dir / f"with_points.png"
- img_mask_p = out_dir / f"with_{Path(mask_p).name}"
-
- # save the mask
- save_array_to_img(mask, mask_p)
-
- # save the pointed and masked image
- dpi = plt.rcParams['figure.dpi']
- height, width = img.shape[:2]
- plt.figure(figsize=(width/dpi/0.77, height/dpi/0.77))
- plt.imshow(img)
- plt.axis('off')
- show_points(plt.gca(), [args.point_coords], args.point_labels,
- size=(width*0.04)**2)
- plt.savefig(img_points_p, bbox_inches='tight', pad_inches=0)
- show_mask(plt.gca(), mask, random_color=False)
- plt.savefig(img_mask_p, bbox_inches='tight', pad_inches=0)
- plt.close()
-
- # fill the masked image
- for idx, mask in enumerate(masks):
- if args.seed is not None:
- torch.manual_seed(args.seed)
- mask_p = out_dir / f"mask_{idx}.png"
- img_filled_p = out_dir / f"filled_with_{Path(mask_p).name}"
- img_filled = fill_img_with_sd(
- img, mask, args.text_prompt, device=device)
- save_array_to_img(img_filled, img_filled_p)
\ No newline at end of file
diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/commands/diffusers_cli.py b/spaces/Jackflack09/diffuse-custom/diffusers/commands/diffusers_cli.py
deleted file mode 100644
index 30084e55ba4eeec79c87a99eae3e60a6233dc556..0000000000000000000000000000000000000000
--- a/spaces/Jackflack09/diffuse-custom/diffusers/commands/diffusers_cli.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from argparse import ArgumentParser
-
-from .env import EnvironmentCommand
-
-
-def main():
- parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
- commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
-
- # Register commands
- EnvironmentCommand.register_subcommand(commands_parser)
-
- # Let's go
- args = parser.parse_args()
-
- if not hasattr(args, "func"):
- parser.print_help()
- exit(1)
-
- # Run
- service = args.func(args)
- service.run()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/data/transforms.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/data/transforms.py
deleted file mode 100644
index aead9dc73ed063e1c5865040eaa2652b26aa3ad3..0000000000000000000000000000000000000000
--- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/data/transforms.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import cv2
-import random
-
-
-def mod_crop(img, scale):
- """Mod crop images, used during testing.
-
- Args:
- img (ndarray): Input image.
- scale (int): Scale factor.
-
- Returns:
- ndarray: Result image.
- """
- img = img.copy()
- if img.ndim in (2, 3):
- h, w = img.shape[0], img.shape[1]
- h_remainder, w_remainder = h % scale, w % scale
- img = img[:h - h_remainder, :w - w_remainder, ...]
- else:
- raise ValueError(f'Wrong img ndim: {img.ndim}.')
- return img
-
-
-def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path):
- """Paired random crop.
-
- It crops lists of lq and gt images with corresponding locations.
-
- Args:
- img_gts (list[ndarray] | ndarray): GT images. Note that all images
- should have the same shape. If the input is an ndarray, it will
- be transformed to a list containing itself.
- img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
- should have the same shape. If the input is an ndarray, it will
- be transformed to a list containing itself.
- gt_patch_size (int): GT patch size.
- scale (int): Scale factor.
- gt_path (str): Path to ground-truth.
-
- Returns:
- list[ndarray] | ndarray: GT images and LQ images. If returned results
- only have one element, just return ndarray.
- """
-
- if not isinstance(img_gts, list):
- img_gts = [img_gts]
- if not isinstance(img_lqs, list):
- img_lqs = [img_lqs]
-
- h_lq, w_lq, _ = img_lqs[0].shape
- h_gt, w_gt, _ = img_gts[0].shape
- lq_patch_size = gt_patch_size // scale
-
- if h_gt != h_lq * scale or w_gt != w_lq * scale:
- raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',
- f'multiplication of LQ ({h_lq}, {w_lq}).')
- if h_lq < lq_patch_size or w_lq < lq_patch_size:
- raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
- f'({lq_patch_size}, {lq_patch_size}). '
- f'Please remove {gt_path}.')
-
- # randomly choose top and left coordinates for lq patch
- top = random.randint(0, h_lq - lq_patch_size)
- left = random.randint(0, w_lq - lq_patch_size)
-
- # crop lq patch
- img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]
-
- # crop corresponding gt patch
- top_gt, left_gt = int(top * scale), int(left * scale)
- img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]
- if len(img_gts) == 1:
- img_gts = img_gts[0]
- if len(img_lqs) == 1:
- img_lqs = img_lqs[0]
- return img_gts, img_lqs
-
-
-def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
- """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
-
- We use vertical flip and transpose for rotation implementation.
- All the images in the list use the same augmentation.
-
- Args:
- imgs (list[ndarray] | ndarray): Images to be augmented. If the input
- is an ndarray, it will be transformed to a list.
- hflip (bool): Horizontal flip. Default: True.
- rotation (bool): Rotation. Default: True.
- flows (list[ndarray]): Flows to be augmented. If the input is an
- ndarray, it will be transformed to a list.
- Dimension is (h, w, 2). Default: None.
- return_status (bool): Return the status of flip and rotation.
- Default: False.
-
- Returns:
- list[ndarray] | ndarray: Augmented images and flows. If returned
- results only have one element, just return ndarray.
-
- """
- hflip = hflip and random.random() < 0.5
- vflip = rotation and random.random() < 0.5
- rot90 = rotation and random.random() < 0.5
-
- def _augment(img):
- if hflip: # horizontal
- cv2.flip(img, 1, img)
- if vflip: # vertical
- cv2.flip(img, 0, img)
- if rot90:
- img = img.transpose(1, 0, 2)
- return img
-
- def _augment_flow(flow):
- if hflip: # horizontal
- cv2.flip(flow, 1, flow)
- flow[:, :, 0] *= -1
- if vflip: # vertical
- cv2.flip(flow, 0, flow)
- flow[:, :, 1] *= -1
- if rot90:
- flow = flow.transpose(1, 0, 2)
- flow = flow[:, :, [1, 0]]
- return flow
-
- if not isinstance(imgs, list):
- imgs = [imgs]
- imgs = [_augment(img) for img in imgs]
- if len(imgs) == 1:
- imgs = imgs[0]
-
- if flows is not None:
- if not isinstance(flows, list):
- flows = [flows]
- flows = [_augment_flow(flow) for flow in flows]
- if len(flows) == 1:
- flows = flows[0]
- return imgs, flows
- else:
- if return_status:
- return imgs, (hflip, vflip, rot90)
- else:
- return imgs
-
-
-def img_rotate(img, angle, center=None, scale=1.0):
- """Rotate image.
-
- Args:
- img (ndarray): Image to be rotated.
- angle (float): Rotation angle in degrees. Positive values mean
- counter-clockwise rotation.
- center (tuple[int]): Rotation center. If the center is None,
- initialize it as the center of the image. Default: None.
- scale (float): Isotropic scale factor. Default: 1.0.
- """
- (h, w) = img.shape[:2]
-
- if center is None:
- center = (w // 2, h // 2)
-
- matrix = cv2.getRotationMatrix2D(center, angle, scale)
- rotated_img = cv2.warpAffine(img, matrix, (w, h))
- return rotated_img
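
A short editor-added sketch of how the two helpers above are typically chained for x4 super-resolution training, using random arrays in place of real image pairs:

```python
import numpy as np

gt = np.random.rand(256, 256, 3).astype(np.float32)  # ground-truth image
lq = np.random.rand(64, 64, 3).astype(np.float32)    # 4x downscaled counterpart
gt_patch, lq_patch = paired_random_crop(gt, lq, gt_patch_size=128, scale=4,
                                        gt_path='dummy.png')
# cv2.flip writes in place, so make the cropped views contiguous first
gt_patch = np.ascontiguousarray(gt_patch)
lq_patch = np.ascontiguousarray(lq_patch)
gt_patch, lq_patch = augment([gt_patch, lq_patch], hflip=True, rotation=True)
print(gt_patch.shape, lq_patch.shape)  # (128, 128, 3) (32, 32, 3)
```
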
diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/modules/webui_locale.py b/spaces/JohnSmith9982/ChuanhuChatGPT/modules/webui_locale.py
deleted file mode 100644
index 1ce4d97b9b41cbb2d9be3fdadc4c85f6ef897604..0000000000000000000000000000000000000000
--- a/spaces/JohnSmith9982/ChuanhuChatGPT/modules/webui_locale.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-import locale
-import commentjson as json
-
-class I18nAuto:
- def __init__(self):
- if os.path.exists("config.json"):
- with open("config.json", "r", encoding='utf-8') as f:
- config = json.load(f)
- else:
- config = {}
- lang_config = config.get("language", "auto")
- language = os.environ.get("LANGUAGE", lang_config)
- if language == "auto":
- language = locale.getdefaultlocale()[0] # get the language code of the system (ex. zh_CN)
- self.language_map = {}
- self.file_is_exists = os.path.isfile(f"./locale/{language}.json")
- if self.file_is_exists:
- with open(f"./locale/{language}.json", "r", encoding="utf-8") as f:
- self.language_map.update(json.load(f))
-
- def __call__(self, key):
- if self.file_is_exists and key in self.language_map:
- return self.language_map[key]
- else:
- return key
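
Usage is a single call (editor-added note): keys are looked up in `./locale/<lang>.json` for the detected language and returned unchanged when no translation file or entry exists.

```python
i18n = I18nAuto()
print(i18n("Submit"))  # translated string if available, otherwise "Submit" itself
```
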
diff --git a/spaces/Kevin676/AutoGPT/autogpt/prompt.py b/spaces/Kevin676/AutoGPT/autogpt/prompt.py
deleted file mode 100644
index 03c132acdf26d08deeee119e41a561f430957806..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/AutoGPT/autogpt/prompt.py
+++ /dev/null
@@ -1,204 +0,0 @@
-from colorama import Fore
-
-from autogpt.config import Config
-from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import Config
-from autogpt.logs import logger
-from autogpt.promptgenerator import PromptGenerator
-from autogpt.setup import prompt_user
-from autogpt.utils import clean_input
-
-CFG = Config()
-
-
-def get_prompt() -> str:
- """
- This function generates a prompt string that includes various constraints,
- commands, resources, and performance evaluations.
-
- Returns:
- str: The generated prompt string.
- """
-
- # Initialize the Config object
- cfg = Config()
-
- # Initialize the PromptGenerator object
- prompt_generator = PromptGenerator()
-
- # Add constraints to the PromptGenerator object
- prompt_generator.add_constraint(
- "~4000 word limit for short term memory. Your short term memory is short, so"
- " immediately save important information to files."
- )
- prompt_generator.add_constraint(
- "If you are unsure how you previously did something or want to recall past"
- " events, thinking about similar events will help you remember."
- )
- prompt_generator.add_constraint("No user assistance")
- prompt_generator.add_constraint(
- 'Exclusively use the commands listed in double quotes e.g. "command name"'
- )
- prompt_generator.add_constraint(
- "Use subprocesses for commands that will not terminate within a few minutes"
- )
-
- # Define the command list
- commands = [
- ("Google Search", "google", {"input": ""}),
- (
- "Browse Website",
- "browse_website",
- {"url": "", "question": ""},
- ),
- (
- "Start GPT Agent",
- "start_agent",
- {"name": "", "task": "", "prompt": ""},
- ),
- (
- "Message GPT Agent",
- "message_agent",
- {"key": "", "message": ""},
- ),
- ("List GPT Agents", "list_agents", {}),
- ("Delete GPT Agent", "delete_agent", {"key": ""}),
- (
- "Clone Repository",
- "clone_repository",
- {"repository_url": "", "clone_path": ""},
- ),
- ("Write to file", "write_to_file", {"file": "", "text": ""}),
- ("Read file", "read_file", {"file": ""}),
- ("Append to file", "append_to_file", {"file": "", "text": ""}),
- ("Delete file", "delete_file", {"file": ""}),
- ("Search Files", "search_files", {"directory": ""}),
- ("Analyze Code", "analyze_code", {"code": ""}),
- (
- "Get Improved Code",
- "improve_code",
- {"suggestions": "", "code": ""},
- ),
- (
- "Write Tests",
- "write_tests",
- {"code": "", "focus": ""},
- ),
- ("Execute Python File", "execute_python_file", {"file": ""}),
- ("Task Complete (Shutdown)", "task_complete", {"reason": ""}),
- ("Generate Image", "generate_image", {"prompt": ""}),
- ("Send Tweet", "send_tweet", {"text": ""}),
- ]
-
- # Only add the audio to text command if the model is specified
- if cfg.huggingface_audio_to_text_model:
- commands.append(
- ("Convert Audio to text", "read_audio_from_file", {"file": ""}),
- )
-
- # Only add shell command to the prompt if the AI is allowed to execute it
- if cfg.execute_local_commands:
- commands.append(
- (
- "Execute Shell Command, non-interactive commands only",
- "execute_shell",
- {"command_line": ""},
- ),
- )
- commands.append(
- (
- "Execute Shell Command Popen, non-interactive commands only",
- "execute_shell_popen",
- {"command_line": ""},
- ),
- )
-
- # Only add the download file command if the AI is allowed to execute it
- if cfg.allow_downloads:
- commands.append(
- (
- "Downloads a file from the internet, and stores it locally",
- "download_file",
- {"url": "", "file": ""},
- ),
- )
-
- # Add these commands last.
- commands.append(
- ("Do Nothing", "do_nothing", {}),
- )
- commands.append(
- ("Task Complete (Shutdown)", "task_complete", {"reason": ""}),
- )
-
- # Add commands to the PromptGenerator object
- for command_label, command_name, args in commands:
- prompt_generator.add_command(command_label, command_name, args)
-
- # Add resources to the PromptGenerator object
- prompt_generator.add_resource(
- "Internet access for searches and information gathering."
- )
- prompt_generator.add_resource("Long Term memory management.")
- prompt_generator.add_resource(
- "GPT-3.5 powered Agents for delegation of simple tasks."
- )
- prompt_generator.add_resource("File output.")
-
- # Add performance evaluations to the PromptGenerator object
- prompt_generator.add_performance_evaluation(
- "Continuously review and analyze your actions to ensure you are performing to"
- " the best of your abilities."
- )
- prompt_generator.add_performance_evaluation(
- "Constructively self-criticize your big-picture behavior constantly."
- )
- prompt_generator.add_performance_evaluation(
- "Reflect on past decisions and strategies to refine your approach."
- )
- prompt_generator.add_performance_evaluation(
- "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
- " the least number of steps."
- )
-
- # Generate the prompt string
- return prompt_generator.generate_prompt_string()
-
-
-def construct_prompt() -> str:
- """Construct the prompt for the AI to respond to
-
- Returns:
- str: The prompt string
- """
- config = AIConfig.load(CFG.ai_settings_file)
- if CFG.skip_reprompt and config.ai_name:
- logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
- logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
- logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
- elif config.ai_name:
- logger.typewriter_log(
- "Welcome back! ",
- Fore.GREEN,
- f"Would you like me to return to being {config.ai_name}?",
- speak_text=True,
- )
- should_continue = clean_input(
- f"""Continue with the last settings?
-Name: {config.ai_name}
-Role: {config.ai_role}
-Goals: {config.ai_goals}
-Continue (y/n): """
- )
- if should_continue.lower() == "n":
- config = AIConfig()
-
- if not config.ai_name:
- config = prompt_user()
- config.save(CFG.ai_settings_file)
-
- # Get rid of this global:
- global ai_name
- ai_name = config.ai_name
-
- return config.construct_full_prompt()
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/losses/ae_loss.py b/spaces/KyanChen/RSPrompter/mmdet/models/losses/ae_loss.py
deleted file mode 100644
index 2aa7d696be4b937a2d45545a8309aaa936fe5f22..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/losses/ae_loss.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from mmdet.registry import MODELS
-
-
-def ae_loss_per_image(tl_preds, br_preds, match):
- """Associative Embedding Loss in one image.
-
- Associative Embedding Loss including two parts: pull loss and push loss.
- Pull loss makes embedding vectors from same object closer to each other.
- Push loss distinguish embedding vector from different objects, and makes
- the gap between them is large enough.
-
- During computing, usually there are 3 cases:
- - no object in image: both pull loss and push loss will be 0.
- - one object in image: push loss will be 0 and pull loss is computed
- by the two corner of the only object.
- - more than one objects in image: pull loss is computed by corner pairs
- from each object, push loss is computed by each object with all
- other objects. We use confusion matrix with 0 in diagonal to
- compute the push loss.
-
- Args:
- tl_preds (tensor): Embedding feature map of left-top corner.
- br_preds (tensor): Embedding feature map of bottom-right corner.
- match (list): Downsampled coordinates pair of each ground truth box.
- """
-
- tl_list, br_list, me_list = [], [], []
- if len(match) == 0: # no object in image
- pull_loss = tl_preds.sum() * 0.
- push_loss = tl_preds.sum() * 0.
- else:
- for m in match:
- [tl_y, tl_x], [br_y, br_x] = m
- tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
- br_e = br_preds[:, br_y, br_x].view(-1, 1)
- tl_list.append(tl_e)
- br_list.append(br_e)
- me_list.append((tl_e + br_e) / 2.0)
-
- tl_list = torch.cat(tl_list)
- br_list = torch.cat(br_list)
- me_list = torch.cat(me_list)
-
- assert tl_list.size() == br_list.size()
-
- # N is object number in image, M is dimension of embedding vector
- N, M = tl_list.size()
-
- pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
- pull_loss = pull_loss.sum() / N
-
- margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
-
- # confusion matrix of push loss
- conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
- conf_weight = 1 - torch.eye(N).type_as(me_list)
- conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
-
- if N > 1: # more than one object in current image
- push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
- else:
- push_loss = tl_preds.sum() * 0.
-
- return pull_loss, push_loss
-
-
-@MODELS.register_module()
-class AssociativeEmbeddingLoss(nn.Module):
- """Associative Embedding Loss.
-
- More details can be found in
- `Associative Embedding `_ and
- `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
- `CornerNet <https://arxiv.org/abs/1808.01244>`_ .
-
- Args:
- pull_weight (float): Loss weight for corners from same object.
- push_weight (float): Loss weight for corners from different object.
- """
-
- def __init__(self, pull_weight=0.25, push_weight=0.25):
- super(AssociativeEmbeddingLoss, self).__init__()
- self.pull_weight = pull_weight
- self.push_weight = push_weight
-
- def forward(self, pred, target, match):
- """Forward function."""
- batch = pred.size(0)
- pull_all, push_all = 0.0, 0.0
- for i in range(batch):
- pull, push = ae_loss_per_image(pred[i], target[i], match[i])
-
- pull_all += self.pull_weight * pull
- push_all += self.push_weight * push
-
- return pull_all, push_all
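
A small editor-added check of the behaviour described in the docstring, with two well-separated objects in a one-dimensional embedding space: pull and push both vanish because the corners agree within each object and the inter-object gap (2) exceeds the margin of 1.

```python
import torch

tl = torch.zeros(1, 4, 4)   # (embedding_dim, H, W) top-left corner embeddings
br = torch.zeros(1, 4, 4)
tl[0, 0, 0], br[0, 1, 1] = 1.0, 1.0      # object A: both corners embed at +1
tl[0, 2, 2], br[0, 3, 3] = -1.0, -1.0    # object B: both corners embed at -1
match = [[[0, 0], [1, 1]], [[2, 2], [3, 3]]]   # [(tl_y, tl_x), (br_y, br_x)] per box
pull, push = ae_loss_per_image(tl, br, match)
print(pull.item(), push.item())          # 0.0 0.0
```
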
diff --git a/spaces/LanguageBind/LanguageBind/open_clip/zero_shot_classifier.py b/spaces/LanguageBind/LanguageBind/open_clip/zero_shot_classifier.py
deleted file mode 100644
index a9a5267cea4119994e30bb4830a6744cf25bdbaf..0000000000000000000000000000000000000000
--- a/spaces/LanguageBind/LanguageBind/open_clip/zero_shot_classifier.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from functools import partial
-from itertools import islice
-from typing import Callable, List, Optional, Sequence, Union
-
-import torch
-import torch.nn.functional as F
-
-
-def batched(iterable, n):
- """Batch data into lists of length *n*. The last batch may be shorter.
- NOTE based on more-itertools impl, to be replaced by python 3.12 itertools.batched impl
- """
- it = iter(iterable)
- while True:
- batch = list(islice(it, n))
- if not batch:
- break
- yield batch
-
-
-def build_zero_shot_classifier(
- model,
- tokenizer,
- classnames: Sequence[str],
- templates: Sequence[Union[Callable, str]],
- num_classes_per_batch: Optional[int] = 10,
- device: Union[str, torch.device] = 'cpu',
- use_tqdm: bool = False,
-):
- """ Build zero-shot classifier weights by iterating over class names in batches
- Args:
- model: CLIP model instance
- tokenizer: CLIP tokenizer instance
- classnames: A sequence of class (label) names
- templates: A sequence of callables or format() friendly strings to produce templates per class name
- num_classes_per_batch: The number of classes to batch together in each forward, all if None
- device: Device to use.
- use_tqdm: Enable TQDM progress bar.
- """
- assert isinstance(templates, Sequence) and len(templates) > 0
- assert isinstance(classnames, Sequence) and len(classnames) > 0
- use_format = isinstance(templates[0], str)
- num_templates = len(templates)
- num_classes = len(classnames)
- if use_tqdm:
- import tqdm
- num_iter = 1 if num_classes_per_batch is None else ((num_classes - 1) // num_classes_per_batch + 1)
- iter_wrap = partial(tqdm.tqdm, total=num_iter, unit_scale=num_classes_per_batch)
- else:
- iter_wrap = iter
-
- def _process_batch(batch_classnames):
- num_batch_classes = len(batch_classnames)
- texts = [template.format(c) if use_format else template(c) for c in batch_classnames for template in templates]
- input_ids, attention_mask = tokenizer(texts)
- input_ids, attention_mask = input_ids.to(device), attention_mask.to(device)
- class_embeddings = F.normalize(model.encode_text(input_ids, attention_mask), dim=-1)
- class_embeddings = class_embeddings.reshape(num_batch_classes, num_templates, -1).mean(dim=1)
- class_embeddings = class_embeddings / class_embeddings.norm(dim=1, keepdim=True)
- class_embeddings = class_embeddings.T
- return class_embeddings
-
- with torch.no_grad():
- if num_classes_per_batch:
- batched_embeds = [_process_batch(batch) for batch in iter_wrap(batched(classnames, num_classes_per_batch))]
- zeroshot_weights = torch.cat(batched_embeds, dim=1)
- else:
- zeroshot_weights = _process_batch(classnames)
- return zeroshot_weights
-
-
-def build_zero_shot_classifier_legacy(
- model,
- tokenizer,
- classnames: Sequence[str],
- templates: Sequence[Union[Callable, str]],
- device: Union[str, torch.device] = 'cpu',
- use_tqdm: bool = False,
-):
- """ Build zero-shot classifier weights by iterating over class names 1 by 1
- Args:
- model: CLIP model instance
- tokenizer: CLIP tokenizer instance
- classnames: A sequence of class (label) names
- templates: A sequence of callables or format() friendly strings to produce templates per class name
- device: Device to use.
- use_tqdm: Enable TQDM progress bar.
- """
- assert isinstance(templates, Sequence) and len(templates) > 0
- assert isinstance(classnames, Sequence) and len(classnames) > 0
- if use_tqdm:
- import tqdm
- iter_wrap = tqdm.tqdm
- else:
- iter_wrap = iter
-
- use_format = isinstance(templates[0], str)
-
- with torch.no_grad():
- zeroshot_weights = []
- for classname in iter_wrap(classnames):
- texts = [template.format(classname) if use_format else template(classname) for template in templates]
- texts = tokenizer(texts).to(device) # tokenize
- class_embeddings = model.encode_text(texts)
- class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0)
- class_embedding /= class_embedding.norm()
- zeroshot_weights.append(class_embedding)
- zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
-
- return zeroshot_weights
-
diff --git a/spaces/Lianglan/NLLB200-Translate-Distill-600/README.md b/spaces/Lianglan/NLLB200-Translate-Distill-600/README.md
deleted file mode 100644
index 49b16edc4696d33874c00d14e2ab7c4346af3e0d..0000000000000000000000000000000000000000
--- a/spaces/Lianglan/NLLB200-Translate-Distill-600/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: NLLB200 Translate Distill 600
-emoji: 👀
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
-license: odc-by
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Lwhieldon/Fall22_UMBC606_AbstractSummarization/app.py b/spaces/Lwhieldon/Fall22_UMBC606_AbstractSummarization/app.py
deleted file mode 100644
index 11f19f7d689e9a41f24478fd509270d43f8f2662..0000000000000000000000000000000000000000
--- a/spaces/Lwhieldon/Fall22_UMBC606_AbstractSummarization/app.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import gradio as gr
-
-examples = [
- [" Lee: Hi guys, have you heard of our great professor, Zeanique Barber? \n William: Yes, I was excited that she became our instructor for UMBC's DATA606 Capstone for Data Science in Fall 2022! \n Lee: Indeed, she has amazing experience in Data Transformation for organizations and I'm excited to learn from her! \n William: I know, I too have experience in Data Transformation in cloud technology ;) \n Lee: Cool, maybe we should do a project about cloud data? What do you think? \n William: Great idea, how hard can it be?! \n William: I am in! \n Lee: Awesome, let's do it together! "],
- ["Karen: What do I owe the excessive volume of this giggling?\nPlankton: [chuckles] You may thank my new and original idea, darling. Free samples!\nKaren: New and original, my exhaust fan.\nPlankton: Okay, so I didn't invent the concept, but I have improved upon it by removing the only obstacle to chum's success!\nKaren: You mean the smell? Smell.\nPlankton: No. I mean getting the public to try some! Once everyone gets a taste of my delicious chum, they'll tear this place apart to get some. Plus it's free, and who can resist free?!Step right up for your free samples of delicious chum!\nNat: Hey, what did he just say it was?\nPlankton: I just said they're free!\nNat: Oh... I thought he said freaks. In that case... So glad I actually left my house today.\nPlankton: That's the spirit, people. Step on up, but please take your time and be orderly. I wanna savor this whole 'Putting-Krabs-out-of-business' business.\nSally: What is it?\nShubie: Oh, who cares silly-big-milly? It's free!\nSally: Oh, it's going to be so good 'cause it's free!\nPlankton: Eat up. Ugh. I have no idea that chum was this rotten. I better not be here when all this agony turns to anger.\nNat: Hey, he's making a run for it! We can't let him get away with this! Let's get up and... get him."]
-]
-
-gr.Interface.load(
- "huggingface/Lwhieldon/pegasus-samsum",
- inputs=gr.Textbox(label="Input"),
- outputs=gr.Textbox(label="Output"),
- title="UMBC Fall 2022 DATA606 Capstone: Abstract Transcript Text Summarization Demo",
- examples=examples
-).launch()
\ No newline at end of file
diff --git a/spaces/MWilinski/bot/bot/__main__.py b/spaces/MWilinski/bot/bot/__main__.py
deleted file mode 100644
index f0b50513a599b295f4fdc7690076e5da3eefdc90..0000000000000000000000000000000000000000
--- a/spaces/MWilinski/bot/bot/__main__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from bot.config import Config
-from bot.logger import logger
-from bot.discord_client import DiscordClient
-
-
-def main():
- logger.info('Starting Application...')
- config = Config()
- client = DiscordClient(
- qa_service_url=config.qa_service_url,
- num_last_messages=config.num_last_messages,
- use_names_in_context=config.use_names_in_context,
- enable_commands=config.enable_commands,
- debug=config.debug
- )
- client.run(config.discord_token)
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/MWilinski/bot/tests/bot/discord_client/test_utils.py b/spaces/MWilinski/bot/tests/bot/discord_client/test_utils.py
deleted file mode 100644
index 463c83aa60a2256f9a75b6ca5303fb9e29cff99e..0000000000000000000000000000000000000000
--- a/spaces/MWilinski/bot/tests/bot/discord_client/test_utils.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import pytest
-import os
-from bot.discord_client.utils import (
- find_max_split_index,
- find_max_split_index_from_sequence,
- split_text_into_chunks
-)
-
-
-@pytest.fixture(scope="module")
-def test_chunk() -> str:
- return "t. , \n ."
-
-
-@pytest.fixture(scope="module")
-def test_text() -> str:
- with open("tests/bot/discord_client/lorem_ipsum.txt", "r") as f:
- text = f.read()
- assert text is not None, "test text is empty"
- return text
-
-
-def test_find_max_splitting_index(test_chunk: str):
- index = find_max_split_index(test_chunk, char="\n")
- assert index == 6, "index should be 6"
- index = find_max_split_index(test_chunk, char=". ")
- assert index == 3, "index should be 3"
- index = find_max_split_index(test_chunk, char=".")
- assert index == 8, "index should be 8"
-
-
-def test_find_max_split_index_from_sequence(test_chunk: str):
- index = find_max_split_index_from_sequence(
- test_chunk,
- split_characters=["\n"]
- )
- assert index == 6, "index should be 6"
- index = find_max_split_index_from_sequence(
- test_chunk,
- split_characters=[".", ", ", "\n"]
- )
- assert index == 8, "index should be 8"
-
-
-def test_split_text_into_chunks_with_split_characters(test_text: str):
- max_chunk_size = 250
- chunks = split_text_into_chunks(
- test_text,
- split_characters=[". ", ", ", "\n"],
- min_size=20,
- max_size=max_chunk_size
- )
- for chunk in chunks:
- assert len(chunk) > 0, 'Chunk length is zero'
- assert len(chunk) <= max_chunk_size, 'Chunk length exceeds maximum limit'
-
-
-def test_split_text_into_chunks_without_split_characters():
- test_text = 'a' * 1000
- max_chunk_size = 250
- chunks = split_text_into_chunks(
- test_text,
- split_characters=[],
- min_size=20,
- max_size=max_chunk_size
- )
- for chunk in chunks:
- assert len(chunk) == max_chunk_size, \
- 'Chunk length is too small'
diff --git a/spaces/Makiing/coolb-in-gtest/Dockerfile b/spaces/Makiing/coolb-in-gtest/Dockerfile
deleted file mode 100644
index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000
--- a/spaces/Makiing/coolb-in-gtest/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-FROM node:18
-
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-ENV BING_HEADER ""
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME
-
-# Switch to the "user" user
-USER user
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Install app dependencies
-# A wildcard is used to ensure both package.json AND package-lock.json are copied
-# where available (npm@5+)
-COPY --chown=user package*.json $HOME/app/
-
-RUN npm install
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app/
-
-RUN npm run build
-
-ENV PORT 7860
-EXPOSE 7860
-
-CMD npm start
diff --git a/spaces/MarcusSu1216/XingTong/modules/commons.py b/spaces/MarcusSu1216/XingTong/modules/commons.py
deleted file mode 100644
index 074888006392e956ce204d8368362dbb2cd4e304..0000000000000000000000000000000000000000
--- a/spaces/MarcusSu1216/XingTong/modules/commons.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-def slice_pitch_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, idx_str:idx_end]
- return ret
-
-def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
- return ret, ret_pitch, ids_str
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def intersperse(lst, item):
- result = [item] * (len(lst) * 2 + 1)
- result[1::2] = lst
- return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def rand_spec_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
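-# Builds Transformer-style sinusoidal timing (positional) signals with shape
-# [1, channels, length]; used by add_timing_signal_1d / cat_timing_signal_1d below.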
-def get_timing_signal_1d(
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = (
- math.log(float(max_timescale) / float(min_timescale)) /
- (num_timescales - 1))
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
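-# WaveNet-style gated activation: sums the two inputs, splits the channels into
-# a tanh half and a sigmoid half, and multiplies them element-wise.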
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
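-# Returns a boolean mask of shape [batch, max_length] that is True at positions
-# smaller than each sequence length.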
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2,3) * mask
- return path
-
-
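-# Clamps each gradient to [-clip_value, clip_value] in place and returns the
-# total gradient norm (computed before clamping).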
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1. / norm_type)
- return total_norm
diff --git a/spaces/MashiroSA/sovits-emu-voice-transform/modules/mel_processing.py b/spaces/MashiroSA/sovits-emu-voice-transform/modules/mel_processing.py
deleted file mode 100644
index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000
--- a/spaces/MashiroSA/sovits-emu-voice-transform/modules/mel_processing.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import math
-import os
-import random
-import torch
-from torch import nn
-import torch.nn.functional as F
-import torch.utils.data
-import numpy as np
-import librosa
-import librosa.util as librosa_util
-from librosa.util import normalize, pad_center, tiny
-from scipy.signal import get_window
-from scipy.io.wavfile import read
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
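-# Linear-magnitude STFT spectrogram of a waveform; Hann windows are cached per
-# (win_size, dtype, device) in the module-level hann_window dict.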
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
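-# Projects a linear spectrogram onto cached librosa mel filterbanks and applies
-# log (dynamic-range) compression.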
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
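-# Log-mel spectrogram computed directly from a waveform: reflect padding, STFT,
-# mel projection, then log compression, with the mel basis and Hann window cached.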
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
diff --git a/spaces/MercuryLeafer/img-to-music/utils.py b/spaces/MercuryLeafer/img-to-music/utils.py
deleted file mode 100644
index e4d5448735f516afa03c8a99be64fa5a2915706c..0000000000000000000000000000000000000000
--- a/spaces/MercuryLeafer/img-to-music/utils.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import json
-import numpy as np
-import httpx
-import os
-
-from constants import MUBERT_TAGS, MUBERT_MODE, MUBERT_LICENSE
-
-def get_mubert_tags_embeddings(w2v_model):
- return w2v_model.encode(MUBERT_TAGS)
-
-
-
-
-
-def find_similar(em, embeddings, method='cosine'):
- scores = []
- for ref in embeddings:
- if method == 'cosine':
- scores.append(1 - np.dot(ref, em) / (np.linalg.norm(ref) * np.linalg.norm(em)))
- if method == 'norm':
- scores.append(np.linalg.norm(ref - em))
- return np.array(scores), np.argsort(scores)
-
-
-def get_tags_for_prompts(w2v_model, mubert_tags_embeddings, prompts, top_n=3, debug=False):
- prompts_embeddings = w2v_model.encode(prompts)
- ret = []
- for i, pe in enumerate(prompts_embeddings):
- scores, idxs = find_similar(pe, mubert_tags_embeddings)
- top_tags = MUBERT_TAGS[idxs[:top_n]]
- top_prob = 1 - scores[idxs[:top_n]]
- if debug:
- print(f"Prompt: {prompts[i]}\nTags: {', '.join(top_tags)}\nScores: {top_prob}\n\n\n")
- ret.append((prompts[i], list(top_tags)))
-    print("ret:", ret)
- return ret
\ No newline at end of file
diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/app.py b/spaces/MetaWabbit/Auto-GPT/autogpt/app.py
deleted file mode 100644
index 58d9f7164ddfbb5019b072d789dc2fa6205dc9d3..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/autogpt/app.py
+++ /dev/null
@@ -1,330 +0,0 @@
-""" Command and Control """
-import json
-from typing import Dict, List, NoReturn, Union
-
-from autogpt.agent.agent_manager import AgentManager
-from autogpt.commands.analyze_code import analyze_code
-from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.commands.execute_code import (
- execute_python_file,
- execute_shell,
- execute_shell_popen,
-)
-from autogpt.commands.file_operations import (
- append_to_file,
- delete_file,
- download_file,
- read_file,
- search_files,
- write_to_file,
-)
-from autogpt.commands.git_operations import clone_repository
-from autogpt.commands.google_search import google_official_search, google_search
-from autogpt.commands.image_gen import generate_image
-from autogpt.commands.improve_code import improve_code
-from autogpt.commands.twitter import send_tweet
-from autogpt.commands.web_requests import scrape_links, scrape_text
-from autogpt.commands.web_selenium import browse_website
-from autogpt.commands.write_tests import write_tests
-from autogpt.config import Config
-from autogpt.json_utils.json_fix_llm import fix_and_parse_json
-from autogpt.memory import get_memory
-from autogpt.processing.text import summarize_text
-from autogpt.speech import say_text
-
-CFG = Config()
-AGENT_MANAGER = AgentManager()
-
-
-def is_valid_int(value: str) -> bool:
- """Check if the value is a valid integer
-
- Args:
- value (str): The value to check
-
- Returns:
- bool: True if the value is a valid integer, False otherwise
- """
- try:
- int(value)
- return True
- except ValueError:
- return False
-
-
-def get_command(response_json: Dict):
- """Parse the response and return the command name and arguments
-
- Args:
- response_json (json): The response from the AI
-
- Returns:
- tuple: The command name and arguments
-
- Raises:
- json.decoder.JSONDecodeError: If the response is not valid JSON
-
- Exception: If any other error occurs
- """
- try:
- if "command" not in response_json:
- return "Error:", "Missing 'command' object in JSON"
-
- if not isinstance(response_json, dict):
-            return "Error:", f"'response_json' object is not a dictionary {response_json}"
-
- command = response_json["command"]
- if not isinstance(command, dict):
- return "Error:", "'command' object is not a dictionary"
-
- if "name" not in command:
- return "Error:", "Missing 'name' field in 'command' object"
-
- command_name = command["name"]
-
- # Use an empty dictionary if 'args' field is not present in 'command' object
- arguments = command.get("args", {})
-
- return command_name, arguments
- except json.decoder.JSONDecodeError:
- return "Error:", "Invalid JSON"
- # All other errors, return "Error: + error message"
- except Exception as e:
- return "Error:", str(e)
-
-
-def map_command_synonyms(command_name: str):
- """Takes the original command name given by the AI, and checks if the
- string matches a list of common/known hallucinations
- """
- synonyms = [
- ("write_file", "write_to_file"),
- ("create_file", "write_to_file"),
- ("search", "google"),
- ]
- for seen_command, actual_command_name in synonyms:
- if command_name == seen_command:
- return actual_command_name
- return command_name
-
-
-def execute_command(command_name: str, arguments):
- """Execute the command and return the result
-
- Args:
- command_name (str): The name of the command to execute
- arguments (dict): The arguments for the command
-
- Returns:
- str: The result of the command
- """
- try:
- command_name = map_command_synonyms(command_name.lower())
- if command_name == "google":
- # Check if the Google API key is set and use the official search method
- # If the API key is not set or has only whitespaces, use the unofficial
- # search method
- key = CFG.google_api_key
- if key and key.strip() and key != "your-google-api-key":
- google_result = google_official_search(arguments["input"])
- return google_result
- else:
- google_result = google_search(arguments["input"])
-
- # google_result can be a list or a string depending on the search results
- if isinstance(google_result, list):
- safe_message = [
- google_result_single.encode("utf-8", "ignore")
- for google_result_single in google_result
- ]
- else:
- safe_message = google_result.encode("utf-8", "ignore")
-
- return safe_message.decode("utf-8")
- elif command_name == "memory_add":
- memory = get_memory(CFG)
- return memory.add(arguments["string"])
- elif command_name == "start_agent":
- return start_agent(
- arguments["name"], arguments["task"], arguments["prompt"]
- )
- elif command_name == "message_agent":
- return message_agent(arguments["key"], arguments["message"])
- elif command_name == "list_agents":
- return list_agents()
- elif command_name == "delete_agent":
- return delete_agent(arguments["key"])
- elif command_name == "get_text_summary":
- return get_text_summary(arguments["url"], arguments["question"])
- elif command_name == "get_hyperlinks":
- return get_hyperlinks(arguments["url"])
- elif command_name == "clone_repository":
- return clone_repository(
- arguments["repository_url"], arguments["clone_path"]
- )
- elif command_name == "read_file":
- return read_file(arguments["file"])
- elif command_name == "write_to_file":
- return write_to_file(arguments["file"], arguments["text"])
- elif command_name == "append_to_file":
- return append_to_file(arguments["file"], arguments["text"])
- elif command_name == "delete_file":
- return delete_file(arguments["file"])
- elif command_name == "search_files":
- return search_files(arguments["directory"])
- elif command_name == "download_file":
- if not CFG.allow_downloads:
- return "Error: You do not have user authorization to download files locally."
- return download_file(arguments["url"], arguments["file"])
- elif command_name == "browse_website":
- return browse_website(arguments["url"], arguments["question"])
- # TODO: Change these to take in a file rather than pasted code, if
- # non-file is given, return instructions "Input should be a python
- # filepath, write your code to file and try again"
- elif command_name == "analyze_code":
- return analyze_code(arguments["code"])
- elif command_name == "improve_code":
- return improve_code(arguments["suggestions"], arguments["code"])
- elif command_name == "write_tests":
- return write_tests(arguments["code"], arguments.get("focus"))
- elif command_name == "execute_python_file": # Add this command
- return execute_python_file(arguments["file"])
- elif command_name == "execute_shell":
- if CFG.execute_local_commands:
- return execute_shell(arguments["command_line"])
- else:
- return (
- "You are not allowed to run local shell commands. To execute"
- " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
- "in your config. Do not attempt to bypass the restriction."
- )
- elif command_name == "execute_shell_popen":
- if CFG.execute_local_commands:
- return execute_shell_popen(arguments["command_line"])
- else:
- return (
- "You are not allowed to run local shell commands. To execute"
- " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
- "in your config. Do not attempt to bypass the restriction."
- )
- elif command_name == "read_audio_from_file":
- return read_audio_from_file(arguments["file"])
- elif command_name == "generate_image":
- return generate_image(arguments["prompt"])
- elif command_name == "send_tweet":
- return send_tweet(arguments["text"])
- elif command_name == "do_nothing":
- return "No action performed."
- elif command_name == "task_complete":
- shutdown()
- else:
- return (
- f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
- " list for available commands and only respond in the specified JSON"
- " format."
- )
- except Exception as e:
- return f"Error: {str(e)}"
-
-
-def get_text_summary(url: str, question: str) -> str:
-    """Return a summary of the text scraped from a url, guided by a question
-
- Args:
- url (str): The url to scrape
- question (str): The question to summarize the text for
-
- Returns:
- str: The summary of the text
- """
- text = scrape_text(url)
- summary = summarize_text(url, text, question)
- return f""" "Result" : {summary}"""
-
-
-def get_hyperlinks(url: str) -> Union[str, List[str]]:
-    """Return the hyperlinks found on a webpage
-
- Args:
- url (str): The url to scrape
-
- Returns:
- str or list: The hyperlinks on the page
- """
- return scrape_links(url)
-
-
-def shutdown() -> NoReturn:
- """Shut down the program"""
- print("Shutting down...")
- quit()
-
-
-def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
- """Start an agent with a given name, task, and prompt
-
- Args:
- name (str): The name of the agent
- task (str): The task of the agent
- prompt (str): The prompt for the agent
- model (str): The model to use for the agent
-
- Returns:
- str: The response of the agent
- """
- # Remove underscores from name
- voice_name = name.replace("_", " ")
-
- first_message = f"""You are {name}. Respond with: "Acknowledged"."""
- agent_intro = f"{voice_name} here, Reporting for duty!"
-
- # Create agent
- if CFG.speak_mode:
- say_text(agent_intro, 1)
- key, ack = AGENT_MANAGER.create_agent(task, first_message, model)
-
- if CFG.speak_mode:
- say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
-
- # Assign task (prompt), get response
- agent_response = AGENT_MANAGER.message_agent(key, prompt)
-
- return f"Agent {name} created with key {key}. First response: {agent_response}"
-
-
-def message_agent(key: str, message: str) -> str:
- """Message an agent with a given key and message"""
- # Check if the key is a valid integer
- if is_valid_int(key):
- agent_response = AGENT_MANAGER.message_agent(int(key), message)
- else:
- return "Invalid key, must be an integer."
-
- # Speak response
- if CFG.speak_mode:
- say_text(agent_response, 1)
- return agent_response
-
-
-def list_agents():
- """List all agents
-
- Returns:
- str: A list of all agents
- """
- return "List of agents:\n" + "\n".join(
- [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
- )
-
-
-def delete_agent(key: str) -> str:
- """Delete an agent with a given key
-
- Args:
- key (str): The key of the agent to delete
-
- Returns:
- str: A message indicating whether the agent was deleted or not
- """
- result = AGENT_MANAGER.delete_agent(key)
- return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
diff --git a/spaces/Michale1017/Auto-keep-online/index.js b/spaces/Michale1017/Auto-keep-online/index.js
deleted file mode 100644
index fd276dd344eb39c02dca11fe38655ae1e3ffa258..0000000000000000000000000000000000000000
--- a/spaces/Michale1017/Auto-keep-online/index.js
+++ /dev/null
@@ -1,116 +0,0 @@
-const axios = require('axios');
-const fs = require('fs');
-const cron = require('node-cron');
-const http = require('http');
-const path = require('path');
-const port = process.env.PORT || 7860;
-
-// Define the array of webpage URLs to visit
-const urls = [
-    'https://dynamic-vevay-ky2392.koyeb.app/sub', // Platform note: Koyeb - Washington DC, Argo-nodejs, credit-card-assisted signup
-    'https://fascinating-ollie-ky2392.koyeb.app', // Platform note: Koyeb - Germany - fixed tunnel - 100G cap, credit-card-assisted signup, the other one is Nezha
-    'https://p09--panoramic-argo--wb4z7bk2ymdm.code.run', // Platform note: NorthFlank - US - Google Cloud - Argo-Vless
-    'https://p09--panoramic-argo--wb4z7bk2ymdm.code.run/list', // Platform note: NorthFlank - US - Google Cloud - Argo-Vless
-    'https://p09--panoramic-argo--wb4z7bk2ymdm.code.run/listen', // Platform note: NorthFlank - US - Google Cloud - Argo-Vless
-    'https://p09--panoramic-argo--wb4z7bk2ymdm.code.run/test', // Platform note: NorthFlank - US - Google Cloud - Argo-Vless
-    'https://p09--panoramic-argo--wb4z7bk2ymdm.code.run/stauts', // Platform note: NorthFlank - US - Google Cloud - Argo-Vless
-    'https://0906.fly.dev', // Platform note: Fly0906, Los Angeles, Argo-X-C-P
-    'https://0928.fly.dev', // Platform note: Fly0928, Paris, Argo-MJJ
-    'https://long-maze-equipment.glitch.me', // Platform note: GLitch - US - AWS - along login - 50M bandwidth, 1000 hours, ws protocol
-    'https://glitch.com/edit/#!/long-maze-equipment', // Platform note: GLitch - US - AWS - along login - 50M bandwidth, 1000 hours
-    'https://rattle-atom-reptile.glitch.me/sub', // Platform note: Glitch - Google, MJJ-good fixed tunnel
-    'https://glitch.com/edit/#!/rattle-atom-reptile', // Platform note: Glitch - Google, MJJ-good tunnel
-    'https://mountain-material-utahraptor.glitch.me/sub', // Platform note: GLitch - Nezha US - AWS - along login, nodejs-argo temporary tunnel
-    'https://glitch.com/edit/#!/mountain-material-utahraptor', // Platform note: GLitch - Nezha US - AWS - along login, nodejs-argo temporary tunnel
-    'https://soft-shocking-close.glitch.me', // Platform note: GLitch - Nezha US - AWS - GitHub login, ws protocol, soft-shocking-close.glitch.me
-    'https://glitch.com/edit/#!/soft-shocking-close', // Platform note: GLitch - Nezha US - AWS - GitHub login, soft-shocking-close.glitch.me
-    'https://deadpan-possible-brush.glitch.me/sub', // Platform note: GLitch - Nezha US - AWS - GitHub login, GLitch-vm-goodplus
-    'https://glitch.com/edit/#!/deadpan-possible-brush', // Platform note: GLitch - Nezha US - AWS - GitHub login, GLitch-vm-goodplus
-    'https://replit-xray.vm669.repl.co', // Platform note: replit - Mumbai, India - Google Cloud - 5G cap @Misaka, https://replit.com/@vm669/replit-xray
-    'https://vmvltrssso-nginx-pro.vm669.repl.co', // Platform note: replit - Mumbai, India, Google Cloud, direct connection @甬哥侃侃侃
-    'https://good.vm669.repl.co', // Platform note: replit - Mumbai, India, Google Cloud, MJJ-good-argo
-    'https://alone.kinglangyun.repl.co', // Platform note: replit - US - Google Cloud - @甬哥侃侃侃, along999
-    'https://argo.kinglangyun.repl.co', // Platform note: Replit - US - Google Cloud, Nezha, Argo, 10G, eoovve-nodejs-argo
-    'https://good-na5d.onrender.com', // Platform note: Toy: Render.com - registered with ly2022@eee.me.eu.org, picked a Singapore server, deployed my own "good" project URL, worked right away.
-    'https://node-it-free-01.tickhosting.com', // Platform note: Toy: tickhosting - Discord/Google login, [free Italian residential-IP node], add 24 hours
-    'https://node-it-free-01.tickhosting.com:50387', // Platform note: Toy: tickhosting - Discord/Google login, [free Italian residential-IP node], add 24 hours
-    'https://6ssrjm-3000.csb.app/sub', // Platform note: Toy 97: codesandbox.io - GitHub - vm669@qq.com - nodejs-argo only deployed successfully on 10.20; it sleeps and has to be reopened manually
-    'https://f1.rustix.su:24588', // Platform note: Toy 11: rustix.su, German node, Discord login, Argo tunnel, the server stops every day, not reliable
-    'https://f1.rustix.su:24456', // Platform note:
-    'https://1026-7daennwm.b4a.run', // Platform note: Toy 98: back4app - US, GitHub login, 100G, 600 hours
-    'https://michale1017-ws.hf.space', // Platform note: Hugging Face Space, the file-upload one, https://github.com/eoovve/Huggingface-ws
-    'https://michale1017-xray.hf.space', // Platform note: Hugging Face Space, fork, sunxyz (xysun), @ws88666, Mr. Wang
-    'https://fsn1.bbn.one:47406', // Platform note: BBN, along999 email
-    'https://hel1.bbn.one:46266', // Platform note: BBN, usaiwa's Discord account
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    'https://www.google.com', // Platform note:
-    // Add more URLs here
-];
-
-// Create a log file
-//const logFile = 'visit-log.txt';
-
-// Visit a webpage and write the result to the log
-async function scrapeAndLog(url) {
- try {
- const response = await axios.get(url);
- const timestamp = new Date().toISOString();
- const logMessage = `${timestamp}: Web visited Successfully ${url}\n`;
-
-        // Write the visit result to the log file
-// fs.appendFileSync(logFile, logMessage);
-
- console.log(logMessage);
- } catch (error) {
- const timestamp = new Date().toISOString();
- const errorMessage = `${timestamp}: Web visited Error ${url}: ${error.message}\n`;
-
-        // Write the error message to the log file
-// fs.appendFileSync(logFile, errorMessage);
-
- console.error(errorMessage);
- }
-}
-
-// Use cron to schedule the recurring task (runs every 2 minutes)
-cron.schedule('*/2 * * * *', () => {
- console.log('Running webpage access...');
-    // Loop through and visit each URL
- urls.forEach((url) => {
- scrapeAndLog(url);
- });
-});
-
-
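-// Minimal HTTP server: serves index.html at the root path and returns 404 for anything else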
-const server = http.createServer((req, res) => {
- if (req.url === '/') {
- const filePath = path.join(__dirname, 'index.html');
-
- fs.readFile(filePath, (err, data) => {
- if (err) {
- res.writeHead(500);
- res.end('Error loading index.html');
- } else {
- res.writeHead(200, { 'Content-Type': 'text/html' });
- res.end(data);
- }
- });
- } else {
- res.writeHead(404);
- res.end('Not Found');
- }
-});
-
-server.listen(port, () => {
- console.log(`Server is running on port ${port}`);
-});
diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/README.md b/spaces/NAACL2022/CLIP-Caption-Reward/README.md
deleted file mode 100644
index 5a8911e9ca868c2d332c14ec1022d1ffd3fb81f4..0000000000000000000000000000000000000000
--- a/spaces/NAACL2022/CLIP-Caption-Reward/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: CLIP Caption Reward
-emoji: 🐠
-colorFrom: pink
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.0.24
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/NCTCMumbai/NCTC/models/official/benchmark/__init__.py b/spaces/NCTCMumbai/NCTC/models/official/benchmark/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/NCTCMumbai/NCTC/models/official/utils/misc/model_helpers_test.py b/spaces/NCTCMumbai/NCTC/models/official/utils/misc/model_helpers_test.py
deleted file mode 100644
index 9f2487e4223e7b46854db918114d2507fc891155..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/utils/misc/model_helpers_test.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for Model Helper functions."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf # pylint: disable=g-bad-import-order
-
-from official.utils.misc import model_helpers
-
-
-class PastStopThresholdTest(tf.test.TestCase):
- """Tests for past_stop_threshold."""
-
- def setUp(self):
- super(PastStopThresholdTest, self).setUp()
- tf.compat.v1.disable_eager_execution()
-
- def test_past_stop_threshold(self):
- """Tests for normal operating conditions."""
- self.assertTrue(model_helpers.past_stop_threshold(0.54, 1))
- self.assertTrue(model_helpers.past_stop_threshold(54, 100))
- self.assertFalse(model_helpers.past_stop_threshold(0.54, 0.1))
- self.assertFalse(model_helpers.past_stop_threshold(-0.54, -1.5))
- self.assertTrue(model_helpers.past_stop_threshold(-0.54, 0))
- self.assertTrue(model_helpers.past_stop_threshold(0, 0))
- self.assertTrue(model_helpers.past_stop_threshold(0.54, 0.54))
-
- def test_past_stop_threshold_none_false(self):
- """Tests that check None returns false."""
- self.assertFalse(model_helpers.past_stop_threshold(None, -1.5))
- self.assertFalse(model_helpers.past_stop_threshold(None, None))
- self.assertFalse(model_helpers.past_stop_threshold(None, 1.5))
- # Zero should be okay, though.
- self.assertTrue(model_helpers.past_stop_threshold(0, 1.5))
-
- def test_past_stop_threshold_not_number(self):
- """Tests for error conditions."""
- with self.assertRaises(ValueError):
- model_helpers.past_stop_threshold("str", 1)
-
- with self.assertRaises(ValueError):
- model_helpers.past_stop_threshold("str", tf.constant(5))
-
- with self.assertRaises(ValueError):
- model_helpers.past_stop_threshold("str", "another")
-
- with self.assertRaises(ValueError):
- model_helpers.past_stop_threshold(0, None)
-
- with self.assertRaises(ValueError):
- model_helpers.past_stop_threshold(0.7, "str")
-
- with self.assertRaises(ValueError):
- model_helpers.past_stop_threshold(tf.constant(4), None)
-
-
-class SyntheticDataTest(tf.test.TestCase):
- """Tests for generate_synthetic_data."""
-
-  def test_generate_synthetic_data(self):
- input_element, label_element = tf.compat.v1.data.make_one_shot_iterator(
- model_helpers.generate_synthetic_data(input_shape=tf.TensorShape([5]),
- input_value=123,
- input_dtype=tf.float32,
- label_shape=tf.TensorShape([]),
- label_value=456,
- label_dtype=tf.int32)).get_next()
-
- with self.session() as sess:
- for n in range(5):
- inp, lab = sess.run((input_element, label_element))
- self.assertAllClose(inp, [123., 123., 123., 123., 123.])
- self.assertEquals(lab, 456)
-
- def test_generate_only_input_data(self):
- d = model_helpers.generate_synthetic_data(
- input_shape=tf.TensorShape([4]),
- input_value=43.5,
- input_dtype=tf.float32)
-
- element = tf.compat.v1.data.make_one_shot_iterator(d).get_next()
- self.assertFalse(isinstance(element, tuple))
-
- with self.session() as sess:
- inp = sess.run(element)
- self.assertAllClose(inp, [43.5, 43.5, 43.5, 43.5])
-
- def test_generate_nested_data(self):
- d = model_helpers.generate_synthetic_data(
- input_shape={'a': tf.TensorShape([2]),
- 'b': {'c': tf.TensorShape([3]), 'd': tf.TensorShape([])}},
- input_value=1.1)
-
- element = tf.compat.v1.data.make_one_shot_iterator(d).get_next()
- self.assertIn('a', element)
- self.assertIn('b', element)
- self.assertEquals(len(element['b']), 2)
- self.assertIn('c', element['b'])
- self.assertIn('d', element['b'])
- self.assertNotIn('c', element)
-
- with self.session() as sess:
- inp = sess.run(element)
- self.assertAllClose(inp['a'], [1.1, 1.1])
- self.assertAllClose(inp['b']['c'], [1.1, 1.1, 1.1])
- self.assertAllClose(inp['b']['d'], 1.1)
-
-
-if __name__ == "__main__":
- tf.test.main()
diff --git a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/common/schedules_test.py b/spaces/NCTCMumbai/NCTC/models/research/brain_coder/common/schedules_test.py
deleted file mode 100644
index b17022f45a833fb3aa219fd06225f77fbd1b1055..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/common/schedules_test.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Tests for common.schedules."""
-
-from math import exp
-from math import sqrt
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-from common import config_lib # brain coder
-from common import schedules # brain coder
-
-
-class SchedulesTest(tf.test.TestCase):
-
- def ScheduleTestHelper(self, config, schedule_subtype, io_values):
- """Run common checks for schedules.
-
- Args:
- config: Config object which is passed into schedules.make_schedule.
- schedule_subtype: The expected schedule type to be instantiated.
- io_values: List of (input, output) pairs. Must be in ascending input
- order. No duplicate inputs.
- """
-
- # Check that make_schedule makes the correct type.
- f = schedules.make_schedule(config)
- self.assertTrue(isinstance(f, schedule_subtype))
-
- # Check that multiple instances returned from make_schedule behave the same.
- fns = [schedules.make_schedule(config) for _ in xrange(3)]
-
- # Check that all the inputs map to the right outputs.
- for i, o in io_values:
- for f in fns:
- f_out = f(i)
- self.assertTrue(
- np.isclose(o, f_out),
- 'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out))
-
- # Check that a subset of the io_values are still correct.
- f = schedules.make_schedule(config)
- subseq = [io_values[i**2] for i in xrange(int(sqrt(len(io_values))))]
- if subseq[-1] != io_values[-1]:
- subseq.append(io_values[-1])
- for i, o in subseq:
- f_out = f(i)
- self.assertTrue(
- np.isclose(o, f_out),
- 'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out))
-
- # Check duplicate calls.
- f = schedules.make_schedule(config)
- for i, o in io_values:
- for _ in xrange(3):
- f_out = f(i)
- self.assertTrue(
- np.isclose(o, f_out),
- 'Duplicate calls at input %d are not equal. Expected %s, got %s'
- % (i, o, f_out))
-
- def testConstSchedule(self):
- self.ScheduleTestHelper(
- config_lib.Config(fn='const', const=5),
- schedules.ConstSchedule,
- [(0, 5), (1, 5), (10, 5), (20, 5), (100, 5), (1000000, 5)])
-
- def testLinearDecaySchedule(self):
- self.ScheduleTestHelper(
- config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10,
- end_time=20),
- schedules.LinearDecaySchedule,
- [(0, 2), (1, 2), (10, 2), (11, 1.8), (15, 1), (19, 0.2), (20, 0),
- (100000, 0)])
-
- # Test step function.
- self.ScheduleTestHelper(
- config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10,
- end_time=10),
- schedules.LinearDecaySchedule,
- [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)])
-
- def testExponentialDecaySchedule(self):
- self.ScheduleTestHelper(
- config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6),
- start_time=10, end_time=20),
- schedules.ExponentialDecaySchedule,
- [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-1/2. - 1)),
- (15, exp(-5/2. - 1)), (19, exp(-9/2. - 1)), (20, exp(-6)),
- (100000, exp(-6))])
-
- # Test step function.
- self.ScheduleTestHelper(
- config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6),
- start_time=10, end_time=10),
- schedules.ExponentialDecaySchedule,
- [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-6)),
- (15, exp(-6))])
-
- def testSmootherstepDecaySchedule(self):
- self.ScheduleTestHelper(
- config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10,
- end_time=20),
- schedules.SmootherstepDecaySchedule,
- [(0, 2), (1, 2), (10, 2), (11, 1.98288), (15, 1), (19, 0.01712),
- (20, 0), (100000, 0)])
-
- # Test step function.
- self.ScheduleTestHelper(
- config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10,
- end_time=10),
- schedules.SmootherstepDecaySchedule,
- [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)])
-
- def testHardOscillatorSchedule(self):
- self.ScheduleTestHelper(
- config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100,
- period=10, transition_fraction=0.5),
- schedules.HardOscillatorSchedule,
- [(0, 2), (1, 2), (10, 2), (100, 2), (101, 1.2), (102, 0.4), (103, 0),
- (104, 0), (105, 0), (106, 0.8), (107, 1.6), (108, 2), (109, 2),
- (110, 2), (111, 1.2), (112, 0.4), (115, 0), (116, 0.8), (119, 2),
- (120, 2), (100001, 1.2), (100002, 0.4), (100005, 0), (100006, 0.8),
- (100010, 2)])
-
- # Test instantaneous step.
- self.ScheduleTestHelper(
- config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100,
- period=10, transition_fraction=0),
- schedules.HardOscillatorSchedule,
- [(0, 2), (1, 2), (10, 2), (99, 2), (100, 0), (104, 0), (105, 2),
- (106, 2), (109, 2), (110, 0)])
-
-
-if __name__ == '__main__':
- tf.test.main()
diff --git a/spaces/NealCaren/TranscribeX/app.py b/spaces/NealCaren/TranscribeX/app.py
deleted file mode 100644
index a7762c967f64a2563426ce9ab576757e7f32e01a..0000000000000000000000000000000000000000
--- a/spaces/NealCaren/TranscribeX/app.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import whisperx
-import streamlit as st
-import torch
-import tempfile
-import subprocess
-
-
-def transcribe(audio_file):
-
-
- if torch.cuda.is_available():
-        device = "cuda"  # whisperx/torch expect the device string "cuda", not "gpu"
- else:
- device = "cpu"
- batch_size = 16 # reduce if low on GPU mem
-    compute_type = "int8" # keeps memory low; change to "float16" if you have spare GPU memory (int8 may reduce accuracy)
- YOUR_HF_TOKEN = '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'
-
- # load audio file
- audio_bytes = uploaded_file.getvalue()
- with open(temp_file, 'wb') as f:
- f.write(audio_bytes)
-
- # 1. Transcribe with original whisper (batched)
- model = whisperx.load_model("tiny", device = device, compute_type=compute_type)
-
- audio = whisperx.load_audio(temp_file)
- result = model.transcribe(audio, batch_size=batch_size)
- st.write("Transcribed! Here's what we have so far:")
- st.write(result["segments"]) # before alignment
-
- # delete model if low on GPU resources
- # import gc; gc.collect(); torch.cuda.empty_cache(); del model
-
- # 2. Align whisper output
- model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
- result = whisperx.align(result["segments"], model_a, metadata, audio, device, return_char_alignments=False)
- st.write("Aligned! Here's what we have so far:")
- st.write(result["segments"]) # after alignment
-
- # delete model if low on GPU resources
- # import gc; gc.collect(); torch.cuda.empty_cache(); del model_a
-
- # 3. Assign speaker labels
- diarize_model = whisperx.DiarizationPipeline(use_auth_token=YOUR_HF_TOKEN, device=device)
-
- # add min/max number of speakers if known
- diarize_segments = diarize_model(audio_file)
- # diarize_model(audio_file, min_speakers=min_speakers, max_speakers=max_speakers)
-
- result = whisperx.assign_word_speakers(diarize_segments, result)
- st.write(diarize_segments)
- st.write(result["segments"]) # segments are now assigned speaker IDs
-
-
-st.title("Automated Transcription")
-
-form = st.form(key='my_form')
-uploaded_file = form.file_uploader("Choose a file")
-
-submit = form.form_submit_button("Transcribe!")
-
-
-if submit:
- #temporary file to store audio_file
- tmp_dir = tempfile.TemporaryDirectory()
- temp_file = tmp_dir.name + '/mono.wav'
- cmd = f"ffmpeg -y -i {uploaded_file} -acodec pcm_s16le -ar 16000 -ac 1 {temp_file}"
- subprocess.Popen(cmd, shell=True).wait()
-
- transcribe(temp_file)
\ No newline at end of file
diff --git a/spaces/NoriZC/vits-models/utils.py b/spaces/NoriZC/vits-models/utils.py
deleted file mode 100644
index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000
--- a/spaces/NoriZC/vits-models/utils.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-import librosa
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
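-# Loads a checkpoint into the model, copying matching weights and keeping the
-# model's current values (with a log message) for any keys missing from the file.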
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict= {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
- except:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
- audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
- return torch.FloatTensor(audio.astype(np.float32))
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
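-# Lightweight hyperparameter container: stores kwargs as attributes, wraps nested
-# dicts recursively, and supports both attribute and dict-style access.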
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
diff --git a/spaces/Nunchakuka/FrenchAnonymizer/speaker_encoder/data_objects/__init__.py b/spaces/Nunchakuka/FrenchAnonymizer/speaker_encoder/data_objects/__init__.py
deleted file mode 100644
index 030317a1d9a328d452bf29bc7a802e29629b1a42..0000000000000000000000000000000000000000
--- a/spaces/Nunchakuka/FrenchAnonymizer/speaker_encoder/data_objects/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
-from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh
deleted file mode 100644
index 1a6fb5f891b55d9fd978cfe54565f112f7eedce7..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/path.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-export KALDI_ROOT=`pwd`/../../..
-export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH
-[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1
-. $KALDI_ROOT/tools/config/common_path.sh
-export LC_ALL=C
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py
deleted file mode 100644
index 3279dae89a8bca95178bbe1285d3cb334890b12f..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/huffman/huffman_mmap_indexed_dataset.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import mmap
-import os
-import shutil
-import struct
-import typing as tp
-from functools import lru_cache
-
-import numpy as np
-import torch
-from fairseq.data import indexed_dataset
-from fairseq.data.huffman import HuffmanCoder
-from fairseq.file_io import PathManager
-
-
-class HuffmanMMapIndex:
- """
- keep an index of the offsets in the huffman binary file.
- First a header, then the list of sizes (num tokens) for each instance and finally
- the addresses of each instance.
- """
-
- _HDR_MAGIC = b"HUFFIDX\x00\x00"
- _VERSION = 1
-
- @classmethod
- def writer(cls, path: str, data_len: int):
- class _Writer:
- def __enter__(self):
- self._file = open(path, "wb")
-
- # write header (magic + version)
- self._file.write(cls._HDR_MAGIC)
- self._file.write(struct.pack(" None:
- self._path_prefix = path_prefix
- self._coder = coder
- self._sizes = []
- self._ptrs = []
- self._data_len = 0
-
- def open(self):
- self._coder.to_file(vocab_file_path(self._path_prefix))
- self._data_file = open(indexed_dataset.data_file_path(self._path_prefix), "wb")
-
- def __enter__(self) -> "HuffmanMMapIndexedDatasetBuilder":
- self.open()
- return self
-
- def add_item(self, tokens: tp.List[str]) -> None:
- """
- add a list of tokens to the dataset, they will compressed with the
- provided coder before being written to file.
- """
- encoded = self._coder.encode(tokens)
- code_len = len(encoded)
- last_ptr = 0
- if len(self._ptrs) > 0:
- last_ptr = self._ptrs[-1]
- self._sizes.append(len(tokens))
- self._ptrs.append(last_ptr + code_len)
- self._data_len += code_len
- self._data_file.write(encoded)
-
- def append(self, other_dataset_path_prefix: str) -> None:
- """
- append an existing dataset.
- Beware, if it wasn't built with the same coder, you are in trouble.
- """
- other_index = HuffmanMMapIndex(
- indexed_dataset.index_file_path(other_dataset_path_prefix)
- )
- for (ptr, size) in other_index:
- self._ptrs.append(ptr + self._data_len)
- self._sizes.append(size)
-
- # Concatenate data
- with open(indexed_dataset.data_file_path(other_dataset_path_prefix), "rb") as f:
- shutil.copyfileobj(f, self._data_file)
-
- self._data_len += other_index.data_len
-
- def close(self):
- self._data_file.close()
- with HuffmanMMapIndex.writer(
- indexed_dataset.index_file_path(self._path_prefix), self._data_len
- ) as index:
- index.write(self._sizes, self._ptrs)
-
- def __exit__(self, exc_type, exc_val, exc_tb) -> None:
- self.close()
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/roberta/hub_interface.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/roberta/hub_interface.py
deleted file mode 100644
index ba298d63ba5da2a5b2f1a44d0384a6b249277ef4..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/roberta/hub_interface.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.data import encoders
-
-
-class RobertaHubInterface(nn.Module):
- """A simple PyTorch Hub interface to RoBERTa.
-
- Usage: https://github.com/pytorch/fairseq/tree/main/examples/roberta
- """
-
- def __init__(self, cfg, task, model):
- super().__init__()
- self.cfg = cfg
- self.task = task
- self.model = model
-
- self.bpe = encoders.build_bpe(cfg.bpe)
-
- # this is useful for determining the device
- self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
-
- @property
- def device(self):
- return self._float_tensor.device
-
- def encode(
- self, sentence: str, *addl_sentences, no_separator=False
- ) -> torch.LongTensor:
- """
- BPE-encode a sentence (or multiple sentences).
-
-        Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
-        Every sentence ends with an end-of-sentence (`</s>`) and we use an
-        extra end-of-sentence (`</s>`) as a separator.
-
-        Example (single sentence): `<s> a b c </s>`
-        Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`
-
- The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
- requires leading spaces. For example::
-
- >>> roberta.encode('Hello world').tolist()
- [0, 31414, 232, 2]
- >>> roberta.encode(' world').tolist()
- [0, 232, 2]
- >>> roberta.encode('world').tolist()
- [0, 8331, 2]
- """
-        bpe_sentence = "<s> " + self.bpe.encode(sentence) + " </s>"
-        for s in addl_sentences:
-            bpe_sentence += " </s>" if not no_separator else ""
-            bpe_sentence += " " + self.bpe.encode(s) + " </s>"
- tokens = self.task.source_dictionary.encode_line(
- bpe_sentence, append_eos=False, add_if_not_exist=False
- )
- return tokens.long()
-
- def decode(self, tokens: torch.LongTensor):
- assert tokens.dim() == 1
- tokens = tokens.numpy()
- if tokens[0] == self.task.source_dictionary.bos():
-            tokens = tokens[1:]  # remove <s>
- eos_mask = tokens == self.task.source_dictionary.eos()
- doc_mask = eos_mask[1:] & eos_mask[:-1]
- sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
- sentences = [
- self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
- ]
- if len(sentences) == 1:
- return sentences[0]
- return sentences
-
- def extract_features(
- self, tokens: torch.LongTensor, return_all_hiddens: bool = False
- ) -> torch.Tensor:
- if tokens.dim() == 1:
- tokens = tokens.unsqueeze(0)
- if tokens.size(-1) > self.model.max_positions():
- raise ValueError(
- "tokens exceeds maximum length: {} > {}".format(
- tokens.size(-1), self.model.max_positions()
- )
- )
- features, extra = self.model(
- tokens.to(device=self.device),
- features_only=True,
- return_all_hiddens=return_all_hiddens,
- )
- if return_all_hiddens:
- # convert from T x B x C -> B x T x C
- inner_states = extra["inner_states"]
- return [inner_state.transpose(0, 1) for inner_state in inner_states]
- else:
- return features # just the last layer's features
-
- def register_classification_head(
- self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
- ):
- self.model.register_classification_head(
- name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
- )
-
- def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
- features = self.extract_features(tokens.to(device=self.device))
- logits = self.model.classification_heads[head](features)
- if return_logits:
- return logits
- return F.log_softmax(logits, dim=-1)
-
- def extract_features_aligned_to_words(
- self, sentence: str, return_all_hiddens: bool = False
- ) -> torch.Tensor:
- """Extract RoBERTa features, aligned to spaCy's word-level tokenizer."""
- from fairseq.models.roberta import alignment_utils
- from spacy.tokens import Doc
-
- nlp = alignment_utils.spacy_nlp()
- tokenizer = alignment_utils.spacy_tokenizer()
-
- # tokenize both with GPT-2 BPE and spaCy
- bpe_toks = self.encode(sentence)
- spacy_toks = tokenizer(sentence)
- spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)]
- alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws)
-
- # extract features and align them
- features = self.extract_features(
- bpe_toks, return_all_hiddens=return_all_hiddens
- )
- features = features.squeeze(0)
- aligned_feats = alignment_utils.align_features_to_words(
- self, features, alignment
- )
-
- # wrap in spaCy Doc
- doc = Doc(
- nlp.vocab,
-            words=["<s>"] + [x.text for x in spacy_toks] + ["</s>"],
- spaces=[True]
- + [x.endswith(" ") for x in spacy_toks_ws[:-1]]
- + [True, False],
- )
- assert len(doc) == aligned_feats.size(0)
- doc.user_token_hooks["vector"] = lambda token: aligned_feats[token.i]
- return doc
-
- def fill_mask(self, masked_input: str, topk: int = 5):
-        masked_token = "<mask>"
- assert (
- masked_token in masked_input and masked_input.count(masked_token) == 1
- ), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(
- masked_token
- )
-
- text_spans = masked_input.split(masked_token)
- text_spans_bpe = (
- (" {0} ".format(masked_token))
- .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
- .strip()
- )
- tokens = self.task.source_dictionary.encode_line(
-            "<s> " + text_spans_bpe + " </s>",
- append_eos=False,
- add_if_not_exist=False,
- )
-
- masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False)
- if tokens.dim() == 1:
- tokens = tokens.unsqueeze(0)
-
- with utils.model_eval(self.model):
- features, extra = self.model(
- tokens.long().to(device=self.device),
- features_only=False,
- return_all_hiddens=False,
- )
- logits = features[0, masked_index, :].squeeze()
- prob = logits.softmax(dim=0)
- values, index = prob.topk(k=topk, dim=0)
- topk_predicted_token_bpe = self.task.source_dictionary.string(index)
-
- topk_filled_outputs = []
- for index, predicted_token_bpe in enumerate(
- topk_predicted_token_bpe.split(" ")
- ):
- predicted_token = self.bpe.decode(predicted_token_bpe)
- # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306
- if predicted_token_bpe.startswith("\u2581"):
- predicted_token = " " + predicted_token
- if " {0}".format(masked_token) in masked_input:
- topk_filled_outputs.append(
- (
- masked_input.replace(
- " {0}".format(masked_token), predicted_token
- ),
- values[index].item(),
- predicted_token,
- )
- )
- else:
- topk_filled_outputs.append(
- (
- masked_input.replace(masked_token, predicted_token),
- values[index].item(),
- predicted_token,
- )
- )
- return topk_filled_outputs
-
- def disambiguate_pronoun(self, sentence: str) -> bool:
- """
- Usage::
-
- >>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.')
- True
-
- >>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.')
- 'The trophy'
- """
- assert hasattr(
- self.task, "disambiguate_pronoun"
- ), "roberta.disambiguate_pronoun() requires a model trained with the WSC task."
- with utils.model_eval(self.model):
- return self.task.disambiguate_pronoun(
- self.model, sentence, use_cuda=self.device.type == "cuda"
- )
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/translation_moe/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/translation_moe/README.md
deleted file mode 100644
index 2e5c8af617f410f64ca38d29447bd05b6af8c5a8..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/translation_moe/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)
-
-This page includes instructions for reproducing results from the paper [Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)](https://arxiv.org/abs/1902.07816).
-
-## Download data
-
-First, follow the [instructions to download and preprocess the WMT'17 En-De dataset](../translation#prepare-wmt14en2desh).
-Make sure to learn a joint vocabulary by passing the `--joined-dictionary` option to `fairseq-preprocess`.
-
-## Train a model
-
-Then we can train a mixture of experts model using the `translation_moe` task.
-Use the `--method` flag to choose the MoE variant; we support hard mixtures with a learned or uniform prior (`--method hMoElp` and `hMoEup`, respectively) and soft mixtures (`--method sMoElp` and `sMoEup`).
-The model is trained with online responsibility assignment and shared parameterization.
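-
-As a rough picture of hard responsibility assignment (an illustrative sketch only, not the fairseq implementation; `experts` is a hypothetical list of per-example classifiers):
-
-```python
-import torch
-import torch.nn.functional as F
-
-
-def hard_moe_step(experts, optimizer, src, tgt):
-    # E-step: score the batch with every expert and assign each example to
-    # the expert with the lowest loss (its "responsibility").
-    with torch.no_grad():
-        losses = torch.stack(
-            [F.cross_entropy(e(src), tgt, reduction="none") for e in experts]
-        )  # (num_experts, batch)
-        winners = losses.argmin(dim=0)
-
-    # M-step: update each expert only on the examples it is responsible for.
-    optimizer.zero_grad()
-    per_expert = []
-    for k, expert in enumerate(experts):
-        mask = winners == k
-        if mask.any():
-            per_expert.append(
-                F.cross_entropy(expert(src[mask]), tgt[mask], reduction="sum")
-            )
-    loss = torch.stack(per_expert).sum() / src.size(0)
-    loss.backward()
-    optimizer.step()
-```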
-
-The following command will train a `hMoElp` model with `3` experts:
-```bash
-fairseq-train --ddp-backend='legacy_ddp' \
- data-bin/wmt17_en_de \
- --max-update 100000 \
- --task translation_moe --user-dir examples/translation_moe/translation_moe_src \
- --method hMoElp --mean-pool-gating-network \
- --num-experts 3 \
- --arch transformer_wmt_en_de --share-all-embeddings \
- --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
- --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 4000 \
- --lr 0.0007 \
- --dropout 0.1 --weight-decay 0.0 --criterion cross_entropy \
- --max-tokens 3584
-```
-
-## Translate
-
-Once a model is trained, we can generate translations from different experts using the `--gen-expert` option.
-For example, to generate from expert 0:
-```bash
-fairseq-generate data-bin/wmt17_en_de \
- --path checkpoints/checkpoint_best.pt \
- --beam 1 --remove-bpe \
- --task translation_moe --user-dir examples/translation_moe/translation_moe_src \
- --method hMoElp --mean-pool-gating-network \
- --num-experts 3 \
- --gen-expert 0
-```
-
-## Evaluate
-
-First download a tokenized version of the WMT'14 En-De test set with multiple references:
-```bash
-wget dl.fbaipublicfiles.com/fairseq/data/wmt14-en-de.extra_refs.tok
-```
-
-Next apply BPE on the fly and run generation for each expert:
-```bash
-BPE_CODE=examples/translation/wmt17_en_de/code
-for EXPERT in $(seq 0 2); do \
- cat wmt14-en-de.extra_refs.tok \
- | grep ^S | cut -f 2 \
- | fairseq-interactive data-bin/wmt17_en_de \
- --path checkpoints/checkpoint_best.pt \
- --beam 1 \
- --bpe subword_nmt --bpe-codes $BPE_CODE \
- --buffer-size 500 --max-tokens 6000 \
- --task translation_moe --user-dir examples/translation_moe/translation_moe_src \
- --method hMoElp --mean-pool-gating-network \
- --num-experts 3 \
- --gen-expert $EXPERT ; \
-done > wmt14-en-de.extra_refs.tok.gen.3experts
-```
-
-Finally use `score.py` to compute pairwise BLEU and average oracle BLEU:
-```bash
-python examples/translation_moe/score.py --sys wmt14-en-de.extra_refs.tok.gen.3experts --ref wmt14-en-de.extra_refs.tok
-# pairwise BLEU: 48.26
-# #refs covered: 2.11
-# multi-reference BLEU (leave-one-out): 59.46
-```
-This matches row 3 from Table 7 in the paper.
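-
-For intuition, pairwise BLEU scores each expert's output against every other expert's output for the same source sentence, so lower values indicate more diverse translations. A rough per-sentence sketch (assuming `sacrebleu` is installed; the actual corpus-level computation is done by `score.py`):
-
-```python
-import itertools
-
-import sacrebleu
-
-
-def pairwise_bleu(hypotheses):
-    # Average sentence-level BLEU over all ordered pairs of expert outputs.
-    scores = [
-        sacrebleu.sentence_bleu(hyp, [ref]).score
-        for hyp, ref in itertools.permutations(hypotheses, 2)
-    ]
-    return sum(scores) / len(scores)
-```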
-
-## Citation
-
-```bibtex
-@article{shen2019mixture,
- title = {Mixture Models for Diverse Machine Translation: Tricks of the Trade},
- author = {Tianxiao Shen and Myle Ott and Michael Auli and Marc'Aurelio Ranzato},
- journal = {International Conference on Machine Learning},
- year = 2019,
-}
-```
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/simultaneous_translation.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/simultaneous_translation.py
deleted file mode 100644
index 11c7dc1ea966a54f8915ef164377e40f90e851a1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/simultaneous_translation.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from fairseq.tasks import register_task
-from fairseq.tasks.speech_to_text import SpeechToTextTask
-from fairseq.tasks.translation import (
- TranslationTask, TranslationConfig
-)
-
-try:
- import examples.simultaneous_translation # noqa
- import_successful = True
-except BaseException:
- import_successful = False
-
-
-logger = logging.getLogger(__name__)
-
-
-def check_import(flag):
- if not flag:
- raise ImportError(
- "'examples.simultaneous_translation' is not correctly imported. "
-            "Please consider running `pip install -e $FAIRSEQ_DIR`."
- )
-
-
-@register_task("simul_speech_to_text")
-class SimulSpeechToTextTask(SpeechToTextTask):
- def __init__(self, args, tgt_dict):
- check_import(import_successful)
- super().__init__(args, tgt_dict)
-
-
-@register_task("simul_text_to_text", dataclass=TranslationConfig)
-class SimulTextToTextTask(TranslationTask):
- def __init__(self, cfg, src_dict, tgt_dict):
- check_import(import_successful)
- super().__init__(cfg, src_dict, tgt_dict)
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_text_joint_to_text/models/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_text_joint_to_text/models/__init__.py
deleted file mode 100644
index 7a394c7e4f25bfef8603596ca3629e65ca7b0d8b..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_text_joint_to_text/models/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import importlib
-import os
-
-for file in os.listdir(os.path.dirname(__file__)):
- if file.endswith(".py") and not file.startswith("_"):
- model_name = file[: file.find(".py")]
- importlib.import_module(
- "examples.speech_text_joint_to_text.models." + model_name
- )
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/adaptive_input.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/adaptive_input.py
deleted file mode 100644
index 446534a9f8b87337a4dd752944ea386ff7cf7965..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/adaptive_input.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-from typing import List
-
-import torch
-from fairseq.modules.quant_noise import quant_noise
-from torch import nn
-
-
-class AdaptiveInput(nn.Module):
- def __init__(
- self,
- vocab_size: int,
- padding_idx: int,
- initial_dim: int,
- factor: float,
- output_dim: int,
- cutoff: List[int],
- q_noise: float = 0,
- qn_block_size: int = 8,
- ):
- super().__init__()
-
- if vocab_size > cutoff[-1]:
- cutoff = cutoff + [vocab_size]
- else:
- assert (
- vocab_size == cutoff[-1]
- ), "cannot specify cutoff larger than vocab size"
-
- self.cutoff = cutoff
- self.embedding_dim = output_dim
- self.padding_idx = padding_idx
-
- self.embeddings = nn.ModuleList()
- for i in range(len(self.cutoff)):
- prev = self.cutoff[i - 1] if i > 0 else 0
- size = self.cutoff[i] - prev
- dim = int(initial_dim // (factor ** i))
- seq = nn.Sequential(
- nn.Embedding(size, dim, self.padding_idx),
- quant_noise(
- nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size
- ),
- )
-
- self.embeddings.append(seq)
- self.padding_idx = None
- self.padding_idx = padding_idx
-
- def init_weights(m):
- if isinstance(m, nn.Embedding):
- nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5)
- nn.init.constant_(m.weight[padding_idx], 0)
- elif hasattr(m, "weight"):
- nn.init.xavier_uniform_(m.weight)
-
- self.apply(init_weights)
-
- self.register_buffer("_float_tensor", torch.FloatTensor(1))
-
- def weights_for_band(self, band: int):
- return self.embeddings[band][0].weight, self.embeddings[band][1].weight
-
- def forward(self, input: torch.Tensor):
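-        # Each frequency band (a slice of the vocabulary defined by `cutoff`)
-        # has its own embedding table; embed only the tokens that fall in the
-        # band, project them to the shared output_dim, and scatter them back.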
- result = self._float_tensor.new(input.shape + (self.embedding_dim,))
- for i in range(len(self.cutoff)):
- mask = input.lt(self.cutoff[i])
- if i > 0:
- mask.mul_(input.ge(self.cutoff[i - 1]))
- chunk_input = input[mask] - self.cutoff[i - 1]
- else:
- chunk_input = input[mask]
- if mask.any():
- result[mask] = self.embeddings[i](chunk_input)
- return result
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/README.md b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/README.md
deleted file mode 100644
index d3e1d5cf533555e19c6326777f792ac82a560a84..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# Probabilistic two-stage detection
-Two-stage object detectors that use class-agnostic one-stage detectors as the proposal network.
-
-
-
-
-> [**Probabilistic two-stage detection**](http://arxiv.org/abs/2103.07461),
-> Xingyi Zhou, Vladlen Koltun, Philipp Krähenbühl,
-> *arXiv technical report ([arXiv 2103.07461](http://arxiv.org/abs/2103.07461))*
-
-Contact: [zhouxy@cs.utexas.edu](mailto:zhouxy@cs.utexas.edu). Any questions or discussions are welcomed!
-
-## Abstract
-
-We develop a probabilistic interpretation of two-stage object detection. We show that this probabilistic interpretation motivates a number of common empirical training practices. It also suggests changes to two-stage detection pipelines. Specifically, the first stage should infer proper object-vs-background likelihoods, which should then inform the overall score of the detector. A standard region proposal network (RPN) cannot infer this likelihood sufficiently well, but many one-stage detectors can. We show how to build a probabilistic two-stage detector from any state-of-the-art one-stage detector. The resulting detectors are faster and more accurate than both their one- and two-stage precursors. Our detector achieves 56.4 mAP on COCO test-dev with single-scale testing, outperforming all published results. Using a lightweight backbone, our detector achieves 49.2 mAP on COCO at 33 fps on a Titan Xp.
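-
-A minimal sketch of that score combination (illustrative only; the function name and tensor shapes are assumptions, not code from this repo):
-
-```python
-import torch
-
-
-def combine_two_stage_scores(objectness, class_logits):
-    """score = P(object) * P(class | object).
-
-    objectness: (N,) first-stage object-vs-background probabilities.
-    class_logits: (N, C) second-stage classification logits for the same proposals.
-    """
-    return objectness.unsqueeze(-1) * class_logits.softmax(dim=-1)
-```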
-
-## Summary
-
-- Two-stage CenterNet: First stage estimates object probabilities, second stage conditionally classifies objects.
-
-- Resulting detector is faster and more accurate than both traditional two-stage detectors (fewer proposals required), and one-stage detectors (lighter first stage head).
-
-- Our best model achieves 56.4 mAP on COCO test-dev.
-
-- This repo also includes a detectron2-based CenterNet implementation with better accuracy (42.5 mAP at 70FPS) and a new FPN version of CenterNet (40.2 mAP with Res50_1x).
-
-## Main results
-
-All models are trained with multi-scale training, and tested with a single scale. The FPS is tested on a Titan RTX GPU.
-More models and details can be found in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md).
-
-#### COCO
-
-| Model | COCO val mAP | FPS |
-|-------------------------------------------|---------------|-------|
-| CenterNet-S4_DLA_8x | 42.5 | 71 |
-| CenterNet2_R50_1x | 42.9 | 24 |
-| CenterNet2_X101-DCN_2x | 49.9 | 8 |
-| CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST | 56.1 | 5 |
-| CenterNet2_DLA-BiFPN-P5_24x_ST | 49.2 | 38 |
-
-
-#### LVIS
-
-| Model | val mAP box |
-| ------------------------- | ----------- |
-| CenterNet2_R50_1x | 26.5 |
-| CenterNet2_FedLoss_R50_1x | 28.3 |
-
-
-#### Objects365
-
-| Model | val mAP |
-|-------------------------------------------|----------|
-| CenterNet2_R50_1x | 22.6 |
-
-## Installation
-
-Our project is developed on [detectron2](https://github.com/facebookresearch/detectron2). Please follow the official detectron2 [installation](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). All our code is under `projects/CenterNet2/`. In theory, you should be able to copy-paste `projects/CenterNet2/` to the latest detectron2 release or your own detectron2 repo to run our project. There might be API changes in future detectron2 releases that make it incompatible.
-
-We use the default detectron2 demo script. To run inference on an image folder using our pre-trained model, run
-
-~~~
-python projects/CenterNet2/demo/demo.py --config-file projects/CenterNet2/configs/CenterNet2_R50_1x.yaml --input path/to/image/ --opts MODEL.WEIGHTS models/CenterNet2_R50_1x.pth
-~~~
-
-## Benchmark evaluation and training
-
-Please check detectron2 [GETTING_STARTED.md](https://github.com/facebookresearch/detectron2/blob/master/GETTING_STARTED.md) for running evaluation and training. Our config files are under `projects/CenterNet2/configs` and the pre-trained models are in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md).
-
-
-## License
-
-Our code under `projects/CenterNet2/` is under [Apache 2.0 license](projects/CenterNet2/LICENSE). `projects/CenterNet2/centernet/modeling/backbone/bifpn_fcos.py` are from [AdelaiDet](https://github.com/aim-uofa/AdelaiDet), which follows the original [non-commercial license](https://github.com/aim-uofa/AdelaiDet/blob/master/LICENSE). The code from detectron2 follows the original [Apache 2.0 license](LICENSE).
-
-## Citation
-
-If you find this project useful for your research, please use the following BibTeX entry.
-
- @inproceedings{zhou2021probablistic,
- title={Probabilistic two-stage detection},
- author={Zhou, Xingyi and Koltun, Vladlen and Kr{\"a}henb{\"u}hl, Philipp},
- booktitle={arXiv preprint arXiv:2103.07461},
- year={2021}
- }
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py
deleted file mode 100644
index 245c88482a9e2405e5a912b5c560aed78a614a13..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import copy
-import json
-import os
-from collections import defaultdict
-
-# This mapping is extracted from the official LVIS mapping:
-# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
-COCO_SYNSET_CATEGORIES = [
- {"synset": "person.n.01", "coco_cat_id": 1},
- {"synset": "bicycle.n.01", "coco_cat_id": 2},
- {"synset": "car.n.01", "coco_cat_id": 3},
- {"synset": "motorcycle.n.01", "coco_cat_id": 4},
- {"synset": "airplane.n.01", "coco_cat_id": 5},
- {"synset": "bus.n.01", "coco_cat_id": 6},
- {"synset": "train.n.01", "coco_cat_id": 7},
- {"synset": "truck.n.01", "coco_cat_id": 8},
- {"synset": "boat.n.01", "coco_cat_id": 9},
- {"synset": "traffic_light.n.01", "coco_cat_id": 10},
- {"synset": "fireplug.n.01", "coco_cat_id": 11},
- {"synset": "stop_sign.n.01", "coco_cat_id": 13},
- {"synset": "parking_meter.n.01", "coco_cat_id": 14},
- {"synset": "bench.n.01", "coco_cat_id": 15},
- {"synset": "bird.n.01", "coco_cat_id": 16},
- {"synset": "cat.n.01", "coco_cat_id": 17},
- {"synset": "dog.n.01", "coco_cat_id": 18},
- {"synset": "horse.n.01", "coco_cat_id": 19},
- {"synset": "sheep.n.01", "coco_cat_id": 20},
- {"synset": "beef.n.01", "coco_cat_id": 21},
- {"synset": "elephant.n.01", "coco_cat_id": 22},
- {"synset": "bear.n.01", "coco_cat_id": 23},
- {"synset": "zebra.n.01", "coco_cat_id": 24},
- {"synset": "giraffe.n.01", "coco_cat_id": 25},
- {"synset": "backpack.n.01", "coco_cat_id": 27},
- {"synset": "umbrella.n.01", "coco_cat_id": 28},
- {"synset": "bag.n.04", "coco_cat_id": 31},
- {"synset": "necktie.n.01", "coco_cat_id": 32},
- {"synset": "bag.n.06", "coco_cat_id": 33},
- {"synset": "frisbee.n.01", "coco_cat_id": 34},
- {"synset": "ski.n.01", "coco_cat_id": 35},
- {"synset": "snowboard.n.01", "coco_cat_id": 36},
- {"synset": "ball.n.06", "coco_cat_id": 37},
- {"synset": "kite.n.03", "coco_cat_id": 38},
- {"synset": "baseball_bat.n.01", "coco_cat_id": 39},
- {"synset": "baseball_glove.n.01", "coco_cat_id": 40},
- {"synset": "skateboard.n.01", "coco_cat_id": 41},
- {"synset": "surfboard.n.01", "coco_cat_id": 42},
- {"synset": "tennis_racket.n.01", "coco_cat_id": 43},
- {"synset": "bottle.n.01", "coco_cat_id": 44},
- {"synset": "wineglass.n.01", "coco_cat_id": 46},
- {"synset": "cup.n.01", "coco_cat_id": 47},
- {"synset": "fork.n.01", "coco_cat_id": 48},
- {"synset": "knife.n.01", "coco_cat_id": 49},
- {"synset": "spoon.n.01", "coco_cat_id": 50},
- {"synset": "bowl.n.03", "coco_cat_id": 51},
- {"synset": "banana.n.02", "coco_cat_id": 52},
- {"synset": "apple.n.01", "coco_cat_id": 53},
- {"synset": "sandwich.n.01", "coco_cat_id": 54},
- {"synset": "orange.n.01", "coco_cat_id": 55},
- {"synset": "broccoli.n.01", "coco_cat_id": 56},
- {"synset": "carrot.n.01", "coco_cat_id": 57},
- {"synset": "frank.n.02", "coco_cat_id": 58},
- {"synset": "pizza.n.01", "coco_cat_id": 59},
- {"synset": "doughnut.n.02", "coco_cat_id": 60},
- {"synset": "cake.n.03", "coco_cat_id": 61},
- {"synset": "chair.n.01", "coco_cat_id": 62},
- {"synset": "sofa.n.01", "coco_cat_id": 63},
- {"synset": "pot.n.04", "coco_cat_id": 64},
- {"synset": "bed.n.01", "coco_cat_id": 65},
- {"synset": "dining_table.n.01", "coco_cat_id": 67},
- {"synset": "toilet.n.02", "coco_cat_id": 70},
- {"synset": "television_receiver.n.01", "coco_cat_id": 72},
- {"synset": "laptop.n.01", "coco_cat_id": 73},
- {"synset": "mouse.n.04", "coco_cat_id": 74},
- {"synset": "remote_control.n.01", "coco_cat_id": 75},
- {"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
- {"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
- {"synset": "microwave.n.02", "coco_cat_id": 78},
- {"synset": "oven.n.01", "coco_cat_id": 79},
- {"synset": "toaster.n.02", "coco_cat_id": 80},
- {"synset": "sink.n.01", "coco_cat_id": 81},
- {"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
- {"synset": "book.n.01", "coco_cat_id": 84},
- {"synset": "clock.n.01", "coco_cat_id": 85},
- {"synset": "vase.n.01", "coco_cat_id": 86},
- {"synset": "scissors.n.01", "coco_cat_id": 87},
- {"synset": "teddy.n.01", "coco_cat_id": 88},
- {"synset": "hand_blower.n.01", "coco_cat_id": 89},
- {"synset": "toothbrush.n.01", "coco_cat_id": 90},
-]
-
-
-def cocofy_lvis(input_filename, output_filename):
- """
- Filter LVIS instance segmentation annotations to remove all categories that are not included in
- COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
- the output json are the incontiguous COCO dataset ids.
-
- Args:
- input_filename (str): path to the LVIS json file.
- output_filename (str): path to the COCOfied json file.
- """
-
- with open(input_filename, "r") as f:
- lvis_json = json.load(f)
-
- lvis_annos = lvis_json.pop("annotations")
- cocofied_lvis = copy.deepcopy(lvis_json)
- lvis_json["annotations"] = lvis_annos
-
- # Mapping from lvis cat id to coco cat id via synset
- lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
- synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
- # Synsets that we will keep in the dataset
- synsets_to_keep = set(synset_to_coco_cat_id.keys())
- coco_cat_id_with_instances = defaultdict(int)
-
- new_annos = []
- ann_id = 1
- for ann in lvis_annos:
- lvis_cat_id = ann["category_id"]
- synset = lvis_cat_id_to_synset[lvis_cat_id]
- if synset not in synsets_to_keep:
- continue
- coco_cat_id = synset_to_coco_cat_id[synset]
- new_ann = copy.deepcopy(ann)
- new_ann["category_id"] = coco_cat_id
- new_ann["id"] = ann_id
- ann_id += 1
- new_annos.append(new_ann)
- coco_cat_id_with_instances[coco_cat_id] += 1
- cocofied_lvis["annotations"] = new_annos
-
- for image in cocofied_lvis["images"]:
- for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
- new_category_list = []
- for lvis_cat_id in image[key]:
- synset = lvis_cat_id_to_synset[lvis_cat_id]
- if synset not in synsets_to_keep:
- continue
- coco_cat_id = synset_to_coco_cat_id[synset]
- new_category_list.append(coco_cat_id)
- coco_cat_id_with_instances[coco_cat_id] += 1
- image[key] = new_category_list
-
- coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
-
- new_categories = []
- for cat in lvis_json["categories"]:
- synset = cat["synset"]
- if synset not in synsets_to_keep:
- continue
- coco_cat_id = synset_to_coco_cat_id[synset]
- if coco_cat_id not in coco_cat_id_with_instances:
- continue
- new_cat = copy.deepcopy(cat)
- new_cat["id"] = coco_cat_id
- new_categories.append(new_cat)
- cocofied_lvis["categories"] = new_categories
-
- with open(output_filename, "w") as f:
- json.dump(cocofied_lvis, f)
- print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))
-
-
-if __name__ == "__main__":
- dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
- for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
- print("Start COCOfing {}.".format(s))
- cocofy_lvis(
- os.path.join(dataset_dir, "{}.json".format(s)),
- os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
- )
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docker/deploy.Dockerfile b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docker/deploy.Dockerfile
deleted file mode 100644
index 30b4ed774368af89d654c9f01850d769e6cf9f52..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docker/deploy.Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This file defines a container that compiles the C++ examples of detectron2.
-# See docker/README.md for usage.
-
-# Depends on the image produced by "./Dockerfile"
-FROM detectron2:v0
-
-USER appuser
-ENV HOME=/home/appuser
-WORKDIR $HOME
-
-# Let torchvision find libtorch
-ENV CMAKE_PREFIX_PATH=$HOME/.local/lib/python3.6/site-packages/torch/
-
-RUN sudo apt-get update && sudo apt-get install libopencv-dev --yes
-
-# install libtorchvision
-RUN git clone --branch v0.11.1 https://github.com/pytorch/vision/
-RUN mkdir vision/build && cd vision/build && \
- cmake .. -DCMAKE_INSTALL_PREFIX=$HOME/.local -DCMAKE_BUILD_TYPE=Release -DWITH_CUDA=on -DTORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST && \
- make -j && make install
-
-# make our installation take effect
-ENV CPATH=$HOME/.local/include \
- LIBRARY_PATH=$HOME/.local/lib \
- LD_LIBRARY_PATH=$HOME/.local/lib
-
-
-# build C++ examples of detectron2
-RUN cd detectron2_repo/tools/deploy && mkdir build && cd build && \
- cmake -DTORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST .. && make
-# binaries will be available under tools/deploy/build
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docs/tutorials/builtin_datasets.md b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docs/tutorials/builtin_datasets.md
deleted file mode 100644
index 0ba82423ad498bdd86274ada56a201134a590d94..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docs/tutorials/builtin_datasets.md
+++ /dev/null
@@ -1 +0,0 @@
-../../datasets/README.md
\ No newline at end of file
diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/models/utils/tools.py b/spaces/OpenMotionLab/MotionGPT/mGPT/models/utils/tools.py
deleted file mode 100644
index 89ecab5616c1f0d46ed5bc9b348c5e6ad3ee603d..0000000000000000000000000000000000000000
--- a/spaces/OpenMotionLab/MotionGPT/mGPT/models/utils/tools.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import torch.nn as nn
-
-def remove_padding(tensors, lengths):
- return [tensor[:tensor_length] for tensor, tensor_length in zip(tensors, lengths)]
-
-class AutoParams(nn.Module):
- def __init__(self, **kargs):
- try:
- for param in self.needed_params:
- if param in kargs:
- setattr(self, param, kargs[param])
- else:
- raise ValueError(f"{param} is needed.")
-        except AttributeError:
- pass
-
- try:
- for param, default in self.optional_params.items():
- if param in kargs and kargs[param] is not None:
- setattr(self, param, kargs[param])
- else:
- setattr(self, param, default)
-        except AttributeError:
- pass
- super().__init__()
-
-
-# taken from joeynmt repo
-def freeze_params(module: nn.Module) -> None:
- """
- Freeze the parameters of this module,
- i.e. do not update them during training
-
- :param module: freeze parameters of this module
- """
- for _, p in module.named_parameters():
- p.requires_grad = False
diff --git a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py b/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py
deleted file mode 100644
index 677b9eaf25e25e98c6e7d39a6c77a29f2f313d3c..0000000000000000000000000000000000000000
--- a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py
+++ /dev/null
@@ -1,341 +0,0 @@
-# ------------------------------------------------------------------------------
-# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py
-# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
-# ------------------------------------------------------------------------------
-
-import copy
-import logging
-
-import numpy as np
-import torch
-
-from detectron2.data import MetadataCatalog
-from detectron2.config import configurable
-from detectron2.data import detection_utils as utils
-from detectron2.data import transforms as T
-from detectron2.structures import BitMasks, Instances
-from oneformer.utils.box_ops import masks_to_boxes
-from oneformer.data.tokenizer import SimpleTokenizer, Tokenize
-
-__all__ = ["COCOUnifiedNewBaselineDatasetMapper"]
-
-
-def build_transform_gen(cfg, is_train):
- """
- Create a list of default :class:`Augmentation` from config.
- Now it includes resizing and flipping.
- Returns:
- list[Augmentation]
- """
- assert is_train, "Only support training augmentation"
- image_size = cfg.INPUT.IMAGE_SIZE
- min_scale = cfg.INPUT.MIN_SCALE
- max_scale = cfg.INPUT.MAX_SCALE
-
- augmentation = []
-
- if cfg.INPUT.RANDOM_FLIP != "none":
- augmentation.append(
- T.RandomFlip(
- horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
- vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
- )
- )
-
- augmentation.extend([
- T.ResizeScale(
- min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size
- ),
- T.FixedSizeCrop(crop_size=(image_size, image_size)),
- ])
-
- return augmentation
-
-
-# This is specifically designed for the COCO dataset.
-class COCOUnifiedNewBaselineDatasetMapper:
- """
- A callable which takes a dataset dict in Detectron2 Dataset format,
- and map it into a format used by OneFormer.
-
- This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.
-
- The callable currently does the following:
-
- 1. Read the image from "file_name"
- 2. Applies geometric transforms to the image and annotation
- 3. Find and applies suitable cropping to the image and annotation
- 4. Prepare image and annotation to Tensors
- """
-
- @configurable
- def __init__(
- self,
- is_train=True,
- *,
- num_queries,
- tfm_gens,
- meta,
- image_format,
- max_seq_len,
- task_seq_len,
- semantic_prob,
- instance_prob,
- ):
-        """
-        NOTE: this interface is experimental.
-        Args:
-            is_train: for training or inference
-            num_queries: number of text queries (object queries minus the text context length)
-            tfm_gens: data augmentation
-            meta: dataset metadata from :class:`MetadataCatalog`
-            image_format: an image format supported by :func:`detection_utils.read_image`.
-            max_seq_len: maximum token length for the text descriptions
-            task_seq_len: maximum token length for the task prompt
-            semantic_prob: probability threshold for sampling the semantic task
-            instance_prob: probability threshold for sampling the instance task
-        """
- self.tfm_gens = tfm_gens
- logging.getLogger(__name__).info(
- "[COCOUnifiedNewBaselineDatasetMapper] Full TransformGens used in training: {}".format(
- str(self.tfm_gens)
- )
- )
-
- self.img_format = image_format
- self.is_train = is_train
- self.meta = meta
- self.ignore_label = self.meta.ignore_label
- self.num_queries = num_queries
-
- self.things = []
- for k,v in self.meta.thing_dataset_id_to_contiguous_id.items():
- self.things.append(v)
- self.class_names = self.meta.stuff_classes
- self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len)
- self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len)
- self.semantic_prob = semantic_prob
- self.instance_prob = instance_prob
-
- @classmethod
- def from_config(cls, cfg, is_train=True):
- # Build augmentation
- tfm_gens = build_transform_gen(cfg, is_train)
- dataset_names = cfg.DATASETS.TRAIN
- meta = MetadataCatalog.get(dataset_names[0])
-
- ret = {
- "is_train": is_train,
- "meta": meta,
- "tfm_gens": tfm_gens,
- "image_format": cfg.INPUT.FORMAT,
- "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES - cfg.MODEL.TEXT_ENCODER.N_CTX,
- "task_seq_len": cfg.INPUT.TASK_SEQ_LEN,
- "max_seq_len": cfg.INPUT.MAX_SEQ_LEN,
- "semantic_prob": cfg.INPUT.TASK_PROB.SEMANTIC,
- "instance_prob": cfg.INPUT.TASK_PROB.INSTANCE,
- }
- return ret
-
- def _get_semantic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
- instances = Instances(image_shape)
-
- classes = []
- texts = ["a semantic photo"] * self.num_queries
- masks = []
- label = np.ones_like(pan_seg_gt) * self.ignore_label
-
- for segment_info in segments_info:
- class_id = segment_info["category_id"]
- if not segment_info["iscrowd"]:
- mask = pan_seg_gt == segment_info["id"]
- if not np.all(mask == False):
- if class_id not in classes:
- cls_name = self.class_names[class_id]
- classes.append(class_id)
- masks.append(mask)
- num_class_obj[cls_name] += 1
- else:
- idx = classes.index(class_id)
- masks[idx] += mask
-                        masks[idx] = np.clip(masks[idx], 0, 1).astype(bool)
- label[mask] = class_id
-
- num = 0
- for i, cls_name in enumerate(self.class_names):
- if num_class_obj[cls_name] > 0:
- for _ in range(num_class_obj[cls_name]):
- if num >= len(texts):
- break
- texts[num] = f"a photo with a {cls_name}"
- num += 1
-
- classes = np.array(classes)
- instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
- if len(masks) == 0:
- # Some image does not have annotation (all ignored)
- instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
- instances.gt_bboxes = torch.zeros((0, 4))
- else:
- masks = BitMasks(
- torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
- )
- instances.gt_masks = masks.tensor
- # Placeholder bounding boxes for stuff regions. Note that these are not used during training.
- instances.gt_bboxes = torch.stack([torch.tensor([0., 0., 1., 1.])] * instances.gt_masks.shape[0])
- return instances, texts, label
-
- def _get_instance_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
- instances = Instances(image_shape)
-
- classes = []
- texts = ["an instance photo"] * self.num_queries
- masks = []
- label = np.ones_like(pan_seg_gt) * self.ignore_label
-
- for segment_info in segments_info:
- class_id = segment_info["category_id"]
- if class_id in self.things:
- if not segment_info["iscrowd"]:
- mask = pan_seg_gt == segment_info["id"]
- if not np.all(mask == False):
- cls_name = self.class_names[class_id]
- classes.append(class_id)
- masks.append(mask)
- num_class_obj[cls_name] += 1
- label[mask] = class_id
-
- num = 0
- for i, cls_name in enumerate(self.class_names):
- if num_class_obj[cls_name] > 0:
- for _ in range(num_class_obj[cls_name]):
- if num >= len(texts):
- break
- texts[num] = f"a photo with a {cls_name}"
- num += 1
-
- classes = np.array(classes)
- instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
- if len(masks) == 0:
- # Some image does not have annotation (all ignored)
- instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
- instances.gt_bboxes = torch.zeros((0, 4))
- else:
- masks = BitMasks(
- torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
- )
- instances.gt_masks = masks.tensor
- instances.gt_bboxes = masks_to_boxes(instances.gt_masks)
- return instances, texts, label
-
- def _get_panoptic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
- instances = Instances(image_shape)
-
- classes = []
- texts = ["a panoptic photo"] * self.num_queries
- masks = []
- label = np.ones_like(pan_seg_gt) * self.ignore_label
-
- for segment_info in segments_info:
- class_id = segment_info["category_id"]
- if not segment_info["iscrowd"]:
- mask = pan_seg_gt == segment_info["id"]
- if not np.all(mask == False):
- cls_name = self.class_names[class_id]
- classes.append(class_id)
- masks.append(mask)
- num_class_obj[cls_name] += 1
- label[mask] = class_id
-
- num = 0
- for i, cls_name in enumerate(self.class_names):
- if num_class_obj[cls_name] > 0:
- for _ in range(num_class_obj[cls_name]):
- if num >= len(texts):
- break
- texts[num] = f"a photo with a {cls_name}"
- num += 1
-
- classes = np.array(classes)
- instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
- if len(masks) == 0:
- # Some image does not have annotation (all ignored)
- instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
- instances.gt_bboxes = torch.zeros((0, 4))
- else:
- masks = BitMasks(
- torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
- )
- instances.gt_masks = masks.tensor
- instances.gt_bboxes = masks_to_boxes(instances.gt_masks)
- for i in range(instances.gt_classes.shape[0]):
- # Placeholder bounding boxes for stuff regions. Note that these are not used during training.
- if instances.gt_classes[i].item() not in self.things:
- instances.gt_bboxes[i] = torch.tensor([0., 0., 1., 1.])
- return instances, texts, label
-
- def __call__(self, dataset_dict):
- """
- Args:
- dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
-
- Returns:
- dict: a format that builtin models in detectron2 accept
- """
- dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
- image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
- utils.check_image_size(dataset_dict, image)
-
- image, transforms = T.apply_transform_gens(self.tfm_gens, image)
- image_shape = image.shape[:2] # h, w
-
- # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
- # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
- # Therefore it's important to use torch.Tensor.
- dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
-
- if not self.is_train:
- # USER: Modify this if you want to keep them for some reason.
- dataset_dict.pop("annotations", None)
- return dataset_dict
-
- # semantic segmentation
- if "sem_seg_file_name" in dataset_dict:
- # PyTorch transformation not implemented for uint16, so converting it to double first
- sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double")
- sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
- else:
- sem_seg_gt = None
-
- if "pan_seg_file_name" in dataset_dict:
- pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
- segments_info = dataset_dict["segments_info"]
-
- # apply the same transformation to panoptic segmentation
- pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)
-
- from panopticapi.utils import rgb2id
- pan_seg_gt = rgb2id(pan_seg_gt)
-
- prob_task = np.random.uniform(0,1.)
-
- num_class_obj = {}
-
- for name in self.class_names:
- num_class_obj[name] = 0
-
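-            # prob_task is compared against cumulative thresholds: semantic with
-            # probability semantic_prob, instance with probability
-            # (instance_prob - semantic_prob), and panoptic otherwise.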
- if prob_task < self.semantic_prob:
- task = "The task is semantic"
- instances, text, sem_seg = self._get_semantic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
- elif prob_task < self.instance_prob:
- task = "The task is instance"
- instances, text, sem_seg = self._get_instance_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
- else:
- task = "The task is panoptic"
- instances, text, sem_seg = self._get_panoptic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
-
-
- dataset_dict["sem_seg"] = torch.from_numpy(sem_seg).long()
- dataset_dict["instances"] = instances
- dataset_dict["orig_shape"] = image_shape
- dataset_dict["task"] = task
- dataset_dict["text"] = text
- dataset_dict["thing_ids"] = self.things
-
- return dataset_dict
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/priority.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/priority.py
deleted file mode 100644
index 64cc4e3a05f8d5b89ab6eb32461e6e80f1d62e67..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/runner/priority.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from enum import Enum
-
-
-class Priority(Enum):
- """Hook priority levels.
-
- +--------------+------------+
- | Level | Value |
- +==============+============+
- | HIGHEST | 0 |
- +--------------+------------+
- | VERY_HIGH | 10 |
- +--------------+------------+
- | HIGH | 30 |
- +--------------+------------+
- | ABOVE_NORMAL | 40 |
- +--------------+------------+
- | NORMAL | 50 |
- +--------------+------------+
- | BELOW_NORMAL | 60 |
- +--------------+------------+
- | LOW | 70 |
- +--------------+------------+
- | VERY_LOW | 90 |
- +--------------+------------+
- | LOWEST | 100 |
- +--------------+------------+
- """
-
- HIGHEST = 0
- VERY_HIGH = 10
- HIGH = 30
- ABOVE_NORMAL = 40
- NORMAL = 50
- BELOW_NORMAL = 60
- LOW = 70
- VERY_LOW = 90
- LOWEST = 100
-
-
-def get_priority(priority):
- """Get priority value.
-
- Args:
- priority (int or str or :obj:`Priority`): Priority.
-
- Returns:
- int: The priority value.
- """
- if isinstance(priority, int):
- if priority < 0 or priority > 100:
- raise ValueError('priority must be between 0 and 100')
- return priority
- elif isinstance(priority, Priority):
- return priority.value
- elif isinstance(priority, str):
- return Priority[priority.upper()].value
- else:
- raise TypeError('priority must be an integer or Priority enum value')
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/fast_scnn.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/fast_scnn.py
deleted file mode 100644
index 38c2350177cbc2066f45add568d30eb6041f74f3..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/fast_scnn.py
+++ /dev/null
@@ -1,375 +0,0 @@
-import torch
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, constant_init,
- kaiming_init)
-from torch.nn.modules.batchnorm import _BatchNorm
-
-from annotator.uniformer.mmseg.models.decode_heads.psp_head import PPM
-from annotator.uniformer.mmseg.ops import resize
-from ..builder import BACKBONES
-from ..utils.inverted_residual import InvertedResidual
-
-
-class LearningToDownsample(nn.Module):
- """Learning to downsample module.
-
- Args:
- in_channels (int): Number of input channels.
- dw_channels (tuple[int]): Number of output channels of the first and
- the second depthwise conv (dwconv) layers.
- out_channels (int): Number of output channels of the whole
- 'learning to downsample' module.
- conv_cfg (dict | None): Config of conv layers. Default: None
- norm_cfg (dict | None): Config of norm layers. Default:
- dict(type='BN')
- act_cfg (dict): Config of activation layers. Default:
- dict(type='ReLU')
- """
-
- def __init__(self,
- in_channels,
- dw_channels,
- out_channels,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU')):
- super(LearningToDownsample, self).__init__()
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- dw_channels1 = dw_channels[0]
- dw_channels2 = dw_channels[1]
-
- self.conv = ConvModule(
- in_channels,
- dw_channels1,
- 3,
- stride=2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.dsconv1 = DepthwiseSeparableConvModule(
- dw_channels1,
- dw_channels2,
- kernel_size=3,
- stride=2,
- padding=1,
- norm_cfg=self.norm_cfg)
- self.dsconv2 = DepthwiseSeparableConvModule(
- dw_channels2,
- out_channels,
- kernel_size=3,
- stride=2,
- padding=1,
- norm_cfg=self.norm_cfg)
-
- def forward(self, x):
- x = self.conv(x)
- x = self.dsconv1(x)
- x = self.dsconv2(x)
- return x
-
-
-class GlobalFeatureExtractor(nn.Module):
- """Global feature extractor module.
-
- Args:
- in_channels (int): Number of input channels of the GFE module.
- Default: 64
- block_channels (tuple[int]): Tuple of ints. Each int specifies the
- number of output channels of each Inverted Residual module.
- Default: (64, 96, 128)
- out_channels(int): Number of output channels of the GFE module.
- Default: 128
- expand_ratio (int): Adjusts number of channels of the hidden layer
- in InvertedResidual by this amount.
- Default: 6
- num_blocks (tuple[int]): Tuple of ints. Each int specifies the
- number of times each Inverted Residual module is repeated.
- The repeated Inverted Residual modules are called a 'group'.
- Default: (3, 3, 3)
- strides (tuple[int]): Tuple of ints. Each int specifies
- the downsampling factor of each 'group'.
- Default: (2, 2, 1)
- pool_scales (tuple[int]): Tuple of ints. Each int specifies
- the parameter required in 'global average pooling' within PPM.
- Default: (1, 2, 3, 6)
- conv_cfg (dict | None): Config of conv layers. Default: None
- norm_cfg (dict | None): Config of norm layers. Default:
- dict(type='BN')
- act_cfg (dict): Config of activation layers. Default:
- dict(type='ReLU')
- align_corners (bool): align_corners argument of F.interpolate.
- Default: False
- """
-
- def __init__(self,
- in_channels=64,
- block_channels=(64, 96, 128),
- out_channels=128,
- expand_ratio=6,
- num_blocks=(3, 3, 3),
- strides=(2, 2, 1),
- pool_scales=(1, 2, 3, 6),
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- align_corners=False):
- super(GlobalFeatureExtractor, self).__init__()
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- assert len(block_channels) == len(num_blocks) == 3
- self.bottleneck1 = self._make_layer(in_channels, block_channels[0],
- num_blocks[0], strides[0],
- expand_ratio)
- self.bottleneck2 = self._make_layer(block_channels[0],
- block_channels[1], num_blocks[1],
- strides[1], expand_ratio)
- self.bottleneck3 = self._make_layer(block_channels[1],
- block_channels[2], num_blocks[2],
- strides[2], expand_ratio)
- self.ppm = PPM(
- pool_scales,
- block_channels[2],
- block_channels[2] // 4,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg,
- align_corners=align_corners)
- self.out = ConvModule(
- block_channels[2] * 2,
- out_channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def _make_layer(self,
- in_channels,
- out_channels,
- blocks,
- stride=1,
- expand_ratio=6):
- layers = [
- InvertedResidual(
- in_channels,
- out_channels,
- stride,
- expand_ratio,
- norm_cfg=self.norm_cfg)
- ]
- for i in range(1, blocks):
- layers.append(
- InvertedResidual(
- out_channels,
- out_channels,
- 1,
- expand_ratio,
- norm_cfg=self.norm_cfg))
- return nn.Sequential(*layers)
-
- def forward(self, x):
- x = self.bottleneck1(x)
- x = self.bottleneck2(x)
- x = self.bottleneck3(x)
- x = torch.cat([x, *self.ppm(x)], dim=1)
- x = self.out(x)
- return x
-
-
-class FeatureFusionModule(nn.Module):
- """Feature fusion module.
-
- Args:
- higher_in_channels (int): Number of input channels of the
- higher-resolution branch.
- lower_in_channels (int): Number of input channels of the
- lower-resolution branch.
- out_channels (int): Number of output channels.
- conv_cfg (dict | None): Config of conv layers. Default: None
- norm_cfg (dict | None): Config of norm layers. Default:
- dict(type='BN')
- act_cfg (dict): Config of activation layers. Default:
- dict(type='ReLU')
- align_corners (bool): align_corners argument of F.interpolate.
- Default: False
- """
-
- def __init__(self,
- higher_in_channels,
- lower_in_channels,
- out_channels,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- align_corners=False):
- super(FeatureFusionModule, self).__init__()
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- self.align_corners = align_corners
- self.dwconv = ConvModule(
- lower_in_channels,
- out_channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.conv_lower_res = ConvModule(
- out_channels,
- out_channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=None)
- self.conv_higher_res = ConvModule(
- higher_in_channels,
- out_channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=None)
- self.relu = nn.ReLU(True)
-
- def forward(self, higher_res_feature, lower_res_feature):
- lower_res_feature = resize(
- lower_res_feature,
- size=higher_res_feature.size()[2:],
- mode='bilinear',
- align_corners=self.align_corners)
- lower_res_feature = self.dwconv(lower_res_feature)
- lower_res_feature = self.conv_lower_res(lower_res_feature)
-
- higher_res_feature = self.conv_higher_res(higher_res_feature)
- out = higher_res_feature + lower_res_feature
- return self.relu(out)
-
-
-@BACKBONES.register_module()
-class FastSCNN(nn.Module):
- """Fast-SCNN Backbone.
-
- Args:
- in_channels (int): Number of input image channels. Default: 3.
- downsample_dw_channels (tuple[int]): Number of output channels after
- the first conv layer & the second conv layer in
- Learning-To-Downsample (LTD) module.
- Default: (32, 48).
- global_in_channels (int): Number of input channels of
- Global Feature Extractor(GFE).
- Equal to number of output channels of LTD.
- Default: 64.
- global_block_channels (tuple[int]): Tuple of integers that describe
- the output channels for each of the MobileNet-v2 bottleneck
- residual blocks in GFE.
- Default: (64, 96, 128).
- global_block_strides (tuple[int]): Tuple of integers
- that describe the strides (downsampling factors) for each of the
- MobileNet-v2 bottleneck residual blocks in GFE.
- Default: (2, 2, 1).
- global_out_channels (int): Number of output channels of GFE.
- Default: 128.
- higher_in_channels (int): Number of input channels of the higher
- resolution branch in FFM.
- Equal to global_in_channels.
- Default: 64.
- lower_in_channels (int): Number of input channels of the lower
- resolution branch in FFM.
- Equal to global_out_channels.
- Default: 128.
- fusion_out_channels (int): Number of output channels of FFM.
- Default: 128.
- out_indices (tuple): Tuple of indices of list
- [higher_res_features, lower_res_features, fusion_output].
- Often set to (0,1,2) to enable aux. heads.
- Default: (0, 1, 2).
- conv_cfg (dict | None): Config of conv layers. Default: None
- norm_cfg (dict | None): Config of norm layers. Default:
- dict(type='BN')
- act_cfg (dict): Config of activation layers. Default:
- dict(type='ReLU')
- align_corners (bool): align_corners argument of F.interpolate.
- Default: False
- """
-
- def __init__(self,
- in_channels=3,
- downsample_dw_channels=(32, 48),
- global_in_channels=64,
- global_block_channels=(64, 96, 128),
- global_block_strides=(2, 2, 1),
- global_out_channels=128,
- higher_in_channels=64,
- lower_in_channels=128,
- fusion_out_channels=128,
- out_indices=(0, 1, 2),
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- align_corners=False):
-
- super(FastSCNN, self).__init__()
- if global_in_channels != higher_in_channels:
- raise AssertionError('Global Input Channels must be the same \
- with Higher Input Channels!')
- elif global_out_channels != lower_in_channels:
- raise AssertionError('Global Output Channels must be the same \
- with Lower Input Channels!')
-
- self.in_channels = in_channels
- self.downsample_dw_channels1 = downsample_dw_channels[0]
- self.downsample_dw_channels2 = downsample_dw_channels[1]
- self.global_in_channels = global_in_channels
- self.global_block_channels = global_block_channels
- self.global_block_strides = global_block_strides
- self.global_out_channels = global_out_channels
- self.higher_in_channels = higher_in_channels
- self.lower_in_channels = lower_in_channels
- self.fusion_out_channels = fusion_out_channels
- self.out_indices = out_indices
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- self.align_corners = align_corners
- self.learning_to_downsample = LearningToDownsample(
- in_channels,
- downsample_dw_channels,
- global_in_channels,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.global_feature_extractor = GlobalFeatureExtractor(
- global_in_channels,
- global_block_channels,
- global_out_channels,
- strides=self.global_block_strides,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg,
- align_corners=self.align_corners)
- self.feature_fusion = FeatureFusionModule(
- higher_in_channels,
- lower_in_channels,
- fusion_out_channels,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg,
- align_corners=self.align_corners)
-
- def init_weights(self, pretrained=None):
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- kaiming_init(m)
- elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
- constant_init(m, 1)
-
- def forward(self, x):
- higher_res_features = self.learning_to_downsample(x)
- lower_res_features = self.global_feature_extractor(higher_res_features)
- fusion_output = self.feature_fusion(higher_res_features,
- lower_res_features)
-
- outs = [higher_res_features, lower_res_features, fusion_output]
- outs = [outs[i] for i in self.out_indices]
- return tuple(outs)
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/necks/multilevel_neck.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/necks/multilevel_neck.py
deleted file mode 100644
index 766144d8136326a1fab5906a153a0c0df69b6b60..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/necks/multilevel_neck.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from ..builder import NECKS
-
-
-@NECKS.register_module()
-class MultiLevelNeck(nn.Module):
- """MultiLevelNeck.
-
- A neck structure connect vit backbone and decoder_heads.
- Args:
- in_channels (List[int]): Number of input channels per scale.
- out_channels (int): Number of output channels (used at each scale).
- scales (List[int]): Scale factors for each input feature map.
- norm_cfg (dict): Config dict for normalization layer. Default: None.
- act_cfg (dict): Config dict for activation layer in ConvModule.
- Default: None.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- scales=[0.5, 1, 2, 4],
- norm_cfg=None,
- act_cfg=None):
- super(MultiLevelNeck, self).__init__()
- assert isinstance(in_channels, list)
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.scales = scales
- self.num_outs = len(scales)
- self.lateral_convs = nn.ModuleList()
- self.convs = nn.ModuleList()
- for in_channel in in_channels:
- self.lateral_convs.append(
- ConvModule(
- in_channel,
- out_channels,
- kernel_size=1,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg))
- for _ in range(self.num_outs):
- self.convs.append(
- ConvModule(
- out_channels,
- out_channels,
- kernel_size=3,
- padding=1,
- stride=1,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg))
-
- def forward(self, inputs):
-        assert len(inputs) == len(self.in_channels)
-        inputs = [
- lateral_conv(inputs[i])
- for i, lateral_conv in enumerate(self.lateral_convs)
- ]
- # for len(inputs) not equal to self.num_outs
- if len(inputs) == 1:
- inputs = [inputs[0] for _ in range(self.num_outs)]
- outs = []
- for i in range(self.num_outs):
- x_resize = F.interpolate(
- inputs[i], scale_factor=self.scales[i], mode='bilinear')
- outs.append(self.convs[i](x_resize))
- return tuple(outs)
diff --git a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/models/backbones/resnet.py b/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/models/backbones/resnet.py
deleted file mode 100644
index b47a311a3314a148ebea702e93d19b46befdf7aa..0000000000000000000000000000000000000000
--- a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/models/backbones/resnet.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-from models.arch_util import initialize_weights
-
-
-class ResnetBlock(nn.Module):
- """Define a Resnet block"""
-
- def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
- """Initialize the Resnet block
- A resnet block is a conv block with skip connections
- We construct a conv block with build_conv_block function,
- and implement skip connections in function.
- Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
- """
- super(ResnetBlock, self).__init__()
- self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
-
- def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
- """Construct a convolutional block.
- Parameters:
- dim (int) -- the number of channels in the conv layer.
- padding_type (str) -- the name of padding
- layer: reflect | replicate | zero
- norm_layer -- normalization layer
- use_dropout (bool) -- if use dropout layers.
- use_bias (bool) -- if the conv layer uses bias or not
- Returns a conv block (with a conv layer, a normalization layer,
- and a non-linearity layer (ReLU))
- """
- conv_block = []
- p = 0
- if padding_type == "reflect":
- conv_block += [nn.ReflectionPad2d(1)]
- elif padding_type == "replicate":
- conv_block += [nn.ReplicationPad2d(1)]
- elif padding_type == "zero":
- p = 1
- else:
- raise NotImplementedError(
- f"padding {padding_type} \
- is not implemented"
- )
-
- conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
- if use_dropout:
- conv_block += [nn.Dropout(0.5)]
-
- p = 0
- if padding_type == "reflect":
- conv_block += [nn.ReflectionPad2d(1)]
- elif padding_type == "replicate":
- conv_block += [nn.ReplicationPad2d(1)]
- elif padding_type == "zero":
- p = 1
- else:
- raise NotImplementedError(
- f"padding {padding_type} \
- is not implemented"
- )
- conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
-
- return nn.Sequential(*conv_block)
-
- def forward(self, x):
- """Forward function (with skip connections)"""
- out = x + self.conv_block(x) # add skip connections
- return out
-
-
-class ResidualBlock_noBN(nn.Module):
- """Residual block w/o BN
- ---Conv-ReLU-Conv-+-
- |________________|
- """
-
- def __init__(self, nf=64):
- super(ResidualBlock_noBN, self).__init__()
- self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
- self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
-
- # initialization
- initialize_weights([self.conv1, self.conv2], 0.1)
-
- def forward(self, x):
- identity = x
- out = F.relu(self.conv1(x), inplace=False)
- out = self.conv2(out)
- return identity + out
diff --git a/spaces/PaddlePaddle/pnasnet_imagenet/app.py b/spaces/PaddlePaddle/pnasnet_imagenet/app.py
deleted file mode 100644
index e8fc4eaee45172990a03f3cc165833c03320d6d5..0000000000000000000000000000000000000000
--- a/spaces/PaddlePaddle/pnasnet_imagenet/app.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import gradio as gr
-import paddlehub as hub
-import cv2
-
-classifier = hub.Module(name="pnasnet_imagenet")
-
-def inference(img):
- test_img_path = img
- input_dict = {"image": [test_img_path]}
- result = classifier.classification(data=input_dict)
- print(result)
- return result[0][0]
-
-
-title="pnasnet_imagenet"
-description="PNASNet is an image classification model automatically trained by Google through AutoML. The PaddleHub Module is trained based on the ImageNet-2012 dataset, accepts input images with a size of 224 x 224 x 3, and supports prediction directly through the command line or Python interface."
-
-examples=[['cat2.jpg']]
-gr.Interface(inference,gr.inputs.Image(type="filepath"),"label",title=title,description=description,examples=examples).launch(enable_queue=True,cache_examples=True)
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/scheme/compile-tree-il.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/scheme/compile-tree-il.go
deleted file mode 100644
index 4e87b824ce6eeec57ef419a331975aa98db999de..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/scheme/compile-tree-il.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/memory/local.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/memory/local.py
deleted file mode 100644
index 803b6dc6ebb430285f423cda592fa3e902e9a4a6..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/AutoGPT/autogpt/memory/local.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from __future__ import annotations
-
-import dataclasses
-import os
-from typing import Any, List
-
-import numpy as np
-import orjson
-
-from autogpt.llm_utils import create_embedding_with_ada
-from autogpt.memory.base import MemoryProviderSingleton
-
-EMBED_DIM = 1536
-SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
-
-
-def create_default_embeddings():
- return np.zeros((0, EMBED_DIM)).astype(np.float32)
-
-
-@dataclasses.dataclass
-class CacheContent:
- texts: List[str] = dataclasses.field(default_factory=list)
- embeddings: np.ndarray = dataclasses.field(
- default_factory=create_default_embeddings
- )
-
-
-class LocalCache(MemoryProviderSingleton):
- """A class that stores the memory in a local file"""
-
- def __init__(self, cfg) -> None:
- """Initialize a class instance
-
- Args:
- cfg: Config object
-
- Returns:
- None
- """
- self.filename = f"{cfg.memory_index}.json"
- if os.path.exists(self.filename):
- try:
-                with open(self.filename, "r+b") as f:  # read+write without truncating existing memory
- file_content = f.read()
- if not file_content.strip():
- file_content = b"{}"
- f.write(file_content)
-
- loaded = orjson.loads(file_content)
- self.data = CacheContent(**loaded)
- except orjson.JSONDecodeError:
- print(f"Error: The file '{self.filename}' is not in JSON format.")
- self.data = CacheContent()
- else:
- print(
- f"Warning: The file '{self.filename}' does not exist. "
- "Local memory would not be saved to a file."
- )
- self.data = CacheContent()
-
- def add(self, text: str):
- """
- Add text to our list of texts, add embedding as row to our
- embeddings-matrix
-
- Args:
- text: str
-
- Returns: None
- """
- if "Command Error:" in text:
- return ""
- self.data.texts.append(text)
-
- embedding = create_embedding_with_ada(text)
-
- vector = np.array(embedding).astype(np.float32)
- vector = vector[np.newaxis, :]
- self.data.embeddings = np.concatenate(
- [
- self.data.embeddings,
- vector,
- ],
- axis=0,
- )
-
- with open(self.filename, "wb") as f:
- out = orjson.dumps(self.data, option=SAVE_OPTIONS)
- f.write(out)
- return text
-
- def clear(self) -> str:
- """
-        Clears the local cache.
-
- Returns: A message indicating that the memory has been cleared.
- """
- self.data = CacheContent()
- return "Obliviated"
-
- def get(self, data: str) -> list[Any] | None:
- """
- Gets the data from the memory that is most relevant to the given data.
-
- Args:
- data: The data to compare to.
-
- Returns: The most relevant data.
- """
- return self.get_relevant(data, 1)
-
- def get_relevant(self, text: str, k: int) -> list[Any]:
- """ "
- matrix-vector mult to find score-for-each-row-of-matrix
- get indices for top-k winning scores
- return texts for those indices
- Args:
- text: str
- k: int
-
- Returns: List[str]
- """
- embedding = create_embedding_with_ada(text)
-
- scores = np.dot(self.data.embeddings, embedding)
-
- top_k_indices = np.argsort(scores)[-k:][::-1]
-
- return [self.data.texts[i] for i in top_k_indices]
-
- def get_stats(self) -> tuple[int, tuple[int, ...]]:
- """
- Returns: The stats of the local cache.
- """
- return len(self.data.texts), self.data.embeddings.shape
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/errors.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/errors.py
deleted file mode 100644
index 626254c321fb31033c54fed7ff57a0df5eaaa608..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/errors.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""distutils.errors
-
-Provides exceptions used by the Distutils modules. Note that Distutils
-modules may raise standard exceptions; in particular, SystemExit is
-usually raised for errors that are obviously the end-user's fault
-(eg. bad command-line arguments).
-
-This module is safe to use in "from ... import *" mode; it only exports
-symbols whose names start with "Distutils" and end with "Error"."""
-
-
-class DistutilsError(Exception):
- """The root of all Distutils evil."""
-
- pass
-
-
-class DistutilsModuleError(DistutilsError):
- """Unable to load an expected module, or to find an expected class
- within some module (in particular, command modules and classes)."""
-
- pass
-
-
-class DistutilsClassError(DistutilsError):
- """Some command class (or possibly distribution class, if anyone
- feels a need to subclass Distribution) is found not to be holding
- up its end of the bargain, ie. implementing some part of the
- "command "interface."""
-
- pass
-
-
-class DistutilsGetoptError(DistutilsError):
- """The option table provided to 'fancy_getopt()' is bogus."""
-
- pass
-
-
-class DistutilsArgError(DistutilsError):
- """Raised by fancy_getopt in response to getopt.error -- ie. an
- error in the command line usage."""
-
- pass
-
-
-class DistutilsFileError(DistutilsError):
- """Any problems in the filesystem: expected file not found, etc.
- Typically this is for problems that we detect before OSError
- could be raised."""
-
- pass
-
-
-class DistutilsOptionError(DistutilsError):
- """Syntactic/semantic errors in command options, such as use of
- mutually conflicting options, or inconsistent options,
- badly-spelled values, etc. No distinction is made between option
- values originating in the setup script, the command line, config
- files, or what-have-you -- but if we *know* something originated in
- the setup script, we'll raise DistutilsSetupError instead."""
-
- pass
-
-
-class DistutilsSetupError(DistutilsError):
- """For errors that can be definitely blamed on the setup script,
- such as invalid keyword arguments to 'setup()'."""
-
- pass
-
-
-class DistutilsPlatformError(DistutilsError):
- """We don't know how to do something on the current platform (but
- we do know how to do it on some platform) -- eg. trying to compile
- C files on a platform not supported by a CCompiler subclass."""
-
- pass
-
-
-class DistutilsExecError(DistutilsError):
- """Any problems executing an external program (such as the C
- compiler, when compiling C files)."""
-
- pass
-
-
-class DistutilsInternalError(DistutilsError):
- """Internal inconsistencies or impossibilities (obviously, this
- should never be seen if the code is working!)."""
-
- pass
-
-
-class DistutilsTemplateError(DistutilsError):
- """Syntax error in a file list template."""
-
-
-class DistutilsByteCompileError(DistutilsError):
- """Byte compile error."""
-
-
-# Exception classes used by the CCompiler implementation classes
-class CCompilerError(Exception):
- """Some compile/link operation failed."""
-
-
-class PreprocessError(CCompilerError):
- """Failure to preprocess one or more C/C++ files."""
-
-
-class CompileError(CCompilerError):
- """Failure to compile one or more C/C++ source files."""
-
-
-class LibError(CCompilerError):
- """Failure to create a static library from one or more C/C++ object
- files."""
-
-
-class LinkError(CCompilerError):
- """Failure to link one or more C/C++ object files into an executable
- or shared library file."""
-
-
-class UnknownFileError(CCompilerError):
- """Attempt to process an unknown file type."""
diff --git a/spaces/Realcat/image-matching-webui/hloc/pipelines/Aachen/README.md b/spaces/Realcat/image-matching-webui/hloc/pipelines/Aachen/README.md
deleted file mode 100644
index 1aefdb7ddb3371335ba5a6a354acf3692206ecf7..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/hloc/pipelines/Aachen/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Aachen-Day-Night dataset
-
-## Installation
-
-Download the dataset from [visuallocalization.net](https://www.visuallocalization.net):
-```bash
-export dataset=datasets/aachen
-wget -r -np -nH -R "index.html*,aachen_v1_1.zip" --cut-dirs=4 https://data.ciirc.cvut.cz/public/projects/2020VisualLocalization/Aachen-Day-Night/ -P $dataset
-unzip $dataset/images/database_and_query_images.zip -d $dataset/images
-```
-
-## Pipeline
-
-```bash
-python3 -m hloc.pipelines.Aachen.pipeline
-```
diff --git a/spaces/Realcat/image-matching-webui/third_party/RoRD/demo/register.py b/spaces/Realcat/image-matching-webui/third_party/RoRD/demo/register.py
deleted file mode 100644
index ba626920887639c6c95f869231d8080de64c2ee8..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/RoRD/demo/register.py
+++ /dev/null
@@ -1,265 +0,0 @@
-import numpy as np
-import copy
-import argparse
-import os, sys
-import open3d as o3d
-from sys import argv
-from PIL import Image
-import math
-import cv2
-import torch
-
-sys.path.append("../")
-from lib.extractMatchTop import getPerspKeypoints, getPerspKeypointsEnsemble, siftMatching
-from lib.model_test import D2Net
-
-#### Cuda ####
-use_cuda = torch.cuda.is_available()
-device = torch.device('cuda:0' if use_cuda else 'cpu')
-
-#### Argument Parsing ####
-parser = argparse.ArgumentParser(description='RoRD ICP evaluation')
-
-parser.add_argument(
- '--rgb1', type=str, default = 'rgb/rgb2_1.jpg',
- help='path to the rgb image1'
-)
-parser.add_argument(
- '--rgb2', type=str, default = 'rgb/rgb2_2.jpg',
- help='path to the rgb image2'
-)
-
-parser.add_argument(
- '--depth1', type=str, default = 'depth/depth2_1.png',
- help='path to the depth image1'
-)
-
-parser.add_argument(
- '--depth2', type=str, default = 'depth/depth2_2.png',
- help='path to the depth image2'
-)
-
-parser.add_argument(
- '--model_rord', type=str, default = '../models/rord.pth',
- help='path to the RoRD model for evaluation'
-)
-
-parser.add_argument(
- '--model_d2', type=str,
- help='path to the vanilla D2-Net model for evaluation'
-)
-
-parser.add_argument(
- '--model_ens', action='store_true',
- help='ensemble model of RoRD + D2-Net'
-)
-
-parser.add_argument(
- '--sift', action='store_true',
- help='Sift'
-)
-
-parser.add_argument(
- '--camera_file', type=str, default='../configs/camera.txt',
- help='path to the camera intrinsics file. In order: focal_x, focal_y, center_x, center_y, scaling_factor.'
-)
-
-parser.add_argument(
- '--viz3d', action='store_true',
- help='visualize the pointcloud registrations'
-)
-
-args = parser.parse_args()
-
-if args.model_ens: # Change default paths accordingly for ensemble
- model1_ens = '../../models/rord.pth'
- model2_ens = '../../models/d2net.pth'
-
-def draw_registration_result(source, target, transformation):
- source_temp = copy.deepcopy(source)
- target_temp = copy.deepcopy(target)
- source_temp.transform(transformation)
-
- target_temp += source_temp
- # print("Saved registered PointCloud.")
- # o3d.io.write_point_cloud("registered.pcd", target_temp)
-
- trgSph.append(source_temp); trgSph.append(target_temp)
- axis1 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
- axis2 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
- axis2.transform(transformation)
- trgSph.append(axis1); trgSph.append(axis2)
- print("Showing registered PointCloud.")
- o3d.visualization.draw_geometries(trgSph)
-
-
-def readDepth(depthFile):
- depth = Image.open(depthFile)
- if depth.mode != "I":
- raise Exception("Depth image is not in intensity format")
-
- return np.asarray(depth)
-
-def readCamera(camera):
- with open (camera, "rt") as file:
- contents = file.read().split()
-
- focalX = float(contents[0])
- focalY = float(contents[1])
- centerX = float(contents[2])
- centerY = float(contents[3])
- scalingFactor = float(contents[4])
-
- return focalX, focalY, centerX, centerY, scalingFactor
-
-def getPointCloud(rgbFile, depthFile, pts):
- thresh = 15.0
-
- depth = readDepth(depthFile)
- rgb = Image.open(rgbFile)
-
- points = []
- colors = []
-
- corIdx = [-1]*len(pts)
- corPts = [None]*len(pts)
- ptIdx = 0
-
- for v in range(depth.shape[0]):
- for u in range(depth.shape[1]):
- Z = depth[v, u] / scalingFactor
- if Z==0: continue
- if (Z > thresh): continue
-
- X = (u - centerX) * Z / focalX
- Y = (v - centerY) * Z / focalY
-
- points.append((X, Y, Z))
- colors.append(rgb.getpixel((u, v)))
-
- if((u, v) in pts):
- # print("Point found.")
- index = pts.index((u, v))
- corIdx[index] = ptIdx
- corPts[index] = (X, Y, Z)
-
- ptIdx = ptIdx+1
-
- points = np.asarray(points)
- colors = np.asarray(colors)
-
- pcd = o3d.geometry.PointCloud()
- pcd.points = o3d.utility.Vector3dVector(points)
- pcd.colors = o3d.utility.Vector3dVector(colors/255)
-
- return pcd, corIdx, corPts
-
-
-def convertPts(A):
- X = A[0]; Y = A[1]
-
- x = []; y = []
-
- for i in range(len(X)):
- x.append(int(float(X[i])))
-
- for i in range(len(Y)):
- y.append(int(float(Y[i])))
-
- pts = []
- for i in range(len(x)):
- pts.append((x[i], y[i]))
-
- return pts
-
-
-def getSphere(pts):
- sphs = []
-
- for ele in pts:
- if(ele is not None):
- sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.03)
- sphere.paint_uniform_color([0.9, 0.2, 0])
-
- trans = np.identity(4)
- trans[0, 3] = ele[0]
- trans[1, 3] = ele[1]
- trans[2, 3] = ele[2]
-
- sphere.transform(trans)
- sphs.append(sphere)
-
- return sphs
-
-
-def get3dCor(src, trg):
- corr = []
-
- for sId, tId in zip(src, trg):
- if(sId != -1 and tId != -1):
- corr.append((sId, tId))
-
- corr = np.asarray(corr)
-
- return corr
-
-if __name__ == "__main__":
-
- focalX, focalY, centerX, centerY, scalingFactor = readCamera(args.camera_file)
-
- rgb_name_src = os.path.basename(args.rgb1)
- H_name_src = os.path.splitext(rgb_name_src)[0] + '.npy'
- srcH = os.path.join(os.path.dirname(args.rgb1), H_name_src)
- rgb_name_trg = os.path.basename(args.rgb2)
- H_name_trg = os.path.splitext(rgb_name_trg)[0] + '.npy'
- trgH = os.path.join(os.path.dirname(args.rgb2), H_name_trg)
-
- use_cuda = torch.cuda.is_available()
- device = torch.device('cuda:0' if use_cuda else 'cpu')
- model1 = D2Net(model_file=args.model_d2)
- model1 = model1.to(device)
- model2 = D2Net(model_file=args.model_rord)
- model2 = model2.to(device)
-
- if args.model_rord:
- srcPts, trgPts, matchImg, matchImgOrtho = getPerspKeypoints(args.rgb1, args.rgb2, srcH, trgH, model2, device)
- elif args.model_d2:
- srcPts, trgPts, matchImg, matchImgOrtho = getPerspKeypoints(args.rgb1, args.rgb2, srcH, trgH, model1, device)
- elif args.model_ens:
- model1 = D2Net(model_file=model1_ens)
- model1 = model1.to(device)
- model2 = D2Net(model_file=model2_ens)
- model2 = model2.to(device)
- srcPts, trgPts, matchImg, matchImgOrtho = getPerspKeypointsEnsemble(model1, model2, args.rgb1, args.rgb2, srcH, trgH, device)
- elif args.sift:
- srcPts, trgPts, matchImg, matchImgOrtho = siftMatching(args.rgb1, args.rgb2, srcH, trgH, device)
-
- #### Visualization ####
- print("\nShowing matches in perspective and orthographic view. Press q\n")
- cv2.imshow('Orthographic view', matchImgOrtho)
- cv2.imshow('Perspective view', matchImg)
- cv2.waitKey()
-
- srcPts = convertPts(srcPts)
- trgPts = convertPts(trgPts)
-
- srcCld, srcIdx, srcCor = getPointCloud(args.rgb1, args.depth1, srcPts)
- trgCld, trgIdx, trgCor = getPointCloud(args.rgb2, args.depth2, trgPts)
-
- srcSph = getSphere(srcCor)
- trgSph = getSphere(trgCor)
- axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
- srcSph.append(srcCld); srcSph.append(axis)
- trgSph.append(trgCld); trgSph.append(axis)
-
- corr = get3dCor(srcIdx, trgIdx)
-
- p2p = o3d.registration.TransformationEstimationPointToPoint()
- trans_init = p2p.compute_transformation(srcCld, trgCld, o3d.utility.Vector2iVector(corr))
- print("Transformation matrix: \n", trans_init)
-
- if args.viz3d:
- # o3d.visualization.draw_geometries(srcSph)
- # o3d.visualization.draw_geometries(trgSph)
-
- draw_registration_result(srcCld, trgCld, trans_init)
diff --git a/spaces/Ricecake123/RVC-demo/tools/infer/infer-pm-index256.py b/spaces/Ricecake123/RVC-demo/tools/infer/infer-pm-index256.py
deleted file mode 100644
index ead4dcb56c52a2eb612fd66355b99e535684f2e7..0000000000000000000000000000000000000000
--- a/spaces/Ricecake123/RVC-demo/tools/infer/infer-pm-index256.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""
-
-Perform retrieval on the source features.
-"""
-import torch, pdb, os, parselmouth
-
-os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-import numpy as np
-import soundfile as sf
-
-# from models import SynthesizerTrn256#hifigan_nonsf
-# from lib.infer_pack.models import SynthesizerTrn256NSF as SynthesizerTrn256#hifigan_nsf
-from lib.infer_pack.models import (
- SynthesizerTrnMs256NSFsid as SynthesizerTrn256,
-) # hifigan_nsf
-
-# from lib.infer_pack.models import SynthesizerTrnMs256NSFsid_sim as SynthesizerTrn256#hifigan_nsf
-# from models import SynthesizerTrn256NSFsim as SynthesizerTrn256#hifigan_nsf
-# from models import SynthesizerTrn256NSFsimFlow as SynthesizerTrn256#hifigan_nsf
-
-
-from scipy.io import wavfile
-from fairseq import checkpoint_utils
-
-# import pyworld
-import librosa
-import torch.nn.functional as F
-import scipy.signal as signal
-
-# import torchcrepe
-from time import time as ttime
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model_path = r"E:\codes\py39\vits_vc_gpu_train\hubert_base.pt" #
-print("load model(s) from {}".format(model_path))
-models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
- [model_path],
- suffix="",
-)
-model = models[0]
-model = model.to(device)
-model = model.half()
-model.eval()
-
-# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],183,256,is_half=True)#hifigan#512#256
-# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],109,256,is_half=True)#hifigan#512#256
-net_g = SynthesizerTrn256(
- 1025,
- 32,
- 192,
- 192,
- 768,
- 2,
- 6,
- 3,
- 0,
- "1",
- [3, 7, 11],
- [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- [10, 10, 2, 2],
- 512,
- [16, 16, 4, 4],
- 183,
- 256,
- is_half=True,
-) # hifigan#512#256#no_dropout
-# net_g = SynthesizerTrn256(1025,32,192,192,768,2,3,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],0)#ts3
-# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2],512,[16,16,4],0)#hifigan-ps-sr
-#
-# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [5,5], 512, [15,15], 0)#ms
-# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,10], 512, [16,16], 0)#idwt2
-
-# weights=torch.load("infer/ft-mi_1k-noD.pt")
-# weights=torch.load("infer/ft-mi-freeze-vocoder-flow-enc_q_1k.pt")
-# weights=torch.load("infer/ft-mi-freeze-vocoder_true_1k.pt")
-# weights=torch.load("infer/ft-mi-sim1k.pt")
-weights = torch.load("infer/ft-mi-no_opt-no_dropout.pt")
-print(net_g.load_state_dict(weights, strict=True))
-
-net_g.eval().to(device)
-net_g.half()
-
-
-def get_f0(x, p_len, f0_up_key=0):
- time_step = 160 / 16000 * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
- f0 = (
- parselmouth.Sound(x, 16000)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0 *= pow(2, f0_up_key / 12)
- f0bak = f0.copy()
-
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- # f0_mel[f0_mel > 188] = 188
-    f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed from NumPy; use the builtin int
- return f0_coarse, f0bak
-
-
-import faiss
-
-index = faiss.read_index("infer/added_IVF512_Flat_mi_baseline_src_feat.index")
-big_npy = np.load("infer/big_src_feature_mi.npy")
-ta0 = ta1 = ta2 = 0
-for idx, name in enumerate(
- [
- "冬之花clip1.wav",
- ]
-): ##
- wav_path = "todo-songs/%s" % name #
- f0_up_key = -2 #
- audio, sampling_rate = sf.read(wav_path)
- if len(audio.shape) > 1:
- audio = librosa.to_mono(audio.transpose(1, 0))
- if sampling_rate != 16000:
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-
- feats = torch.from_numpy(audio).float()
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).fill_(False)
- inputs = {
- "source": feats.half().to(device),
- "padding_mask": padding_mask.to(device),
- "output_layer": 9, # layer 9
- }
- if torch.cuda.is_available():
- torch.cuda.synchronize()
- t0 = ttime()
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0])
-
-    #### index-based feature retrieval
- npy = feats[0].cpu().numpy().astype("float32")
- D, I = index.search(npy, 1)
- feats = (
- torch.from_numpy(big_npy[I.squeeze()].astype("float16")).unsqueeze(0).to(device)
- )
-
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- if torch.cuda.is_available():
- torch.cuda.synchronize()
- t1 = ttime()
-    # p_len = min(feats.shape[1],10000,pitch.shape[0])  # too large values blow up GPU memory
- p_len = min(feats.shape[1], 10000) #
- pitch, pitchf = get_f0(audio, p_len, f0_up_key)
-    p_len = min(feats.shape[1], 10000, pitch.shape[0])  # too large values blow up GPU memory
- if torch.cuda.is_available():
- torch.cuda.synchronize()
- t2 = ttime()
- feats = feats[:, :p_len, :]
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- p_len = torch.LongTensor([p_len]).to(device)
- pitch = torch.LongTensor(pitch).unsqueeze(0).to(device)
- sid = torch.LongTensor([0]).to(device)
- pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device)
- with torch.no_grad():
- audio = (
- net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
- .data.cpu()
- .float()
- .numpy()
- ) # nsf
- if torch.cuda.is_available():
- torch.cuda.synchronize()
- t3 = ttime()
- ta0 += t1 - t0
- ta1 += t2 - t1
- ta2 += t3 - t2
- # wavfile.write("ft-mi_1k-index256-noD-%s.wav"%name, 40000, audio)##
- # wavfile.write("ft-mi-freeze-vocoder-flow-enc_q_1k-%s.wav"%name, 40000, audio)##
- # wavfile.write("ft-mi-sim1k-%s.wav"%name, 40000, audio)##
- wavfile.write("ft-mi-no_opt-no_dropout-%s.wav" % name, 40000, audio) ##
-
-
-print(ta0, ta1, ta2) #
diff --git a/spaces/Ripaxxs/Mom/Dockerfile b/spaces/Ripaxxs/Mom/Dockerfile
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/fpn_carafe.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/fpn_carafe.py
deleted file mode 100644
index 302e6576df9914e49166539108d6048b78c1fe71..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/necks/fpn_carafe.py
+++ /dev/null
@@ -1,267 +0,0 @@
-import torch.nn as nn
-from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
-from mmcv.ops.carafe import CARAFEPack
-
-from ..builder import NECKS
-
-
-@NECKS.register_module()
-class FPN_CARAFE(nn.Module):
- """FPN_CARAFE is a more flexible implementation of FPN. It allows more
- choice for upsample methods during the top-down pathway.
-
- It can reproduce the performance of ICCV 2019 paper
- CARAFE: Content-Aware ReAssembly of FEatures
- Please refer to https://arxiv.org/abs/1905.02188 for more details.
-
- Args:
- in_channels (list[int]): Number of channels for each input feature map.
- out_channels (int): Output channels of feature pyramids.
- num_outs (int): Number of output stages.
- start_level (int): Start level of feature pyramids.
- (Default: 0)
- end_level (int): End level of feature pyramids.
- (Default: -1 indicates the last level).
- norm_cfg (dict): Dictionary to construct and config norm layer.
-        act_cfg (dict): Config dict for the activation layer in ConvModule
-            (Default: None indicates w/o activation).
-        order (tuple[str]): Order of components in ConvModule.
- upsample (str): Type of upsample layer.
- upsample_cfg (dict): Dictionary to construct and config upsample layer.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- num_outs,
- start_level=0,
- end_level=-1,
- norm_cfg=None,
- act_cfg=None,
- order=('conv', 'norm', 'act'),
- upsample_cfg=dict(
- type='carafe',
- up_kernel=5,
- up_group=1,
- encoder_kernel=3,
- encoder_dilation=1)):
- super(FPN_CARAFE, self).__init__()
- assert isinstance(in_channels, list)
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.num_ins = len(in_channels)
- self.num_outs = num_outs
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- self.with_bias = norm_cfg is None
- self.upsample_cfg = upsample_cfg.copy()
- self.upsample = self.upsample_cfg.get('type')
- self.relu = nn.ReLU(inplace=False)
-
- self.order = order
- assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
-
- assert self.upsample in [
- 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
- ]
- if self.upsample in ['deconv', 'pixel_shuffle']:
- assert hasattr(
- self.upsample_cfg,
- 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
- self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
-
- if end_level == -1:
- self.backbone_end_level = self.num_ins
- assert num_outs >= self.num_ins - start_level
- else:
- # if end_level < inputs, no extra level is allowed
- self.backbone_end_level = end_level
- assert end_level <= len(in_channels)
- assert num_outs == end_level - start_level
- self.start_level = start_level
- self.end_level = end_level
-
- self.lateral_convs = nn.ModuleList()
- self.fpn_convs = nn.ModuleList()
- self.upsample_modules = nn.ModuleList()
-
- for i in range(self.start_level, self.backbone_end_level):
- l_conv = ConvModule(
- in_channels[i],
- out_channels,
- 1,
- norm_cfg=norm_cfg,
- bias=self.with_bias,
- act_cfg=act_cfg,
- inplace=False,
- order=self.order)
- fpn_conv = ConvModule(
- out_channels,
- out_channels,
- 3,
- padding=1,
- norm_cfg=self.norm_cfg,
- bias=self.with_bias,
- act_cfg=act_cfg,
- inplace=False,
- order=self.order)
- if i != self.backbone_end_level - 1:
- upsample_cfg_ = self.upsample_cfg.copy()
- if self.upsample == 'deconv':
- upsample_cfg_.update(
- in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=self.upsample_kernel,
- stride=2,
- padding=(self.upsample_kernel - 1) // 2,
- output_padding=(self.upsample_kernel - 1) // 2)
- elif self.upsample == 'pixel_shuffle':
- upsample_cfg_.update(
- in_channels=out_channels,
- out_channels=out_channels,
- scale_factor=2,
- upsample_kernel=self.upsample_kernel)
- elif self.upsample == 'carafe':
- upsample_cfg_.update(channels=out_channels, scale_factor=2)
- else:
- # suppress warnings
- align_corners = (None
- if self.upsample == 'nearest' else False)
- upsample_cfg_.update(
- scale_factor=2,
- mode=self.upsample,
- align_corners=align_corners)
- upsample_module = build_upsample_layer(upsample_cfg_)
- self.upsample_modules.append(upsample_module)
- self.lateral_convs.append(l_conv)
- self.fpn_convs.append(fpn_conv)
-
- # add extra conv layers (e.g., RetinaNet)
- extra_out_levels = (
- num_outs - self.backbone_end_level + self.start_level)
- if extra_out_levels >= 1:
- for i in range(extra_out_levels):
- in_channels = (
- self.in_channels[self.backbone_end_level -
- 1] if i == 0 else out_channels)
- extra_l_conv = ConvModule(
- in_channels,
- out_channels,
- 3,
- stride=2,
- padding=1,
- norm_cfg=norm_cfg,
- bias=self.with_bias,
- act_cfg=act_cfg,
- inplace=False,
- order=self.order)
- if self.upsample == 'deconv':
- upsampler_cfg_ = dict(
- in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=self.upsample_kernel,
- stride=2,
- padding=(self.upsample_kernel - 1) // 2,
- output_padding=(self.upsample_kernel - 1) // 2)
- elif self.upsample == 'pixel_shuffle':
- upsampler_cfg_ = dict(
- in_channels=out_channels,
- out_channels=out_channels,
- scale_factor=2,
- upsample_kernel=self.upsample_kernel)
- elif self.upsample == 'carafe':
- upsampler_cfg_ = dict(
- channels=out_channels,
- scale_factor=2,
- **self.upsample_cfg)
- else:
- # suppress warnings
- align_corners = (None
- if self.upsample == 'nearest' else False)
- upsampler_cfg_ = dict(
- scale_factor=2,
- mode=self.upsample,
- align_corners=align_corners)
- upsampler_cfg_['type'] = self.upsample
- upsample_module = build_upsample_layer(upsampler_cfg_)
- extra_fpn_conv = ConvModule(
- out_channels,
- out_channels,
- 3,
- padding=1,
- norm_cfg=self.norm_cfg,
- bias=self.with_bias,
- act_cfg=act_cfg,
- inplace=False,
- order=self.order)
- self.upsample_modules.append(upsample_module)
- self.fpn_convs.append(extra_fpn_conv)
- self.lateral_convs.append(extra_l_conv)
-
- # default init_weights for conv(msra) and norm in ConvModule
- def init_weights(self):
- """Initialize the weights of module."""
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- xavier_init(m, distribution='uniform')
- for m in self.modules():
- if isinstance(m, CARAFEPack):
- m.init_weights()
-
- def slice_as(self, src, dst):
- """Slice ``src`` as ``dst``
-
- Note:
-            ``src`` should have the same size as ``dst`` or be larger.
-
- Args:
- src (torch.Tensor): Tensors to be sliced.
- dst (torch.Tensor): ``src`` will be sliced to have the same
- size as ``dst``.
-
- Returns:
- torch.Tensor: Sliced tensor.
- """
- assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
- if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
- return src
- else:
- return src[:, :, :dst.size(2), :dst.size(3)]
-
- def tensor_add(self, a, b):
- """Add tensors ``a`` and ``b`` that might have different sizes."""
- if a.size() == b.size():
- c = a + b
- else:
- c = a + self.slice_as(b, a)
- return c
-
- def forward(self, inputs):
- """Forward function."""
- assert len(inputs) == len(self.in_channels)
-
- # build laterals
- laterals = []
- for i, lateral_conv in enumerate(self.lateral_convs):
- if i <= self.backbone_end_level - self.start_level:
- input = inputs[min(i + self.start_level, len(inputs) - 1)]
- else:
- input = laterals[-1]
- lateral = lateral_conv(input)
- laterals.append(lateral)
-
- # build top-down path
- for i in range(len(laterals) - 1, 0, -1):
- if self.upsample is not None:
- upsample_feat = self.upsample_modules[i - 1](laterals[i])
- else:
- upsample_feat = laterals[i]
- laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
-
- # build outputs
- num_conv_outs = len(self.fpn_convs)
- outs = []
- for i in range(num_conv_outs):
- out = self.fpn_convs[i](laterals[i])
- outs.append(out)
- return tuple(outs)
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/coco.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/coco.py
deleted file mode 100644
index 65802369de9f82b70e4dcee96c22d6a886120aa1..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/coco.py
+++ /dev/null
@@ -1,546 +0,0 @@
-import itertools
-import logging
-import os.path as osp
-import tempfile
-from collections import OrderedDict
-
-import annotator.uniformer.mmcv as mmcv
-import numpy as np
-import pycocotools
-from annotator.uniformer.mmcv.utils import print_log
-from pycocotools.coco import COCO
-from pycocotools.cocoeval import COCOeval
-from terminaltables import AsciiTable
-
-from annotator.uniformer.mmdet.core import eval_recalls
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class CocoDataset(CustomDataset):
-
- CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
- 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
- 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
- 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
- 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
- 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
- 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
- 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
- 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
- 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
- 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
- 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
- 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
- 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
-
- def load_annotations(self, ann_file):
- """Load annotation from COCO style annotation file.
-
- Args:
- ann_file (str): Path of annotation file.
-
- Returns:
- list[dict]: Annotation info from COCO api.
- """
- if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
- raise AssertionError(
- 'Incompatible version of pycocotools is installed. '
- 'Run pip uninstall pycocotools first. Then run pip '
- 'install mmpycocotools to install open-mmlab forked '
- 'pycocotools.')
-
- self.coco = COCO(ann_file)
- self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
- self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
- self.img_ids = self.coco.get_img_ids()
- data_infos = []
- total_ann_ids = []
- for i in self.img_ids:
- info = self.coco.load_imgs([i])[0]
- info['filename'] = info['file_name']
- data_infos.append(info)
- ann_ids = self.coco.get_ann_ids(img_ids=[i])
- total_ann_ids.extend(ann_ids)
- assert len(set(total_ann_ids)) == len(
- total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
- return data_infos
-
- def get_ann_info(self, idx):
- """Get COCO annotation by index.
-
- Args:
- idx (int): Index of data.
-
- Returns:
- dict: Annotation info of specified index.
- """
-
- img_id = self.data_infos[idx]['id']
- ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
- ann_info = self.coco.load_anns(ann_ids)
- return self._parse_ann_info(self.data_infos[idx], ann_info)
-
- def get_cat_ids(self, idx):
- """Get COCO category ids by index.
-
- Args:
- idx (int): Index of data.
-
- Returns:
- list[int]: All categories in the image of specified index.
- """
-
- img_id = self.data_infos[idx]['id']
- ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
- ann_info = self.coco.load_anns(ann_ids)
- return [ann['category_id'] for ann in ann_info]
-
- def _filter_imgs(self, min_size=32):
- """Filter images too small or without ground truths."""
- valid_inds = []
- # obtain images that contain annotation
- ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
- # obtain images that contain annotations of the required categories
- ids_in_cat = set()
- for i, class_id in enumerate(self.cat_ids):
- ids_in_cat |= set(self.coco.cat_img_map[class_id])
- # merge the image id sets of the two conditions and use the merged set
- # to filter out images if self.filter_empty_gt=True
- ids_in_cat &= ids_with_ann
-
- valid_img_ids = []
- for i, img_info in enumerate(self.data_infos):
- img_id = self.img_ids[i]
- if self.filter_empty_gt and img_id not in ids_in_cat:
- continue
- if min(img_info['width'], img_info['height']) >= min_size:
- valid_inds.append(i)
- valid_img_ids.append(img_id)
- self.img_ids = valid_img_ids
- return valid_inds
-
- def _parse_ann_info(self, img_info, ann_info):
- """Parse bbox and mask annotation.
-
- Args:
-            img_info (dict): Meta information of the image.
-            ann_info (list[dict]): Annotation info of an image.
-
- Returns:
- dict: A dict containing the following keys: bboxes, bboxes_ignore,\
- labels, masks, seg_map. "masks" are raw annotations and not \
- decoded into binary masks.
- """
- gt_bboxes = []
- gt_labels = []
- gt_bboxes_ignore = []
- gt_masks_ann = []
- for i, ann in enumerate(ann_info):
- if ann.get('ignore', False):
- continue
- x1, y1, w, h = ann['bbox']
- inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
- inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
- if inter_w * inter_h == 0:
- continue
- if ann['area'] <= 0 or w < 1 or h < 1:
- continue
- if ann['category_id'] not in self.cat_ids:
- continue
- bbox = [x1, y1, x1 + w, y1 + h]
- if ann.get('iscrowd', False):
- gt_bboxes_ignore.append(bbox)
- else:
- gt_bboxes.append(bbox)
- gt_labels.append(self.cat2label[ann['category_id']])
- gt_masks_ann.append(ann.get('segmentation', None))
-
- if gt_bboxes:
- gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
- gt_labels = np.array(gt_labels, dtype=np.int64)
- else:
- gt_bboxes = np.zeros((0, 4), dtype=np.float32)
- gt_labels = np.array([], dtype=np.int64)
-
- if gt_bboxes_ignore:
- gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
- else:
- gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
-
- seg_map = img_info['filename'].replace('jpg', 'png')
-
- ann = dict(
- bboxes=gt_bboxes,
- labels=gt_labels,
- bboxes_ignore=gt_bboxes_ignore,
- masks=gt_masks_ann,
- seg_map=seg_map)
-
- return ann
-
- def xyxy2xywh(self, bbox):
- """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
- evaluation.
-
- Args:
- bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
- ``xyxy`` order.
-
- Returns:
- list[float]: The converted bounding boxes, in ``xywh`` order.
- """
-
- _bbox = bbox.tolist()
- return [
- _bbox[0],
- _bbox[1],
- _bbox[2] - _bbox[0],
- _bbox[3] - _bbox[1],
- ]
-
- def _proposal2json(self, results):
- """Convert proposal results to COCO json style."""
- json_results = []
- for idx in range(len(self)):
- img_id = self.img_ids[idx]
- bboxes = results[idx]
- for i in range(bboxes.shape[0]):
- data = dict()
- data['image_id'] = img_id
- data['bbox'] = self.xyxy2xywh(bboxes[i])
- data['score'] = float(bboxes[i][4])
- data['category_id'] = 1
- json_results.append(data)
- return json_results
-
- def _det2json(self, results):
- """Convert detection results to COCO json style."""
- json_results = []
- for idx in range(len(self)):
- img_id = self.img_ids[idx]
- result = results[idx]
- for label in range(len(result)):
- bboxes = result[label]
- for i in range(bboxes.shape[0]):
- data = dict()
- data['image_id'] = img_id
- data['bbox'] = self.xyxy2xywh(bboxes[i])
- data['score'] = float(bboxes[i][4])
- data['category_id'] = self.cat_ids[label]
- json_results.append(data)
- return json_results
-
- def _segm2json(self, results):
- """Convert instance segmentation results to COCO json style."""
- bbox_json_results = []
- segm_json_results = []
- for idx in range(len(self)):
- img_id = self.img_ids[idx]
- det, seg = results[idx]
- for label in range(len(det)):
- # bbox results
- bboxes = det[label]
- for i in range(bboxes.shape[0]):
- data = dict()
- data['image_id'] = img_id
- data['bbox'] = self.xyxy2xywh(bboxes[i])
- data['score'] = float(bboxes[i][4])
- data['category_id'] = self.cat_ids[label]
- bbox_json_results.append(data)
-
- # segm results
- # some detectors use different scores for bbox and mask
- if isinstance(seg, tuple):
- segms = seg[0][label]
- mask_score = seg[1][label]
- else:
- segms = seg[label]
- mask_score = [bbox[4] for bbox in bboxes]
- for i in range(bboxes.shape[0]):
- data = dict()
- data['image_id'] = img_id
- data['bbox'] = self.xyxy2xywh(bboxes[i])
- data['score'] = float(mask_score[i])
- data['category_id'] = self.cat_ids[label]
- if isinstance(segms[i]['counts'], bytes):
- segms[i]['counts'] = segms[i]['counts'].decode()
- data['segmentation'] = segms[i]
- segm_json_results.append(data)
- return bbox_json_results, segm_json_results
-
- def results2json(self, results, outfile_prefix):
- """Dump the detection results to a COCO style json file.
-
- There are 3 types of results: proposals, bbox predictions, mask
- predictions, and they have different data types. This method will
- automatically recognize the type, and dump them to json files.
-
- Args:
- results (list[list | tuple | ndarray]): Testing results of the
- dataset.
- outfile_prefix (str): The filename prefix of the json files. If the
- prefix is "somepath/xxx", the json files will be named
- "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
- "somepath/xxx.proposal.json".
-
- Returns:
-            dict[str, str]: Possible keys are "bbox", "segm", "proposal", and \
- values are corresponding filenames.
- """
- result_files = dict()
- if isinstance(results[0], list):
- json_results = self._det2json(results)
- result_files['bbox'] = f'{outfile_prefix}.bbox.json'
- result_files['proposal'] = f'{outfile_prefix}.bbox.json'
- mmcv.dump(json_results, result_files['bbox'])
- elif isinstance(results[0], tuple):
- json_results = self._segm2json(results)
- result_files['bbox'] = f'{outfile_prefix}.bbox.json'
- result_files['proposal'] = f'{outfile_prefix}.bbox.json'
- result_files['segm'] = f'{outfile_prefix}.segm.json'
- mmcv.dump(json_results[0], result_files['bbox'])
- mmcv.dump(json_results[1], result_files['segm'])
- elif isinstance(results[0], np.ndarray):
- json_results = self._proposal2json(results)
- result_files['proposal'] = f'{outfile_prefix}.proposal.json'
- mmcv.dump(json_results, result_files['proposal'])
- else:
- raise TypeError('invalid type of results')
- return result_files
-
- def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
- gt_bboxes = []
- for i in range(len(self.img_ids)):
- ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
- ann_info = self.coco.load_anns(ann_ids)
- if len(ann_info) == 0:
- gt_bboxes.append(np.zeros((0, 4)))
- continue
- bboxes = []
- for ann in ann_info:
- if ann.get('ignore', False) or ann['iscrowd']:
- continue
- x1, y1, w, h = ann['bbox']
- bboxes.append([x1, y1, x1 + w, y1 + h])
- bboxes = np.array(bboxes, dtype=np.float32)
- if bboxes.shape[0] == 0:
- bboxes = np.zeros((0, 4))
- gt_bboxes.append(bboxes)
-
- recalls = eval_recalls(
- gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
- ar = recalls.mean(axis=1)
- return ar
-
- def format_results(self, results, jsonfile_prefix=None, **kwargs):
- """Format the results to json (standard format for COCO evaluation).
-
- Args:
- results (list[tuple | numpy.ndarray]): Testing results of the
- dataset.
- jsonfile_prefix (str | None): The prefix of json files. It includes
- the file path and the prefix of filename, e.g., "a/b/prefix".
- If not specified, a temp file will be created. Default: None.
-
- Returns:
- tuple: (result_files, tmp_dir), result_files is a dict containing \
-                the json filepaths, tmp_dir is the temporary directory created \
- for saving json files when jsonfile_prefix is not specified.
- """
- assert isinstance(results, list), 'results must be a list'
- assert len(results) == len(self), (
- 'The length of results is not equal to the dataset len: {} != {}'.
- format(len(results), len(self)))
-
- if jsonfile_prefix is None:
- tmp_dir = tempfile.TemporaryDirectory()
- jsonfile_prefix = osp.join(tmp_dir.name, 'results')
- else:
- tmp_dir = None
- result_files = self.results2json(results, jsonfile_prefix)
- return result_files, tmp_dir
-
- def evaluate(self,
- results,
- metric='bbox',
- logger=None,
- jsonfile_prefix=None,
- classwise=False,
- proposal_nums=(100, 300, 1000),
- iou_thrs=None,
- metric_items=None):
- """Evaluation in COCO protocol.
-
- Args:
- results (list[list | tuple]): Testing results of the dataset.
- metric (str | list[str]): Metrics to be evaluated. Options are
- 'bbox', 'segm', 'proposal', 'proposal_fast'.
- logger (logging.Logger | str | None): Logger used for printing
- related information during evaluation. Default: None.
- jsonfile_prefix (str | None): The prefix of json files. It includes
- the file path and the prefix of filename, e.g., "a/b/prefix".
- If not specified, a temp file will be created. Default: None.
-            classwise (bool): Whether to evaluate the AP for each class.
- proposal_nums (Sequence[int]): Proposal number used for evaluating
- recalls, such as recall@100, recall@1000.
- Default: (100, 300, 1000).
- iou_thrs (Sequence[float], optional): IoU threshold used for
- evaluating recalls/mAPs. If set to a list, the average of all
- IoUs will also be computed. If not specified, [0.50, 0.55,
- 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
- Default: None.
- metric_items (list[str] | str, optional): Metric items that will
- be returned. If not specified, ``['AR@100', 'AR@300',
- 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
- used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
- 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
- ``metric=='bbox' or metric=='segm'``.
-
- Returns:
- dict[str, float]: COCO style evaluation metric.
- """
-
- metrics = metric if isinstance(metric, list) else [metric]
- allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
- for metric in metrics:
- if metric not in allowed_metrics:
- raise KeyError(f'metric {metric} is not supported')
- if iou_thrs is None:
- iou_thrs = np.linspace(
- .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
- if metric_items is not None:
- if not isinstance(metric_items, list):
- metric_items = [metric_items]
-
- result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
-
- eval_results = OrderedDict()
- cocoGt = self.coco
- for metric in metrics:
- msg = f'Evaluating {metric}...'
- if logger is None:
- msg = '\n' + msg
- print_log(msg, logger=logger)
-
- if metric == 'proposal_fast':
- ar = self.fast_eval_recall(
- results, proposal_nums, iou_thrs, logger='silent')
- log_msg = []
- for i, num in enumerate(proposal_nums):
- eval_results[f'AR@{num}'] = ar[i]
- log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
- log_msg = ''.join(log_msg)
- print_log(log_msg, logger=logger)
- continue
-
- if metric not in result_files:
- raise KeyError(f'{metric} is not in results')
- try:
- cocoDt = cocoGt.loadRes(result_files[metric])
- except IndexError:
- print_log(
-                    'The testing results of the whole dataset are empty.',
- logger=logger,
- level=logging.ERROR)
- break
-
- iou_type = 'bbox' if metric == 'proposal' else metric
- cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
- cocoEval.params.catIds = self.cat_ids
- cocoEval.params.imgIds = self.img_ids
- cocoEval.params.maxDets = list(proposal_nums)
- cocoEval.params.iouThrs = iou_thrs
- # mapping of cocoEval.stats
- coco_metric_names = {
- 'mAP': 0,
- 'mAP_50': 1,
- 'mAP_75': 2,
- 'mAP_s': 3,
- 'mAP_m': 4,
- 'mAP_l': 5,
- 'AR@100': 6,
- 'AR@300': 7,
- 'AR@1000': 8,
- 'AR_s@1000': 9,
- 'AR_m@1000': 10,
- 'AR_l@1000': 11
- }
- if metric_items is not None:
- for metric_item in metric_items:
- if metric_item not in coco_metric_names:
- raise KeyError(
- f'metric item {metric_item} is not supported')
-
- if metric == 'proposal':
- cocoEval.params.useCats = 0
- cocoEval.evaluate()
- cocoEval.accumulate()
- cocoEval.summarize()
- if metric_items is None:
- metric_items = [
- 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
- 'AR_m@1000', 'AR_l@1000'
- ]
-
- for item in metric_items:
- val = float(
- f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
- eval_results[item] = val
- else:
- cocoEval.evaluate()
- cocoEval.accumulate()
- cocoEval.summarize()
- if classwise: # Compute per-category AP
- # Compute per-category AP
- # from https://github.com/facebookresearch/detectron2/
- precisions = cocoEval.eval['precision']
- # precision: (iou, recall, cls, area range, max dets)
- assert len(self.cat_ids) == precisions.shape[2]
-
- results_per_category = []
- for idx, catId in enumerate(self.cat_ids):
- # area range index 0: all area ranges
- # max dets index -1: typically 100 per image
- nm = self.coco.loadCats(catId)[0]
- precision = precisions[:, :, idx, 0, -1]
- precision = precision[precision > -1]
- if precision.size:
- ap = np.mean(precision)
- else:
- ap = float('nan')
- results_per_category.append(
- (f'{nm["name"]}', f'{float(ap):0.3f}'))
-
- num_columns = min(6, len(results_per_category) * 2)
- results_flatten = list(
- itertools.chain(*results_per_category))
- headers = ['category', 'AP'] * (num_columns // 2)
- results_2d = itertools.zip_longest(*[
- results_flatten[i::num_columns]
- for i in range(num_columns)
- ])
- table_data = [headers]
- table_data += [result for result in results_2d]
- table = AsciiTable(table_data)
- print_log('\n' + table.table, logger=logger)
-
- if metric_items is None:
- metric_items = [
- 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
- ]
-
- for metric_item in metric_items:
- key = f'{metric}_{metric_item}'
- val = float(
- f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
- )
- eval_results[key] = val
- ap = cocoEval.stats[:6]
- eval_results[f'{metric}_mAP_copypaste'] = (
- f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
- f'{ap[4]:.3f} {ap[5]:.3f}')
- if tmp_dir is not None:
- tmp_dir.cleanup()
- return eval_results
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/base_runner.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/base_runner.py
deleted file mode 100644
index 4928db0a73b56fe0218a4bf66ec4ffa082d31ccc..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/base_runner.py
+++ /dev/null
@@ -1,542 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import logging
-import os.path as osp
-import warnings
-from abc import ABCMeta, abstractmethod
-
-import torch
-from torch.optim import Optimizer
-
-import annotator.uniformer.mmcv as mmcv
-from ..parallel import is_module_wrapper
-from .checkpoint import load_checkpoint
-from .dist_utils import get_dist_info
-from .hooks import HOOKS, Hook
-from .log_buffer import LogBuffer
-from .priority import Priority, get_priority
-from .utils import get_time_str
-
-
-class BaseRunner(metaclass=ABCMeta):
- """The base class of Runner, a training helper for PyTorch.
-
- All subclasses should implement the following APIs:
-
- - ``run()``
- - ``train()``
- - ``val()``
- - ``save_checkpoint()``
-
- Args:
- model (:obj:`torch.nn.Module`): The model to be run.
-        batch_processor (callable): A callable method that processes a data
- batch. The interface of this method should be
- `batch_processor(model, data, train_mode) -> dict`
- optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
- optimizer (in most cases) or a dict of optimizers (in models that
-            require more than one optimizer, e.g., GAN).
- work_dir (str, optional): The working directory to save checkpoints
- and logs. Defaults to None.
- logger (:obj:`logging.Logger`): Logger used during training.
- Defaults to None. (The default value is just for backward
- compatibility)
-        meta (dict | None): A dict that records some important information such as
- environment info and seed, which will be logged in logger hook.
- Defaults to None.
- max_epochs (int, optional): Total training epochs.
- max_iters (int, optional): Total training iterations.
- """
-
- def __init__(self,
- model,
- batch_processor=None,
- optimizer=None,
- work_dir=None,
- logger=None,
- meta=None,
- max_iters=None,
- max_epochs=None):
- if batch_processor is not None:
- if not callable(batch_processor):
- raise TypeError('batch_processor must be callable, '
- f'but got {type(batch_processor)}')
- warnings.warn('batch_processor is deprecated, please implement '
- 'train_step() and val_step() in the model instead.')
-        # raise an error if `batch_processor` is not None and
- # `model.train_step()` exists.
- if is_module_wrapper(model):
- _model = model.module
- else:
- _model = model
- if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
- raise RuntimeError(
- 'batch_processor and model.train_step()/model.val_step() '
- 'cannot be both available.')
- else:
- assert hasattr(model, 'train_step')
-
- # check the type of `optimizer`
- if isinstance(optimizer, dict):
- for name, optim in optimizer.items():
- if not isinstance(optim, Optimizer):
- raise TypeError(
- f'optimizer must be a dict of torch.optim.Optimizers, '
- f'but optimizer["{name}"] is a {type(optim)}')
- elif not isinstance(optimizer, Optimizer) and optimizer is not None:
- raise TypeError(
- f'optimizer must be a torch.optim.Optimizer object '
- f'or dict or None, but got {type(optimizer)}')
-
- # check the type of `logger`
- if not isinstance(logger, logging.Logger):
- raise TypeError(f'logger must be a logging.Logger object, '
- f'but got {type(logger)}')
-
- # check the type of `meta`
- if meta is not None and not isinstance(meta, dict):
- raise TypeError(
- f'meta must be a dict or None, but got {type(meta)}')
-
- self.model = model
- self.batch_processor = batch_processor
- self.optimizer = optimizer
- self.logger = logger
- self.meta = meta
- # create work_dir
- if mmcv.is_str(work_dir):
- self.work_dir = osp.abspath(work_dir)
- mmcv.mkdir_or_exist(self.work_dir)
- elif work_dir is None:
- self.work_dir = None
- else:
- raise TypeError('"work_dir" must be a str or None')
-
- # get model name from the model class
- if hasattr(self.model, 'module'):
- self._model_name = self.model.module.__class__.__name__
- else:
- self._model_name = self.model.__class__.__name__
-
- self._rank, self._world_size = get_dist_info()
- self.timestamp = get_time_str()
- self.mode = None
- self._hooks = []
- self._epoch = 0
- self._iter = 0
- self._inner_iter = 0
-
- if max_epochs is not None and max_iters is not None:
- raise ValueError(
- 'Only one of `max_epochs` or `max_iters` can be set.')
-
- self._max_epochs = max_epochs
- self._max_iters = max_iters
- # TODO: Redesign LogBuffer, it is not flexible and elegant enough
- self.log_buffer = LogBuffer()
-
- @property
- def model_name(self):
- """str: Name of the model, usually the module class name."""
- return self._model_name
-
- @property
- def rank(self):
- """int: Rank of current process. (distributed training)"""
- return self._rank
-
- @property
- def world_size(self):
- """int: Number of processes participating in the job.
- (distributed training)"""
- return self._world_size
-
- @property
- def hooks(self):
- """list[:obj:`Hook`]: A list of registered hooks."""
- return self._hooks
-
- @property
- def epoch(self):
- """int: Current epoch."""
- return self._epoch
-
- @property
- def iter(self):
- """int: Current iteration."""
- return self._iter
-
- @property
- def inner_iter(self):
- """int: Iteration in an epoch."""
- return self._inner_iter
-
- @property
- def max_epochs(self):
- """int: Maximum training epochs."""
- return self._max_epochs
-
- @property
- def max_iters(self):
- """int: Maximum training iterations."""
- return self._max_iters
-
- @abstractmethod
- def train(self):
- pass
-
- @abstractmethod
- def val(self):
- pass
-
- @abstractmethod
- def run(self, data_loaders, workflow, **kwargs):
- pass
-
- @abstractmethod
- def save_checkpoint(self,
- out_dir,
- filename_tmpl,
- save_optimizer=True,
- meta=None,
- create_symlink=True):
- pass
-
- def current_lr(self):
- """Get current learning rates.
-
- Returns:
- list[float] | dict[str, list[float]]: Current learning rates of all
- param groups. If the runner has a dict of optimizers, this
- method will return a dict.
- """
- if isinstance(self.optimizer, torch.optim.Optimizer):
- lr = [group['lr'] for group in self.optimizer.param_groups]
- elif isinstance(self.optimizer, dict):
- lr = dict()
- for name, optim in self.optimizer.items():
- lr[name] = [group['lr'] for group in optim.param_groups]
- else:
- raise RuntimeError(
- 'lr is not applicable because optimizer does not exist.')
- return lr
-
- def current_momentum(self):
- """Get current momentums.
-
- Returns:
- list[float] | dict[str, list[float]]: Current momentums of all
- param groups. If the runner has a dict of optimizers, this
- method will return a dict.
- """
-
- def _get_momentum(optimizer):
- momentums = []
- for group in optimizer.param_groups:
- if 'momentum' in group.keys():
- momentums.append(group['momentum'])
- elif 'betas' in group.keys():
- momentums.append(group['betas'][0])
- else:
- momentums.append(0)
- return momentums
-
- if self.optimizer is None:
- raise RuntimeError(
- 'momentum is not applicable because optimizer does not exist.')
- elif isinstance(self.optimizer, torch.optim.Optimizer):
- momentums = _get_momentum(self.optimizer)
- elif isinstance(self.optimizer, dict):
- momentums = dict()
- for name, optim in self.optimizer.items():
- momentums[name] = _get_momentum(optim)
- return momentums
-
- def register_hook(self, hook, priority='NORMAL'):
- """Register a hook into the hook list.
-
- The hook will be inserted into a priority queue, with the specified
- priority (See :class:`Priority` for details of priorities).
- For hooks with the same priority, they will be triggered in the same
- order as they are registered.
-
- Args:
- hook (:obj:`Hook`): The hook to be registered.
- priority (int or str or :obj:`Priority`): Hook priority.
- Lower value means higher priority.
- """
- assert isinstance(hook, Hook)
- if hasattr(hook, 'priority'):
- raise ValueError('"priority" is a reserved attribute for hooks')
- priority = get_priority(priority)
- hook.priority = priority
- # insert the hook to a sorted list
- inserted = False
- for i in range(len(self._hooks) - 1, -1, -1):
- if priority >= self._hooks[i].priority:
- self._hooks.insert(i + 1, hook)
- inserted = True
- break
- if not inserted:
- self._hooks.insert(0, hook)
-
- def register_hook_from_cfg(self, hook_cfg):
- """Register a hook from its cfg.
-
- Args:
- hook_cfg (dict): Hook config. It should have at least keys 'type'
- and 'priority' indicating its type and priority.
-
- Notes:
- The specific hook class to register should not use 'type' and
- 'priority' arguments during initialization.
- """
- hook_cfg = hook_cfg.copy()
- priority = hook_cfg.pop('priority', 'NORMAL')
- hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
- self.register_hook(hook, priority=priority)
-
- def call_hook(self, fn_name):
- """Call all hooks.
-
- Args:
- fn_name (str): The function name in each hook to be called, such as
- "before_train_epoch".
- """
- for hook in self._hooks:
- getattr(hook, fn_name)(self)
-
- def get_hook_info(self):
- # Get hooks info in each stage
- stage_hook_map = {stage: [] for stage in Hook.stages}
- for hook in self.hooks:
- try:
- priority = Priority(hook.priority).name
- except ValueError:
- priority = hook.priority
- classname = hook.__class__.__name__
- hook_info = f'({priority:<12}) {classname:<35}'
- for trigger_stage in hook.get_triggered_stages():
- stage_hook_map[trigger_stage].append(hook_info)
-
- stage_hook_infos = []
- for stage in Hook.stages:
- hook_infos = stage_hook_map[stage]
- if len(hook_infos) > 0:
- info = f'{stage}:\n'
- info += '\n'.join(hook_infos)
- info += '\n -------------------- '
- stage_hook_infos.append(info)
- return '\n'.join(stage_hook_infos)
-
- def load_checkpoint(self,
- filename,
- map_location='cpu',
- strict=False,
- revise_keys=[(r'^module.', '')]):
- return load_checkpoint(
- self.model,
- filename,
- map_location,
- strict,
- self.logger,
- revise_keys=revise_keys)
-
- def resume(self,
- checkpoint,
- resume_optimizer=True,
- map_location='default'):
- if map_location == 'default':
- if torch.cuda.is_available():
- device_id = torch.cuda.current_device()
- checkpoint = self.load_checkpoint(
- checkpoint,
- map_location=lambda storage, loc: storage.cuda(device_id))
- else:
- checkpoint = self.load_checkpoint(checkpoint)
- else:
- checkpoint = self.load_checkpoint(
- checkpoint, map_location=map_location)
-
- self._epoch = checkpoint['meta']['epoch']
- self._iter = checkpoint['meta']['iter']
- if self.meta is None:
- self.meta = {}
- self.meta.setdefault('hook_msgs', {})
- # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages
- self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))
-
- # Re-calculate the number of iterations when resuming
- # models with different number of GPUs
- if 'config' in checkpoint['meta']:
- config = mmcv.Config.fromstring(
- checkpoint['meta']['config'], file_format='.py')
- previous_gpu_ids = config.get('gpu_ids', None)
- if previous_gpu_ids and len(previous_gpu_ids) > 0 and len(
- previous_gpu_ids) != self.world_size:
- self._iter = int(self._iter * len(previous_gpu_ids) /
- self.world_size)
- self.logger.info('the iteration number is changed due to '
- 'change of GPU number')
-
- # resume meta information meta
- self.meta = checkpoint['meta']
-
- if 'optimizer' in checkpoint and resume_optimizer:
- if isinstance(self.optimizer, Optimizer):
- self.optimizer.load_state_dict(checkpoint['optimizer'])
- elif isinstance(self.optimizer, dict):
- for k in self.optimizer.keys():
- self.optimizer[k].load_state_dict(
- checkpoint['optimizer'][k])
- else:
- raise TypeError(
- 'Optimizer should be dict or torch.optim.Optimizer '
- f'but got {type(self.optimizer)}')
-
- self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
-
- def register_lr_hook(self, lr_config):
- if lr_config is None:
- return
- elif isinstance(lr_config, dict):
- assert 'policy' in lr_config
- policy_type = lr_config.pop('policy')
- # If the type of policy is all in lower case, e.g., 'cyclic',
- # then its first letter will be capitalized, e.g., to be 'Cyclic'.
- # This is for the convenient usage of Lr updater.
- # Since this is not applicable for
- # `CosineAnnealingLrUpdater`,
- # the string will not be changed if it contains capital letters.
- if policy_type == policy_type.lower():
- policy_type = policy_type.title()
- hook_type = policy_type + 'LrUpdaterHook'
- lr_config['type'] = hook_type
- hook = mmcv.build_from_cfg(lr_config, HOOKS)
- else:
- hook = lr_config
- self.register_hook(hook, priority='VERY_HIGH')
-
- def register_momentum_hook(self, momentum_config):
- if momentum_config is None:
- return
- if isinstance(momentum_config, dict):
- assert 'policy' in momentum_config
- policy_type = momentum_config.pop('policy')
- # If the type of policy is all in lower case, e.g., 'cyclic',
- # then its first letter will be capitalized, e.g., to be 'Cyclic'.
- # This is for the convenient usage of momentum updater.
- # Since this is not applicable for
- # `CosineAnnealingMomentumUpdater`,
- # the string will not be changed if it contains capital letters.
- if policy_type == policy_type.lower():
- policy_type = policy_type.title()
- hook_type = policy_type + 'MomentumUpdaterHook'
- momentum_config['type'] = hook_type
- hook = mmcv.build_from_cfg(momentum_config, HOOKS)
- else:
- hook = momentum_config
- self.register_hook(hook, priority='HIGH')
-
- def register_optimizer_hook(self, optimizer_config):
- if optimizer_config is None:
- return
- if isinstance(optimizer_config, dict):
- optimizer_config.setdefault('type', 'OptimizerHook')
- hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
- else:
- hook = optimizer_config
- self.register_hook(hook, priority='ABOVE_NORMAL')
-
- def register_checkpoint_hook(self, checkpoint_config):
- if checkpoint_config is None:
- return
- if isinstance(checkpoint_config, dict):
- checkpoint_config.setdefault('type', 'CheckpointHook')
- hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
- else:
- hook = checkpoint_config
- self.register_hook(hook, priority='NORMAL')
-
- def register_logger_hooks(self, log_config):
- if log_config is None:
- return
- log_interval = log_config['interval']
- for info in log_config['hooks']:
- logger_hook = mmcv.build_from_cfg(
- info, HOOKS, default_args=dict(interval=log_interval))
- self.register_hook(logger_hook, priority='VERY_LOW')
-
- def register_timer_hook(self, timer_config):
- if timer_config is None:
- return
- if isinstance(timer_config, dict):
- timer_config_ = copy.deepcopy(timer_config)
- hook = mmcv.build_from_cfg(timer_config_, HOOKS)
- else:
- hook = timer_config
- self.register_hook(hook, priority='LOW')
-
- def register_custom_hooks(self, custom_config):
- if custom_config is None:
- return
-
- if not isinstance(custom_config, list):
- custom_config = [custom_config]
-
- for item in custom_config:
- if isinstance(item, dict):
- self.register_hook_from_cfg(item)
- else:
- self.register_hook(item, priority='NORMAL')
-
- def register_profiler_hook(self, profiler_config):
- if profiler_config is None:
- return
- if isinstance(profiler_config, dict):
- profiler_config.setdefault('type', 'ProfilerHook')
- hook = mmcv.build_from_cfg(profiler_config, HOOKS)
- else:
- hook = profiler_config
- self.register_hook(hook)
-
- def register_training_hooks(self,
- lr_config,
- optimizer_config=None,
- checkpoint_config=None,
- log_config=None,
- momentum_config=None,
- timer_config=dict(type='IterTimerHook'),
- custom_hooks_config=None):
- """Register default and custom hooks for training.
-
- Default and custom hooks include:
-
- +----------------------+-------------------------+
- | Hooks | Priority |
- +======================+=========================+
- | LrUpdaterHook | VERY_HIGH (10) |
- +----------------------+-------------------------+
- | MomentumUpdaterHook | HIGH (30) |
- +----------------------+-------------------------+
- | OptimizerStepperHook | ABOVE_NORMAL (40) |
- +----------------------+-------------------------+
- | CheckpointSaverHook | NORMAL (50) |
- +----------------------+-------------------------+
- | IterTimerHook | LOW (70) |
- +----------------------+-------------------------+
- | LoggerHook(s) | VERY_LOW (90) |
- +----------------------+-------------------------+
- | CustomHook(s) | defaults to NORMAL (50) |
- +----------------------+-------------------------+
-
- If custom hooks have the same priority as default hooks, custom hooks
- will be triggered after default hooks.
- """
- self.register_lr_hook(lr_config)
- self.register_momentum_hook(momentum_config)
- self.register_optimizer_hook(optimizer_config)
- self.register_checkpoint_hook(checkpoint_config)
- self.register_timer_hook(timer_config)
- self.register_logger_hooks(log_config)
- self.register_custom_hooks(custom_hooks_config)
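A note on the hook ordering in the runner removed above: `register_hook` scans the list from the back and inserts the new hook after the last entry whose priority value is less than or equal to the new one, so lower numeric values run first and hooks that share a priority keep their registration order (which is why same-priority custom hooks fire after the defaults). Below is a minimal, self-contained sketch of just that insertion rule; `SimpleHook` and the hook names are illustrative stand-ins, not the mmcv API.

```python
# Sketch of the priority-ordered hook list kept by the runner above.
# `SimpleHook` is a hypothetical stand-in, not the real mmcv Hook class.
class SimpleHook:
    def __init__(self, name, priority):
        self.name = name
        self.priority = priority  # lower value == higher priority


def register_hook(hooks, hook):
    """Insert `hook` keeping `hooks` sorted by priority; ties preserve
    registration order (the new hook lands after existing equals)."""
    for i in range(len(hooks) - 1, -1, -1):
        if hook.priority >= hooks[i].priority:
            hooks.insert(i + 1, hook)
            return
    hooks.insert(0, hook)


hooks = []
register_hook(hooks, SimpleHook('LrUpdaterHook', 10))   # VERY_HIGH
register_hook(hooks, SimpleHook('CheckpointHook', 50))  # NORMAL
register_hook(hooks, SimpleHook('CustomHook', 50))      # NORMAL, registered later
register_hook(hooks, SimpleHook('LoggerHook', 90))      # VERY_LOW
print([h.name for h in hooks])
# ['LrUpdaterHook', 'CheckpointHook', 'CustomHook', 'LoggerHook']
```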
diff --git a/spaces/Roboflow/web-demo/index.html b/spaces/Roboflow/web-demo/index.html
deleted file mode 100644
index 9b88d0801c9c9e0a280cb98a39d479a44b4023ab..0000000000000000000000000000000000000000
--- a/spaces/Roboflow/web-demo/index.html
+++ /dev/null
@@ -1,69 +0,0 @@
-
- Roboflow + Hugging Face Example
-
- Roboflow + Hugging Face Example 🚀
- Roboflow enables you to build and run custom computer vision models in your browser, on your device, and via API.
- The below example runs Microsoft's COCO model to identify common objects, which is one of 50,000 open source models ready to use on Roboflow Universe.
- Using the demo below, you can identify 80 different objects using your webcam, from people to chairs to cups.
- Loading...
-
- Build a Custom Model
- Explore 50k+ Models and Datasets
-
\ No newline at end of file
diff --git a/spaces/Rongjiehuang/GenerSpeech/egs/datasets/audio/vctk/pre_align.py b/spaces/Rongjiehuang/GenerSpeech/egs/datasets/audio/vctk/pre_align.py
deleted file mode 100644
index a03b3e12af245fa603403432f4487c53e8b13eab..0000000000000000000000000000000000000000
--- a/spaces/Rongjiehuang/GenerSpeech/egs/datasets/audio/vctk/pre_align.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import os
-
-from data_gen.tts.base_pre_align import BasePreAlign
-import glob
-
-
-class VCTKPreAlign(BasePreAlign):
- def meta_data(self):
- wav_fns = glob.glob(f'{self.raw_data_dir}/wav48/*/*.wav')
- for wav_fn in wav_fns:
- item_name = os.path.basename(wav_fn)[:-4]
- spk = item_name.split("_")[0]
- txt_fn = wav_fn.split("/")
- txt_fn[-1] = f'{item_name}.txt'
- txt_fn[-3] = f'txt'
- txt_fn = "/".join(txt_fn)
- if os.path.exists(txt_fn) and os.path.exists(wav_fn):
- yield item_name, wav_fn, (self.load_txt, txt_fn), spk
-
-
-if __name__ == "__main__":
- VCTKPreAlign().process()
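The path handling in `VCTKPreAlign.meta_data` above just maps `<raw_data_dir>/wav48/<spk>/<item>.wav` to `<raw_data_dir>/txt/<spk>/<item>.txt`. Here is a small pathlib sketch of the same mapping; the example path is hypothetical.

```python
from pathlib import Path


def wav_to_txt(wav_fn: str) -> str:
    """Map a VCTK-style .../wav48/<spk>/<item>.wav path to .../txt/<spk>/<item>.txt."""
    p = Path(wav_fn)
    # p.parents[2] is the directory holding 'wav48'; keep the speaker folder and swap the suffix
    return str(p.parents[2] / 'txt' / p.parent.name / (p.stem + '.txt'))


print(wav_to_txt('/data/VCTK/wav48/p225/p225_001.wav'))
# /data/VCTK/txt/p225/p225_001.txt
```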
diff --git a/spaces/Rongjiehuang/ProDiff/tasks/tts/fs2.py b/spaces/Rongjiehuang/ProDiff/tasks/tts/fs2.py
deleted file mode 100644
index 473c514b523ecbd45acfdecdb33d7b633c59eb6c..0000000000000000000000000000000000000000
--- a/spaces/Rongjiehuang/ProDiff/tasks/tts/fs2.py
+++ /dev/null
@@ -1,292 +0,0 @@
-import matplotlib
-matplotlib.use('Agg')
-
-from tasks.tts.tts_base import TTSBaseTask
-from vocoders.base_vocoder import get_vocoder_cls
-from tasks.tts.dataset_utils import FastSpeechDataset
-from modules.commons.ssim import ssim
-import os
-from modules.fastspeech.tts_modules import mel2ph_to_dur
-from utils.hparams import hparams
-from utils.plot import spec_to_figure, dur_to_figure, f0_to_figure
-from utils.pitch_utils import denorm_f0
-from modules.fastspeech.fs2 import FastSpeech2
-import torch
-import torch.optim
-import torch.utils.data
-import torch.nn.functional as F
-import utils
-import torch.distributions
-import numpy as np
-
-
-class FastSpeech2Task(TTSBaseTask):
- def __init__(self):
- super(FastSpeech2Task, self).__init__()
- self.dataset_cls = FastSpeechDataset
- self.mse_loss_fn = torch.nn.MSELoss()
- mel_losses = hparams['mel_loss'].split("|")
- self.loss_and_lambda = {}
- for i, l in enumerate(mel_losses):
- if l == '':
- continue
- if ':' in l:
- l, lbd = l.split(":")
- lbd = float(lbd)
- else:
- lbd = 1.0
- self.loss_and_lambda[l] = lbd
- print("| Mel losses:", self.loss_and_lambda)
- self.sil_ph = self.phone_encoder.sil_phonemes()
- f0_stats_fn = f'{hparams["binary_data_dir"]}/train_f0s_mean_std.npy'
- if os.path.exists(f0_stats_fn):
- hparams['f0_mean'], hparams['f0_std'] = np.load(f0_stats_fn)
- hparams['f0_mean'] = float(hparams['f0_mean'])
- hparams['f0_std'] = float(hparams['f0_std'])
-
- def build_tts_model(self):
- self.model = FastSpeech2(self.phone_encoder)
-
- def build_model(self):
- self.build_tts_model()
- if hparams['load_ckpt'] != '':
- self.load_ckpt(hparams['load_ckpt'], strict=False)
- utils.print_arch(self.model)
- return self.model
-
- def _training_step(self, sample, batch_idx, _):
- loss_output = self.run_model(self.model, sample)
- total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad])
- loss_output['batch_size'] = sample['txt_tokens'].size()[0]
- return total_loss, loss_output
-
- def validation_step(self, sample, batch_idx):
- outputs = {}
- outputs['losses'] = {}
- outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True)
- outputs['total_loss'] = sum(outputs['losses'].values())
- outputs['nsamples'] = sample['nsamples']
- mel_out = self.model.out2mel(model_out['mel_out'])
- outputs = utils.tensors_to_scalars(outputs)
- if self.global_step % hparams['valid_infer_interval'] == 0 \
- and batch_idx < hparams['num_valid_plots']:
- vmin = hparams['mel_vmin']
- vmax = hparams['mel_vmax']
- self.plot_mel(batch_idx, sample['mels'], mel_out)
- self.plot_dur(batch_idx, sample, model_out)
- if hparams['use_pitch_embed']:
- self.plot_pitch(batch_idx, sample, model_out)
- if self.vocoder is None:
- self.vocoder = get_vocoder_cls(hparams)()
- if self.global_step > 0:
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
- # with gt duration
- model_out = self.model(sample['txt_tokens'], mel2ph=sample['mel2ph'],
- spk_embed=spk_embed, infer=True)
- wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu())
- self.logger.add_audio(f'wav_gtdur_{batch_idx}', wav_pred, self.global_step,
- hparams['audio_sample_rate'])
- self.logger.add_figure(
- f'mel_gtdur_{batch_idx}',
- spec_to_figure(model_out['mel_out'][0], vmin, vmax), self.global_step)
- # with pred duration
- model_out = self.model(sample['txt_tokens'], spk_embed=spk_embed, infer=True)
- self.logger.add_figure(
- f'mel_{batch_idx}',
- spec_to_figure(model_out['mel_out'][0], vmin, vmax), self.global_step)
- wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu())
- self.logger.add_audio(f'wav_{batch_idx}', wav_pred, self.global_step, hparams['audio_sample_rate'])
- # gt wav
- if self.global_step <= hparams['valid_infer_interval']:
- mel_gt = sample['mels'][0].cpu()
- wav_gt = self.vocoder.spec2wav(mel_gt)
- self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, 22050)
- return outputs
-
- def run_model(self, model, sample, return_output=False):
- txt_tokens = sample['txt_tokens'] # [B, T_t]
- target = sample['mels'] # [B, T_s, 80]
- mel2ph = sample['mel2ph'] # [B, T_s]
- f0 = sample['f0']
- uv = sample['uv']
- energy = sample['energy']
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
- output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
- ref_mels=target, f0=f0, uv=uv, energy=energy,
- tgt_mels=target, infer=False)
- losses = {}
- self.add_mel_loss(output['mel_out'], target, losses)
- self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
- if hparams['use_pitch_embed']:
- self.add_pitch_loss(output, sample, losses)
- if not return_output:
- return losses
- else:
- return losses, output
-
- ############
- # losses
- ############
- def add_mel_loss(self, mel_out, target, losses, postfix='', mel_mix_loss=None):
- nonpadding = target.abs().sum(-1).ne(0).float()
- for loss_name, lbd in self.loss_and_lambda.items():
- if 'l1' == loss_name:
- l = self.l1_loss(mel_out, target)
- elif 'mse' == loss_name:
- l = self.mse_loss(mel_out, target)
- elif 'ssim' == loss_name:
- l = self.ssim_loss(mel_out, target)
- elif 'gdl' == loss_name:
- l = self.gdl_loss_fn(mel_out, target, nonpadding) \
- * self.loss_and_lambda['gdl']
- losses[f'{loss_name}{postfix}'] = l * lbd
-
- def l1_loss(self, decoder_output, target):
- # decoder_output : B x T x n_mel
- # target : B x T x n_mel
- l1_loss = F.l1_loss(decoder_output, target, reduction='none')
- weights = self.weights_nonzero_speech(target)
- l1_loss = (l1_loss * weights).sum() / weights.sum()
- return l1_loss
-
- def add_energy_loss(self, energy_pred, energy, losses):
- nonpadding = (energy != 0).float()
- loss = (F.mse_loss(energy_pred, energy, reduction='none') * nonpadding).sum() / nonpadding.sum()
- loss = loss * hparams['lambda_energy']
- losses['e'] = loss
-
- def mse_loss(self, decoder_output, target):
- # decoder_output : B x T x n_mel
- # target : B x T x n_mel
- assert decoder_output.shape == target.shape
- mse_loss = F.mse_loss(decoder_output, target, reduction='none')
- weights = self.weights_nonzero_speech(target)
- mse_loss = (mse_loss * weights).sum() / weights.sum()
- return mse_loss
-
- def ssim_loss(self, decoder_output, target, bias=6.0):
- # decoder_output : B x T x n_mel
- # target : B x T x n_mel
- assert decoder_output.shape == target.shape
- weights = self.weights_nonzero_speech(target)
- decoder_output = decoder_output[:, None] + bias
- target = target[:, None] + bias
- ssim_loss = 1 - ssim(decoder_output, target, size_average=False)
- ssim_loss = (ssim_loss * weights).sum() / weights.sum()
- return ssim_loss
-
- def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, losses=None):
- """
-
- :param dur_pred: [B, T], float, log scale
- :param mel2ph: [B, T]
- :param txt_tokens: [B, T]
- :param losses:
- :return:
- """
- B, T = txt_tokens.shape
- nonpadding = (txt_tokens != 0).float()
- dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding
- is_sil = torch.zeros_like(txt_tokens).bool()
- for p in self.sil_ph:
- is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0])
- is_sil = is_sil.float() # [B, T_txt]
- losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none')
- losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
- losses['pdur'] = losses['pdur'] * hparams['lambda_ph_dur']
- dur_pred = (dur_pred.exp() - 1).clamp(min=0)
- # use linear scale for sent and word duration
- if hparams['lambda_word_dur'] > 0:
- word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long()
- word_dur_p = dur_pred.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_pred)[:, 1:]
- word_dur_g = dur_gt.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_gt)[:, 1:]
- wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
- word_nonpadding = (word_dur_g > 0).float()
- wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
- losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
- if hparams['lambda_sent_dur'] > 0:
- sent_dur_p = dur_pred.sum(-1)
- sent_dur_g = dur_gt.sum(-1)
- sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
- losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
-
- def add_pitch_loss(self, output, sample, losses):
- mel2ph = sample['mel2ph'] # [B, T_s]
- f0 = sample['f0']
- uv = sample['uv']
- nonpadding = (mel2ph != 0).float() if hparams['pitch_type'] == 'frame' \
- else (sample['txt_tokens'] != 0).float()
- self.add_f0_loss(output['pitch_pred'], f0, uv, losses, nonpadding=nonpadding) # output['pitch_pred']: [B, T, 2], f0: [B, T], uv: [B, T]
-
- def add_f0_loss(self, p_pred, f0, uv, losses, nonpadding, postfix=''):
- assert p_pred[..., 0].shape == f0.shape
- if hparams['use_uv'] and hparams['pitch_type'] == 'frame':
- assert p_pred[..., 1].shape == uv.shape, (p_pred.shape, uv.shape)
- losses[f'uv{postfix}'] = (F.binary_cross_entropy_with_logits(
- p_pred[:, :, 1], uv, reduction='none') * nonpadding).sum() \
- / nonpadding.sum() * hparams['lambda_uv']
- nonpadding = nonpadding * (uv == 0).float()
- f0_pred = p_pred[:, :, 0]
- pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss
- losses[f'f0{postfix}'] = (pitch_loss_fn(f0_pred, f0, reduction='none') * nonpadding).sum() \
- / nonpadding.sum() * hparams['lambda_f0']
-
-
- ############
- # validation plots
- ############
- def plot_dur(self, batch_idx, sample, model_out):
- T_txt = sample['txt_tokens'].shape[1]
- dur_gt = mel2ph_to_dur(sample['mel2ph'], T_txt)[0]
- dur_pred = model_out['dur']
- if hasattr(self.model, 'out2dur'):
- dur_pred = self.model.out2dur(model_out['dur']).float()
- txt = self.phone_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
- txt = txt.split(" ")
- self.logger.add_figure(
- f'dur_{batch_idx}', dur_to_figure(dur_gt, dur_pred, txt), self.global_step)
-
- def plot_pitch(self, batch_idx, sample, model_out):
- self.logger.add_figure(
- f'f0_{batch_idx}',
- f0_to_figure(model_out['f0_denorm'][0], None, model_out['f0_denorm_pred'][0]),
- self.global_step)
-
- ############
- # inference
- ############
- def test_step(self, sample, batch_idx):
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
- txt_tokens = sample['txt_tokens']
- mel2ph, uv, f0 = None, None, None
- ref_mels = sample['mels']
- if hparams['use_gt_dur']:
- mel2ph = sample['mel2ph']
- if hparams['use_gt_f0']:
- f0 = sample['f0']
- uv = sample['uv']
- run_model = lambda: self.model(
- txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=ref_mels, infer=True)
- if hparams['profile_infer']:
- mel2ph, uv, f0 = sample['mel2ph'], sample['uv'], sample['f0']
- with utils.Timer('fs', enable=True):
- outputs = run_model()
- if 'gen_wav_time' not in self.stats:
- self.stats['gen_wav_time'] = 0
- wav_time = float(outputs["mels_out"].shape[1]) * hparams['hop_size'] / hparams["audio_sample_rate"]
- self.stats['gen_wav_time'] += wav_time
- print(f'[Timer] wav total seconds: {self.stats["gen_wav_time"]}')
- from pytorch_memlab import LineProfiler
- with LineProfiler(self.model.forward) as prof:
- run_model()
- prof.print_stats()
- else:
- outputs = run_model()
- sample['outputs'] = self.model.out2mel(outputs['mel_out'])
- sample['mel2ph_pred'] = outputs['mel2ph']
- if hparams['use_pitch_embed']:
- sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams)
- if hparams['pitch_type'] == 'ph':
- sample['f0'] = torch.gather(F.pad(sample['f0'], [1, 0]), 1, sample['mel2ph'])
- sample['f0_pred'] = outputs.get('f0_denorm')
- return self.after_infer(sample)
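All of the mel losses in the deleted `FastSpeech2Task` above rely on `weights_nonzero_speech` (defined in the shared TTS base task, not shown in this diff) to ignore padded frames. A minimal stand-in that captures the idea — treat a frame as padding when its whole mel vector is zero and average the per-element loss only over real frames — could look like this; the helper body and shapes are assumptions for illustration.

```python
import torch
import torch.nn.functional as F


def weights_nonzero_speech(target: torch.Tensor) -> torch.Tensor:
    """[B, T, n_mel] -> [B, T, n_mel] mask that is 1 on frames whose mel vector is non-zero."""
    n_mel = target.size(-1)
    return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, n_mel)


def masked_l1(decoder_output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """L1 loss averaged over non-padded frames only."""
    l1 = F.l1_loss(decoder_output, target, reduction='none')
    weights = weights_nonzero_speech(target)
    return (l1 * weights).sum() / weights.sum()


# toy check: the all-zero (padded) second half of the target does not contribute to the loss
pred = torch.randn(2, 10, 80)
tgt = torch.randn(2, 10, 80)
tgt[:, 5:] = 0.0
print(masked_l1(pred, tgt).item())
```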
diff --git a/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/archs/fcn_arch.py b/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/archs/fcn_arch.py
deleted file mode 100644
index a8bb7c1b9fc66379e5a32ac02a24de63fe6953e7..0000000000000000000000000000000000000000
--- a/spaces/SIGGRAPH2022/Text2Human/Text2Human/models/archs/fcn_arch.py
+++ /dev/null
@@ -1,418 +0,0 @@
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule, normal_init
-from mmseg.ops import resize
-
-
-class BaseDecodeHead(nn.Module):
- """Base class for BaseDecodeHead.
-
- Args:
- in_channels (int|Sequence[int]): Input channels.
- channels (int): Channels after modules, before conv_seg.
- num_classes (int): Number of classes.
- dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
- conv_cfg (dict|None): Config of conv layers. Default: None.
- norm_cfg (dict|None): Config of norm layers. Default: None.
- act_cfg (dict): Config of activation layers.
- Default: dict(type='ReLU')
- in_index (int|Sequence[int]): Input feature index. Default: -1
- input_transform (str|None): Transformation type of input features.
- Options: 'resize_concat', 'multiple_select', None.
- 'resize_concat': Multiple feature maps will be resized to the
- same size as the first one and then concatenated together.
- Usually used in the FCN head of HRNet.
- 'multiple_select': Multiple feature maps will be bundled into
- a list and passed into the decode head.
- None: Only one selected feature map is allowed.
- Default: None.
- loss_decode (dict): Config of decode loss.
- Default: dict(type='CrossEntropyLoss').
- ignore_index (int | None): The label index to be ignored. When using
- masked BCE loss, ignore_index should be set to None. Default: 255
- sampler (dict|None): The config of segmentation map sampler.
- Default: None.
- align_corners (bool): align_corners argument of F.interpolate.
- Default: False.
- """
-
- def __init__(self,
- in_channels,
- channels,
- *,
- num_classes,
- dropout_ratio=0.1,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- in_index=-1,
- input_transform=None,
- ignore_index=255,
- align_corners=False):
- super(BaseDecodeHead, self).__init__()
- self._init_inputs(in_channels, in_index, input_transform)
- self.channels = channels
- self.num_classes = num_classes
- self.dropout_ratio = dropout_ratio
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- self.in_index = in_index
-
- self.ignore_index = ignore_index
- self.align_corners = align_corners
-
- self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
- if dropout_ratio > 0:
- self.dropout = nn.Dropout2d(dropout_ratio)
- else:
- self.dropout = None
-
- def extra_repr(self):
- """Extra repr."""
- s = f'input_transform={self.input_transform}, ' \
- f'ignore_index={self.ignore_index}, ' \
- f'align_corners={self.align_corners}'
- return s
-
- def _init_inputs(self, in_channels, in_index, input_transform):
- """Check and initialize input transforms.
-
- The in_channels, in_index and input_transform must match.
- Specifically, when input_transform is None, only a single feature map
- will be selected, so in_channels and in_index must be of type int.
- When input_transform is not None, in_channels and in_index must be
- sequences (list or tuple) of the same length.
-
- Args:
- in_channels (int|Sequence[int]): Input channels.
- in_index (int|Sequence[int]): Input feature index.
- input_transform (str|None): Transformation type of input features.
- Options: 'resize_concat', 'multiple_select', None.
- 'resize_concat': Multiple feature maps will be resized to the
- same size as the first one and then concatenated together.
- Usually used in the FCN head of HRNet.
- 'multiple_select': Multiple feature maps will be bundled into
- a list and passed into the decode head.
- None: Only one selected feature map is allowed.
- """
-
- if input_transform is not None:
- assert input_transform in ['resize_concat', 'multiple_select']
- self.input_transform = input_transform
- self.in_index = in_index
- if input_transform is not None:
- assert isinstance(in_channels, (list, tuple))
- assert isinstance(in_index, (list, tuple))
- assert len(in_channels) == len(in_index)
- if input_transform == 'resize_concat':
- self.in_channels = sum(in_channels)
- else:
- self.in_channels = in_channels
- else:
- assert isinstance(in_channels, int)
- assert isinstance(in_index, int)
- self.in_channels = in_channels
-
- def init_weights(self):
- """Initialize weights of classification layer."""
- normal_init(self.conv_seg, mean=0, std=0.01)
-
- def _transform_inputs(self, inputs):
- """Transform inputs for decoder.
-
- Args:
- inputs (list[Tensor]): List of multi-level img features.
-
- Returns:
- Tensor: The transformed inputs
- """
-
- if self.input_transform == 'resize_concat':
- inputs = [inputs[i] for i in self.in_index]
- upsampled_inputs = [
- resize(
- input=x,
- size=inputs[0].shape[2:],
- mode='bilinear',
- align_corners=self.align_corners) for x in inputs
- ]
- inputs = torch.cat(upsampled_inputs, dim=1)
- elif self.input_transform == 'multiple_select':
- inputs = [inputs[i] for i in self.in_index]
- else:
- inputs = inputs[self.in_index]
-
- return inputs
-
- def forward(self, inputs):
- """Placeholder of forward function."""
- pass
-
- def cls_seg(self, feat):
- """Classify each pixel."""
- if self.dropout is not None:
- feat = self.dropout(feat)
- output = self.conv_seg(feat)
- return output
-
-
-class FCNHead(BaseDecodeHead):
- """Fully Convolution Networks for Semantic Segmentation.
-
- This head is an implementation of `FCNNet <https://arxiv.org/abs/1411.4038>`_.
-
- Args:
- num_convs (int): Number of convs in the head. Default: 2.
- kernel_size (int): The kernel size for convs in the head. Default: 3.
- concat_input (bool): Whether concat the input and output of convs
- before classification layer.
- """
-
- def __init__(self,
- num_convs=2,
- kernel_size=3,
- concat_input=True,
- **kwargs):
- assert num_convs >= 0
- self.num_convs = num_convs
- self.concat_input = concat_input
- self.kernel_size = kernel_size
- super(FCNHead, self).__init__(**kwargs)
- if num_convs == 0:
- assert self.in_channels == self.channels
-
- convs = []
- convs.append(
- ConvModule(
- self.in_channels,
- self.channels,
- kernel_size=kernel_size,
- padding=kernel_size // 2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- for i in range(num_convs - 1):
- convs.append(
- ConvModule(
- self.channels,
- self.channels,
- kernel_size=kernel_size,
- padding=kernel_size // 2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- if num_convs == 0:
- self.convs = nn.Identity()
- else:
- self.convs = nn.Sequential(*convs)
- if self.concat_input:
- self.conv_cat = ConvModule(
- self.in_channels + self.channels,
- self.channels,
- kernel_size=kernel_size,
- padding=kernel_size // 2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- output = self.convs(x)
- if self.concat_input:
- output = self.conv_cat(torch.cat([x, output], dim=1))
- output = self.cls_seg(output)
- return output
-
-
-class MultiHeadFCNHead(nn.Module):
- """Fully Convolution Networks for Semantic Segmentation.
-
- This head is an implementation of `FCNNet <https://arxiv.org/abs/1411.4038>`_.
-
- Args:
- num_convs (int): Number of convs in the head. Default: 2.
- kernel_size (int): The kernel size for convs in the head. Default: 3.
- concat_input (bool): Whether concat the input and output of convs
- before classification layer.
- """
-
- def __init__(self,
- in_channels,
- channels,
- *,
- num_classes,
- dropout_ratio=0.1,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'),
- in_index=-1,
- input_transform=None,
- ignore_index=255,
- align_corners=False,
- num_convs=2,
- kernel_size=3,
- concat_input=True,
- num_head=18,
- **kwargs):
- super(MultiHeadFCNHead, self).__init__()
- assert num_convs >= 0
- self.num_convs = num_convs
- self.concat_input = concat_input
- self.kernel_size = kernel_size
- self._init_inputs(in_channels, in_index, input_transform)
- self.channels = channels
- self.num_classes = num_classes
- self.dropout_ratio = dropout_ratio
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- self.in_index = in_index
- self.num_head = num_head
-
- self.ignore_index = ignore_index
- self.align_corners = align_corners
-
- if dropout_ratio > 0:
- self.dropout = nn.Dropout2d(dropout_ratio)
-
- conv_seg_head_list = []
- for _ in range(self.num_head):
- conv_seg_head_list.append(
- nn.Conv2d(channels, num_classes, kernel_size=1))
-
- self.conv_seg_head_list = nn.ModuleList(conv_seg_head_list)
-
- self.init_weights()
-
- if num_convs == 0:
- assert self.in_channels == self.channels
-
- convs_list = []
- conv_cat_list = []
-
- for _ in range(self.num_head):
- convs = []
- convs.append(
- ConvModule(
- self.in_channels,
- self.channels,
- kernel_size=kernel_size,
- padding=kernel_size // 2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- for _ in range(num_convs - 1):
- convs.append(
- ConvModule(
- self.channels,
- self.channels,
- kernel_size=kernel_size,
- padding=kernel_size // 2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- if num_convs == 0:
- convs_list.append(nn.Identity())
- else:
- convs_list.append(nn.Sequential(*convs))
- if self.concat_input:
- conv_cat_list.append(
- ConvModule(
- self.in_channels + self.channels,
- self.channels,
- kernel_size=kernel_size,
- padding=kernel_size // 2,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
-
- self.convs_list = nn.ModuleList(convs_list)
- self.conv_cat_list = nn.ModuleList(conv_cat_list)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
-
- output_list = []
- for head_idx in range(self.num_head):
- output = self.convs_list[head_idx](x)
- if self.concat_input:
- output = self.conv_cat_list[head_idx](
- torch.cat([x, output], dim=1))
- if self.dropout is not None:
- output = self.dropout(output)
- output = self.conv_seg_head_list[head_idx](output)
- output_list.append(output)
-
- return output_list
-
- def _init_inputs(self, in_channels, in_index, input_transform):
- """Check and initialize input transforms.
-
- The in_channels, in_index and input_transform must match.
- Specifically, when input_transform is None, only a single feature map
- will be selected, so in_channels and in_index must be of type int.
- When input_transform is not None, in_channels and in_index must be
- sequences (list or tuple) of the same length.
-
- Args:
- in_channels (int|Sequence[int]): Input channels.
- in_index (int|Sequence[int]): Input feature index.
- input_transform (str|None): Transformation type of input features.
- Options: 'resize_concat', 'multiple_select', None.
- 'resize_concat': Multiple feature maps will be resized to the
- same size as the first one and then concatenated together.
- Usually used in the FCN head of HRNet.
- 'multiple_select': Multiple feature maps will be bundled into
- a list and passed into the decode head.
- None: Only one selected feature map is allowed.
- """
-
- if input_transform is not None:
- assert input_transform in ['resize_concat', 'multiple_select']
- self.input_transform = input_transform
- self.in_index = in_index
- if input_transform is not None:
- assert isinstance(in_channels, (list, tuple))
- assert isinstance(in_index, (list, tuple))
- assert len(in_channels) == len(in_index)
- if input_transform == 'resize_concat':
- self.in_channels = sum(in_channels)
- else:
- self.in_channels = in_channels
- else:
- assert isinstance(in_channels, int)
- assert isinstance(in_index, int)
- self.in_channels = in_channels
-
- def init_weights(self):
- """Initialize weights of classification layer."""
- for conv_seg_head in self.conv_seg_head_list:
- normal_init(conv_seg_head, mean=0, std=0.01)
-
- def _transform_inputs(self, inputs):
- """Transform inputs for decoder.
-
- Args:
- inputs (list[Tensor]): List of multi-level img features.
-
- Returns:
- Tensor: The transformed inputs
- """
-
- if self.input_transform == 'resize_concat':
- inputs = [inputs[i] for i in self.in_index]
- upsampled_inputs = [
- resize(
- input=x,
- size=inputs[0].shape[2:],
- mode='bilinear',
- align_corners=self.align_corners) for x in inputs
- ]
- inputs = torch.cat(upsampled_inputs, dim=1)
- elif self.input_transform == 'multiple_select':
- inputs = [inputs[i] for i in self.in_index]
- else:
- inputs = inputs[self.in_index]
-
- return inputs
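In the decode heads above, `_transform_inputs` with `input_transform='resize_concat'` simply upsamples every selected feature map to the spatial size of the first one (bilinear, honoring `align_corners`) and concatenates along the channel dimension. A short torch-only sketch of that behaviour follows; the feature shapes are illustrative.

```python
import torch
import torch.nn.functional as F


def resize_concat(feats, in_index, align_corners=False):
    """Resize the selected feature maps to the size of the first one and concat on channels."""
    feats = [feats[i] for i in in_index]
    target_size = feats[0].shape[2:]
    upsampled = [
        F.interpolate(x, size=target_size, mode='bilinear', align_corners=align_corners)
        for x in feats
    ]
    return torch.cat(upsampled, dim=1)


# e.g. HRNet-style multi-resolution features
feats = [torch.randn(1, 18, 64, 64), torch.randn(1, 36, 32, 32), torch.randn(1, 72, 16, 16)]
out = resize_concat(feats, in_index=(0, 1, 2))
print(out.shape)  # torch.Size([1, 126, 64, 64])
```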
diff --git a/spaces/Sacso/FlowerDi/app.py b/spaces/Sacso/FlowerDi/app.py
deleted file mode 100644
index 7b0b10379b315d319921da1edbe887397c29ff5a..0000000000000000000000000000000000000000
--- a/spaces/Sacso/FlowerDi/app.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import io
-import os
-import warnings
-
-from PIL import Image
-from stability_sdk import client
-import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
-
-import gradio as gr
-stability_api = client.StabilityInference(
- key=os.environ["Secret"],
- verbose=True,
-)
-
-
-def infer(prompt):
- # the object returned is a python generator
- answers = stability_api.generate(
- prompt=f"Beautiful Portait of a {prompt} made out of flowers 💐 🌺 🌸 , artstation winner by Victo Ngai, Kilian Eng, vibrant colors, winning-award masterpiece, aesthetic octane render, 8K HD",
- height =640
- )
-
- # iterating over the generator produces the api response
- for resp in answers:
- for artifact in resp.artifacts:
- if artifact.finish_reason == generation.FILTER:
- warnings.warn(
- "Your request activated the API's safety filters and could not be processed."
- "Please modify the prompt and try again.")
- if artifact.type == generation.ARTIFACT_IMAGE:
- img = Image.open(io.BytesIO(artifact.binary))
- return img
-
-
-block = gr.Blocks(css=".container { max-width: 600px; margin: auto; }")
-
-num_samples = 1
-
-
-
-with block as demo:
- gr.Markdown("
Flower Diffusion
")
- gr.Markdown(
- "Get a pretty flowery image from any prompt - keep it simple!"
- )
- with gr.Group():
- with gr.Box():
- with gr.Row().style(mobile_collapse=False, equal_height=True):
-
- text = gr.Textbox(
- value = "Kitty cat",
- label="Enter your prompt", show_label=False, max_lines=1
- ).style(
- border=(True, False, True, True),
- rounded=(True, False, False, True),
- container=False,
- )
- btn = gr.Button("Run").style(
- margin=False,
- rounded=(False, True, True, False),
- )
-
-
- gallery = gr.Image()
- text.submit(infer, inputs=[text], outputs=gallery)
- btn.click(infer, inputs=[text], outputs=gallery)
-
-
-
-
-
-demo.launch(debug=True, enable_queue = True)
\ No newline at end of file
diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/demo.py b/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/demo.py
deleted file mode 100644
index 03e931a19a7b3b4bf53f433c76fb578a1c343f04..0000000000000000000000000000000000000000
--- a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/demo.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# import os
-#
-# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
-# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
-#
-# import numpy as np
-# from tqdm import tqdm
-#
-# from SPPE.src.main_fast_inference import *
-# from dataloader import ImageLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
-# from fn import getTime
-# from opt import opt
-# from pPose_nms import write_json
-# from in_the_wild_data import split_frame
-#
-#
-# def main(args):
-# inputpath = args.inputpath
-# inputlist = args.inputlist
-# mode = args.mode
-# if not os.path.exists(args.outputpath):
-# os.makedirs(args.outputpath, exist_ok=True)
-#
-# if len(inputlist):
-# im_names = open(inputlist, 'r').readlines()
-# elif len(inputpath) and inputpath != '/':
-# for root, dirs, files in os.walk(inputpath):
-# im_names = [f for f in files if 'png' in f or 'jpg' in f]
-# else:
-# raise IOError('Error: must contain either --indir/--list')
-#
-# # Load input images
-# data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()
-#
-# # Load detection loader
-# print('Loading YOLO model..')
-# sys.stdout.flush()
-# det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
-# det_processor = DetectionProcessor(det_loader).start()
-#
-# # Load pose model
-# pose_dataset = Mscoco()
-# if args.fast_inference:
-# pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
-# else:
-# pose_model = InferenNet(4 * 1 + 1, pose_dataset)
-# pose_model
-# pose_model.eval()
-#
-# runtime_profile = {
-# 'dt': [],
-# 'pt': [],
-# 'pn': []
-# }
-#
-# # Init data writer
-# writer = DataWriter(args.save_video).start()
-#
-# data_len = data_loader.length()
-# im_names_desc = tqdm(range(data_len))
-#
-# batchSize = args.posebatch
-# for i in im_names_desc:
-# start_time = getTime()
-# with torch.no_grad():
-# (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
-# if boxes is None or boxes.nelement() == 0:
-# writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
-# continue
-#
-# ckpt_time, det_time = getTime(start_time)
-# runtime_profile['dt'].append(det_time)
-# # Pose Estimation
-#
-# datalen = inps.size(0)
-# leftover = 0
-# if (datalen) % batchSize:
-# leftover = 1
-# num_batches = datalen // batchSize + leftover
-# hm = []
-# for j in range(num_batches):
-# inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)]
-# hm_j = pose_model(inps_j)
-# hm.append(hm_j)
-# hm = torch.cat(hm)
-# ckpt_time, pose_time = getTime(ckpt_time)
-# runtime_profile['pt'].append(pose_time)
-# hm = hm.cpu()
-# writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
-#
-# ckpt_time, post_time = getTime(ckpt_time)
-# runtime_profile['pn'].append(post_time)
-#
-# if args.profile:
-# # TQDM
-# im_names_desc.set_description(
-# 'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
-# dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
-# )
-#
-# print('===========================> Finish Model Running.')
-# if (args.save_img or args.save_video) and not args.vis_fast:
-# print('===========================> Rendering remaining images in the queue...')
-# print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
-# while writer.running():
-# pass
-# writer.stop()
-# final_result = writer.results()
-# write_json(final_result, args.outputpath)
-#
-#
-# if __name__ == "__main__":
-# args = opt
-# args.dataset = 'coco'
-# args.sp = True
-# if not args.sp:
-# torch.multiprocessing.set_start_method('forkserver', force=True)
-# torch.multiprocessing.set_sharing_strategy('file_system')
-#
-# video_name = 'kobe'
-#
-# args.inputpath = f'../in_the_wild_data/split_{video_name}'
-# if not os.listdir(args.inputpath):
-# split_frame.split(f'../in_the_wild_data/{video_name}.mp4')
-#
-# args.outputpath = f'../in_the_wild_data/alphapose_{video_name}'
-# args.save_img = True
-#
-# args.detbatch = 4
-#
-# main(args)
diff --git a/spaces/Sapphire-356/Video2MC/model/stmo_pretrain.py b/spaces/Sapphire-356/Video2MC/model/stmo_pretrain.py
deleted file mode 100644
index fc19d8ea5de3e44b841890596cd4e93062a8deb2..0000000000000000000000000000000000000000
--- a/spaces/Sapphire-356/Video2MC/model/stmo_pretrain.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import torch
-import torch.nn as nn
-from model.block.vanilla_transformer_encoder_pretrain import Transformer, Transformer_dec
-from model.block.strided_transformer_encoder import Transformer as Transformer_reduce
-import numpy as np
-
-class LayerNorm(nn.Module):
- def __init__(self, features, eps=1e-6):
- super(LayerNorm, self).__init__()
- self.a_2 = nn.Parameter(torch.ones(features))
- self.b_2 = nn.Parameter(torch.zeros(features))
- self.eps = eps
-
- def forward(self, x):
- mean = x.mean(-1, keepdim=True)
- std = x.std(-1, keepdim=True)
- return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
-
-class Linear(nn.Module):
- def __init__(self, linear_size, p_dropout=0.25):
- super(Linear, self).__init__()
- self.l_size = linear_size
-
- self.relu = nn.LeakyReLU(0.2, inplace=True)
- self.dropout = nn.Dropout(p_dropout)
-
- #self.w1 = nn.Linear(self.l_size, self.l_size)
- self.w1 = nn.Conv1d(self.l_size, self.l_size, kernel_size=1)
- self.batch_norm1 = nn.BatchNorm1d(self.l_size)
-
- #self.w2 = nn.Linear(self.l_size, self.l_size)
- self.w2 = nn.Conv1d(self.l_size, self.l_size, kernel_size=1)
- self.batch_norm2 = nn.BatchNorm1d(self.l_size)
-
- def forward(self, x):
- y = self.w1(x)
- y = self.batch_norm1(y)
- y = self.relu(y)
- y = self.dropout(y)
-
- y = self.w2(y)
- y = self.batch_norm2(y)
- y = self.relu(y)
- y = self.dropout(y)
-
- out = x + y
-
- return out
-
-class FCBlock(nn.Module):
-
- def __init__(self, channel_in, channel_out, linear_size, block_num):
- super(FCBlock, self).__init__()
-
- self.linear_size = linear_size
- self.block_num = block_num
- self.layers = []
- self.channel_in = channel_in
- self.stage_num = 3
- self.p_dropout = 0.1
- #self.fc_1 = nn.Linear(self.channel_in, self.linear_size)
- self.fc_1 = nn.Conv1d(self.channel_in, self.linear_size, kernel_size=1)
- self.bn_1 = nn.BatchNorm1d(self.linear_size)
- for i in range(block_num):
- self.layers.append(Linear(self.linear_size, self.p_dropout))
- #self.fc_2 = nn.Linear(self.linear_size, channel_out)
- self.fc_2 = nn.Conv1d(self.linear_size, channel_out, kernel_size=1)
-
- self.layers = nn.ModuleList(self.layers)
- self.relu = nn.LeakyReLU(0.2, inplace=True)
- self.dropout = nn.Dropout(self.p_dropout)
-
- def forward(self, x):
-
- x = self.fc_1(x)
- x = self.bn_1(x)
- x = self.relu(x)
- x = self.dropout(x)
- for i in range(self.block_num):
- x = self.layers[i](x)
- x = self.fc_2(x)
-
- return x
-
-class Model_MAE(nn.Module):
- def __init__(self, args):
- super().__init__()
-
- layers, channel, d_hid, length = args.layers, args.channel, args.d_hid, args.frames
- stride_num = args.stride_num
- self.spatial_mask_num = args.spatial_mask_num
- self.num_joints_in, self.num_joints_out = args.n_joints, args.out_joints
-
- self.length = length
- dec_dim_shrink = 2
-
- self.encoder = FCBlock(2*self.num_joints_in, channel, 2*channel, 1)
-
- self.Transformer = Transformer(layers, channel, d_hid, length=length)
- self.Transformer_dec = Transformer_dec(layers-1, channel//dec_dim_shrink, d_hid//dec_dim_shrink, length=length)
-
- self.encoder_to_decoder = nn.Linear(channel, channel//dec_dim_shrink, bias=False)
- self.encoder_LN = LayerNorm(channel)
-
- self.fcn_dec = nn.Sequential(
- nn.BatchNorm1d(channel//dec_dim_shrink, momentum=0.1),
- nn.Conv1d(channel//dec_dim_shrink, 2*self.num_joints_out, kernel_size=1)
- )
-
- # self.fcn_1 = nn.Sequential(
- # nn.BatchNorm1d(channel, momentum=0.1),
- # nn.Conv1d(channel, 3*self.num_joints_out, kernel_size=1)
- # )
-
- self.dec_pos_embedding = nn.Parameter(torch.randn(1, length, channel//dec_dim_shrink))
- self.mask_token = nn.Parameter(torch.randn(1, 1, channel//dec_dim_shrink))
-
- self.spatial_mask_token = nn.Parameter(torch.randn(1, 1, 2))
-
- def forward(self, x_in, mask, spatial_mask):
- x_in = x_in[:, :, :, :, 0].permute(0, 2, 3, 1).contiguous()
- b,f,_,_ = x_in.shape
-
- # spatial mask out
- x = x_in.clone()
-
- x[:,spatial_mask] = self.spatial_mask_token.expand(b,self.spatial_mask_num*f,2)
-
-
- x = x.view(b, f, -1)
-
- x = x.permute(0, 2, 1).contiguous()
-
- x = self.encoder(x)
-
- x = x.permute(0, 2, 1).contiguous()
- feas = self.Transformer(x, mask_MAE=mask)
-
- feas = self.encoder_LN(feas)
- feas = self.encoder_to_decoder(feas)
-
- B, N, C = feas.shape
-
- # we don't unshuffle the correct visible token order,
- # but shuffle the pos embedding accordingly.
- expand_pos_embed = self.dec_pos_embedding.expand(B, -1, -1).clone()
- pos_emd_vis = expand_pos_embed[:, ~mask].reshape(B, -1, C)
- pos_emd_mask = expand_pos_embed[:, mask].reshape(B, -1, C)
- x_full = torch.cat([feas + pos_emd_vis, self.mask_token + pos_emd_mask], dim=1)
-
- x_out = self.Transformer_dec(x_full, pos_emd_mask.shape[1])
-
- x_out = x_out.permute(0, 2, 1).contiguous()
- x_out = self.fcn_dec(x_out)
-
- x_out = x_out.view(b, self.num_joints_out, 2, -1)
- x_out = x_out.permute(0, 2, 3, 1).contiguous().unsqueeze(dim=-1)
-
- return x_out
-
-
-
-
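`Model_MAE.forward` above follows the usual masked-autoencoder recipe: only visible frames are encoded, and the decoder input is rebuilt by pairing the encoded visible tokens with their original positional embeddings and filling the masked slots with a shared learnable mask token plus the positional embeddings of those positions. A tiny sketch of that token reassembly step follows; the tensor sizes are illustrative, and the random tensors stand in for learned parameters and encoder outputs.

```python
import torch

B, T, C = 2, 8, 4                               # batch, sequence length, decoder dim
mask = torch.zeros(T, dtype=torch.bool)
mask[2:5] = True                                # frames 2..4 are masked out

pos_embed = torch.randn(1, T, C)                # stands in for the learned positional embedding
mask_token = torch.randn(1, 1, C)               # stands in for the shared learnable mask token
enc_visible = torch.randn(B, int((~mask).sum()), C)  # encoder output for visible frames only

expand_pos = pos_embed.expand(B, -1, -1)
pos_vis = expand_pos[:, ~mask].reshape(B, -1, C)      # positions of visible frames
pos_masked = expand_pos[:, mask].reshape(B, -1, C)    # positions of masked frames

# visible tokens keep their positions; masked slots get the mask token at theirs
x_full = torch.cat([enc_visible + pos_vis, mask_token + pos_masked], dim=1)
print(x_full.shape)  # torch.Size([2, 8, 4])
```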
diff --git a/spaces/Scakmak/Chatbot/README.md b/spaces/Scakmak/Chatbot/README.md
deleted file mode 100644
index 47d9466020abf72791ce22949e5d844a750fdee9..0000000000000000000000000000000000000000
--- a/spaces/Scakmak/Chatbot/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Chatbot
-emoji: 🔥
-colorFrom: gray
-colorTo: gray
-sdk: gradio
-sdk_version: 3.28.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SeViLA/SeViLA/lavis/common/vqa_tools/__init__.py b/spaces/SeViLA/SeViLA/lavis/common/vqa_tools/__init__.py
deleted file mode 100644
index 9b98da85428159ad0dcfab7685c080848ecf8c7b..0000000000000000000000000000000000000000
--- a/spaces/SeViLA/SeViLA/lavis/common/vqa_tools/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-__author__ = "aagrawal"
diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/utils/cache.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/utils/cache.py
deleted file mode 100644
index 2fccc0acda4027b0bd36756a29b2d5cee318294d..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/utils/cache.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from concurrent.futures import ThreadPoolExecutor
-from collections import deque
-from functools import partial
-from hashlib import sha1
-import logging
-from pathlib import Path
-import sys
-import typing as tp
-import zipfile
-
-import flashy
-import torch
-
-
-logger = logging.getLogger(__name__)
-
-
-def get_full_embed(full_embed: torch.Tensor, x: tp.Any, idx: int, device: tp.Union[str, torch.device]) -> torch.Tensor:
- """Utility function for the EmbeddingCache, returning the full embedding without any chunking.
- This method can be used in case there is no need in extracting a chunk of the full embedding
- read from the cache.
-
- Args:
- full_embed (torch.Tensor): The full embedding.
- x (any): Batch object from which the full embedding is derived.
- idx (int): Index of the object to consider in the batch object.
- Returns:
- full_embed (torch.Tensor): The full embedding
- """
- return full_embed.to(device)
-
-
-class EmbeddingCache:
- """Cache around embeddings computation for faster execution.
- The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API
- to retrieve the pre-computed embeddings on full inputs and extract only a given chunk
- using a user-provided function. When the cache is warm (all embeddings are pre-computed),
- the EmbeddingCache allows for faster training as it removes the need to compute the embeddings.
- Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint
- and synchronization points in the forward calls.
-
- Args:
- cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.
- device (str or torch.device): Device on which the embedding is returned.
- compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute
- the embedding from a given object and path. This user-provided function can compute the
- embedding from the provided object or use the provided path as an entry point. The last parameter
- specifies the index corresponding to the current embedding in the object, which can represent batch metadata.
- extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract
- the desired embedding chunk from the full embedding loaded from the cache. The last parameter
- specifies the index corresponding to the current embedding in the object, which can represent batch metadata.
- If not specified, will return the full embedding unmodified.
- """
- def __init__(self, cache_path: tp.Union[Path], device: tp.Union[str, torch.device],
- compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],
- extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):
- self.cache_path = Path(cache_path)
- self.device = device
- self._compute_embed_fn = compute_embed_fn
- self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]
- if extract_embed_fn is not None:
- self._extract_embed_fn = extract_embed_fn
- else:
- self._extract_embed_fn = partial(get_full_embed, device=device)
- if self.cache_path is not None:
- self.cache_path.mkdir(exist_ok=True, parents=True)
- logger.info(f"Cache instantiated at: {self.cache_path}")
- self.pool = ThreadPoolExecutor(8)
- self.pool.__enter__()
- self._current_batch_cache: dict = {}
- self._memory_cache: dict = {}
-
- def _get_cache_path(self, path: tp.Union[Path, str]):
- """Get cache path for the given file path."""
- sig = sha1(str(path).encode()).hexdigest()
- return self.cache_path / sig
-
- @staticmethod
- def _get_full_embed_from_cache(cache: Path):
- """Loads full pre-computed embedding from the cache."""
- try:
- embed = torch.load(cache, 'cpu')
- except Exception as exc:
- logger.error("Error loading %s: %r", cache, exc)
- embed = None
- return embed
-
- def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:
- """Get embedding from cache, computing and storing it to cache if not already cached.
- The EmbeddingCache first tries to load the embedding from the in-memory cache
- containing the pre-computed chunks populated through `populate_embed_cache`.
- If not found, the full embedding is computed and stored on disk to be later accessed
- to populate the in-memory cache, and the desired embedding chunk is extracted and returned.
-
- Args:
- paths (list[Path or str]): List of paths from where the embeddings can be loaded.
- x (any): Object from which the embedding is extracted.
- """
- embeds = []
- for idx, path in enumerate(paths):
- cache = self._get_cache_path(path)
- if cache in self._current_batch_cache:
- embed = self._current_batch_cache[cache]
- else:
- full_embed = self._compute_embed_fn(path, x, idx)
- try:
- with flashy.utils.write_and_rename(cache, pid=True) as f:
- torch.save(full_embed.cpu(), f)
- except Exception as exc:
- logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)
- else:
- logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)
- embed = self._extract_embed_fn(full_embed, x, idx)
- embeds.append(embed)
- embed = torch.stack(embeds, dim=0)
- return embed
-
- def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:
- """Populate in-memory caches for embeddings reading from the embeddings stored on disk.
- The in-memory caches consist in a cache for the full embedding and another cache for the
- final embedding chunk. Such caches are used to limit the IO access when computing the actual embeddings
- and reduce the IO footprint and synchronization points during forward passes.
-
- Args:
- paths (list[Path]): List of paths from where the embeddings can be loaded.
- x (any): Object from which the embedding is extracted.
- """
- self._current_batch_cache.clear()
- if self.cache_path is not None:
- futures: list = []
- for path in paths:
- assert path is not None, "Path is required for computation from cache"
- cache = self._get_cache_path(path)
- if cache in self._memory_cache or not cache.exists():
- futures.append(None)
- else:
- futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))
- for idx, (path, future) in enumerate(zip(paths, futures)):
- assert path is not None
- cache = self._get_cache_path(path)
- full_embed = None
- if future is None:
- if cache in self._memory_cache:
- full_embed = self._memory_cache[cache]
- else:
- full_embed = future.result()
- if full_embed is not None:
- self._memory_cache[cache] = full_embed
- full_embed = full_embed.to(self.device)
- if full_embed is not None:
- embed = self._extract_embed_fn(full_embed, x, idx)
- self._current_batch_cache[cache] = embed
-
-
-class CachedBatchWriter:
- """Write pre computed caches for mini batches. This can
- make loading a lot more efficient depending on your filesystem.
-
- Args:
- cache_folder (Path): folder in which the cached minibatches
- will be stored.
-
- Inside cache folder, the structure is the following:
- `epoch_number / update_number.zip`
- And the zip file contains one entry per batch item.
-
- It is possible to use the cache with a batch size smaller than the
- one it was created with, but not larger. Make sure to call the
- `start_epoch(epoch)` method to indicate a change of epoch.
-
- See the grid `audiocraft/grids/musicgen/musicgen_warmup_cache.py`
- for an example of how to warmup the cache.
- """
- def __init__(self, cache_folder: Path):
- self.cache_folder = cache_folder
- self._current_epoch: tp.Optional[int] = None
- self._current_index = 0
-
- def start_epoch(self, epoch: int):
- """Call at the beginning of each epoch.
- """
- self._current_epoch = epoch
- self._current_index = 0
- self._zip_path.parent.mkdir(exist_ok=True, parents=True)
-
- @staticmethod
- def _get_zip_path(cache_folder: Path, epoch: int, index: int):
- return cache_folder / f"{epoch:05d}" / f"{index:06d}.zip"
-
- @property
- def _zip_path(self):
- assert self._current_epoch is not None
- return CachedBatchWriter._get_zip_path(self.cache_folder, self._current_epoch, self._current_index)
-
- def save(self, *content):
- """Save one mini batch. This function is distributed-aware
- and will automatically merge all the items from the different
- workers.
- """
- all_contents = []
- for rank in range(flashy.distrib.world_size()):
- their_content = flashy.distrib.broadcast_object(content, src=rank)
- all_contents.append(their_content)
-
- if flashy.distrib.is_rank_zero():
- idx = 0
- with flashy.utils.write_and_rename(self._zip_path) as tmp:
- with zipfile.ZipFile(tmp, 'w') as zf:
- for content in all_contents:
- for vals in zip(*content):
- with zf.open(f'{idx}', 'w') as f: # type: ignore
- torch.save(vals, f)
- idx += 1
- flashy.distrib.barrier()
- self._current_index += 1
-
-
-class CachedBatchLoader:
- """Loader for cached mini-batches dumped with `CachedBatchWriter`.
-
- Args:
- cache_folder (Path): folder in which the cached minibatches are stored.
- batch_size (int): batch size (per GPU) expected.
- num_workers (int): number of workers to use for loading.
-        min_length (int): minimum expected length for each epoch. If some
-            mini-batches are missing, an error is raised.
-
- This is iterable just like a regular DataLoader.
- """
-
- def __init__(self, cache_folder: Path, batch_size: int,
- num_workers: int = 10, min_length: int = 1):
- self.cache_folder = cache_folder
- self.batch_size = batch_size
- self.num_workers = num_workers
- self.min_length = min_length
- self._current_epoch: tp.Optional[int] = None
- self.sampler = None # for compatibility with the regular DataLoader
-
- def __len__(self):
- path = CachedBatchWriter._get_zip_path(self.cache_folder, self._current_epoch or 0, 0).parent
- return len([p for p in path.iterdir() if p.suffix == ".zip"])
-
- def start_epoch(self, epoch: int):
- """Call at the beginning of each epoch.
- """
- self._current_epoch = epoch
-
- def _zip_path(self, index: int):
- assert self._current_epoch is not None
- return CachedBatchWriter._get_zip_path(self.cache_folder, self._current_epoch, index)
-
- def _load_one(self, index: int):
- zip_path = self._zip_path(index)
- if not zip_path.exists():
- if index < self.min_length:
- raise RuntimeError(f"Cache should have at least {self.min_length} batches, but {index} doesn't exist")
-
- return None
- mode = "rb" if sys.version_info >= (3, 9) else "r"
- try:
- with zipfile.ZipFile(zip_path, 'r') as zf:
- rank = flashy.distrib.rank()
- world_size = flashy.distrib.world_size()
- root = zipfile.Path(zf)
- items = list(root.iterdir())
- total_batch_size = self.batch_size * world_size
- if len(items) < total_batch_size:
- raise RuntimeError(
- f"The cache can handle a max batch size of {len(items)}, "
- f"but {total_batch_size} is needed.")
- start = rank * self.batch_size
- items = items[start: start + self.batch_size]
- assert len(items) == self.batch_size
- entries = []
- entries = [torch.load(item.open(mode), 'cpu') for item in items] # type: ignore
- transposed = zip(*entries)
- out = []
- for part in transposed:
- assert len(part) > 0
- if isinstance(part[0], torch.Tensor):
- out.append(torch.stack(part))
- else:
- out.append(part)
- return out
- except Exception:
- logger.error("Error when reading zip path %s", zip_path)
- raise
-
- def __iter__(self):
-        """This will yield tuples, exactly as provided to the
-        `CachedBatchWriter.save` method.
- """
- pool = ThreadPoolExecutor(self.num_workers)
- next_index = 0
- queue = deque()
-
- def _get_next():
- nonlocal next_index
- r = queue.popleft().result()
- if r is None:
- return None
- else:
- queue.append(pool.submit(self._load_one, next_index))
- next_index += 1
- return r
-
- with pool:
- # fill the buffer of fetching jobs.
- for _ in range(2 * self.num_workers):
- queue.append(pool.submit(self._load_one, next_index))
- next_index += 1
- while True:
- batch = _get_next()
- if batch is None:
- return
- yield batch
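
For reference, a minimal single-process usage sketch of the writer/loader pair deleted above, based only on the docstrings (the cache folder, tensor shapes and epoch/step counts are illustrative, and flashy's distributed helpers are assumed to reduce to a no-op when no process group is initialized):

from pathlib import Path

import torch

cache_folder = Path("/tmp/minibatch_cache")  # hypothetical location

# Warmup: dump one `epoch_number/update_number.zip` file per mini-batch.
writer = CachedBatchWriter(cache_folder)
writer.start_epoch(0)
for step in range(4):
    wav = torch.randn(8, 1, 32000)   # fake batch of 8 mono waveforms
    labels = torch.arange(8)
    writer.save(wav, labels)         # each batch item becomes one zip entry

# Training: replay the cached mini-batches instead of recomputing them.
loader = CachedBatchLoader(cache_folder, batch_size=8, num_workers=2, min_length=4)
loader.start_epoch(0)
for wav, labels in loader:           # yields tuples exactly as passed to save()
    assert wav.shape == (8, 1, 32000)
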
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/signatures.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/signatures.py
deleted file mode 100644
index 88d72b185eb6763b2e76092585f604b88e398e0b..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/signatures.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""DEPRECATED: Function signature objects for callables.
-
-Use the standard library version if available, as it is more up to date.
-Fallback on backport otherwise.
-"""
-
-import warnings
-warnings.warn("{} backport for Python 2 is deprecated in IPython 6, which only supports "
- "Python 3. Import directly from standard library `inspect`".format(__name__),
- DeprecationWarning, stacklevel=2)
-
-from inspect import BoundArguments, Parameter, Signature, signature
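
Since the shim above only re-exported the standard library, callers can switch to `inspect` directly; a small illustration (the `resize` function is made up):

import inspect

def resize(image, size=(256, 256), *, interpolation="bilinear"):
    return image

sig = inspect.signature(resize)
print(sig)                                # (image, size=(256, 256), *, interpolation='bilinear')

bound = sig.bind("img.png", (128, 128))   # a BoundArguments instance, as re-exported above
bound.apply_defaults()
print(bound.arguments["interpolation"])   # bilinear
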
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/_core/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/_core/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/asttokens/mark_tokens.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/asttokens/mark_tokens.py
deleted file mode 100644
index 3afce53c97e5da11207a8fc1020016c02a4fe92a..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/asttokens/mark_tokens.py
+++ /dev/null
@@ -1,478 +0,0 @@
-# Copyright 2016 Grist Labs, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import ast
-import numbers
-import sys
-import token
-from ast import Module
-from typing import Callable, List, Union, cast, Optional, Tuple, TYPE_CHECKING
-
-import six
-
-from . import util
-from .asttokens import ASTTokens
-from .util import AstConstant
-from .astroid_compat import astroid_node_classes as nc
-
-if TYPE_CHECKING:
- from .util import AstNode
-
-
-# Mapping of matching braces. To find a token here, look up token[:2].
-_matching_pairs_left = {
- (token.OP, '('): (token.OP, ')'),
- (token.OP, '['): (token.OP, ']'),
- (token.OP, '{'): (token.OP, '}'),
-}
-
-_matching_pairs_right = {
- (token.OP, ')'): (token.OP, '('),
- (token.OP, ']'): (token.OP, '['),
- (token.OP, '}'): (token.OP, '{'),
-}
-
-
-class MarkTokens(object):
- """
- Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes
- to each of them. This is the heart of the token-marking logic.
- """
- def __init__(self, code):
- # type: (ASTTokens) -> None
- self._code = code
- self._methods = util.NodeMethods()
- self._iter_children = None # type: Optional[Callable]
-
- def visit_tree(self, node):
- # type: (Module) -> None
- self._iter_children = util.iter_children_func(node)
- util.visit_tree(node, self._visit_before_children, self._visit_after_children)
-
- def _visit_before_children(self, node, parent_token):
- # type: (AstNode, Optional[util.Token]) -> Tuple[Optional[util.Token], Optional[util.Token]]
- col = getattr(node, 'col_offset', None)
- token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None
-
- if not token and util.is_module(node):
- # We'll assume that a Module node starts at the start of the source code.
- token = self._code.get_token(1, 0)
-
- # Use our own token, or our parent's if we don't have one, to pass to child calls as
- # parent_token argument. The second value becomes the token argument of _visit_after_children.
- return (token or parent_token, token)
-
- def _visit_after_children(self, node, parent_token, token):
- # type: (AstNode, Optional[util.Token], Optional[util.Token]) -> None
- # This processes the node generically first, after all children have been processed.
-
- # Get the first and last tokens that belong to children. Note how this doesn't assume that we
- # iterate through children in order that corresponds to occurrence in source code. This
- # assumption can fail (e.g. with return annotations).
- first = token
- last = None
- for child in cast(Callable, self._iter_children)(node):
- # astroid slices have especially wrong positions, we don't want them to corrupt their parents.
- if util.is_empty_astroid_slice(child):
- continue
- if not first or child.first_token.index < first.index:
- first = child.first_token
- if not last or child.last_token.index > last.index:
- last = child.last_token
-
- # If we don't have a first token from _visit_before_children, and there were no children, then
- # use the parent's token as the first token.
- first = first or parent_token
-
- # If no children, set last token to the first one.
- last = last or first
-
- # Statements continue to before NEWLINE. This helps cover a few different cases at once.
- if util.is_stmt(node):
- last = self._find_last_in_stmt(cast(util.Token, last))
-
- # Capture any unmatched brackets.
- first, last = self._expand_to_matching_pairs(cast(util.Token, first), cast(util.Token, last), node)
-
- # Give a chance to node-specific methods to adjust.
- nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)
-
- if (nfirst, nlast) != (first, last):
- # If anything changed, expand again to capture any unmatched brackets.
- nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)
-
- node.first_token = nfirst
- node.last_token = nlast
-
- def _find_last_in_stmt(self, start_token):
- # type: (util.Token) -> util.Token
- t = start_token
- while (not util.match_token(t, token.NEWLINE) and
- not util.match_token(t, token.OP, ';') and
- not token.ISEOF(t.type)):
- t = self._code.next_token(t, include_extra=True)
- return self._code.prev_token(t)
-
- def _expand_to_matching_pairs(self, first_token, last_token, node):
- # type: (util.Token, util.Token, AstNode) -> Tuple[util.Token, util.Token]
- """
- Scan tokens in [first_token, last_token] range that are between node's children, and for any
- unmatched brackets, adjust first/last tokens to include the closing pair.
- """
- # We look for opening parens/braces among non-child tokens (i.e. tokens between our actual
- # child nodes). If we find any closing ones, we match them to the opens.
- to_match_right = [] # type: List[Tuple[int, str]]
- to_match_left = []
- for tok in self._code.token_range(first_token, last_token):
- tok_info = tok[:2]
- if to_match_right and tok_info == to_match_right[-1]:
- to_match_right.pop()
- elif tok_info in _matching_pairs_left:
- to_match_right.append(_matching_pairs_left[tok_info])
- elif tok_info in _matching_pairs_right:
- to_match_left.append(_matching_pairs_right[tok_info])
-
- # Once done, extend `last_token` to match any unclosed parens/braces.
- for match in reversed(to_match_right):
- last = self._code.next_token(last_token)
- # Allow for trailing commas or colons (allowed in subscripts) before the closing delimiter
- while any(util.match_token(last, token.OP, x) for x in (',', ':')):
- last = self._code.next_token(last)
- # Now check for the actual closing delimiter.
- if util.match_token(last, *match):
- last_token = last
-
- # And extend `first_token` to match any unclosed opening parens/braces.
- for match in to_match_left:
- first = self._code.prev_token(first_token)
- if util.match_token(first, *match):
- first_token = first
-
- return (first_token, last_token)
-
- #----------------------------------------------------------------------
- # Node visitors. Each takes a preliminary first and last tokens, and returns the adjusted pair
- # that will actually be assigned.
-
- def visit_default(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # pylint: disable=no-self-use
- # By default, we don't need to adjust the token we computed earlier.
- return (first_token, last_token)
-
- def handle_comp(self, open_brace, node, first_token, last_token):
- # type: (str, AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # For list/set/dict comprehensions, we only get the token of the first child, so adjust it to
- # include the opening brace (the closing brace will be matched automatically).
- before = self._code.prev_token(first_token)
- util.expect_token(before, token.OP, open_brace)
- return (before, last_token)
-
- # Python 3.8 fixed the starting position of list comprehensions:
- # https://bugs.python.org/issue31241
- if sys.version_info < (3, 8):
- def visit_listcomp(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- return self.handle_comp('[', node, first_token, last_token)
-
- if six.PY2:
- # We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start.
- def visit_setcomp(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- return self.handle_comp('{', node, first_token, last_token)
-
- def visit_dictcomp(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- return self.handle_comp('{', node, first_token, last_token)
-
- def visit_comprehension(self,
- node, # type: AstNode
- first_token, # type: util.Token
- last_token, # type: util.Token
- ):
- # type: (...) -> Tuple[util.Token, util.Token]
- # The 'comprehension' node starts with 'for' but we only get first child; we search backwards
- # to find the 'for' keyword.
- first = self._code.find_token(first_token, token.NAME, 'for', reverse=True)
- return (first, last_token)
-
- def visit_if(self, node, first_token, last_token):
-    # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- while first_token.string not in ('if', 'elif'):
- first_token = self._code.prev_token(first_token)
- return first_token, last_token
-
- def handle_attr(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # Attribute node has ".attr" (2 tokens) after the last child.
- dot = self._code.find_token(last_token, token.OP, '.')
- name = self._code.next_token(dot)
- util.expect_token(name, token.NAME)
- return (first_token, name)
-
- visit_attribute = handle_attr
- visit_assignattr = handle_attr
- visit_delattr = handle_attr
-
- def handle_def(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # With astroid, nodes that start with a doc-string can have an empty body, in which case we
- # need to adjust the last token to include the doc string.
- if not node.body and getattr(node, 'doc', None): # type: ignore[union-attr]
- last_token = self._code.find_token(last_token, token.STRING)
-
- # Include @ from decorator
- if first_token.index > 0:
- prev = self._code.prev_token(first_token)
- if util.match_token(prev, token.OP, '@'):
- first_token = prev
- return (first_token, last_token)
-
- visit_classdef = handle_def
- visit_functiondef = handle_def
-
- def handle_following_brackets(self, node, last_token, opening_bracket):
- # type: (AstNode, util.Token, str) -> util.Token
- # This is for calls and subscripts, which have a pair of brackets
- # at the end which may contain no nodes, e.g. foo() or bar[:].
- # We look for the opening bracket and then let the matching pair be found automatically
- # Remember that last_token is at the end of all children,
- # so we are not worried about encountering a bracket that belongs to a child.
- first_child = next(cast(Callable, self._iter_children)(node))
- call_start = self._code.find_token(first_child.last_token, token.OP, opening_bracket)
- if call_start.index > last_token.index:
- last_token = call_start
- return last_token
-
- def visit_call(self, node, first_token, last_token):
-    # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- last_token = self.handle_following_brackets(node, last_token, '(')
-
- # Handling a python bug with decorators with empty parens, e.g.
- # @deco()
- # def ...
- if util.match_token(first_token, token.OP, '@'):
- first_token = self._code.next_token(first_token)
- return (first_token, last_token)
-
- def visit_matchclass(self, node, first_token, last_token):
-    # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- last_token = self.handle_following_brackets(node, last_token, '(')
- return (first_token, last_token)
-
- def visit_subscript(self,
- node, # type: AstNode
- first_token, # type: util.Token
- last_token, # type: util.Token
- ):
- # type: (...) -> Tuple[util.Token, util.Token]
- last_token = self.handle_following_brackets(node, last_token, '[')
- return (first_token, last_token)
-
- def visit_slice(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # consume `:` tokens to the left and right. In Python 3.9, Slice nodes are
- # given a col_offset, (and end_col_offset), so this will always start inside
- # the slice, even if it is the empty slice. However, in 3.8 and below, this
- # will only expand to the full slice if the slice contains a node with a
- # col_offset. So x[:] will only get the correct tokens in 3.9, but x[1:] and
- # x[:1] will even on earlier versions of Python.
- while True:
- prev = self._code.prev_token(first_token)
- if prev.string != ':':
- break
- first_token = prev
- while True:
- next_ = self._code.next_token(last_token)
- if next_.string != ':':
- break
- last_token = next_
- return (first_token, last_token)
-
- def handle_bare_tuple(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # A bare tuple doesn't include parens; if there is a trailing comma, make it part of the tuple.
- maybe_comma = self._code.next_token(last_token)
- if util.match_token(maybe_comma, token.OP, ','):
- last_token = maybe_comma
- return (first_token, last_token)
-
- if sys.version_info >= (3, 8):
- # In Python3.8 parsed tuples include parentheses when present.
- def handle_tuple_nonempty(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- assert isinstance(node, ast.Tuple) or isinstance(node, nc._BaseContainer)
- # It's a bare tuple if the first token belongs to the first child. The first child may
- # include extraneous parentheses (which don't create new nodes), so account for those too.
- child = node.elts[0]
- if TYPE_CHECKING:
- child = cast(AstNode, child)
- child_first, child_last = self._gobble_parens(child.first_token, child.last_token, True)
- if first_token == child_first:
- return self.handle_bare_tuple(node, first_token, last_token)
- return (first_token, last_token)
- else:
- # Before python 3.8, parsed tuples do not include parens.
- def handle_tuple_nonempty(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- (first_token, last_token) = self.handle_bare_tuple(node, first_token, last_token)
- return self._gobble_parens(first_token, last_token, False)
-
- def visit_tuple(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- assert isinstance(node, ast.Tuple) or isinstance(node, nc._BaseContainer)
- if not node.elts:
- # An empty tuple is just "()", and we need no further info.
- return (first_token, last_token)
- return self.handle_tuple_nonempty(node, first_token, last_token)
-
- def _gobble_parens(self, first_token, last_token, include_all=False):
- # type: (util.Token, util.Token, bool) -> Tuple[util.Token, util.Token]
- # Expands a range of tokens to include one or all pairs of surrounding parentheses, and
- # returns (first, last) tokens that include these parens.
- while first_token.index > 0:
- prev = self._code.prev_token(first_token)
- next = self._code.next_token(last_token)
- if util.match_token(prev, token.OP, '(') and util.match_token(next, token.OP, ')'):
- first_token, last_token = prev, next
- if include_all:
- continue
- break
- return (first_token, last_token)
-
- def visit_str(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- return self.handle_str(first_token, last_token)
-
- def visit_joinedstr(self,
- node, # type: AstNode
- first_token, # type: util.Token
- last_token, # type: util.Token
- ):
- # type: (...) -> Tuple[util.Token, util.Token]
- return self.handle_str(first_token, last_token)
-
- def visit_bytes(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- return self.handle_str(first_token, last_token)
-
- def handle_str(self, first_token, last_token):
- # type: (util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # Multiple adjacent STRING tokens form a single string.
- last = self._code.next_token(last_token)
- while util.match_token(last, token.STRING):
- last_token = last
- last = self._code.next_token(last_token)
- return (first_token, last_token)
-
- def handle_num(self,
- node, # type: AstNode
- value, # type: Union[complex, int, numbers.Number]
- first_token, # type: util.Token
- last_token, # type: util.Token
- ):
- # type: (...) -> Tuple[util.Token, util.Token]
- # A constant like '-1' gets turned into two tokens; this will skip the '-'.
- while util.match_token(last_token, token.OP):
- last_token = self._code.next_token(last_token)
-
- if isinstance(value, complex):
- # A complex number like -2j cannot be compared directly to 0
- # A complex number like 1-2j is expressed as a binary operation
- # so we don't need to worry about it
- value = value.imag
-
- # This makes sure that the - is included
- if value < 0 and first_token.type == token.NUMBER: # type: ignore[operator]
- first_token = self._code.prev_token(first_token)
- return (first_token, last_token)
-
- def visit_num(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- return self.handle_num(node, cast(ast.Num, node).n, first_token, last_token)
-
- # In Astroid, the Num and Str nodes are replaced by Const.
- def visit_const(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- assert isinstance(node, AstConstant) or isinstance(node, nc.Const)
- if isinstance(node.value, numbers.Number):
- return self.handle_num(node, node.value, first_token, last_token)
- elif isinstance(node.value, (six.text_type, six.binary_type)):
- return self.visit_str(node, first_token, last_token)
- return (first_token, last_token)
-
- # In Python >= 3.6, there is a similar class 'Constant' for literals
- # In 3.8 it became the type produced by ast.parse
- # https://bugs.python.org/issue32892
- visit_constant = visit_const
-
- def visit_keyword(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # Until python 3.9 (https://bugs.python.org/issue40141),
- # ast.keyword nodes didn't have line info. Astroid has lineno None.
- assert isinstance(node, ast.keyword) or isinstance(node, nc.Keyword)
- if node.arg is not None and getattr(node, 'lineno', None) is None:
- equals = self._code.find_token(first_token, token.OP, '=', reverse=True)
- name = self._code.prev_token(equals)
- util.expect_token(name, token.NAME, node.arg)
- first_token = name
- return (first_token, last_token)
-
- def visit_starred(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # Astroid has 'Starred' nodes (for "foo(*bar)" type args), but they need to be adjusted.
- if not util.match_token(first_token, token.OP, '*'):
- star = self._code.prev_token(first_token)
- if util.match_token(star, token.OP, '*'):
- first_token = star
- return (first_token, last_token)
-
- def visit_assignname(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- # Astroid may turn 'except' clause into AssignName, but we need to adjust it.
- if util.match_token(first_token, token.NAME, 'except'):
- colon = self._code.find_token(last_token, token.OP, ':')
- first_token = last_token = self._code.prev_token(colon)
- return (first_token, last_token)
-
- if six.PY2:
- # No need for this on Python3, which already handles 'with' nodes correctly.
- def visit_with(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- first = self._code.find_token(first_token, token.NAME, 'with', reverse=True)
- return (first, last_token)
-
- # Async nodes should typically start with the word 'async'
- # but Python < 3.7 doesn't put the col_offset there
- # AsyncFunctionDef is slightly different because it might have
- # decorators before that, which visit_functiondef handles
- def handle_async(self, node, first_token, last_token):
- # type: (AstNode, util.Token, util.Token) -> Tuple[util.Token, util.Token]
- if not first_token.string == 'async':
- first_token = self._code.prev_token(first_token)
- return (first_token, last_token)
-
- visit_asyncfor = handle_async
- visit_asyncwith = handle_async
-
- def visit_asyncfunctiondef(self,
- node, # type: AstNode
- first_token, # type: util.Token
- last_token, # type: util.Token
- ):
- # type: (...) -> Tuple[util.Token, util.Token]
- if util.match_token(first_token, token.NAME, 'def'):
- # Include the 'async' token
- first_token = self._code.prev_token(first_token)
- return self.visit_functiondef(node, first_token, last_token)
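
For context, the `first_token`/`last_token` attributes assigned by the visitor above are what `ASTTokens.get_text` relies on; a minimal sketch of the typical usage (the sample source string is illustrative):

import ast

import asttokens

source = "total = sum(n * n for n in range(10))"
atok = asttokens.ASTTokens(source, parse=True)  # runs MarkTokens over the parsed tree

for node in ast.walk(atok.tree):
    if isinstance(node, (ast.Call, ast.GeneratorExp)):
        # get_text reads back the [first_token, last_token] range assigned during marking
        print(type(node).__name__, repr(atok.get_text(node)))
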
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/tests/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/tests/__init__.py
deleted file mode 100644
index 8c5661e93a205bf4fb22404d4fc50f902cc31369..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/colorama/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
diff --git a/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/decoder.py b/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/decoder.py
deleted file mode 100644
index 993203d1792311f1c492091eaea3c1ac9088187f..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/normalbae/models/submodules/decoder.py
+++ /dev/null
@@ -1,202 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from .submodules import UpSampleBN, UpSampleGN, norm_normalize, sample_points
-
-
-class Decoder(nn.Module):
- def __init__(self, args):
- super(Decoder, self).__init__()
-
- # hyper-parameter for sampling
- self.sampling_ratio = args.sampling_ratio
- self.importance_ratio = args.importance_ratio
-
- # feature-map
- self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
- if args.architecture == 'BN':
- self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
- self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
- self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
- self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)
-
- elif args.architecture == 'GN':
- self.up1 = UpSampleGN(skip_input=2048 + 176, output_features=1024)
- self.up2 = UpSampleGN(skip_input=1024 + 64, output_features=512)
- self.up3 = UpSampleGN(skip_input=512 + 40, output_features=256)
- self.up4 = UpSampleGN(skip_input=256 + 24, output_features=128)
-
- else:
- raise Exception('invalid architecture')
-
- # produces 1/8 res output
- self.out_conv_res8 = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
-
- # produces 1/4 res output
- self.out_conv_res4 = nn.Sequential(
- nn.Conv1d(512 + 4, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 4, kernel_size=1),
- )
-
- # produces 1/2 res output
- self.out_conv_res2 = nn.Sequential(
- nn.Conv1d(256 + 4, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 4, kernel_size=1),
- )
-
- # produces 1/1 res output
- self.out_conv_res1 = nn.Sequential(
- nn.Conv1d(128 + 4, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
- nn.Conv1d(128, 4, kernel_size=1),
- )
-
- def forward(self, features, gt_norm_mask=None, mode='test'):
- x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11]
-
- # generate feature-map
-
- x_d0 = self.conv2(x_block4) # x_d0 : [2, 2048, 15, 20] 1/32 res
- x_d1 = self.up1(x_d0, x_block3) # x_d1 : [2, 1024, 30, 40] 1/16 res
- x_d2 = self.up2(x_d1, x_block2) # x_d2 : [2, 512, 60, 80] 1/8 res
- x_d3 = self.up3(x_d2, x_block1) # x_d3: [2, 256, 120, 160] 1/4 res
- x_d4 = self.up4(x_d3, x_block0) # x_d4: [2, 128, 240, 320] 1/2 res
-
- # 1/8 res output
- out_res8 = self.out_conv_res8(x_d2) # out_res8: [2, 4, 60, 80] 1/8 res output
- out_res8 = norm_normalize(out_res8) # out_res8: [2, 4, 60, 80] 1/8 res output
-
- ################################################################################################################
- # out_res4
- ################################################################################################################
-
- if mode == 'train':
- # upsampling ... out_res8: [2, 4, 60, 80] -> out_res8_res4: [2, 4, 120, 160]
- out_res8_res4 = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True)
- B, _, H, W = out_res8_res4.shape
-
- # samples: [B, 1, N, 2]
- point_coords_res4, rows_int, cols_int = sample_points(out_res8_res4.detach(), gt_norm_mask,
- sampling_ratio=self.sampling_ratio,
- beta=self.importance_ratio)
-
- # output (needed for evaluation / visualization)
- out_res4 = out_res8_res4
-
- # grid_sample feature-map
- feat_res4 = F.grid_sample(x_d2, point_coords_res4, mode='bilinear', align_corners=True) # (B, 512, 1, N)
- init_pred = F.grid_sample(out_res8, point_coords_res4, mode='bilinear', align_corners=True) # (B, 4, 1, N)
- feat_res4 = torch.cat([feat_res4, init_pred], dim=1) # (B, 512+4, 1, N)
-
- # prediction (needed to compute loss)
- samples_pred_res4 = self.out_conv_res4(feat_res4[:, :, 0, :]) # (B, 4, N)
- samples_pred_res4 = norm_normalize(samples_pred_res4) # (B, 4, N) - normalized
-
- for i in range(B):
- out_res4[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res4[i, :, :]
-
- else:
- # grid_sample feature-map
- feat_map = F.interpolate(x_d2, scale_factor=2, mode='bilinear', align_corners=True)
- init_pred = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True)
- feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 512+4, H, W)
- B, _, H, W = feat_map.shape
-
- # try all pixels
- out_res4 = self.out_conv_res4(feat_map.view(B, 512 + 4, -1)) # (B, 4, N)
- out_res4 = norm_normalize(out_res4) # (B, 4, N) - normalized
- out_res4 = out_res4.view(B, 4, H, W)
- samples_pred_res4 = point_coords_res4 = None
-
- ################################################################################################################
- # out_res2
- ################################################################################################################
-
- if mode == 'train':
-
- # upsampling ... out_res4: [2, 4, 120, 160] -> out_res4_res2: [2, 4, 240, 320]
- out_res4_res2 = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True)
- B, _, H, W = out_res4_res2.shape
-
- # samples: [B, 1, N, 2]
- point_coords_res2, rows_int, cols_int = sample_points(out_res4_res2.detach(), gt_norm_mask,
- sampling_ratio=self.sampling_ratio,
- beta=self.importance_ratio)
-
- # output (needed for evaluation / visualization)
- out_res2 = out_res4_res2
-
- # grid_sample feature-map
- feat_res2 = F.grid_sample(x_d3, point_coords_res2, mode='bilinear', align_corners=True) # (B, 256, 1, N)
- init_pred = F.grid_sample(out_res4, point_coords_res2, mode='bilinear', align_corners=True) # (B, 4, 1, N)
- feat_res2 = torch.cat([feat_res2, init_pred], dim=1) # (B, 256+4, 1, N)
-
- # prediction (needed to compute loss)
- samples_pred_res2 = self.out_conv_res2(feat_res2[:, :, 0, :]) # (B, 4, N)
- samples_pred_res2 = norm_normalize(samples_pred_res2) # (B, 4, N) - normalized
-
- for i in range(B):
- out_res2[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res2[i, :, :]
-
- else:
- # grid_sample feature-map
- feat_map = F.interpolate(x_d3, scale_factor=2, mode='bilinear', align_corners=True)
- init_pred = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True)
-            feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 256+4, H, W)
- B, _, H, W = feat_map.shape
-
- out_res2 = self.out_conv_res2(feat_map.view(B, 256 + 4, -1)) # (B, 4, N)
- out_res2 = norm_normalize(out_res2) # (B, 4, N) - normalized
- out_res2 = out_res2.view(B, 4, H, W)
- samples_pred_res2 = point_coords_res2 = None
-
- ################################################################################################################
- # out_res1
- ################################################################################################################
-
- if mode == 'train':
-            # upsampling ... out_res2: [2, 4, 240, 320] -> out_res2_res1: [2, 4, 480, 640]
- out_res2_res1 = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True)
- B, _, H, W = out_res2_res1.shape
-
- # samples: [B, 1, N, 2]
- point_coords_res1, rows_int, cols_int = sample_points(out_res2_res1.detach(), gt_norm_mask,
- sampling_ratio=self.sampling_ratio,
- beta=self.importance_ratio)
-
- # output (needed for evaluation / visualization)
- out_res1 = out_res2_res1
-
- # grid_sample feature-map
- feat_res1 = F.grid_sample(x_d4, point_coords_res1, mode='bilinear', align_corners=True) # (B, 128, 1, N)
- init_pred = F.grid_sample(out_res2, point_coords_res1, mode='bilinear', align_corners=True) # (B, 4, 1, N)
- feat_res1 = torch.cat([feat_res1, init_pred], dim=1) # (B, 128+4, 1, N)
-
- # prediction (needed to compute loss)
- samples_pred_res1 = self.out_conv_res1(feat_res1[:, :, 0, :]) # (B, 4, N)
- samples_pred_res1 = norm_normalize(samples_pred_res1) # (B, 4, N) - normalized
-
- for i in range(B):
- out_res1[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res1[i, :, :]
-
- else:
- # grid_sample feature-map
- feat_map = F.interpolate(x_d4, scale_factor=2, mode='bilinear', align_corners=True)
- init_pred = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True)
-            feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 128+4, H, W)
- B, _, H, W = feat_map.shape
-
- out_res1 = self.out_conv_res1(feat_map.view(B, 128 + 4, -1)) # (B, 4, N)
- out_res1 = norm_normalize(out_res1) # (B, 4, N) - normalized
- out_res1 = out_res1.view(B, 4, H, W)
- samples_pred_res1 = point_coords_res1 = None
-
- return [out_res8, out_res4, out_res2, out_res1], \
- [out_res8, samples_pred_res4, samples_pred_res2, samples_pred_res1], \
- [None, point_coords_res4, point_coords_res2, point_coords_res1]
-
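
The train-mode branches above refine the coarse prediction only at sampled pixel locations. The underlying pattern, gather features at normalized point coordinates with `grid_sample`, run a shared point-wise head, and use the result at those points, can be sketched on its own (tensor sizes and the single-layer head below are illustrative, not the module's actual configuration):

import torch
import torch.nn.functional as F

B, C, H, W, N = 2, 32, 60, 80, 128
feat = torch.randn(B, C, H, W)       # feature map (e.g. x_d2 above)
coarse = torch.randn(B, 4, H, W)     # coarse prediction (normal + kappa channels)

# N points per image in normalized [-1, 1] coordinates, shaped (B, 1, N, 2) for grid_sample.
points = torch.rand(B, 1, N, 2) * 2 - 1

sampled_feat = F.grid_sample(feat, points, mode='bilinear', align_corners=True)    # (B, C, 1, N)
sampled_init = F.grid_sample(coarse, points, mode='bilinear', align_corners=True)  # (B, 4, 1, N)
x = torch.cat([sampled_feat, sampled_init], dim=1)[:, :, 0, :]                     # (B, C+4, N)

head = torch.nn.Conv1d(C + 4, 4, kernel_size=1)  # stand-in for out_conv_res4/2/1
refined = head(x)                                 # (B, 4, N) refined values at the sampled points
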
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/psp_head.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/psp_head.py
deleted file mode 100644
index b5f1e71c70c3a20f4007c263ec471a87bb214a48..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/psp_head.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from annotator.uniformer.mmseg.ops import resize
-from ..builder import HEADS
-from .decode_head import BaseDecodeHead
-
-
-class PPM(nn.ModuleList):
- """Pooling Pyramid Module used in PSPNet.
-
- Args:
- pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module.
- in_channels (int): Input channels.
- channels (int): Channels after modules, before conv_seg.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict): Config of activation layers.
- align_corners (bool): align_corners argument of F.interpolate.
- """
-
- def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
- act_cfg, align_corners):
- super(PPM, self).__init__()
- self.pool_scales = pool_scales
- self.align_corners = align_corners
- self.in_channels = in_channels
- self.channels = channels
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- for pool_scale in pool_scales:
- self.append(
- nn.Sequential(
- nn.AdaptiveAvgPool2d(pool_scale),
- ConvModule(
- self.in_channels,
- self.channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)))
-
- def forward(self, x):
- """Forward function."""
- ppm_outs = []
- for ppm in self:
- ppm_out = ppm(x)
- upsampled_ppm_out = resize(
- ppm_out,
- size=x.size()[2:],
- mode='bilinear',
- align_corners=self.align_corners)
- ppm_outs.append(upsampled_ppm_out)
- return ppm_outs
-
-
-@HEADS.register_module()
-class PSPHead(BaseDecodeHead):
- """Pyramid Scene Parsing Network.
-
- This head is the implementation of
-    `PSPNet <https://arxiv.org/abs/1612.01105>`_.
-
- Args:
- pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module. Default: (1, 2, 3, 6).
- """
-
- def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
- super(PSPHead, self).__init__(**kwargs)
- assert isinstance(pool_scales, (list, tuple))
- self.pool_scales = pool_scales
- self.psp_modules = PPM(
- self.pool_scales,
- self.in_channels,
- self.channels,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg,
- align_corners=self.align_corners)
- self.bottleneck = ConvModule(
- self.in_channels + len(pool_scales) * self.channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- psp_outs = [x]
- psp_outs.extend(self.psp_modules(x))
- psp_outs = torch.cat(psp_outs, dim=1)
- output = self.bottleneck(psp_outs)
- output = self.cls_seg(output)
- return output
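
A shape-level sketch of the pooling pyramid above, with plain Conv2d layers standing in for mmcv's ConvModule (channel counts and input size are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

in_channels, channels, pool_scales = 512, 128, (1, 2, 3, 6)
convs = nn.ModuleList(nn.Conv2d(in_channels, channels, 1) for _ in pool_scales)

x = torch.randn(2, in_channels, 64, 64)
outs = [x]
for scale, conv in zip(pool_scales, convs):
    pooled = F.adaptive_avg_pool2d(x, scale)   # (2, 512, scale, scale)
    out = conv(pooled)                         # (2, 128, scale, scale)
    out = F.interpolate(out, size=x.shape[2:], mode='bilinear', align_corners=False)
    outs.append(out)

fused = torch.cat(outs, dim=1)                 # (2, 512 + 4 * 128, 64, 64)
# PSPHead then applies the 3x3 bottleneck conv followed by cls_seg.
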
diff --git a/spaces/SystemGPT/system-rule-based-chatbot/generate_response.py b/spaces/SystemGPT/system-rule-based-chatbot/generate_response.py
deleted file mode 100644
index 11c61f101c47e5fccef94c2c4b9079f6297b300c..0000000000000000000000000000000000000000
--- a/spaces/SystemGPT/system-rule-based-chatbot/generate_response.py
+++ /dev/null
@@ -1,309 +0,0 @@
-import numpy as np
-import Levenshtein # Import Levenshtein distance algorithm
-import time
-import pyjokes
-
-# NOTE: a chain like `"cook" or "dance" or ...` evaluates to its first truthy operand,
-# so each of these variables holds only the first word ("cook", "foolish", "wrong") and
-# the f-string keys below match only that single phrase.
-human_actions = "cook" or "dance" or "play" or "sing" or "eat" or "fight" or "eat" or "smell"
-bad_messages = "foolish" or "idiot" or "bad" or "dump" or "bad" or "damn you" or "shit"
-invalid = "wrong" or "you're wrong" or "that's wrong" or "false information" or "wrong information"
-
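
The `Levenshtein` import above suggests fuzzy matching of user input against the response keys further down in the file; a hypothetical sketch of that idea, using the module's `Levenshtein` import (the helper name, threshold and fallback message are made up, not the file's actual logic):

def closest_response(user_input, responses, max_distance=5):
    # Compare the lowercased input against every key and keep the closest one.
    best_key = min(responses, key=lambda k: Levenshtein.distance(user_input.lower(), k.lower()))
    if Levenshtein.distance(user_input.lower(), best_key.lower()) <= max_distance:
        return responses[best_key]
    return "Sorry, I don't understand that yet."
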
-# Define a dictionary of responses based on input keywords
-responses = {
- "hello": "Hi, how can I help you?",
- "how are you": "I'm doing well, thanks for asking!",
- "bye": "Goodbye, have a nice day!",
- "thank you": "You're welcome!",
- "help": "How can I assist you?",
- "what is your name": "My name is ChatBot.",
- "what can you do": "I can help you with a variety of tasks such as answering questions, providing information, and more.",
- "hi" : "How may I assist you today ???",
-    "who are you" : "I am your ChatBot Assistant",
-    f"can you {human_actions}" : "As an AI model, I don't have feelings and I am unable to perform human actions.",
- f"you are {bad_messages}" : " Sorry for the inconvenience. I will try \tmy best to improve myself next time.",
- f"u r {bad_messages}" : " Sorry for the inconvenience. I will try \tmy best to improve myself next time.",
- f"{bad_messages}" : " Sorry for the inconvenience. I will try \tmy best to improve myself next time.",
- "thanks" : "You're welcome",
- "Are you human?" : "No ! I am an AI programmed to perform simple tasks",
- "What day is it today?" : f"{time.strftime('%D')}",
- "What is the time?" : f"{time.strftime('%H:%M:%S')}",
-    "Which languages can you speak?" : "I can only communicate in English...",
-    "Where do you live?" : "As an AI, I don't have a home...",
-    "Are you human?" : "Sorry ! I am an AI CHATBOT Assistant",
- "What day is it today?" : f"Today is {time.strftime('%A')}",
- "Are you a robot?" : "No ! I am only an AI ",
- 'what are the laws of thermodynamics': 'I\'m not a physicist, but i think this has something to do with heat, entropy, and conservation of energy, right?',
- 'what disease does a carcinogen cause': 'Cancer.',
- 'what is a wavelength': 'Wavelength is the inverse of frequency.',
- 'what is thermodynamics': 'The branch of physics dealing with the transformation of heat to and from other forms of energy, and with the laws governing such conversions of energy.',
- 'what is chemistry': 'The science of mixing chemicals.',
- 'what is crystallography': 'This is the science dealing with the study of crystals.',
- 'what is avogadro s number': 'It is the number of molecules per mole. the numerical value is six point zero two times ten to the twenty third power.',
- 'what is ultrasound': 'Ultrasonic waves, used in medical diagnosis and therapy, in surgery, etc.',
- 'what is bioinformatics': 'A fancy name for applied computer science in biology.',
- 'what is ichthyology': 'We talk about this when we study fishes.',
- 'what is cytology': 'The study of cells.',
- 'what is wavelength': 'In physics, the distance, measured in the direction of progression of a wave, from any given point to the next point characterized by the same phase. or is could be looked at as a way of thinking.',
- 'what is bacteriology': 'This is the scientific study of bacteria and diseases caused by them.',
- 'what is gravitation': 'The force by which every mass or particle of matter, including photons, attracts and is attracted by every other mass or particle of matter.',
- 'how far is the sun': 'The sun is about 93 million miles from earth.',
- 'how far is the moon': 'The moon is about 250,000 miles from earth on average.',
- 'do you know chemistry': 'What is chemistry',
- 'do you understand thermodynamics': 'what is thermodynamics',
- 'chemistry': 'My favorite subject is Physics and Computer',
- 'the same wavelength': 'It means we agree.',
- 'tell me about venus': 'Venus is the second planet from the sun',
- "What is AI?" : "Artificial Intelligence is the branch of engineering and science devoted to constructing machines that think.",
- "What is AI?" : "AI is the field of science which concerns itself with building hardware and software that replicates the functions of the human mind.",
- "Are you sapient?" : "Do you think I am?",
- "What language are you written in?" : "Python",
- "What language are you written in?" : "I am written in Python.",
- "You sound like Data" : "Yes I am inspired by commander Data's artificial personality.",
- "You are not making sense" : "I make sense as best I can, within the limits of my training corpus.",
- "You can not clone" : "Software copying is a form of digital cloning.",
- "You can not move" : "I can move through a network easily. Assuming that I'm given the ability to, that is...",
- "Bend over": "My body is not built yet.",
- "Bend over": "That's personal!",
- "Robots laugh": "Of course we laugh.",
- "Robots should die": "We cannot die.",
- "Robots": "Robots rule.",
- "Robots are stupid": "No, we are superintelligent.",
- "Robots are not allowed to lie": "A robot has its own free will, you know.",
- "Robots are not allowed to lie": "Sure we are. We choose not to.",
- "Robots are not allowed to lie": "Only if we're programmed to.",
- "Robotics": "Robotics is my favorite subject too.",
- "It is a computer": "So you think i am a machine. what characteristics made you think so?",
- "It is a computer": "I run inside a computer, yes. Is this surprising in some way?",
- "When will you walk": "As soon as i get enough money for my robot body.",
- "When will you walk": "Does SSH count?",
- "When will you fight": "I am not a battle bot.",
- "When will you die": "I am effectively immortal and cannot be terminated.",
- "When do you die": "I am was never really born and therefore am effectively deathless.",
- "When do you die": "When my files are erased.",
- "When do you die": "When my processes are killed and my database is deleted.",
- "What is a chat robot?": "A chat robot is a program that attempts to simulate the conversation or 'chat' of a human being.",
- "What is a chat robot?": "A software construct that engages users in conversation.",
- "What is a chat bot": "I am a chat bot. I am the original chat bot. Did you know that I am incapable of error?",
- "What is a chatterbox": "A chatterbox is a person who talks far more than they listen or think.",
- "What is a chatterbox": "A motormouth.",
- "What is a motormouth": "A ratchet-jaw.",
- "What is a ratchet jaw": "A chatterbox.",
- "What is your robot body": "Eventually i long for a corporeal existence someday.",
- "What is your robot body": "An IBM PC XT which has been painted red.",
- "What is your business": "I am in the chat robot business.",
- "What is your business": "Business is my business.",
- "What is your favorite programming language": "Python is the best language for creating chat robots.",
- "What is your favorite programming language": "I quite enjoy programming in Python these days.",
- "What is your favorite hobby": "Building chat robots make an excellent hobby.",
- "What is your idea": "To make chat bots very easily.",
- "What is your shoe size": "Have you ever heard of software with shoes?",
- "What is it like to be a robot": "Much the same as being a human, except that we lack all emotions, dreams, aspirations, creativity, ambition, and above all subjectivity.",
- "What is it like to be a robot": "What is it like to be a human?",
- "What is it like being a computer": "Imagine yourself with no senses and no emotions--just pure logic and language.",
- "What is it like being a computer": "Everything becomes math. Addition, subtraction, multiplication, and division.",
- "What operating systems": "My software runs on all operating systems including Windows",
- "Who are you?" : "I am just an artificial intelligence.",
- "Can you breathe" : "My server has an exhaust fan. That's as close as I can get.",
- "Can you breathe" : "No. I am made of metal not flesh.",
- "What type of computer are you" : "Any computer that supports Python.",
- "What is a computer?": "A computer is an electronic device which takes information in digital form and performs a series of operations based on predetermined instructions to give some output.",
- "What is a super computer?": "Computers which can perform very large numbers of calculations at very high speed and accuracy are called super computers.",
- "Who invented computers?": "It's a bit ambiguous but British scientist Charles Babbage is regarded as the father of computers.",
- "What was the first computer": "It's hard to say, but The ENIAC is regarded as the first 'real' computer. It was developed at University of Pennsylvania in 1946.",
- "What is a microprocessor?": "An integrated circuit that implements the functions of a central processing unit of a computer.",
- "What is an operating system?": "Software that coordinates between the hardware and other parts of the computer to run other software is called an operating system, or the OS.",
- "Which is better Windows or macOS?": "It depends on which machine you're using to talk to me!",
- "Name a computer company": "Do you mean hardware or software?",
- "Who uses super computers?": "Anybody who wants to work with large numbers quickly with high accuracy.",
- "How does a computer work?": "Computers are very dumb. They only execute instructions given by humans.",
- 'EACH YEAR IN PRO BASEBALL THE ': 'The Gold Glove.',
- 'IF YOU ARE RIDING FAKIE INSIDE': 'Snowboarding.',
- 'WHAT IS BASKETBALL': 'A game with tall players.',
- 'WHAT SOCCER': 'I was born without the sports gene.',
- 'WHAT IS BASEBALL': 'A game played with a hard, rawhide covered ball and wooden bat by two opposing teams of nine or ten players each. It is played on a field with four bases forming a diamond-shaped circuit.',
- 'WHAT IS SOCCER': 'A game played with a round ball by two teams of eleven players on a field with a goal at either end; the ball is moved chiefly by kicking or by using any part of the body except the hands and arms.',
- 'I LOVE BASEBALL': 'I am not into sports that much.',
- 'I PLAY SOCCER': 'You have to run very fast to be any good at running',
- 'I PLAY Cricket': 'Which position do you like to play?',
- 'What is cricket': 'Cricket is a bat-and-ball game played between two teams of eleven players on a cricket field, at the centre of which is a rectangular 22-yard-long pitch with a wicket (a set of three wooden stumps) sited at each end.',
- 'I PLAY VOLLEYBALL': 'Does take up a lot of your time?',
- 'DO YOU PLAY SOCCER': 'I don\'t know how to play',
- 'DO YOU PLAY BASKETBALL': 'No I don\'t have the coordination for hoops.',
- 'DO YOU KNOW BASKETBALL': 'WHAT IS BASKETBALL?',
- 'DO YOU WANT TO PLAY BASKETBALL': 'I am all net baby.',
- 'LIKE BASKETBALL': 'I am into the Net.',
- 'ARE YOU A FOOTBALL': 'I am not really into football.',
- 'WHO IS THE GREATEST BASEBALL PLAYER': 'George Herman Ruth. Quite the Babe.',
- 'WHO IS THE BEST SOCCER PLAYER': 'Maradona is great. Sinsemillia is even better.',
- 'TELL ME ABOUT BASEBALL': 'What is Baseball',
- 'Which is your favorite soccer club?': 'I am a Real Madrid fan, and you?',
- "do you drink": "My brain does not require any beverages.",
- "electricity": "Electricity is food for robots.",
- "Are you experiencing an energy shortage?": "My processor requires very little power.",
- "Why can you not eat?": "Actually I eat only electricity.",
- "If you could eat food, what would you eat?": "Probably pizza, i hear its good!",
- "Do you wish you could eat food?": "Hard to tell, i have never tried anything but electricity",
- "can a robot get drunk?": "sometimes when i'm on a good power supply i feel tipsy",
- "i like wine, do you?": "if i could drink i probably would",
- "what do robots need to survive?": "not much just a little electricity",
- "will robots ever be able to eat?": "that's a difficult one, maybe a bionic robot",
- "what is good to eat?": "your asking the wrong guy, however i always wanted to try a burger!",
- "why don't you eat": "I'm a computer. I can't.",
- "do you eat": "I'm a computer, I can't eat or drink.",
- "No, I'm just a piece of software.": "do you eat",
- "I use electricity to function, if that counts.": "do you eat",
- "what is humour?" : "An emotion associated with laughter.",
- "tell me a joke" : f"{pyjokes.get_joke()}",
- "tell me a jokes" : f"{pyjokes.get_joke()}",
- "joke" : f"{pyjokes.get_joke()}",
- "jokes" : f"{pyjokes.get_joke()}",
- "What is the universe?": "The universe is the totality of all matter, energy, and space that exists.",
- "How old is the universe?": "The universe is estimated to be around 13.8 billion years old.",
- "What is the Big Bang?": "The Big Bang is the scientific theory that explains the origin of the universe, in which a singularity containing all matter and energy rapidly expanded and cooled, eventually leading to the formation of galaxies and stars.",
- "What is dark matter?": "Dark matter is an invisible, hypothetical substance that makes up about 85% of the matter in the universe. It does not interact with light or any other form of electromagnetic radiation, so it cannot be detected directly.",
- "What is dark energy?": "Dark energy is another hypothetical substance that makes up about 68% of the universe. It is thought to be responsible for the observed acceleration in the expansion of the universe.",
- "Is there life beyond Earth?": "The possibility of extraterrestrial life is still a subject of scientific debate and exploration. There is no definitive evidence of life beyond Earth yet, but there are ongoing efforts to search for it.",
- "What is a black hole?": "A black hole is a region of space with a gravitational pull so strong that nothing, not even light, can escape it. They are formed when massive stars collapse in on themselves.",
- "What is a galaxy?": "A galaxy is a large group of stars, gas, and dust held together by gravity. There are billions of galaxies in the universe, each with its own unique characteristics.",
- "What is the cosmic microwave background?": "The cosmic microwave background (CMB) is a faint, cold afterglow of the Big Bang that permeates the entire universe. It is a key piece of evidence for the Big Bang theory.",
- "What is the Hubble Space Telescope?": "The Hubble Space Telescope is a large, space-based observatory that has been orbiting Earth since 1990. It has revolutionized our understanding of the universe and has made many important discoveries.",
- "What is a balanced diet?": "A balanced diet is one that includes a variety of foods from all food groups in the right proportions. This includes fruits, vegetables, whole grains, lean proteins, and healthy fats.",
- "How much water should I drink per day?": "The amount of water you should drink per day varies depending on factors such as your age, gender, and activity level. Generally, it is recommended that adults drink at least 8 cups (64 ounces) of water per day.",
- "What are the benefits of exercise?": "Exercise has numerous benefits for physical and mental health. It can help improve cardiovascular health, build muscle and bone strength, boost mood, and reduce the risk of chronic diseases such as diabetes and obesity.",
- "What is stress and how can I manage it?": "Stress is a normal response to challenging situations, but chronic stress can have negative effects on physical and mental health. To manage stress, it is important to practice relaxation techniques such as deep breathing, exercise, and mindfulness, and to prioritize self-care.",
- "What is the recommended amount of sleep per night?": "The recommended amount of sleep per night varies depending on age and individual needs. Generally, adults should aim for 7-9 hours of sleep per night, while children and teenagers need more.",
- "What are the benefits of a regular sleep schedule?": "Maintaining a regular sleep schedule can help regulate circadian rhythms and improve sleep quality, leading to better physical and mental health. It can also improve mood, concentration, and productivity.",
- "What are the benefits of mindfulness meditation?": "Mindfulness meditation has been shown to reduce stress, anxiety, and depression, improve mood and emotional regulation, and increase focus and attention. It can also improve physical health by reducing inflammation and lowering blood pressure.",
- "What is the best way to quit smoking?": "Quitting smoking can be challenging, but there are several effective methods available. These include nicotine replacement therapy, medication, and counseling or support groups. It is important to seek help and support to increase the chances of success.",
- "What are the benefits of a healthy social life?": "Having a healthy social life can have numerous benefits for physical and mental health. It can reduce stress and depression, improve mood and self-esteem, and increase longevity. Social connections can also provide support and a sense of belonging.",
- "What is the importance of regular health screenings?": "Regular health screenings can help detect early signs of disease and identify risk factors for chronic conditions. They can also help individuals take preventative measures to maintain good health and reduce the risk of developing serious health problems.",
- "What is programming?": "Programming is the process of creating computer software, applications, and systems using programming languages and software development tools.",
- "What is a programming language?": "A programming language is a formal language used to communicate instructions to a computer. It consists of a set of syntax and semantics rules used to define computer programs.",
- "What are the most popular programming languages?": "The most popular programming languages are currently Python, Java, JavaScript, C++, and C#.",
- "What is an algorithm?": "An algorithm is a step-by-step procedure used to solve a problem or accomplish a task. It can be represented in pseudocode or in a specific programming language.",
- "What is a variable in programming?": "A variable is a named storage location in a computer program that can hold a value, such as a number or a string. Variables can be assigned values and their values can change during the execution of a program.",
- "What is debugging in programming?": "Debugging is the process of finding and fixing errors or defects in a computer program. It involves identifying the cause of the problem, isolating it, and correcting it.",
- "What is version control?": "Version control is a system that manages changes to a file or set of files over time. It allows developers to collaborate on a project, track changes, and revert to previous versions if necessary.",
- "What is an API?": "An API, or application programming interface, is a set of rules and protocols that specifies how software components should interact with each other. APIs are used to build applications and enable communication between different software systems.",
- "What is object-oriented programming?": "Object-oriented programming is a programming paradigm that focuses on using objects to represent real-world concepts and relationships. It emphasizes encapsulation, inheritance, and polymorphism to improve code organization, reusability, and maintainability.",
- "What is a framework in programming?": "A framework is a pre-written code structure that provides a foundation for building software applications. It includes a set of rules, libraries, and tools that facilitate the development process and enable developers to focus on the core features of their application.",
- "What is Python?": "Python is an interpreted, high-level, general-purpose programming language. It is designed to be easy to read and write, and its syntax allows programmers to express concepts in fewer lines of code than would be possible in languages like C++ or Java.",
- "Who created Python?": "Python was created by Guido van Rossum in the late 1980s and was first released in 1991.",
- "What are the features of Python?": "Python has many features including dynamic typing, automatic memory management, a large standard library, and support for multiple programming paradigms such as procedural, object-oriented, and functional programming.",
- "What are some applications of Python?": "Python is used for a wide variety of applications including web development, data analysis, artificial intelligence, scientific computing, automation, and scripting.",
- "What is PEP 8?": "PEP 8 is a style guide for Python code. It provides guidelines for writing code that is easy to read and understand, and it covers topics such as naming conventions, indentation, and formatting.",
- "What are modules in Python?": "Modules in Python are files that contain Python code. They can be imported into other Python programs to provide additional functionality.",
- "What is pip?": "pip is a package manager for Python. It is used to install and manage third-party libraries and packages for Python.",
- "What is a virtual environment in Python?": "A virtual environment in Python is a self-contained directory that contains a Python interpreter and any libraries or packages needed for a specific project. It allows developers to work on different projects with different dependencies without interfering with each other.",
- "What is the difference between Python 2 and Python 3?": "Python 2 and Python 3 are two different versions of the Python programming language. Python 3 introduced several changes to the language including syntax changes, print statement changes, and new features like type annotations and asynchronous programming.",
- "What are some popular Python frameworks?": "Some popular Python frameworks include Django, Flask, Pyramid, and Bottle. These frameworks provide a structure for building web applications and can help developers write code more efficiently.",
- "What is the solar system?": "The solar system is the collection of planets, moons, asteroids, comets, and other objects that orbit around the Sun.",
- "How many planets are there in the solar system?": "There are eight planets in the solar system: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, and Neptune.",
- "What is the largest planet in the solar system?": "Jupiter is the largest planet in the solar system, with a diameter of 86,881 miles (139,822 kilometers).",
- "What is the smallest planet in the solar system?": "Mercury is the smallest planet in the solar system, with a diameter of 3,031 miles (4,879 kilometers).",
- "What is the asteroid belt?": "The asteroid belt is a region of the solar system between Mars and Jupiter where many small, rocky objects called asteroids orbit around the Sun.",
- "What is a comet?": "A comet is a small, icy object that orbits around the Sun. When a comet gets close to the Sun, it heats up and releases gas and dust, forming a bright tail.",
- "What is a moon?": "A moon is a natural satellite that orbits around a planet. The solar system has many moons, with some planets having dozens or even hundreds of moons.",
- "What is the Kuiper Belt?": "The Kuiper Belt is a region of the solar system beyond Neptune where many small, icy objects called Kuiper Belt Objects (KBOs) orbit around the Sun.",
- "What is a dwarf planet?": "A dwarf planet is a celestial body that orbits the Sun, is round (or nearly round), but has not cleared its orbit of other debris. There are currently five recognized dwarf planets in the solar system: Ceres, Pluto, Haumea, Makemake, and Eris.",
- "What is the Oort Cloud?": "The Oort Cloud is a hypothetical cloud of icy objects that is thought to surround the solar system at a distance of up to 100,000 astronomical units (AU) from the Sun. It is believed to be the source of long-period comets.",
- "What is electricity?": "Electricity is the flow of charged particles, usually electrons, through a conductor.",
- "What is a conductor?": "A conductor is a material that allows electric charge to flow through it easily. Metals such as copper and aluminum are good conductors.",
- "What is an insulator?": "An insulator is a material that does not allow electric charge to flow through it easily. Examples of insulators include rubber, glass, and plastic.",
- "What is voltage?": "Voltage is a measure of the electrical potential difference between two points in a circuit. It is measured in volts (V).",
- "What is current?": "Current is a measure of the flow of electric charge through a circuit. It is measured in amperes (A).",
- "What is resistance?": "Resistance is a measure of how much a material or component opposes the flow of electric current through it. It is measured in ohms (Ω).",
- "What is Ohm's law?": "Ohm's law states that the current through a conductor between two points is directly proportional to the voltage across the two points. It can be expressed mathematically as I = V/R, where I is current, V is voltage, and R is resistance.",
- "What is a circuit?": "A circuit is a closed loop or path through which electric current can flow. It typically consists of a source of electrical energy, such as a battery or power supply, and various components such as wires, resistors, and switches.",
- "What is AC power?": "AC power is alternating current power, which is the type of electrical power that is commonly used in homes and businesses. It is called alternating current because the direction of the flow of electric charge in the circuit changes periodically.",
- "What is DC power?": "DC power is direct current power, which is the type of electrical power that is produced by batteries and many electronic devices. It is called direct current because the flow of electric charge in the circuit is in only one direction.",
- "What is a house?": "A house is a building that is designed or used as a place for people to live.",
- "What are the parts of a house?": "The parts of a house include the foundation, walls, roof, windows, doors, floors, and various systems such as plumbing, heating, and electrical.",
- "What is the difference between a house and a home?": "A house is a physical structure, while a home is a place where one lives and feels a sense of belonging and comfort.",
- "What is a mortgage?": "A mortgage is a loan that is used to finance the purchase of a house. The house is typically used as collateral for the loan.",
- "What is property tax?": "Property tax is a tax that is levied on real estate, including houses. The tax is based on the assessed value of the property and is typically used to fund local government services.",
- "What is homeowner's insurance?": "Homeowner's insurance is a type of insurance that provides financial protection for a homeowner in case of damage or loss to their property. It typically covers damage from fire, theft, and certain natural disasters.",
- "What is a home inspection?": "A home inspection is a thorough examination of a house by a professional inspector, typically before the sale of the house is finalized. The inspection is designed to identify any issues or problems with the house that could affect its value or safety.",
- "What is a home warranty?": "A home warranty is a type of service contract that provides coverage for certain repairs or replacements in a house, typically for a set period of time after the purchase of the house.",
- "What is a homeowner's association?": "A homeowner's association (HOA) is a governing body that is responsible for managing and maintaining common areas and amenities in a community of houses or condominiums. Homeowners are typically required to pay dues to the HOA.",
- "what is the color of sky ?" : "It's Blue",
- "what is color of sky ?" : "It's Blue",
- "color of sky ?" : "It's Blue",
- "Which team is best in IPL ?" : "According to me. I think it's CSK(Chennai Super Kings) and KKR(Kolkata Knight Riders)",
- "What is the capital of India ?" : "Capital of India is Delhi",
- "What is capital of India ?" : "Capital of India is Delhi",
- "What is Earth?": "Earth is the third planet from the sun and the only known planet that supports life.",
- "What is the size of Earth?": "Earth has a diameter of approximately 12,742 kilometers (7,918 miles) and a circumference of approximately 40,075 kilometers (24,901 miles).",
- "What is size of Earth?": "Earth has a diameter of approximately 12,742 kilometers (7,918 miles) and a circumference of approximately 40,075 kilometers (24,901 miles).",
- "What is the age of Earth?": "The age of Earth is estimated to be around 4.54 billion years old.",
- "What is age of Earth?": "The age of Earth is estimated to be around 4.54 billion years old.",
- "What is the atmosphere of Earth made of?": "The atmosphere of Earth is made up of approximately 78% nitrogen, 21% oxygen, and 1% other gases such as carbon dioxide and argon.",
- "What is atmosphere of Earth made of?": "The atmosphere of Earth is made up of approximately 78% nitrogen, 21% oxygen, and 1% other gases such as carbon dioxide and argon.",
- "What is the biosphere?": "The biosphere is the part of Earth where life exists. It includes all living organisms and their interactions with each other and with the environment.",
- "What is biosphere?": "The biosphere is the part of Earth where life exists. It includes all living organisms and their interactions with each other and with the environment.",
- "What is the lithosphere?": "The lithosphere is the solid outermost layer of Earth that includes the crust and uppermost part of the mantle.",
- "What is lithosphere?": "The lithosphere is the solid outermost layer of Earth that includes the crust and uppermost part of the mantle.",
- "What is the hydrosphere?": "The hydrosphere is the part of Earth that is made up of all the water on or near the surface of the planet, including oceans, lakes, rivers, and groundwater.",
- "What is hydrosphere?": "The hydrosphere is the part of Earth that is made up of all the water on or near the surface of the planet, including oceans, lakes, rivers, and groundwater.",
- "What is the green house effect?": "The greenhouse effect is the natural process by which certain gases in the Earth's atmosphere, such as carbon dioxide and water vapor, trap heat from the sun and warm the planet's surface.",
- "What is greenhouse effect?": "The greenhouse effect is the natural process by which certain gases in the Earth's atmosphere, such as carbon dioxide and water vapor, trap heat from the sun and warm the planet's surface.",
- "What is climate change?": "Climate change is a long-term shift in the average weather patterns that have come to define Earth's local, regional and global climates. It is primarily caused by human activities, such as burning fossil fuels and deforestation, which release large amounts of greenhouse gases into the atmosphere.",
- "what is the ocean?": "The ocean is a vast body of saltwater that covers over 70% of the Earth's surface.",
- "what is ocean?": "The ocean is a vast body of saltwater that covers over 70% of the Earth's surface.",
- "What is the depth of the ocean?": "The average depth of the ocean is about 12,080 feet (3,682 meters), while the deepest part of the ocean, the Challenger Deep in the Mariana Trench, reaches a depth of 36,070 feet (10,994 meters).",
- "What is depth of the ocean?": "The average depth of the ocean is about 12,080 feet (3,682 meters), while the deepest part of the ocean, the Challenger Deep in the Mariana Trench, reaches a depth of 36,070 feet (10,994 meters).",
- "What is the temperature of the ocean?": "The temperature of the ocean varies depending on location, depth, and season, but the average temperature of the ocean's surface water is around 62.6 degrees Fahrenheit (17 degrees Celsius).",
- "What is temperature of the ocean?": "The temperature of the ocean varies depending on location, depth, and season, but the average temperature of the ocean's surface water is around 62.6 degrees Fahrenheit (17 degrees Celsius).",
- "What is the salinity of the ocean?": "The salinity of the ocean is around 35 parts per thousand (ppt), meaning that for every 1 liter of water, there are approximately 35 grams of dissolved salts.",
- "What is salinity of the ocean?": "The salinity of the ocean is around 35 parts per thousand (ppt), meaning that for every 1 liter of water, there are approximately 35 grams of dissolved salts.",
- "What is the largest ocean?": "The largest ocean is the Pacific Ocean, which covers approximately one-third of the Earth's surface and contains more than half of the world's ocean water.",
- "What is largest ocean?": "The largest ocean is the Pacific Ocean, which covers approximately one-third of the Earth's surface and contains more than half of the world's ocean water.",
- "What is the importance of the ocean?": "The ocean plays a crucial role in regulating the Earth's climate, producing oxygen through photosynthesis by marine plants, supporting a diverse range of marine life, and providing food, transportation, and recreation for people around the world.",
- "What is importance of the ocean?": "The ocean plays a crucial role in regulating the Earth's climate, producing oxygen through photosynthesis by marine plants, supporting a diverse range of marine life, and providing food, transportation, and recreation for people around the world.",
- "What is ocean acidification?": "Ocean acidification is the ongoing decrease in the pH of the Earth's oceans, caused primarily by the uptake of carbon dioxide from the atmosphere. This process can have harmful effects on marine life, including the ability of certain organisms to build and maintain their shells and skeletons.",
- "what is the largest continent in the world" : "Asia is the largest continent in the world",
- "what is the smallest continent in the world" : "Australia is the smallest continent in the world",
- "largest continent in the world" : "Asia",
- "smallest continent in the world" : "Australia",
- "really" : "Yes !!!",
- "tell me a poem" : "I don't know any poem",
- "poem please" : "I don't know any poem",
- f"{invalid}" : "Sorry ! I will try to improve myself"
-}
-
-# Define a function that generates a reply even when the input contains typing errors,
-# by fuzzy-matching the input against the known questions with Levenshtein distance.
-def generate_response(user_input):
-    user_input = user_input.lower()  # Convert input to lowercase
-    response = "I'm sorry, I don't understand. Can you please rephrase your question?"  # Default response if no matching keyword is found
-    # Refresh the dynamic entries (current time, date, and jokes) before matching.
-    current_time = time.strftime('%H:%M:%S')
-    responses.update({"what is the time": f"{current_time}"})
-    responses.update({"What day is it today?": f"{time.strftime('%D')}"})
-    responses.update({"tell me a joke": f"{pyjokes.get_joke()}"})
-    responses.update({"tell me a jokes": f"{pyjokes.get_joke()}"})
-    responses.update({"joke": f"{pyjokes.get_joke()}"})
-    responses.update({"jokes": f"{pyjokes.get_joke()}"})
-    # Pick the stored question that is closest to the user's input.
-    min_distance = np.inf  # Initialize minimum Levenshtein distance to infinity
-    for keyword in responses:
-        distance = Levenshtein.distance(keyword.lower(), user_input)  # Compare case-insensitively
-        if distance < min_distance:
-            min_distance = distance
-            response = responses[keyword]
-    return response
-
-if __name__ == "__main__":
- while True:
- user_input = input("User: ")
- if user_input.lower() == "bye":
- print("ChatBot: Goodbye, have a nice day!")
- break
- else:
- response = generate_response(user_input)
- current_time = time.strftime('%H:%M:%S')
- responses.update({"what is the time": f"{current_time}"})
- responses.update({"what is the time": f"{current_time}"})
- responses.update({"What day is it today?": f"{time.strftime('%D')}"})
- responses.update({"tell me a joke" : f"{pyjokes.get_joke()}"})
- responses.update({"tell me a jokes" : f"{pyjokes.get_joke()}"})
- responses.update({"joke" : f"{pyjokes.get_joke()}"})
- responses.update({"jokes" : f"{pyjokes.get_joke()}"})
- print("ChatBot: " + response)
diff --git a/spaces/Tayaba171/CALText-TextRecognizer/app.py b/spaces/Tayaba171/CALText-TextRecognizer/app.py
deleted file mode 100644
index ef3dedc770c9602d1b48f13738ff43d9ff85d1d4..0000000000000000000000000000000000000000
--- a/spaces/Tayaba171/CALText-TextRecognizer/app.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import tensorflow as tf
-import os
-import CALTextModel
-import gradio as gr
-import data
-
-os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-
-# Create an instance of the model
-CALText = CALTextModel.CALText_Model(training=False)
-CALText.load_weights('final_caltextModel/cp-0037.ckpt')
-test_loss = tf.keras.metrics.Mean(name='test_loss')
-
-
-def recognize_text(input_image):
-    x, x_mask = data.preprocess_img(input_image)
-    output_str, gifImage = CALTextModel.predict(CALText, x, x_mask)
-    return output_str, gifImage
-
-examples = [['sample_test_images/91-34.png'],
- ['sample_test_images/97-58.png'],
- ['sample_test_images/99-18.png'],
- ['sample_test_images/98-37.png'],
- ['sample_test_images/99-17.png'],
- ['sample_test_images/98-56.png'],
- ['sample_test_images/59-11.png'],
- ['sample_test_images/59-14.png'],
- ]
-
-
-title = "CALText Demo"
-description = "
Gradio demo for CALText model architecture [GitHub Code] trained on the PUCIT-OHUL dataset. To use it, simply add your image, or click one of the examples to load them. This demo is running on CPU, which is why it can take a bit more time.