| filename | text |
|---|---|
| 
	the-stack_106_32214 | 
	import pandas as pd
from dash_website.utils.aws_loader import load_csv
columns_to_take = [
    "id",
    "sex",
    "age_category",
    "sample",
    "aging_rate",
    "Age",
    "Biological_Age",
    "Ethnicity.White",
    "Ethnicity.British",
    "Ethnicity.Irish",
    "Ethnicity.White_Other",
    "Ethnicity.Mixed",
    "Ethnicity.White_and_Black_Caribbean",
    "Ethnicity.White_and_Black_African",
    "Ethnicity.White_and_Asian",
    "Ethnicity.Mixed_Other",
    "Ethnicity.Asian",
    "Ethnicity.Indian",
    "Ethnicity.Pakistani",
    "Ethnicity.Bangladeshi",
    "Ethnicity.Asian_Other",
    "Ethnicity.Black",
    "Ethnicity.Caribbean",
    "Ethnicity.African",
    "Ethnicity.Black_Other",
    "Ethnicity.Chinese",
    "Ethnicity.Other",
    "Ethnicity.Other_ethnicity",
    "Ethnicity.Do_not_know",
    "Ethnicity.Prefer_not_to_answer",
    "Ethnicity.NA",
]
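# The Ethnicity.* columns are one-hot indicator flags; the loop in __main__ below
# collapses them into a single space-separated "ethnicity" label per participant.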
if __name__ == "__main__":
    list_information = []
    for chamber_type in [3, 4]:
        information_raw = load_csv(
            f"page12_AttentionMapsVideos/RawVideos/files/AttentionMaps-samples_Age_Heart_MRI_{chamber_type}chambersRawVideo.csv",
            usecols=columns_to_take,
        )[columns_to_take].set_index("id")
        information_raw.drop(index=information_raw[information_raw["aging_rate"] != "normal"].index, inplace=True)
        information = pd.DataFrame(
            None,
            columns=["chamber", "sex", "age_group", "sample", "chronological_age", "biological_age", "ethnicity"],
            index=information_raw.index,
        )
        information["chamber"] = chamber_type
        information["sex"] = information_raw["sex"].str.lower()
        information["age_group"] = information_raw["age_category"]
        information["sample"] = information_raw["sample"]
        information["chronological_age"] = information_raw["Age"].round(1)
        information["biological_age"] = information_raw["Biological_Age"]
        ethnicity_columns = information_raw.columns[information_raw.columns.str.startswith("Ethnicity")]
        for id_participant in information_raw.index:
            ethnicities = information_raw.loc[id_participant, ethnicity_columns]
            information.loc[id_participant, "ethnicity"] = " ".join(
                column.split(".")[1] for column in ethnicities[ethnicities == 1].index
            )
        list_information.append(information.reset_index())
    pd.concat(list_information).reset_index(drop=True).to_feather("all_data/datasets/videos/information.feather") | 
| 
	the-stack_106_32215 | 
	# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, [email protected]
# All rights reserved
#
from ppmessage.core.apkinfo import ApkInfo
from ppmessage.core.ipainfo import IpaInfo
import traceback
import random
import sys
import io
import os
APP_NAME = "ppmessage"
if len(sys.argv) == 2:
    APP_NAME = sys.argv[1]
IOS_IPA_FILE = APP_NAME + ".ipa"
IOS_IPA_PATH = "../web/assets/static/yvertical/portal/resources/app/" + IOS_IPA_FILE
DOWNLOAD_APP_HTML = "../web/assets/static/yvertical/portal/download-app/download-app.html"
DOWNLOAD_APP_HTML_TEMPLATE = "../web/assets/static/yvertical/portal/download-app/download-app.html.template"
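# Flow: read the packaged .ipa, upload its manifest plist to the server (presumably
# for over-the-air install links), then render download-app.html from the template
# with the resulting plist URL.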
def _importIOSApp():
    _file_name = IOS_IPA_FILE
    _file_path = IOS_IPA_PATH
    _ipa = IpaInfo()
    _ipa.init(_file_path, _file_name)
    print(_ipa.getDistinctName())
    print(_ipa.getFriendlyName())
    print(_ipa.getVersionName())
    _ipa.putPList("ppmessage.cn", "root", "YVERTICAL1q2w3e4r5t")
    
    _plist_url = _ipa.getPListUrl("ppmessage.cn")
    print(_plist_url)
    return _plist_url
def _download_html(_ios_plist_url):
    _r = None
    # Read the template as UTF-8 text, substitute the plist URL and write the
    # rendered page back out as UTF-8.
    with io.open(DOWNLOAD_APP_HTML_TEMPLATE, "r", encoding="utf8") as _file:
        _r = _file.read()
        _r = _r.replace("{IOS_PLIST_URL}", _ios_plist_url)
    with io.open(DOWNLOAD_APP_HTML, "w", encoding="utf8") as _file:
        _file.write(_r)
def _import():
    _plist_url = _importIOSApp()
    _download_html(_plist_url)
    return
if __name__ == "__main__":
    import codecs
    # Map MySQL's "utf8mb4" charset name onto Python's UTF-8 codec.
    codecs.register(lambda name: codecs.lookup('utf8') if name == 'utf8mb4' else None)
    _import()
 | 
| 
	the-stack_106_32216 | 
	import time
import logging
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.model.load_balancing.cloud_service import *
from spaceone.inventory.connector.load_balancing import LoadBalancingConnector
from spaceone.inventory.model.load_balancing.cloud_service_type import CLOUD_SERVICE_TYPES
_LOGGER = logging.getLogger(__name__)
class LoadBalancingManager(GoogleCloudManager):
    connector_name = 'LoadBalancingConnector'
    cloud_service_types = CLOUD_SERVICE_TYPES
    def collect_cloud_service(self, params):
        _LOGGER.debug(f'** Load Balancing START **')
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse/ErrorResourceResponse
        """
        collected_cloud_services = []
        error_responses = []
        lb_id = ""
        secret_data = params['secret_data']
        load_bal_conn: LoadBalancingConnector = self.locator.get_connector(self.connector_name, **params)
        project_id = secret_data.get('project_id')
        load_balancers = []
        instance_groups = load_bal_conn.list_instance_groups()
        target_pools = load_bal_conn.list_target_pools()
        url_maps = load_bal_conn.list_url_maps()
        forwarding_rules = load_bal_conn.list_forwarding_rules()
        backend_services = load_bal_conn.list_back_end_services()
        backend_buckets = load_bal_conn.list_back_end_buckets()
        ssl_certificates = load_bal_conn.list_ssl_certificates()
        auto_scalers = load_bal_conn.list_auto_scalers()
        health_checks = load_bal_conn.list_health_checks()
        legacy_health_checks = []
        http_health_checks = load_bal_conn.list_http_health_checks()
        https_health_checks = load_bal_conn.list_https_health_checks()
        legacy_health_checks.extend(http_health_checks)
        legacy_health_checks.extend(https_health_checks)
        # proxies
        grpc_proxies = load_bal_conn.list_grpc_proxies()
        http_proxies = load_bal_conn.list_target_http_proxies()
        https_proxies = load_bal_conn.list_target_https_proxies()
        ssl_proxies = load_bal_conn.list_ssl_proxies()
        tcp_proxies = load_bal_conn.list_tcp_proxies()
        target_proxies, selective_proxies = self.get_all_proxy_list(grpc_proxies,
                                                                    http_proxies,
                                                                    https_proxies,
                                                                    ssl_proxies,
                                                                    tcp_proxies,
                                                                    forwarding_rules)
        lbs_from_proxy = self.get_load_balancer_from_target_proxy(backend_services, selective_proxies, project_id)
        lbs_from_url_map = self.get_load_balancer_from_url_maps(url_maps, backend_services, backend_buckets, project_id)
        lbs_from_target_pool = self.get_load_balancer_from_target_pools(target_pools, project_id)
        load_balancers.extend(lbs_from_proxy)
        load_balancers.extend(lbs_from_url_map)
        load_balancers.extend(lbs_from_target_pool)
        for load_balancer in load_balancers:
            try:
                lb_id = load_balancer.get('id')
                lb_type = load_balancer.get('lb_type')
                health_checks_vo = load_balancer.get('heath_check_vos', {})
                health_self_links = health_checks_vo.get('health_check_self_link_list', [])
                ##################################
                # Set Target Proxies
                ##################################
                if lb_type != 'target_proxy':
                    matched_target_proxies, matched_certificates = self.get_matched_target_proxies(load_balancer,
                                                                                                   target_proxies,
                                                                                                   ssl_certificates)
                    load_balancer.update({'target_proxies': matched_target_proxies,
                                          'certificates': matched_certificates})
                ##################################
                # Set forwarding Rules to Load Balancer
                ##################################
                matched_forwarding_rules = self.get_matched_forwarding_rules(load_balancer, forwarding_rules)
                load_balancer.update({'forwarding_rules': matched_forwarding_rules})
                ##################################
                # Set Health Check to Load Balancer
                ##################################
                if len(health_self_links) > 0:
                    filter_check_list = list(set(health_checks_vo.get('health_check_list', [])))
                    filter_check_self_link_list = list(set(health_checks_vo.get('health_check_self_link_list', [])))
                    matched_health_list = self._get_matched_health_checks(filter_check_self_link_list, health_checks)
                    if len(matched_health_list) == len(filter_check_list):
                        load_balancer['heath_check_vos'].update({
                            'health_check_list': filter_check_list,
                            'health_check_self_link_list': filter_check_self_link_list,
                            'health_checks': matched_health_list
                        })
                    else:
                        matched_health_legacy_list = self._get_matched_health_checks(filter_check_self_link_list,
                                                                                     legacy_health_checks)
                        matched_health_list.extend(matched_health_legacy_list)
                        load_balancer['heath_check_vos'].update({
                            'health_check_list': filter_check_list,
                            'health_check_self_link_list': filter_check_self_link_list,
                            'health_checks': matched_health_list
                        })
                ############################
                # Set Front to Load Balancer
                ############################
                frontends = self.get_front_from_loadbalancer(load_balancer)
                frontend_display = self._get_frontend_display(frontends)
                if len(frontends) > 0:
                    load_balancer.update({'frontends': frontends,
                                          'frontend_display': frontend_display})
                #############################
                # Set Backend to Load Balancer
                #############################
                backend_vo = {}
                if lb_type in ['target_pool']:
                    backend_vo.update({
                        'type': 'target_pool',
                        'target_pool_backend': self.get_backend_from_target_pools(load_balancer, instance_groups)
                    })
                elif lb_type in ['url_map', 'target_proxy']:
                    key = 'proxy_backend' if lb_type == 'target_proxy' else 'url_map_backend'
                    backends = self.get_backend_from_url_map_and_proxy(load_balancer, instance_groups, auto_scalers)
                    backend_vo.update({
                        'type': 'proxy' if lb_type == 'target_proxy' else 'url_map',
                        key: backends
                    })
                load_balancer.update({'backends': backend_vo})
                ########################################
                # Set Backend Tab to LoadBalancer
                ########################################
                backends_tab = self._get_backend_tabs(load_balancer)
                load_balancer.update({
                    'backend_tabs': backends_tab
                })
                ########################################
                # Set Backend Display
                ########################################
                backend_display = self._get_backend_display(load_balancer)
                load_balancer.update({
                    'backends_display': backend_display
                })
                '''
                Choose the appropriate region and lead protocol for this load balancer.

                Protocols come from:
                    1. the frontend's forwarding rules
                    2. the backend's endpoint protocol

                Region comes from:
                    - the backend service's backends
                '''
                lead_protocol = self._get_lead_protocol(load_balancer)
                region = self._get_proper_region(load_balancer)
                load_balancer.update({
                    'lead_protocol': lead_protocol,
                    'region': region
                })
                refer_link = self._get_refer_link(load_balancer, project_id)
                _name = load_balancer.get('name', '')
                load_balance_data = LoadBalancing(load_balancer, strict=False)
                lb_resource = LoadBalancingResource({
                    'name': _name,
                    'region_code': region,
                    'data': load_balance_data,
                    'reference': ReferenceModel(load_balance_data.reference(refer_link))
                })
                self.set_region_code(region)
                collected_cloud_services.append(LoadBalancingResponse({'resource': lb_resource}))
            except Exception as e:
                _LOGGER.error(f'[collect_cloud_service] => {e}')
                error_response = self.generate_resource_error_response(e, 'NetworkService', 'LoadBalancing', lb_id)
                error_responses.append(error_response)
        _LOGGER.debug(f'** Load Balancing Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services, error_responses
    def get_backend_from_target_pools(self, load_balancer, instance_group):
        for pool in load_balancer.get('target_pools', []):
            ratio = pool.get('failover_ratio')
            target_backend = {
                'name': pool.get('name'),
                'region': self._get_matched_last_target('region', pool),
                'session_affinity': pool.get('session_affinity'),
                'health_check': self._get_matched_last_target_in_list(pool.get('health_checks', [])),
                'backup_pool': self._get_matched_last_target('backup_pool', pool) if pool.get('backup_pool') else '',
                'fail_over': 0.0 if ratio is None else float(ratio),
                'fail_over_display': '0.0 %' if ratio is None else f'{str(float(ratio) * 100)} %',
                'backend_instances': self.get_instance_for_back_ends(load_balancer, pool, instance_group)
            }
            return target_backend
    def get_backend_from_url_map_and_proxy(self, load_balancer, instance_group, auto_scalers):
        target_backend_list = []
        backend_services = load_balancer.get('backend_services', [])
        backend_buckets = load_balancer.get('backend_buckets', [])
        for service in backend_services:
            selected_port = self.get_selected_port(service.get('healthChecks'), load_balancer.get('heath_check_vos'))
            for backend_service_back in service.get('backends', []):
                balancing_mode = backend_service_back.get('balancingMode')
                balancing_mode_display = self._get_balancing_mode_display(backend_service_back)
                auto_scaler_vo = self._get_selected_instance_group(backend_service_back, instance_group, auto_scalers)
                target_backend_svc = {
                    'name': service.get('name'),
                    'type': 'Instance group',
                    'instance_name': self._get_matched_last_target('group', backend_service_back),
                    'region': self.get_region_from_group(backend_service_back.get('group', '')),
                    'cloud_cdn': 'enabled' if service.get('enableCdn') else 'disabled',
                    'end_point_protocol': service.get('protocol'),
                    'named_port': service.get('portName'),
                    'timeout': str(service.get('timeoutSec')) + ' seconds',
                    'health_check': self._get_matched_last_target_in_list(service.get('healthChecks', [])),
                    'capacity': float(backend_service_back.get('capacityScaler', 0.0)),
                    'capacity_display': str(float(backend_service_back.get('capacityScaler', 0.0)) * 100) + ' %',
                    'selected_port': selected_port,
                    'balancing_mode': balancing_mode,
                    'balancing_mode_display': balancing_mode_display,
                    'scheme': service.get('loadBalancingScheme')
                }
                if auto_scaler_vo is not None:
                    auto_display = self._get_autoscaling_display(auto_scaler_vo)
                    target_backend_svc.update({
                        'autoscaling_policy': auto_scaler_vo,
                        'autoscaling_display': 'No configuration' if auto_display == '' else auto_display,
                    })
                else:
                    target_backend_svc.update({
                        'autoscaling_display': 'No configuration',
                    })
                target_backend_list.append(target_backend_svc)
        for bucket in backend_buckets:
            region = self._get_matched_last_target('region', bucket) if bucket.get('region') else 'global'
            target_backend_bucket = {
                'name': bucket.get('name'),
                'instance_name': bucket.get('bucketName'),
                'type': 'Backend Bucket',
                'region': region,
                'cloud_cdn': 'enabled' if bucket.get('enableCdn') else 'disabled',
                'custom_response_headers': bucket.get('customResponseHeaders', [])
            }
            target_backend_list.append(target_backend_bucket)
        return target_backend_list
    def get_selected_port(self, health_checks, health_checks_vos):
        selected_port = []
        for hk in health_checks:
            for single_hk in health_checks_vos.get('health_checks', []):
                if hk == single_hk.get('selfLink'):
                    key = self._get_key_name_for_health_check(single_hk)
                    hc_vo = single_hk.get(key, {}).get('port')
                    if key and hc_vo:
                        selected_port.append(hc_vo)
        return selected_port
    def get_region_from_group(self, group_link):
        if '/zones/' in group_link:
            parsed_group = group_link[group_link.find('/zones/') + 7:]
            zone = parsed_group[:parsed_group.find('/')]
            return zone[:-2]
        else:
            return self._extract_region_from_group(group_link)
    def get_instance_for_back_ends(self, lb, pool, instance_groups):
        instance_list = []
        instances = pool.get('instances', [])
        addresses = [d.get('IPAddress') for d in lb.get('forwarding_rules', []) if d.get('IPAddress', '') != '']
        source_link = lb.get('source_link')
        for instance_group in instance_groups:
            if source_link in instance_group.get('targetPools', []):
                instance_list.append({
                    'type': 'Instance Group',
                    'name': instance_group.get('name'),
                    'region': 'global' if instance_group.get('region') is None else self._get_matched_last_target(
                        'region', instance_group),
                    'zone': '',
                    'address': addresses
                })
        for instance in instances:
            zone = self._extract_zone(instance)
            instance_list.append({
                'type': 'Compute VM',
                'name': instance[instance.rfind('/') + 1:],
                'region': zone[:-2],
                'zone': zone,
                'address': addresses
            })
        return instance_list
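    # Wraps every target proxy (gRPC/HTTP/HTTPS/SSL/TCP) in a common structure and,
    # for SSL/TCP proxies, also returns them separately, since those directly define
    # their own proxy-type load balancers.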
    def get_all_proxy_list(self, grpc_proxies, http_proxies, https_proxies, ssl_proxies, tcp_proxies, forwarding_rules):
        proxy_list = []
        proxy_list_relate_to_load_balancer = []
        all_resources = [{'type': 'grpc',
                          'source': grpc_proxies},
                         {'type': 'http',
                          'source': http_proxies},
                         {'type': 'https',
                          'source': https_proxies},
                         {'type': 'ssl',
                          'source': ssl_proxies},
                         {'type': 'tcp',
                          'source': tcp_proxies}
                         ]
        for resource in all_resources:
            for proxy in resource.get('source', []):
                proxy_type = resource.get('type')
                proxy_key: str = self._get_proxy_key(resource.get('type'))
                in_used_by, in_used_by_display = self._get_in_used_by_forwarding_rule(proxy, forwarding_rules)
                proxy_vo = {
                    proxy_key: proxy,
                    'proxy_key': proxy_key,
                    'type': proxy_type,
                    'name': proxy.get('name', ''),
                    'description': proxy.get('description', ''),
                    'target_resource': {},
                    'in_used_by': in_used_by,
                    'target_proxy_display': {
                        'name': proxy.get('name', ''),
                        'description': proxy.get('description', ''),
                        'type': f'{proxy_type.upper()} Proxy',
                        'target_resource': self._get_matched_last_target('urlMap', proxy),
                        'in_used_by_display': in_used_by_display,
                        'creation_timestamp': proxy.get('creationTimestamp'),
                    },
                }
                if proxy_type in ['ssl', 'tcp']:
                    proxy_list_relate_to_load_balancer.append(proxy_vo)
                proxy_list.append(proxy_vo)
        return proxy_list, proxy_list_relate_to_load_balancer
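    # URL-map based (HTTP(S)) load balancers: one entry per URL map, carrying the
    # backend services/buckets it references and their health-check self links.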
    def get_load_balancer_from_url_maps(self, url_maps, backend_services, backend_buckets, project):
        load_balancer_list = []
        for url_map in url_maps:
            region = self._get_matched_last_target('region', url_map) if url_map.get('region') else 'global'
            url_map_single_vo = {}
            identifiers = self._get_matched_services(url_map)
            backend_svc_list = self.get_lb_info_from_selected_items(identifiers, 'selfLink', backend_services)
            health_check_list = self._get_health_checks_from_backend_svc(backend_svc_list)
            backend_bucktet_list = self.get_lb_info_from_selected_items(identifiers, 'selfLink', backend_buckets)
            host_and_path_rules = self.get_matched_host_and_path(url_map)
            url_map_single_vo.update({
                'lb_type': 'url_map',
                'project': project,
                'region': region,
                'id': url_map.get('id'),
                'description': url_map.get('description', ''),
                'name': url_map.get('name'),
                'self_link': self._get_self_link(project, region, url_map.get('name')),
                'identifier': url_map.get('selfLink'),
                'heath_check_vos': {
                    'health_check_list': self._get_matched_last_target_in_list(health_check_list),
                    'health_check_self_link_list': health_check_list,
                },
                'backend_services': backend_svc_list,
                'backend_buckets': backend_bucktet_list,
                'host_and_paths': host_and_path_rules,
                'creation_timestamp': url_map.get('creationTimestamp')
            })
            load_balancer_list.append(url_map_single_vo)
        return load_balancer_list
    def get_load_balancer_from_target_pools(self, target_pools, project):
        load_balancer_list = []
        for target_pool in target_pools:
            region = self._get_matched_last_target('region', target_pool) if target_pool.get('region') else 'global'
            health_checks = target_pool.get('healthChecks', [])
            target_pool.update({
                '_region': region,
                'num_of_instance': len(target_pool.get('instances', [])),
                '_health_checks': self._get_matched_last_target_in_list(health_checks)
            })
            target_pool_vo = {
                'lb_type': 'target_pool',
                'project': project,
                'region': region,
                'description': target_pool.get('description'),
                'id': target_pool.get('id'),
                'name': target_pool.get('name'),
                'heath_check_vos': {
                    'health_check_list': self._get_matched_last_target_in_list(health_checks),
                    'health_check_self_link_list': health_checks,
                },
                'identifier': target_pool.get('selfLink'),
                'self_link': self._get_self_link(project, region, target_pool.get('name')),
                'creation_timestamp': target_pool.get('creationTimestamp'),
                'target_pools': [TargetPools(target_pool, strict=False)]
            }
            load_balancer_list.append(target_pool_vo)
        return load_balancer_list
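    # TCP/SSL-proxy load balancers: pair each selective proxy with the backend
    # service it points to (its 'service' self link) and build an entry per pair.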
    def get_load_balancer_from_target_proxy(self, backends, selective_proxies, project):
        backend_proxies = []
        for ssl_tcp_proxy in selective_proxies:
            key = 'tcp_proxy' if 'tcp_proxy' in ssl_tcp_proxy else 'ssl_proxy'
            proxy_info = ssl_tcp_proxy.get(key, {})
            for backend in backends:
                health_checks = backend.get('healthChecks', [])
                if backend.get('selfLink') == proxy_info.get('service', ''):
                    region = self._extract_region_from_proxy(backend.get('selfLink'), project)
                    backend.update({'region': region, 'type': 'Global' if region == 'global' else region})
                    backend_proxy_vo = {
                        'lb_type': 'target_proxy',
                        'project': project,
                        'id': proxy_info.get('id'),
                        'region': region,
                        'name': proxy_info.get('name'),
                        'self_link': self._get_self_link(project, region, proxy_info.get('name')),
                        'heath_check_vos': {
                            'health_check_list': self._get_matched_last_target_in_list(health_checks),
                            'health_check_self_link_list': health_checks,
                        },
                        'identifier': proxy_info.get('selfLink'),
                        'creation_timestamp': proxy_info.get('creationTimestamp'),
                        'target_proxies': [ssl_tcp_proxy],
                        'backend_services': [backend]
                    }
                    backend_proxies.append(backend_proxy_vo)
        return backend_proxies
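    # Forwarding rules are matched to a load balancer either directly (target pool
    # identifier) or via the self links of its target proxies; region and portRange
    # are normalized for display along the way.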
    def get_matched_forwarding_rules(self, loadbalancer, forwarding_rules):
        matched_forwarding_rules = []
        lb_type = loadbalancer.get('lb_type')
        if lb_type == 'target_pool':
            for forwarding_rule in forwarding_rules:
                if loadbalancer.get('identifier') == forwarding_rule.get('target'):
                    region = forwarding_rule.get('region')
                    r_type = 'Global' if region == 'global' or region is None else 'Regional'
                    if r_type == 'Regional':
                        _region = region[region.rfind('/')+1:]
                        forwarding_rule.update({'region': _region})
                    forwarding_rule.update({'type': r_type})
                    if forwarding_rule.get('portRange', '').find('-') > 0:
                        forwarding_rule.update({'portRange': self._get_port_range_from_str(forwarding_rule.get('portRange', ''))})
                    matched_forwarding_rules.append(forwarding_rule)
        elif lb_type in ['url_map', 'target_proxy']:
            self_links = self._get_self_links_from_proxies(loadbalancer.get('target_proxies', []))
            for forwarding_rule in forwarding_rules:
                if forwarding_rule.get('target') in self_links:
                    region = forwarding_rule.get('region')
                    r_type = 'Global' if region == 'global' or region is None else 'Regional'
                    if r_type == 'Regional':
                        _region = region[region.rfind('/')+1:]
                        forwarding_rule.update({'region': _region})
                    forwarding_rule.update({'type': r_type})
                    if forwarding_rule.get('portRange', '').find('-') > 0:
                        forwarding_rule.update({'portRange': self._get_port_range_from_str(forwarding_rule.get('portRange', ''))})
                    matched_forwarding_rules.append(forwarding_rule)
        return matched_forwarding_rules
    def get_matched_target_proxies(self, lb, target_proxies, certs):
        matched_target_proxies = []
        matched_certificate = []
        for target_proxy in target_proxies:
            key = target_proxy.get('proxy_key')
            proxy_info = target_proxy.get(key, {})
            if 'urlMap' in proxy_info:
                if proxy_info.get('urlMap') == lb.get('identifier'):
                    if 'sslCertificates' in proxy_info:
                        matching_ones = self._get_matched_certificates(certs, proxy_info.get('sslCertificates', []))
                        for matching_one in matching_ones:
                            key = 'managed' if 'managed' in matching_one else 'selfManaged'
                            ssl_type = 'Customer supplied'
                            if key == 'managed':
                                ssl_type = 'Google managed'
                            elif key == 'selfManaged':
                                ssl_type = 'Customer managed'
                            managed = matching_one.get(key, {})
                            domain_info = managed.get('domainStatus', {})
                            domain_status = self.convert_labels_format(managed.get('domainStatus', {}))
                            domains = [dd for dd in domain_info]
                            matching_one.update({
                                'domains': domains,
                                'type': ssl_type
                            })
                            if domain_status:
                                matching_one['managed'].update({'domain_status': domain_status})
                            matched_certificate.append(matching_one)
                    matched_target_proxies.append(target_proxy)
        return matched_target_proxies, matched_certificate
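    # Flattens a URL map's defaultService, hostRules and pathMatchers into simple
    # host / path / backend rows for display.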
    def get_matched_host_and_path(self, target_item):
        host_and_path_rules = []
        if 'defaultService' in target_item:
            host_and_path_rules.append({
                'host': ['All unmatched (default)'],
                'path': ['All unmatched (default)'],
                'backend': self._get_matched_last_target('defaultService', target_item)
            })
        if 'hostRules' in target_item:
            host_rule_map = {}
            for host_rule in target_item.get('hostRules', []):
                host_rule_map[host_rule.get('pathMatcher')] = {'host': host_rule.get('hosts', [])}
            for path_matcher in target_item.get('pathMatchers', []):
                _name = path_matcher.get('name', '')
                default_service = path_matcher.get('defaultService')
                if default_service:
                    host_and_path_rules.append({
                        'host': host_rule_map.get(_name, {}).get('host'),
                        'path': ['/*'],
                        'backend': self._get_matched_last_target('defaultService', target_item)
                    })
                for path_rule in path_matcher.get('pathRules', []):
                    host_and_path_rules.append({
                        'host': host_rule_map.get(_name, {}).get('host'),
                        'path': path_rule.get('paths', []),
                        'backend': self._get_matched_last_target('service', path_rule)
                    })
        return host_and_path_rules
    def get_front_from_loadbalancer(self, loadbalancer):
        frontends = []
        proxies = loadbalancer.get('target_proxies', [])
        pools = loadbalancer.get('target_pools', [])
        for forwarding_rule in loadbalancer.get('forwarding_rules', []):
            target = forwarding_rule.get('target', '')
            ports = forwarding_rule.get('ports', [])
            region = 'global' if forwarding_rule.get('region') is None else forwarding_rule.get('region')
            _region = region[region.rfind('/') + 1:]
            if not proxies:
                for pool in pools:
                    if target == pool.get('self_link'):
                        front_single = {
                            'name': forwarding_rule.get('name'),
                            'protocols': forwarding_rule.get('IPProtocol').upper(),
                            'scope': 'Global' if region == 'global' else f'Regional ({_region})',
                            'region': _region,
                            'ip_address': forwarding_rule.get('IPAddress'),
                            'port': self._get_list_from_str(
                                forwarding_rule.get('portRange')) if not ports else self._get_list_from_str(ports),
                            'network_tier': forwarding_rule.get('networkTier').capitalize()
                        }
                        frontends.append(front_single)
            else:
                for proxy in proxies:
                    key = proxy.get('proxy_key', '')
                    proxy_vo = proxy.get(key)
                    if target == proxy_vo.get('selfLink'):
                        front_single = {
                            'name': forwarding_rule.get('name'),
                            'protocols': proxy.get('type').upper(),
                            'scope': 'Global' if region == 'global' else f'Regional ({_region})',
                            'region': _region,
                            'ip_address': forwarding_rule.get('IPAddress'),
                            'port': self._get_list_from_str(
                                forwarding_rule.get('portRange')) if not ports else self._get_list_from_str(ports),
                            'network_tier': forwarding_rule.get('networkTier').capitalize()
                        }
                        if 'sslCertificates' in proxy_vo:
                            front_single.update(
                                {'certificate': self._get_matched_last_target_in_list(proxy_vo.get('sslCertificates'))})
                        frontends.append(front_single)
        return frontends
    @staticmethod
    def _get_frontend_display(frontend):
        frontend_display = ''
        rule_length = len(frontend)
        if rule_length > 0:
            regions = list(set([ft.get('region') for ft in frontend if 'region' in ft]))
            located_at = regions[0] if len(regions) == 1 else 'Multi regions' if len(regions) > 1 else ''
            _located_at = f'within {located_at}' if located_at != '' else ''
            plural = '' if rule_length == 1 else 's'
            frontend_display = f'{rule_length} Forwarding Rule{plural} {_located_at}'
        return frontend_display
    @staticmethod
    def _get_refer_link(lb, project):
        base = 'https://console.cloud.google.com/net-services/loadbalancing/details'
        lb_type = lb.get('lb_type')
        name = lb.get('name')
        region = lb.get('region')
        if lb_type == 'url_map':
            return base + f'/http/{name}?project={project}'
        elif lb_type == 'target_pool':
            return base + f'/network/{region}/{name}?project={project}'
        else:
            return base + f'/proxy/{name}?project={project}'
    @staticmethod
    def _get_proper_region(lb):
        lb_type = lb.get('lb_type')
        proper_region = ''
        if lb_type == 'url_map':
            backends = lb.get('backends', {})
            _type = backends.get('type')
            _backends = backends.get(f'{_type}_backend', [])
            prop = [backend.get('region') for backend in _backends if backend.get('region', '') != 'global']
            _prop = list(set(prop))
            proper_region = 'global' if not _prop else _prop[0]
        elif lb_type == 'target_pool':
            proper_region = lb.get('region')
        else:
            proper_region = lb.get('region')
        return proper_region
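    # Lead protocol: 'HTTP(S)' when both HTTP and HTTPS frontends exist, otherwise the
    # first frontend protocol; with no frontends, fall back to the proxy type or 'TCP/UDP'.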
    @staticmethod
    def _get_lead_protocol(load_balancer):
        lead_protocol = ''
        all_protocols = [d.get('protocols') for d in load_balancer.get('frontends', []) if 'protocols' in d]
        if len(all_protocols) > 0:
            lead_protocol = 'HTTP(S)' if 'HTTPS' in all_protocols and 'HTTP' in all_protocols else all_protocols[0]
        else:
            all_protocols = [d.get('type').upper() for d in load_balancer.get('target_proxies', []) if 'type' in d]
            lead_protocol = f'{all_protocols[0]} (Proxy)' if len(all_protocols) > 0 else 'TCP/UDP'
        return lead_protocol
    @staticmethod
    def _get_backend_tabs(load_balancers):
        backends_tab_list = []
        backends = load_balancers.get('backends', {})
        backends_type = backends.get('type')
        object_key = f'{backends_type}_backend'
        if backends_type in ['proxy', 'url_map']:
            for back_end in backends.get(object_key, []):
                _region = back_end.get('region')
                backends_tab_list.append({
                    'name': back_end.get('name'),
                    'type': 'Backend Bucket' if back_end.get('type') == 'Backend Bucket' else 'Backend service',
                    'scope': 'Global' if _region == 'global' else f'Regional ({_region})',
                    'protocol': back_end.get('end_point_protocol', ''),
                })
        else:
            back_end = backends.get(object_key, {})
            _region = back_end.get('region')
            protocol = back_end.get('end_point_protocol', '')
            backends_tab_list.append({
                'name': back_end.get('name'),
                'type': 'Backend Bucket' if back_end.get('type') == 'Backend Bucket' else 'Backend service',
                'scope': 'Global' if _region == 'global' else f'Regional ({_region})',
                'protocol': protocol,
            })
        return backends_tab_list
    @staticmethod
    def _get_backend_display(load_balancer):
        lb_type = load_balancer.get('lb_type')
        display = ''
        if lb_type == 'target_pool':
            _pools = len(load_balancer.get('target_pools', []))
            pools = load_balancer.get('target_pools', [])
            num_of_instance = 0
            for pool in pools:
                num_of_instance = num_of_instance + len(pool.get('instances', []))
            pool_plural = '(s)' if _pools > 1 else ''
            num_plural = 's' if num_of_instance > 1 else ''
            display = f'{_pools} Target pool{pool_plural}' if num_of_instance == 0 else \
                f'{_pools} Target pool{pool_plural} ({num_of_instance} Instance{num_plural})'
        else:
            service = len(load_balancer.get('backend_services', []))
            bucket = len(load_balancer.get('backend_buckets', []))
            display = f'{service} Backend Services & {bucket} Backend Buckets' if service > 0 and bucket > 0 \
                else f'{bucket} Backend Buckets' if bucket > 0 else f'{service} Backend Service'
        return display
    @staticmethod
    def _extract_zone(self_link):
        p_len = len('/zones/')
        p_key = '/zones/'
        _zone = self_link[self_link.find(p_key) + p_len:]
        return _zone[:_zone.find('/')]
    @staticmethod
    def _get_matched_certificates(certs, ssl_certificates):
        certificates = []
        for cert in certs:
            if cert.get('selfLink', '') in ssl_certificates:
                certificates.append(cert)
        return certificates
    @staticmethod
    def _get_matched_services(target_item):
        matching_item_self_links = []
        if 'defaultService' in target_item:
            matching_item_self_links.append(target_item.get('defaultService'))
        if 'pathMatchers' in target_item and isinstance(target_item.get('pathMatchers'), list):
            for path_matcher in target_item.get('pathMatchers'):
                if path_matcher.get('defaultService', '') not in matching_item_self_links:
                    matching_item_self_links.append(path_matcher.get('defaultService', ''))
                if 'pathRules' in path_matcher and isinstance(path_matcher.get('pathRules'), list):
                    for rule in path_matcher.get('pathRules'):
                        if rule.get('service') not in matching_item_self_links:
                            matching_item_self_links.append(rule.get('service'))
        return matching_item_self_links
    @staticmethod
    def _get_self_links_from_proxies(target_proxies):
        self_link_list = []
        for proxy in target_proxies:
            key = proxy.get('proxy_key')
            self_link = proxy.get(key, {}).get('selfLink')
            if self_link:
                self_link_list.append(self_link)
        return self_link_list
    @staticmethod
    def _get_matched_last_target(key, source):
        a = source.get(key, '')
        return a[a.rfind('/') + 1:]
    @staticmethod
    def _get_matched_last_target_in_list(target_list):
        matched_links_vos = []
        for target_item in target_list:
            a = target_item
            matched_links_vos.append(a[a.rfind('/') + 1:])
        return matched_links_vos
    @staticmethod
    def _get_self_link(project, region, name):
        return f'https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/load_balancing/{name}'
    @staticmethod
    def _get_zone_from_target(key, source):
        a = source.get(key, '')
        return a[a.find('zones') + 6:a.find('/instances')]
    @staticmethod
    def _get_list_from_str(target_str):
        switching_target = None
        if isinstance(target_str, int):
            switching_target = target_str
        else:
            port_range = target_str.split('-')
            switching_target = port_range[0] if len(port_range) > 1 and port_range[0] == port_range[1] else target_str
        return switching_target if isinstance(switching_target, list) else [switching_target]
    @staticmethod
    def _get_port_range_from_str(target_str):
        port_range = target_str.split('-')
        switching_target = port_range[0] if len(port_range) > 1 and port_range[0] == port_range[1] else target_str
        return switching_target
    @staticmethod
    def _get_in_used_by_forwarding_rule(target_proxy, forwarding_rules):
        in_used_by = []
        in_used_by_display = []
        for forwarding_rule in forwarding_rules:
            if forwarding_rule.get('target') == target_proxy.get('selfLink'):
                in_used_by.append({
                    'id': forwarding_rule.get('id', ''),
                    'name': forwarding_rule.get('name', ''),
                    'self_link': forwarding_rule.get('selfLink', ''),
                })
                in_used_by_display.append(forwarding_rule.get('name', ''))
        return in_used_by, in_used_by_display
    @staticmethod
    def _get_matching_target_proxy(loadbalancer, all_proxies):
        target_proxies = []
        for proxy in all_proxies:
            proxy_key: str = 'grpc_proxy' if 'grpc_proxy' in proxy \
                else 'http_proxy' if 'http_proxy' in proxy else 'https_proxy'
            selected_px = proxy.get(proxy_key, {}).get('url_map', '')
            if selected_px == loadbalancer.get('identifier', ''):
                proxy['target_resource'].update({
                    'id': loadbalancer.get('id'),
                    'name': loadbalancer.get('name'),
                    'self_link': loadbalancer.get('identifier')
                })
                target_proxies.append(proxy)
        return target_proxies
    @staticmethod
    def _extract_region_from_proxy(self_link, project):
        p_len = len(project) + 1
        p_key = f'{project}/'
        _region = self_link[self_link.find(p_key) + p_len:]
        return _region[:_region.find('/')]
    @staticmethod
    def _extract_region_from_group(self_link):
        p_len = 9
        p_key = self_link.find('/regions/')
        if p_key == -1:
            return 'global'
        else:
            _region = self_link[p_key + p_len:]
            return _region[:_region.find('/')]
    @staticmethod
    def _get_proxy_key(proxy_type):
        proxy_key = 'tcp_proxy'
        if proxy_type == 'grpc':
            proxy_key = 'grpc_proxy'
        elif proxy_type == 'http':
            proxy_key = 'http_proxy'
        elif proxy_type == 'https':
            proxy_key = 'https_proxy'
        elif proxy_type == 'ssl':
            proxy_key = 'ssl_proxy'
        return proxy_key
    def get_lb_info_from_selected_items(self, identifier, key, selected_items):
        matched_lb_vo = []
        for selected_item in selected_items:
            if selected_item.get(key, '') in identifier:
                region = self._extract_region_from_group(selected_item.get(key, ''))
                _type = 'Global' if region == 'global' else 'Regional'
                selected_item.update({
                    'region': self._extract_region_from_group(selected_item.get(key, '')),
                    'type': _type
                })
                matched_lb_vo.append(selected_item)
        return matched_lb_vo
    @staticmethod
    def _get_matched_health_checks(self_link_list, health_checks):
        health_check_list = []
        for health_check in health_checks:
            if health_check.get('selfLink', '') in self_link_list:
                health_check_list.append(health_check)
        return health_check_list
    @staticmethod
    def _get_health_checks_from_backend_svc(backend_svcs):
        health_check_list = []
        for backend_svc in backend_svcs:
            if len(backend_svc.get('healthChecks', [])) > 0:
                health_check_list.extend(backend_svc.get('healthChecks'))
        return health_check_list
    @staticmethod
    def _get_autoscaling_display(autoscaling_policy):
        auto_scaling_display = ''
        if 'cpuUtilization' in autoscaling_policy:
            cpu_util = autoscaling_policy.get('cpuUtilization', {})
            target = float(cpu_util.get('utilizationTarget', 0.0)) * 100
            auto_scaling_display = f'On: Target CPU utilization {target} %'
        elif 'loadBalancingUtilization' in autoscaling_policy:
            cpu_util = autoscaling_policy.get('loadBalancingUtilization', {})
            target = float(cpu_util.get('utilizationTarget', 0.0)) * 100
            auto_scaling_display = f'On: Load balancing utilization {target} %'
        elif 'customMetricUtilizations' in autoscaling_policy:
            auto_scaling_display = f'On: custom metrics'
        return auto_scaling_display
    @staticmethod
    def _get_balancing_mode_display(backend):
        display_msg = 'No configuration'
        if 'maxUtilization' in backend:
            rate = float(backend.get('maxUtilization', 0.0)) * 100
            display_msg = f'Max Backend Utilization: {rate} %'
        elif 'maxRate' in backend:
            rate = int(backend.get('maxRate', 0))
            display_msg = f'Max Backend Rate: {rate}'
        elif 'maxRatePerInstance' in backend:
            rate = float(backend.get('maxRatePerInstance', 0.0))
            display_msg = f'Max Backend Rate Per Instance: {rate}'
        elif 'maxRatePerEndpoint' in backend:
            rate = float(backend.get('maxRatePerEndpoint', 0.0))
            display_msg = f'Max Backend Rate Per Endpoint: {rate}'
        elif 'maxConnections' in backend:
            rate = int(backend.get('maxConnections', 0))
            display_msg = f'Max Backend Connection: {rate}'
        elif 'maxConnectionsPerInstance' in backend:
            rate = int(backend.get('maxConnectionsPerInstance', 0))
            display_msg = f'Max Backend Connections Per Instance: {rate}'
        elif 'maxConnectionsPerEndpoint' in backend:
            rate = int(backend.get('maxConnectionsPerEndpoint', 0))
            display_msg = f'Max Backend Connections Per Endpoint: {rate}'
        return display_msg
    @staticmethod
    def _get_selected_instance_group(backend, instance_groups, auto_scalers):
        for instance_group in instance_groups:
            if backend.get('group') == instance_group.get('instanceGroup'):
                for auto_scaler in auto_scalers:
                    if auto_scaler.get('target') == instance_group.get('selfLink'):
                        auto_policy = auto_scaler.get('autoscalingPolicy', {})
                        return auto_policy
    @staticmethod
    def _get_key_name_for_health_check(hk):
        if 'tcpHealthCheck' in hk:
            return 'tcpHealthCheck'
        elif 'sslHealthCheck' in hk:
            return 'sslHealthCheck'
        elif 'httpHealthCheck' in hk:
            return 'httpHealthCheck'
        elif 'httpsHealthCheck' in hk:
            return 'httpsHealthCheck'
        elif 'http2HealthCheck' in hk:
            return 'http2HealthCheck'
        elif 'grpcHealthCheck' in hk:
            return 'grpcHealthCheck'
        else:
            return None | 
| 
	the-stack_106_32218 | 
	"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations
import operator
from textwrap import dedent
from typing import (
    TYPE_CHECKING,
    Dict,
    Optional,
    Tuple,
    Union,
    cast,
)
from warnings import (
    catch_warnings,
    simplefilter,
    warn,
)
import numpy as np
from pandas._libs import (
    algos,
    hashtable as htable,
    iNaT,
    lib,
)
from pandas._typing import (
    AnyArrayLike,
    ArrayLike,
    DtypeObj,
    FrameOrSeriesUnion,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.cast import (
    construct_1d_object_array_from_listlike,
    infer_dtype_from_array,
)
from pandas.core.dtypes.common import (
    ensure_float64,
    ensure_int64,
    ensure_object,
    ensure_platform_int,
    ensure_uint64,
    is_array_like,
    is_bool_dtype,
    is_categorical_dtype,
    is_complex_dtype,
    is_datetime64_dtype,
    is_datetime64_ns_dtype,
    is_extension_array_dtype,
    is_float_dtype,
    is_integer,
    is_integer_dtype,
    is_list_like,
    is_numeric_dtype,
    is_object_dtype,
    is_period_dtype,
    is_scalar,
    is_signed_integer_dtype,
    is_timedelta64_dtype,
    is_unsigned_integer_dtype,
    needs_i8_conversion,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.generic import (
    ABCDatetimeArray,
    ABCExtensionArray,
    ABCIndex,
    ABCMultiIndex,
    ABCRangeIndex,
    ABCSeries,
    ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
    isna,
    na_value_for_dtype,
)
from pandas.core.array_algos.take import take_nd
from pandas.core.construction import (
    array as pd_array,
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
    from pandas import (
        Categorical,
        DataFrame,
        Index,
        Series,
    )
    from pandas.core.arrays import (
        DatetimeArray,
        TimedeltaArray,
    )
_shared_docs: Dict[str, str] = {}
# --------------- #
# dtype access    #
# --------------- #
def _ensure_data(values: ArrayLike) -> Tuple[np.ndarray, DtypeObj]:
    """
    routine to ensure that our data is of the correct
    input dtype for lower-level routines
    This will coerce:
    - ints -> int64
    - uint -> uint64
    - bool -> uint64 (TODO this should be uint8)
    - datetimelike -> i8
    - datetime64tz -> i8 (in local tz)
    - categorical -> codes
    Parameters
    ----------
    values : array-like
    Returns
    -------
    values : ndarray
    pandas_dtype : np.dtype or ExtensionDtype
    """
    if not isinstance(values, ABCMultiIndex):
        # extract_array would raise
        values = extract_array(values, extract_numpy=True)
    # we check some simple dtypes first
    if is_object_dtype(values):
        return ensure_object(np.asarray(values)), np.dtype("object")
    try:
        if is_bool_dtype(values):
            # we are actually coercing to uint64
            # until our algos support uint8 directly (see TODO)
            return np.asarray(values).astype("uint64"), np.dtype("bool")
        elif is_signed_integer_dtype(values):
            return ensure_int64(values), np.dtype("int64")
        elif is_unsigned_integer_dtype(values):
            return ensure_uint64(values), np.dtype("uint64")
        elif is_float_dtype(values):
            return ensure_float64(values), np.dtype("float64")
        elif is_complex_dtype(values):
            # ignore the fact that we are casting to float
            # which discards complex parts
            with catch_warnings():
                simplefilter("ignore", np.ComplexWarning)
                values = ensure_float64(values)
            return values, np.dtype("float64")
    except (TypeError, ValueError, OverflowError):
        # if we are trying to coerce to a dtype
        # and it is incompatible this will fall through to here
        return ensure_object(values), np.dtype("object")
    # datetimelike
    if needs_i8_conversion(values.dtype):
        if is_period_dtype(values.dtype):
            from pandas import PeriodIndex
            values = PeriodIndex(values)._data
        elif is_timedelta64_dtype(values.dtype):
            from pandas import TimedeltaIndex
            values = TimedeltaIndex(values)._data
        else:
            # Datetime
            if values.ndim > 1 and is_datetime64_ns_dtype(values.dtype):
                # Avoid calling the DatetimeIndex constructor as it is 1D only
                # Note: this is reached by DataFrame.rank calls GH#27027
                # TODO(EA2D): special case not needed with 2D EAs
                asi8 = values.view("i8")
                dtype = values.dtype
                return asi8, dtype
            from pandas import DatetimeIndex
            values = DatetimeIndex(values)._data
        dtype = values.dtype
        return values.asi8, dtype
    elif is_categorical_dtype(values.dtype):
        values = cast("Categorical", values)
        values = values.codes
        dtype = pandas_dtype("category")
        # we are actually coercing to int64
        # until our algos support int* directly (not all do)
        values = ensure_int64(values)
        return values, dtype
    # we have failed, return object
    values = np.asarray(values, dtype=object)
    return ensure_object(values), np.dtype("object")
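# Illustrative (not executed) examples of the coercions performed above:
#   bool ndarray      -> (uint64 values, np.dtype("bool"))
#   datetime64[ns]    -> (int64 i8 values, datetime64[ns] dtype)
#   Categorical       -> (int64 codes, CategoricalDtype)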
def _reconstruct_data(
    values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
    """
    reverse of _ensure_data
    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    dtype : np.ndtype or ExtensionDtype
    original : AnyArrayLike
    Returns
    -------
    ExtensionArray or np.ndarray
    """
    if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
        # Catch DatetimeArray/TimedeltaArray
        return values
    if is_extension_array_dtype(dtype):
        cls = dtype.construct_array_type()
        if isinstance(values, cls) and values.dtype == dtype:
            return values
        values = cls._from_sequence(values)
    elif is_bool_dtype(dtype):
        values = values.astype(dtype, copy=False)
        # we only support object dtypes bool Index
        if isinstance(original, ABCIndex):
            values = values.astype(object, copy=False)
    elif dtype is not None:
        if is_datetime64_dtype(dtype):
            dtype = "datetime64[ns]"
        elif is_timedelta64_dtype(dtype):
            dtype = "timedelta64[ns]"
        values = values.astype(dtype, copy=False)
    return values
def _ensure_arraylike(values) -> ArrayLike:
    """
    ensure that we are arraylike if not already
    """
    if not is_array_like(values):
        inferred = lib.infer_dtype(values, skipna=False)
        if inferred in ["mixed", "string", "mixed-integer"]:
            # "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
            if isinstance(values, tuple):
                values = list(values)
            values = construct_1d_object_array_from_listlike(values)
        else:
            values = np.asarray(values)
    return values
_hashtables = {
    "float64": htable.Float64HashTable,
    "uint64": htable.UInt64HashTable,
    "int64": htable.Int64HashTable,
    "string": htable.StringHashTable,
    "object": htable.PyObjectHashTable,
}
def _get_hashtable_algo(values: np.ndarray):
    """
    Parameters
    ----------
    values : np.ndarray
    Returns
    -------
    htable : HashTable subclass
    values : ndarray
    """
    values, _ = _ensure_data(values)
    ndtype = _check_object_for_strings(values)
    htable = _hashtables[ndtype]
    return htable, values
def _get_values_for_rank(values: ArrayLike):
    if is_categorical_dtype(values):
        values = cast("Categorical", values)._values_for_rank()
    values, _ = _ensure_data(values)
    return values
def get_data_algo(values: ArrayLike):
    values = _get_values_for_rank(values)
    ndtype = _check_object_for_strings(values)
    htable = _hashtables.get(ndtype, _hashtables["object"])
    return htable, values
def _check_object_for_strings(values: np.ndarray) -> str:
    """
    Check if we can use string hashtable instead of object hashtable.
    Parameters
    ----------
    values : ndarray
    Returns
    -------
    str
    """
    ndtype = values.dtype.name
    if ndtype == "object":
        # it's cheaper to use a String Hash Table than Object; we infer
        # including nulls because that is the only difference between
        # StringHashTable and ObjectHashtable
        if lib.infer_dtype(values, skipna=False) in ["string"]:
            ndtype = "string"
    return ndtype
# --------------- #
# top-level algos #
# --------------- #
def unique(values):
    """
    Hash table-based unique. Uniques are returned in order
    of appearance. This does NOT sort.
    Significantly faster than numpy.unique for long enough sequences.
    Includes NA values.
    Parameters
    ----------
    values : 1d array-like
    Returns
    -------
    numpy.ndarray or ExtensionArray
        The return can be:
        * Index : when the input is an Index
        * Categorical : when the input is a Categorical dtype
        * ndarray : when the input is a Series/ndarray
        Return numpy.ndarray or ExtensionArray.
    See Also
    --------
    Index.unique : Return unique values from an Index.
    Series.unique : Return unique values of Series object.
    Examples
    --------
    >>> pd.unique(pd.Series([2, 1, 3, 3]))
    array([2, 1, 3])
    >>> pd.unique(pd.Series([2] + [1] * 5))
    array([2, 1])
    >>> pd.unique(pd.Series([pd.Timestamp('20160101'),
    ...                     pd.Timestamp('20160101')]))
    array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
    >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
    ...                      pd.Timestamp('20160101', tz='US/Eastern')]))
    array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
          dtype=object)
    >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
    ...                     pd.Timestamp('20160101', tz='US/Eastern')]))
    DatetimeIndex(['2016-01-01 00:00:00-05:00'],
    ...           dtype='datetime64[ns, US/Eastern]', freq=None)
    >>> pd.unique(list('baabc'))
    array(['b', 'a', 'c'], dtype=object)
    An unordered Categorical will return categories in the
    order of appearance.
    >>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
    [b, a, c]
    Categories (3, object): [b, a, c]
    >>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
    ...                                    categories=list('abc'))))
    [b, a, c]
    Categories (3, object): [b, a, c]
    An ordered Categorical preserves the category ordering.
    >>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
    ...                                    categories=list('abc'),
    ...                                    ordered=True)))
    [b, a, c]
    Categories (3, object): [a < b < c]
    An array of tuples
    >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
    array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
    """
    values = _ensure_arraylike(values)
    if is_extension_array_dtype(values):
        # Dispatch to extension dtype's unique.
        return values.unique()
    original = values
    htable, values = _get_hashtable_algo(values)
    table = htable(len(values))
    uniques = table.unique(values)
    uniques = _reconstruct_data(uniques, original.dtype, original)
    return uniques
unique1d = unique
def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
    """
    Compute the isin boolean array.
    Parameters
    ----------
    comps : array-like
    values : array-like
    Returns
    -------
    ndarray[bool]
        Same length as `comps`.
    """
    if not is_list_like(comps):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a [{type(comps).__name__}]"
        )
    if not is_list_like(values):
        raise TypeError(
            "only list-like objects are allowed to be passed "
            f"to isin(), you passed a [{type(values).__name__}]"
        )
    if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
        values = _ensure_arraylike(list(values))
    elif isinstance(values, ABCMultiIndex):
        # Avoid raising in extract_array
        values = np.array(values)
    else:
        values = extract_array(values, extract_numpy=True)
    comps = _ensure_arraylike(comps)
    comps = extract_array(comps, extract_numpy=True)
    if is_extension_array_dtype(comps.dtype):
        return comps.isin(values)
    elif needs_i8_conversion(comps.dtype):
        # Dispatch to DatetimeLikeArrayMixin.isin
        return pd_array(comps).isin(values)
    elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype):
        # e.g. comps are integers and values are datetime64s
        return np.zeros(comps.shape, dtype=bool)
        # TODO: not quite right ... Sparse/Categorical
    elif needs_i8_conversion(values.dtype):
        return isin(comps, values.astype(object))
    elif is_extension_array_dtype(values.dtype):
        return isin(np.asarray(comps), np.asarray(values))
    # GH16012
    # Ensure np.in1d doesn't get object types or it *may* throw an exception
    # Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
    # in1d is faster for small sizes
    if len(comps) > 1_000_000 and len(values) <= 26 and not is_object_dtype(comps):
        # If the values include nan we need to check for nan explicitly
        # since np.nan is not equal to np.nan
        if isna(values).any():
            def f(c, v):
                return np.logical_or(np.in1d(c, v), np.isnan(c))
        else:
            f = np.in1d
    else:
        common = np.find_common_type([values.dtype, comps.dtype], [])
        values = values.astype(common, copy=False)
        comps = comps.astype(common, copy=False)
        name = common.name
        if name == "bool":
            name = "uint8"
        f = getattr(htable, f"ismember_{name}")
    return f(comps, values)
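# Illustrative sketch (added for clarity, not part of the original module):
# with plain integer ndarrays the hashtable membership path above is taken.
# >>> isin(np.array([1, 2, 3]), [2, 4])
# array([False,  True, False])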
def factorize_array(
    values: np.ndarray,
    na_sentinel: int = -1,
    size_hint: Optional[int] = None,
    na_value=None,
    mask: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Factorize an array-like to codes and uniques.
    This doesn't do any coercion of types or unboxing before factorization.
    Parameters
    ----------
    values : ndarray
    na_sentinel : int, default -1
    size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method
    na_value : object, optional
        A value in `values` to consider missing. Note: only use this
        parameter when you know that you don't have any values pandas would
        consider missing in the array (NaN for float data, iNaT for
        datetimes, etc.).
    mask : ndarray[bool], optional
        If not None, the mask is used as indicator for missing values
        (True = missing, False = valid) instead of `na_value` or
        condition "val != val".
    Returns
    -------
    codes : ndarray
    uniques : ndarray
    """
    hash_klass, values = get_data_algo(values)
    table = hash_klass(size_hint or len(values))
    uniques, codes = table.factorize(
        values, na_sentinel=na_sentinel, na_value=na_value, mask=mask
    )
    codes = ensure_platform_int(codes)
    return codes, uniques
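# Illustrative sketch (added for clarity, not part of the original module):
# factorizing a plain object ndarray yields integer codes into `uniques`,
# with uniques listed in order of first appearance.
# >>> codes, uniques = factorize_array(np.array(["b", "b", "a"], dtype=object))
# >>> codes
# array([0, 0, 1])
# >>> uniques
# array(['b', 'a'], dtype=object)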
@doc(
    values=dedent(
        """\
    values : sequence
        A 1-D sequence. Sequences that aren't pandas objects are
        coerced to ndarrays before factorization.
    """
    ),
    sort=dedent(
        """\
    sort : bool, default False
        Sort `uniques` and shuffle `codes` to maintain the
        relationship.
    """
    ),
    size_hint=dedent(
        """\
    size_hint : int, optional
        Hint to the hashtable sizer.
    """
    ),
)
def factorize(
    values,
    sort: bool = False,
    na_sentinel: Optional[int] = -1,
    size_hint: Optional[int] = None,
) -> Tuple[np.ndarray, Union[np.ndarray, Index]]:
    """
    Encode the object as an enumerated type or categorical variable.
    This method is useful for obtaining a numeric representation of an
    array when all that matters is identifying distinct values. `factorize`
    is available as both a top-level function :func:`pandas.factorize`,
    and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
    Parameters
    ----------
    {values}{sort}
    na_sentinel : int or None, default -1
        Value to mark "not found". If None, will not drop the NaN
        from the uniques of the values.
        .. versionchanged:: 1.1.2
    {size_hint}\
    Returns
    -------
    codes : ndarray
        An integer ndarray that's an indexer into `uniques`.
        ``uniques.take(codes)`` will have the same values as `values`.
    uniques : ndarray, Index, or Categorical
        The unique valid values. When `values` is Categorical, `uniques`
        is a Categorical. When `values` is some other pandas object, an
        `Index` is returned. Otherwise, a 1-D ndarray is returned.
        .. note ::
           Even if there's a missing value in `values`, `uniques` will
           *not* contain an entry for it.
    See Also
    --------
    cut : Discretize continuous-valued array.
    unique : Find the unique value in an array.
    Examples
    --------
    These examples all show factorize as a top-level method like
    ``pd.factorize(values)``. The results are identical for methods like
    :meth:`Series.factorize`.
    >>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
    >>> codes
    array([0, 0, 1, 2, 0]...)
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)
    With ``sort=True``, the `uniques` will be sorted, and `codes` will be
    shuffled so that the relationship is maintained.
    >>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
    >>> codes
    array([1, 1, 0, 2, 1]...)
    >>> uniques
    array(['a', 'b', 'c'], dtype=object)
    Missing values are indicated in `codes` with `na_sentinel`
    (``-1`` by default). Note that missing values are never
    included in `uniques`.
    >>> codes, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
    >>> codes
    array([ 0, -1,  1,  2,  0]...)
    >>> uniques
    array(['b', 'a', 'c'], dtype=object)
    Thus far, we've only factorized lists (which are internally coerced to
    NumPy arrays). When factorizing pandas objects, the type of `uniques`
    will differ. For Categoricals, a `Categorical` is returned.
    >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1]...)
    >>> uniques
    ['a', 'c']
    Categories (3, object): ['a', 'b', 'c']
    Notice that ``'b'`` is in ``uniques.categories``, despite not being
    present in ``cat.values``.
    For all other pandas objects, an Index of the appropriate type is
    returned.
    >>> cat = pd.Series(['a', 'a', 'c'])
    >>> codes, uniques = pd.factorize(cat)
    >>> codes
    array([0, 0, 1]...)
    >>> uniques
    Index(['a', 'c'], dtype='object')
    If NaN is in the values, and we want to include NaN in the uniques of the
    values, it can be achieved by setting ``na_sentinel=None``.
    >>> values = np.array([1, 2, 1, np.nan])
    >>> codes, uniques = pd.factorize(values)  # default: na_sentinel=-1
    >>> codes
    array([ 0,  1,  0, -1])
    >>> uniques
    array([1., 2.])
    >>> codes, uniques = pd.factorize(values, na_sentinel=None)
    >>> codes
    array([0, 1, 0, 2])
    >>> uniques
    array([ 1.,  2., nan])
    """
    # Implementation notes: This method is responsible for 3 things
    # 1.) coercing data to array-like (ndarray, Index, extension array)
    # 2.) factorizing codes and uniques
    # 3.) Maybe boxing the uniques in an Index
    #
    # Step 2 is dispatched to extension types (like Categorical). They are
    # responsible only for factorization. All data coercion, sorting and boxing
    # should happen here.
    if isinstance(values, ABCRangeIndex):
        return values.factorize(sort=sort)
    values = _ensure_arraylike(values)
    original = values
    if not isinstance(values, ABCMultiIndex):
        values = extract_array(values, extract_numpy=True)
    # GH35667, if na_sentinel=None, we will not drop NaNs from the uniques
    # of values; assign na_sentinel=-1 to replace the code value for NaN.
    dropna = True
    if na_sentinel is None:
        na_sentinel = -1
        dropna = False
    if (
        isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
        and values.freq is not None
    ):
        codes, uniques = values.factorize(sort=sort)
        if isinstance(original, ABCIndex):
            uniques = original._shallow_copy(uniques, name=None)
        elif isinstance(original, ABCSeries):
            from pandas import Index
            uniques = Index(uniques)
        return codes, uniques
    if is_extension_array_dtype(values.dtype):
        codes, uniques = values.factorize(na_sentinel=na_sentinel)
        dtype = original.dtype
    else:
        values, dtype = _ensure_data(values)
        if original.dtype.kind in ["m", "M"]:
            # Note: factorize_array will cast NaT bc it has a __int__
            #  method, but will not cast the more-correct dtype.type("nat")
            na_value = iNaT
        else:
            na_value = None
        codes, uniques = factorize_array(
            values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
        )
    if sort and len(uniques) > 0:
        uniques, codes = safe_sort(
            uniques, codes, na_sentinel=na_sentinel, assume_unique=True, verify=False
        )
    code_is_na = codes == na_sentinel
    if not dropna and code_is_na.any():
        # na_value is set based on the dtype of uniques; compat is set to False
        # because we do not want na_value to be 0 for integers
        na_value = na_value_for_dtype(uniques.dtype, compat=False)
        uniques = np.append(uniques, [na_value])
        codes = np.where(code_is_na, len(uniques) - 1, codes)
    uniques = _reconstruct_data(uniques, dtype, original)
    # return original tenor
    if isinstance(original, ABCIndex):
        if original.dtype.kind in ["m", "M"] and isinstance(uniques, np.ndarray):
            original._data = cast(
                "Union[DatetimeArray, TimedeltaArray]", original._data
            )
            uniques = type(original._data)._simple_new(uniques, dtype=original.dtype)
        uniques = original._shallow_copy(uniques, name=None)
    elif isinstance(original, ABCSeries):
        from pandas import Index
        uniques = Index(uniques)
    return codes, uniques
def value_counts(
    values,
    sort: bool = True,
    ascending: bool = False,
    normalize: bool = False,
    bins=None,
    dropna: bool = True,
) -> Series:
    """
    Compute a histogram of the counts of non-null values.
    Parameters
    ----------
    values : ndarray (1-d)
    sort : bool, default True
        Sort by values
    ascending : bool, default False
        Sort in ascending order
    normalize : bool, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : bool, default True
        Don't include counts of NaN
    Returns
    -------
    Series
    """
    from pandas.core.series import Series
    name = getattr(values, "name", None)
    if bins is not None:
        from pandas.core.reshape.tile import cut
        values = Series(values)
        try:
            ii = cut(values, bins, include_lowest=True)
        except TypeError as err:
            raise TypeError("bins argument only works with numeric data.") from err
        # count, remove nulls (from the index), and use the bins as the index
        result = ii.value_counts(dropna=dropna)
        result = result[result.index.notna()]
        result.index = result.index.astype("interval")
        result = result.sort_index()
        # if we are dropna and we have NO values
        if dropna and (result._values == 0).all():
            result = result.iloc[0:0]
        # normalizing is by len of all (regardless of dropna)
        counts = np.array([len(ii)])
    else:
        if is_extension_array_dtype(values):
            # handle Categorical and sparse,
            result = Series(values)._values.value_counts(dropna=dropna)
            result.name = name
            counts = result._values
        else:
            keys, counts = value_counts_arraylike(values, dropna)
            result = Series(counts, index=keys, name=name)
    if sort:
        result = result.sort_values(ascending=ascending)
    if normalize:
        result = result / counts.sum()
    return result
# Called once from SparseArray, otherwise could be private
def value_counts_arraylike(values, dropna: bool):
    """
    Parameters
    ----------
    values : arraylike
    dropna : bool
    Returns
    -------
    uniques : np.ndarray or ExtensionArray
    counts : np.ndarray
    """
    values = _ensure_arraylike(values)
    original = values
    values, _ = _ensure_data(values)
    ndtype = values.dtype.name
    if needs_i8_conversion(original.dtype):
        # datetime, timedelta, or period
        keys, counts = htable.value_count_int64(values, dropna)
        if dropna:
            msk = keys != iNaT
            keys, counts = keys[msk], counts[msk]
    else:
        # ndarray like
        # TODO: handle uint8
        f = getattr(htable, f"value_count_{ndtype}")
        keys, counts = f(values, dropna)
    keys = _reconstruct_data(keys, original.dtype, original)
    return keys, counts
def duplicated(values: ArrayLike, keep: Union[str, bool] = "first") -> np.ndarray:
    """
    Return boolean ndarray denoting duplicate values.
    Parameters
    ----------
    values : ndarray-like
        Array over which to check for duplicate values.
    keep : {'first', 'last', False}, default 'first'
        - ``first`` : Mark duplicates as ``True`` except for the first
          occurrence.
        - ``last`` : Mark duplicates as ``True`` except for the last
          occurrence.
        - False : Mark all duplicates as ``True``.
    Returns
    -------
    duplicated : ndarray
    """
    values, _ = _ensure_data(values)
    ndtype = values.dtype.name
    f = getattr(htable, f"duplicated_{ndtype}")
    return f(values, keep=keep)
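# Illustrative sketch (added for clarity, not part of the original module):
# with the default keep="first", only repeats after the first occurrence are
# flagged.
# >>> duplicated(np.array([1, 1, 2]))
# array([False,  True, False])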
def mode(values, dropna: bool = True) -> Series:
    """
    Returns the mode(s) of an array.
    Parameters
    ----------
    values : array-like
        Array over which to check for duplicate values.
    dropna : boolean, default True
        Don't consider counts of NaN/NaT.
        .. versionadded:: 0.24.0
    Returns
    -------
    mode : Series
    """
    from pandas import Series
    import pandas.core.indexes.base as ibase
    values = _ensure_arraylike(values)
    original = values
    # categorical is a fast-path
    if is_categorical_dtype(values):
        if isinstance(values, Series):
            # TODO: should we be passing `name` below?
            return Series(values._values.mode(dropna=dropna), name=values.name)
        return values.mode(dropna=dropna)
    if dropna and needs_i8_conversion(values.dtype):
        mask = values.isnull()
        values = values[~mask]
    values, _ = _ensure_data(values)
    ndtype = values.dtype.name
    f = getattr(htable, f"mode_{ndtype}")
    result = f(values, dropna=dropna)
    try:
        result = np.sort(result)
    except TypeError as err:
        warn(f"Unable to sort modes: {err}")
    result = _reconstruct_data(result, original.dtype, original)
    # Ensure index is type stable (should always use int index)
    return Series(result, index=ibase.default_index(len(result)))
def rank(
    values: ArrayLike,
    axis: int = 0,
    method: str = "average",
    na_option: str = "keep",
    ascending: bool = True,
    pct: bool = False,
) -> np.ndarray:
    """
    Rank the values along a given axis.
    Parameters
    ----------
    values : array-like
        Array whose values will be ranked. The number of dimensions in this
        array must not exceed 2.
    axis : int, default 0
        Axis over which to perform rankings.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which tiebreaks are broken during the ranking.
    na_option : {'keep', 'top'}, default 'keep'
        The method by which NaNs are placed in the ranking.
        - ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with +/- inf so that they
                   are ranked at the top
    ascending : boolean, default True
        Whether or not the elements should be ranked in ascending order.
    pct : boolean, default False
        Whether or not to display the returned rankings in percentile form
        (e.g. 0.333..., 0.666..., 1) instead of integer form (e.g. 1, 2, 3).
    """
    if values.ndim == 1:
        values = _get_values_for_rank(values)
        ranks = algos.rank_1d(
            values,
            labels=np.zeros(len(values), dtype=np.int64),
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    elif values.ndim == 2:
        values = _get_values_for_rank(values)
        ranks = algos.rank_2d(
            values,
            axis=axis,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    else:
        raise TypeError("Array with ndim > 2 are not supported.")
    return ranks
def checked_add_with_arr(
    arr: np.ndarray,
    b,
    arr_mask: Optional[np.ndarray] = None,
    b_mask: Optional[np.ndarray] = None,
) -> np.ndarray:
    """
    Perform array addition that checks for underflow and overflow.
    Performs the addition of an int64 array and an int64 integer (or array)
    but checks that they do not result in overflow first. For elements that
    are indicated to be NaN, whether or not there is overflow for that element
    is automatically ignored.
    Parameters
    ----------
    arr : array addend.
    b : array or scalar addend.
    arr_mask : np.ndarray[bool] or None, default None
        array indicating which elements to exclude from checking
    b_mask : np.ndarray[bool] or None, default None
        array or scalar indicating which element(s) to exclude from checking
    Returns
    -------
    sum : An array for elements x + b for each element x in arr if b is
          a scalar or an array for elements x + y for each element pair
          (x, y) in (arr, b).
    Raises
    ------
    OverflowError if any x + y exceeds the maximum or minimum int64 value.
    """
    # For performance reasons, we broadcast 'b' to the new array 'b2'
    # so that it has the same size as 'arr'.
    b2 = np.broadcast_to(b, arr.shape)
    if b_mask is not None:
        # We do the same broadcasting for b_mask as well.
        b2_mask = np.broadcast_to(b_mask, arr.shape)
    else:
        b2_mask = None
    # For elements that are NaN, regardless of their value, we should
    # ignore whether they overflow or not when doing the checked add.
    if arr_mask is not None and b2_mask is not None:
        not_nan = np.logical_not(arr_mask | b2_mask)
    elif arr_mask is not None:
        not_nan = np.logical_not(arr_mask)
    elif b_mask is not None:
        not_nan = np.logical_not(b2_mask)
    else:
        not_nan = np.empty(arr.shape, dtype=bool)
        not_nan.fill(True)
    # gh-14324: For each element in 'arr' and its corresponding element
    # in 'b2', we check the sign of the element in 'b2'. If it is positive,
    # we then check whether its sum with the element in 'arr' exceeds
    # np.iinfo(np.int64).max. If so, we have an overflow error. If it
    # it is negative, we then check whether its sum with the element in
    # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
    # error as well.
    mask1 = b2 > 0
    mask2 = b2 < 0
    if not mask1.any():
        to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
    elif not mask2.any():
        to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
    else:
        to_raise = (
            (np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]
        ).any() or (
            (np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]
        ).any()
    if to_raise:
        raise OverflowError("Overflow in int64 addition")
    return arr + b
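# Illustrative sketch (added for clarity, not part of the original module):
# adding 1 to the maximum int64 trips the overflow check instead of wrapping.
# >>> checked_add_with_arr(np.array([np.iinfo(np.int64).max]), 1)
# Traceback (most recent call last):
#     ...
# OverflowError: Overflow in int64 addition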
def quantile(x, q, interpolation_method="fraction"):
    """
    Compute sample quantile or quantiles of the input array. For example, q=0.5
    computes the median.
    The `interpolation_method` parameter supports three values, namely
    `fraction` (default), `lower` and `higher`. Interpolation is done only,
    if the desired quantile lies between two data points `i` and `j`. For
    `fraction`, the result is an interpolated value between `i` and `j`;
    for `lower`, the result is `i`, for `higher` the result is `j`.
    Parameters
    ----------
    x : ndarray
        Values from which to extract score.
    q : scalar or array
        Percentile at which to extract score.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:
        - fraction: `i + (j - i)*fraction`, where `fraction` is the
                    fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
        - higher: `j`.
    Returns
    -------
    score : float
        Score at percentile.
    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    x = np.asarray(x)
    mask = isna(x)
    x = x[~mask]
    values = np.sort(x)
    def _interpolate(a, b, fraction):
        """
        Returns the point at the given fraction between a and b, where
        'fraction' must be between 0 and 1.
        """
        return a + (b - a) * fraction
    def _get_score(at):
        if len(values) == 0:
            return np.nan
        idx = at * (len(values) - 1)
        if idx % 1 == 0:
            score = values[int(idx)]
        else:
            if interpolation_method == "fraction":
                score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
            elif interpolation_method == "lower":
                score = values[int(np.floor(idx))]
            elif interpolation_method == "higher":
                score = values[int(np.ceil(idx))]
            else:
                raise ValueError(
                    "interpolation_method can only be 'fraction', "
                    "'lower' or 'higher'"
                )
        return score
    if is_scalar(q):
        return _get_score(q)
    else:
        q = np.asarray(q, np.float64)
        result = [_get_score(x) for x in q]
        result = np.array(result, dtype=np.float64)
        return result
# --------------- #
# select n        #
# --------------- #
class SelectN:
    def __init__(self, obj, n: int, keep: str):
        self.obj = obj
        self.n = n
        self.keep = keep
        if self.keep not in ("first", "last", "all"):
            raise ValueError('keep must be either "first", "last" or "all"')
    def compute(self, method: str) -> FrameOrSeriesUnion:
        raise NotImplementedError
    def nlargest(self):
        return self.compute("nlargest")
    def nsmallest(self):
        return self.compute("nsmallest")
    @staticmethod
    def is_valid_dtype_n_method(dtype: DtypeObj) -> bool:
        """
        Helper function to determine if dtype is valid for
        nsmallest/nlargest methods
        """
        return (
            is_numeric_dtype(dtype) and not is_complex_dtype(dtype)
        ) or needs_i8_conversion(dtype)
class SelectNSeries(SelectN):
    """
    Implement n largest/smallest for Series
    Parameters
    ----------
    obj : Series
    n : int
    keep : {'first', 'last'}, default 'first'
    Returns
    -------
    nordered : Series
    """
    def compute(self, method: str) -> Series:
        n = self.n
        dtype = self.obj.dtype
        if not self.is_valid_dtype_n_method(dtype):
            raise TypeError(f"Cannot use method '{method}' with dtype {dtype}")
        if n <= 0:
            return self.obj[[]]
        dropped = self.obj.dropna()
        # slow method
        if n >= len(self.obj):
            ascending = method == "nsmallest"
            return dropped.sort_values(ascending=ascending).head(n)
        # fast method
        arr, pandas_dtype = _ensure_data(dropped.values)
        if method == "nlargest":
            arr = -arr
            if is_integer_dtype(pandas_dtype):
                # GH 21426: ensure reverse ordering at boundaries
                arr -= 1
            elif is_bool_dtype(pandas_dtype):
                # GH 26154: ensure False is smaller than True
                arr = 1 - (-arr)
        if self.keep == "last":
            arr = arr[::-1]
        narr = len(arr)
        n = min(n, narr)
        kth_val = algos.kth_smallest(arr.copy(), n - 1)
        (ns,) = np.nonzero(arr <= kth_val)
        inds = ns[arr[ns].argsort(kind="mergesort")]
        if self.keep != "all":
            inds = inds[:n]
        if self.keep == "last":
            # reverse indices
            inds = narr - 1 - inds
        return dropped.iloc[inds]
class SelectNFrame(SelectN):
    """
    Implement n largest/smallest for DataFrame
    Parameters
    ----------
    obj : DataFrame
    n : int
    keep : {'first', 'last'}, default 'first'
    columns : list or str
    Returns
    -------
    nordered : DataFrame
    """
    def __init__(self, obj, n: int, keep: str, columns):
        super().__init__(obj, n, keep)
        if not is_list_like(columns) or isinstance(columns, tuple):
            columns = [columns]
        columns = list(columns)
        self.columns = columns
    def compute(self, method: str) -> DataFrame:
        from pandas import Int64Index
        n = self.n
        frame = self.obj
        columns = self.columns
        for column in columns:
            dtype = frame[column].dtype
            if not self.is_valid_dtype_n_method(dtype):
                raise TypeError(
                    f"Column {repr(column)} has dtype {dtype}, "
                    f"cannot use method {repr(method)} with this dtype"
                )
        def get_indexer(current_indexer, other_indexer):
            """
            Helper function to concat `current_indexer` and `other_indexer`
            depending on `method`
            """
            if method == "nsmallest":
                return current_indexer.append(other_indexer)
            else:
                return other_indexer.append(current_indexer)
        # Below we save and reset the index in case index contains duplicates
        original_index = frame.index
        cur_frame = frame = frame.reset_index(drop=True)
        cur_n = n
        indexer = Int64Index([])
        for i, column in enumerate(columns):
            # For each column we apply method to cur_frame[column].
            # If it's the last column or if we have the number of
            # results desired we are done.
            # Otherwise there are duplicates of the largest/smallest
            # value and we need to look at the rest of the columns
            # to determine which of the rows with the largest/smallest
            # value in the column to keep.
            series = cur_frame[column]
            is_last_column = len(columns) - 1 == i
            values = getattr(series, method)(
                cur_n, keep=self.keep if is_last_column else "all"
            )
            if is_last_column or len(values) <= cur_n:
                indexer = get_indexer(indexer, values.index)
                break
            # Now find all values which are equal to
            # the (nsmallest: largest)/(nlargest: smallest)
            # from our series.
            border_value = values == values[values.index[-1]]
            # Some of these values are among the top-n
            # some aren't.
            unsafe_values = values[border_value]
            # These values are definitely among the top-n
            safe_values = values[~border_value]
            indexer = get_indexer(indexer, safe_values.index)
            # Go on and separate the unsafe_values on the remaining
            # columns.
            cur_frame = cur_frame.loc[unsafe_values.index]
            cur_n = n - len(indexer)
        frame = frame.take(indexer)
        # Restore the index on frame
        frame.index = original_index.take(indexer)
        # If there is only one column, the frame is already sorted.
        if len(columns) == 1:
            return frame
        ascending = method == "nsmallest"
        return frame.sort_values(columns, ascending=ascending, kind="mergesort")
# ---- #
# take #
# ---- #
def take(
    arr, indices: np.ndarray, axis: int = 0, allow_fill: bool = False, fill_value=None
):
    """
    Take elements from an array.
    Parameters
    ----------
    arr : sequence
        Non array-likes (sequences without a dtype) are coerced
        to an ndarray.
    indices : sequence of integers
        Indices to be taken.
    axis : int, default 0
        The axis over which to select values.
    allow_fill : bool, default False
        How to handle negative values in `indices`.
        * False: negative values in `indices` indicate positional indices
          from the right (the default). This is similar to :func:`numpy.take`.
        * True: negative values in `indices` indicate
          missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.
    fill_value : any, optional
        Fill value to use for NA-indices when `allow_fill` is True.
        This may be ``None``, in which case the default NA value for
        the type (``self.dtype.na_value``) is used.
        For multi-dimensional `arr`, each *element* is filled with
        `fill_value`.
    Returns
    -------
    ndarray or ExtensionArray
        Same type as the input.
    Raises
    ------
    IndexError
        When `indices` is out of bounds for the array.
    ValueError
        When the indexer contains negative values other than ``-1``
        and `allow_fill` is True.
    Notes
    -----
    When `allow_fill` is False, `indices` may be whatever dimensionality
    is accepted by NumPy for `arr`.
    When `allow_fill` is True, `indices` should be 1-D.
    See Also
    --------
    numpy.take : Take elements from an array along an axis.
    Examples
    --------
    >>> from pandas.api.extensions import take
    With the default ``allow_fill=False``, negative numbers indicate
    positional indices from the right.
    >>> take(np.array([10, 20, 30]), [0, 0, -1])
    array([10, 10, 30])
    Setting ``allow_fill=True`` will place `fill_value` in those positions.
    >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
    array([10., 10., nan])
    >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
    ...      fill_value=-10)
    array([ 10,  10, -10])
    """
    if not is_array_like(arr):
        arr = np.asarray(arr)
    indices = np.asarray(indices, dtype=np.intp)
    if allow_fill:
        # Pandas style, -1 means NA
        validate_indices(indices, arr.shape[axis])
        result = take_nd(
            arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
        )
    else:
        # NumPy style
        result = arr.take(indices, axis=axis)
    return result
# ------------ #
# searchsorted #
# ------------ #
def searchsorted(arr, value, side="left", sorter=None) -> np.ndarray:
    """
    Find indices where elements should be inserted to maintain order.
    .. versionadded:: 0.25.0
    Find the indices into a sorted array `arr` (a) such that, if the
    corresponding elements in `value` were inserted before the indices,
    the order of `arr` would be preserved.
    Assuming that `arr` is sorted:
    ======  ================================
    `side`  returned index `i` satisfies
    ======  ================================
    left    ``arr[i-1] < value <= self[i]``
    right   ``arr[i-1] <= value < self[i]``
    ======  ================================
    Parameters
    ----------
    arr : array-like
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        that sort it.
    value : array_like
        Values to insert into `arr`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index.  If there is no suitable
        index, return either 0 or N (where N is the length of `self`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort array a into ascending
        order. They are typically the result of argsort.
    Returns
    -------
    array of ints
        Array of insertion points with the same shape as `value`.
    See Also
    --------
    numpy.searchsorted : Similar method from NumPy.
    """
    if sorter is not None:
        sorter = ensure_platform_int(sorter)
    if (
        isinstance(arr, np.ndarray)
        and is_integer_dtype(arr.dtype)
        and (is_integer(value) or is_integer_dtype(value))
    ):
        # if `arr` and `value` have different dtypes, `arr` would be
        # recast by numpy, causing a slow search.
        # Before searching below, we therefore try to give `value` the
        # same dtype as `arr`, while guarding against integer overflows.
        iinfo = np.iinfo(arr.dtype.type)
        value_arr = np.array([value]) if is_scalar(value) else np.array(value)
        if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
            # value within bounds, so no overflow, so can convert value dtype
            # to dtype of arr
            dtype = arr.dtype
        else:
            dtype = value_arr.dtype
        if is_scalar(value):
            value = dtype.type(value)
        else:
            value = pd_array(value, dtype=dtype)
    elif not (
        is_object_dtype(arr) or is_numeric_dtype(arr) or is_categorical_dtype(arr)
    ):
        # E.g. if `arr` is an array with dtype='datetime64[ns]'
        # and `value` is a pd.Timestamp, we may need to convert value
        arr = ensure_wrapped_if_datetimelike(arr)
    return arr.searchsorted(value, side=side, sorter=sorter)
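# Illustrative sketch (added for clarity, not part of the original module):
# an in-bounds integer `value` is cast to the dtype of `arr` so NumPy does not
# upcast the whole array before searching.
# >>> searchsorted(np.array([1, 3, 5], dtype=np.int8), 4)
# 2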
# ---- #
# diff #
# ---- #
_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}
def diff(arr, n: int, axis: int = 0, stacklevel=3):
    """
    difference of n periods along the given axis,
    analogous to s - s.shift(n)
    Parameters
    ----------
    arr : ndarray
    n : int
        number of periods
    axis : int
        axis to shift on
    stacklevel : int
        The stacklevel for the lost dtype warning.
    Returns
    -------
    shifted
    """
    n = int(n)
    na = np.nan
    dtype = arr.dtype
    if dtype.kind == "b":
        op = operator.xor
    else:
        op = operator.sub
    if isinstance(dtype, PandasDtype):
        # PandasArray cannot necessarily hold shifted versions of itself.
        arr = arr.to_numpy()
        dtype = arr.dtype
    if is_extension_array_dtype(dtype):
        if hasattr(arr, f"__{op.__name__}__"):
            if axis != 0:
                raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
            return op(arr, arr.shift(n))
        else:
            warn(
                "dtype lost in 'diff()'. In the future this will raise a "
                "TypeError. Convert to a suitable dtype prior to calling 'diff'.",
                FutureWarning,
                stacklevel=stacklevel,
            )
            arr = np.asarray(arr)
            dtype = arr.dtype
    is_timedelta = False
    is_bool = False
    if needs_i8_conversion(arr.dtype):
        dtype = np.int64
        arr = arr.view("i8")
        na = iNaT
        is_timedelta = True
    elif is_bool_dtype(dtype):
        # We have to cast in order to be able to hold np.nan
        dtype = np.object_
        is_bool = True
    elif is_integer_dtype(dtype):
        # We have to cast in order to be able to hold np.nan
        # int8, int16 are incompatible with float64,
        # see https://github.com/cython/cython/issues/2646
        if arr.dtype.name in ["int8", "int16"]:
            dtype = np.float32
        else:
            dtype = np.float64
    orig_ndim = arr.ndim
    if orig_ndim == 1:
        # reshape so we can always use algos.diff_2d
        arr = arr.reshape(-1, 1)
        # TODO: require axis == 0
    dtype = np.dtype(dtype)
    out_arr = np.empty(arr.shape, dtype=dtype)
    na_indexer = [slice(None)] * arr.ndim
    na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
    out_arr[tuple(na_indexer)] = na
    if arr.ndim == 2 and arr.dtype.name in _diff_special:
        # TODO: can diff_2d dtype specialization troubles be fixed by defining
        #  out_arr inside diff_2d?
        algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta)
    else:
        # To keep mypy happy, _res_indexer is a list while res_indexer is
        #  a tuple, ditto for lag_indexer.
        _res_indexer = [slice(None)] * arr.ndim
        _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
        res_indexer = tuple(_res_indexer)
        _lag_indexer = [slice(None)] * arr.ndim
        _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
        lag_indexer = tuple(_lag_indexer)
        # need to make sure that we account for na for datelike/timedelta
        # we don't actually want to subtract these i8 numbers
        if is_timedelta:
            res = arr[res_indexer]
            lag = arr[lag_indexer]
            mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
            if mask.any():
                res = res.copy()
                res[mask] = 0
                lag = lag.copy()
                lag[mask] = 0
            result = res - lag
            result[mask] = na
            out_arr[res_indexer] = result
        elif is_bool:
            out_arr[res_indexer] = arr[res_indexer] ^ arr[lag_indexer]
        else:
            out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
    if is_timedelta:
        out_arr = out_arr.view("timedelta64[ns]")
    if orig_ndim == 1:
        out_arr = out_arr[:, 0]
    return out_arr
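# Illustrative sketch (added for clarity, not part of the original module):
# integer input is cast to float so the leading period can hold NaN.
# >>> diff(np.array([1, 3, 6, 10]), 1)
# array([nan,  2.,  3.,  4.])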
# --------------------------------------------------------------------
# Helper functions
# Note: safe_sort is in algorithms.py instead of sorting.py because it is
#  low-dependency, is used in this module, and used private methods from
#  this module.
def safe_sort(
    values,
    codes=None,
    na_sentinel: int = -1,
    assume_unique: bool = False,
    verify: bool = True,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """
    Sort ``values`` and reorder corresponding ``codes``.
    ``values`` should be unique if ``codes`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.
    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``codes`` is not None.
    codes : list_like, optional
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``na_sentinel``.
    na_sentinel : int, default -1
        Value in ``codes`` to mark "not found".
        Ignored when ``codes`` is None.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``codes`` is None.
    verify : bool, default True
        Check if codes are out of bound for the values and put out of bound
        codes equal to na_sentinel. If ``verify=False``, it is assumed there
        are no out of bound codes. Ignored when ``codes`` is None.
        .. versionadded:: 0.25.0
    Returns
    -------
    ordered : ndarray
        Sorted ``values``
    new_codes : ndarray
        Reordered ``codes``; returned when ``codes`` is not None.
    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``codes`` is neither None
        nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``codes`` is not None and ``values`` contain duplicates.
    """
    if not is_list_like(values):
        raise TypeError(
            "Only list-like objects are allowed to be passed to safe_sort as values"
        )
    if not isinstance(values, (np.ndarray, ABCExtensionArray)):
        # don't convert to string types
        dtype, _ = infer_dtype_from_array(values)
        values = np.asarray(values, dtype=dtype)
    sorter = None
    if (
        not is_extension_array_dtype(values)
        and lib.infer_dtype(values, skipna=False) == "mixed-integer"
    ):
        ordered = _sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except TypeError:
            # Previous sorters failed or were not applicable, try `_sort_mixed`
            # which would work, but which fails for special case of 1d arrays
            # with tuples.
            if values.size and isinstance(values[0], tuple):
                ordered = _sort_tuples(values)
            else:
                ordered = _sort_mixed(values)
    # codes:
    if codes is None:
        return ordered
    if not is_list_like(codes):
        raise TypeError(
            "Only list-like objects or None are allowed to "
            "be passed to safe_sort as codes"
        )
    codes = ensure_platform_int(np.asarray(codes))
    if not assume_unique and not len(unique(values)) == len(values):
        raise ValueError("values should be unique if codes is not None")
    if sorter is None:
        # mixed types
        hash_klass, values = get_data_algo(values)
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))
    if na_sentinel == -1:
        # take_nd is faster, but only works for na_sentinels of -1
        order2 = sorter.argsort()
        new_codes = take_nd(order2, codes, fill_value=-1)
        if verify:
            mask = (codes < -len(values)) | (codes >= len(values))
        else:
            mask = None
    else:
        reverse_indexer = np.empty(len(sorter), dtype=np.int_)
        reverse_indexer.put(sorter, np.arange(len(sorter)))
        # Out of bound indices will be masked with `na_sentinel` next, so we
        # may deal with them here without performance loss using `mode='wrap'`
        new_codes = reverse_indexer.take(codes, mode="wrap")
        mask = codes == na_sentinel
        if verify:
            mask = mask | (codes < -len(values)) | (codes >= len(values))
    if mask is not None:
        np.putmask(new_codes, mask, na_sentinel)
    return ordered, ensure_platform_int(new_codes)
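# Illustrative sketch (added for clarity, not part of the original module):
# mixed ints and strings are ordered with the ints first, as documented above.
# >>> safe_sort(np.array([1, "b", 2, "a"], dtype=object))
# array([1, 2, 'a', 'b'], dtype=object)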
def _sort_mixed(values):
    """ order ints before strings in 1d arrays, safe in py3 """
    str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
    nums = np.sort(values[~str_pos])
    strs = np.sort(values[str_pos])
    return np.concatenate([nums, np.asarray(strs, dtype=object)])
def _sort_tuples(values: np.ndarray):
    """
    Convert array of tuples (1d) to array of arrays (2d).
    We need to keep the columns separately as they contain different types and
    nans (can't use `np.sort` as it may fail when str and nan are mixed in a
    column as types cannot be compared).
    """
    from pandas.core.internals.construction import to_arrays
    from pandas.core.sorting import lexsort_indexer
    arrays, _ = to_arrays(values, None)
    indexer = lexsort_indexer(arrays, orders=True)
    return values[indexer]
def union_with_duplicates(lvals: np.ndarray, rvals: np.ndarray) -> np.ndarray:
    """
    Extracts the union from lvals and rvals with respect to duplicates and nans in
    both arrays.
    Parameters
    ----------
    lvals: np.ndarray
        left values which is ordered in front.
    rvals: np.ndarray
        right values ordered after lvals.
    Returns
    -------
    np.ndarray containing the unsorted union of both arrays
    """
    indexer = []
    l_count = value_counts(lvals, dropna=False)
    r_count = value_counts(rvals, dropna=False)
    l_count, r_count = l_count.align(r_count, fill_value=0)
    unique_array = unique(np.append(lvals, rvals))
    if is_extension_array_dtype(lvals) or is_extension_array_dtype(rvals):
        unique_array = pd_array(unique_array)
    for i, value in enumerate(unique_array):
        indexer += [i] * int(max(l_count[value], r_count[value]))
    return unique_array.take(indexer)
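# Illustrative sketch (added for clarity, not part of the original module):
# each value is repeated up to its maximum count on either side, preserving
# first-appearance order.
# >>> union_with_duplicates(np.array([1, 1, 2]), np.array([2, 2, 3]))
# array([1, 1, 2, 2, 3])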
 | 
| 
	the-stack_106_32219 | 
	# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 14:51:46 2019
@author: MAGESHWARAN
"""
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score
# read hours dataset
hour_df = pd.read_csv("hour.csv")
print(hour_df.columns)
# ----------------------- Feature Selection ---------------------
corr_initial = hour_df.corr()
# generate heatmap for the correlation matrix
heat_map2 = sns.heatmap(corr_initial, linewidth=0.01)
# plt.savefig("heat_map2")
# create features from category variables, making binary dummy variables
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for field in dummy_fields:
    dummies = pd.get_dummies(hour_df[field], prefix = field, drop_first = False)
    hour_df = pd.concat([hour_df, dummies], axis = 1)
# drop features
drop_fields = ["instant", "dteday", "season", "yr", "mnth", "weekday", "weathersit", "workingday", "hr", "atemp"]
data = hour_df.drop(drop_fields, axis = 1)
print(data.columns)
corr = data.corr()
# generate heatmap for the correlation matrix of the selected features
heat_map = sns.heatmap(corr, linewidth=0.01)
# plt.savefig("heat_map")
# ------------------------ Feature Scaling --------------------
# scale all the quantitative features
quant_features = ["temp", "casual", "registered", "cnt", "hum", "windspeed"]
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for feature in quant_features:
    mean, std = data[feature].mean(), data[feature].std()
    scaled_features[feature] = [mean, std] # used for later purpose
    # making features with 0 mean and std of 1
    data.loc[:, feature] = (data[feature] - mean) / std
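# Illustrative note (added, not in the original script): the stored mean/std
# can be used to convert scaled predictions back to raw counts, e.g.
#     cnt_mean, cnt_std = scaled_features["cnt"]
#     raw_counts = y_pred[:, 0] * cnt_std + cnt_mean
# where y_pred is the (hypothetical) model output in the scaled space.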
X = data.drop(["cnt", "casual", "registered"], axis = 1)
y = data[["cnt", "casual", "registered"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.002)
# --------------------- Training and Prediction--------------------
mlp_model = MLPRegressor()
mlp_model.fit(X_train, y_train)
y_pred = mlp_model.predict(X_test)
print("Test score(R2):", r2_score(y_test, y_pred) * 100, "%")
print("Test score(model_score):", mlp_model.score(X_test, y_test) * 100, "%") |
| 
	the-stack_106_32220 | 
def checkio(first, second):
    set1 = set(first.split(','))
    set2 = set(second.split(','))
    common = set1 & set2
    if not common:
        return ''
    return ','.join(sorted(common))
 | 
| 
	the-stack_106_32224 | 
	import logging
import typing as t
from src.error import Error
from src.smali_method import SmaliMethod
class SmaliClass:
    def __init__(self, name: str):
        self.name: str = name
        self.sig: str = ''
        self.methods: t.List[SmaliMethod] = []
        self.header_block: t.List[str] = []
    def __parse_class_header(self, idx: int,
                             file_lines: t.List[str]
                             ) -> t.Tuple[int, t.List[str],
                                          str, t.Optional[Error]]:
        try:
            sig = file_lines[0].split()[-1][1:-1].strip()
        except IndexError:
            return 0, [], '', Error(
                'Could not parse class header: {}'.format(self.name))
        i = 1  # default; overwritten by the loop below
        header_block: t.List[str] = []
        for i in range(idx, len(file_lines)):
            line = file_lines[i]
            if '.method' in line:
                break
            header_block.append(line)
        return i, header_block, sig, None
    def __parse_method(self,
                       idx: int,
                       file_lines: t.List[str]
                       ) -> t.Tuple[int, t.Optional[SmaliMethod],
                                    t.Optional[Error]]:
        i = 0
        method_block: t.List[str] = []
        for i in range(idx, len(file_lines)):
            line = file_lines[i]
            method_block.append(line)
            if '.end method' in line:
                break
        method = SmaliMethod(self.name, method_block[0])
        err = method.parse(method_block[1:])
        if err:
            return -1, None, err
        return i, method, None
    def parse(self, file_path: str) -> t.Optional[Error]:
        logging.debug("Parsing SmaliClass: [%s]...", file_path)
        with open(file_path, 'r') as fd:
            file_lines = fd.read().splitlines()
            idx = 0
            idx, self.header_block, self.sig, err = self.__parse_class_header(
                idx, file_lines)
            if err:
                return err
            while idx < len(file_lines):
                line = file_lines[idx]
                if '.method' in line:
                    idx, method, err = self.__parse_method(idx, file_lines)
                    if err:
                        return err
                    if not method:
                        raise Exception('FAIL')
                    self.methods.append(method)
                idx += 1
        return None
    def write(self, fd: t.IO[t.Any]):
        logging.debug('Writing clazz [%s]', self.name)
        for line in self.header_block:
            fd.write(line)
            fd.write('\n')
        for method in self.methods:
            method.write(fd)
    # Removes the package from a class's name
    # `com.afjoseph.test.aaa` -> `aaa`
    def get_simple_name(self) -> str:
        if not '.' in self.name:
            return self.name
        return self.name.split('.')[-1].strip()
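# Illustrative usage sketch (added; the path "Foo.smali" is hypothetical):
#     clazz = SmaliClass("com.example.Foo")
#     err = clazz.parse("Foo.smali")
#     if err is None:
#         print(clazz.get_simple_name())  # -> "Foo"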
 | 
| 
	the-stack_106_32226 | 
	# (c) 2015, Jon Hadfield <[email protected]>
"""
Description: This lookup takes an AWS region and an RDS instance
name and returns the endpoint name.
Example Usage:
{{ lookup('aws_rds_endpoint_name_from_instance_name', ('eu-west-1', 'mydb')) }}
"""
from ansible import errors
try:
    import boto.rds
except ImportError:
    raise errors.AnsibleError("aws_rds_endpoint_name_from_instance_name lookup cannot be run without boto installed")
class LookupModule(object):
    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir
    def run(self, terms, variables=None, **kwargs):
        region = terms[0]
        instance_name = terms[1]
        db_conn = boto.rds.connect_to_region(region)
        db = db_conn.get_all_dbinstances(instance_name)
        if db and db[0]:
            return [db[0].endpoint[0]]
        return None
 | 
| 
	the-stack_106_32227 | 
	import requests
from django.core.exceptions import ImproperlyConfigured
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.helpers import render_authentication_error
from allauth.socialaccount.models import SocialLogin
from allauth.socialaccount import app_settings, providers
from .provider import PersonaProvider
def persona_login(request):
    assertion = request.POST.get('assertion', '')
    settings = app_settings.PROVIDERS.get(PersonaProvider.id, {})
    audience = settings.get('AUDIENCE', None)
    if audience is None:
        raise ImproperlyConfigured(
            "No Persona audience configured. Please "
            "add an AUDIENCE item to the "
            "SOCIALACCOUNT_PROVIDERS['persona'] setting.")
    resp = requests.post('https://verifier.login.persona.org/verify',
                         {'assertion': assertion,
                          'audience': audience})
    try:
        resp.raise_for_status()
        extra_data = resp.json()
        if extra_data['status'] != 'okay':
            return render_authentication_error(
                request,
                provider_id=PersonaProvider.id,
                extra_context={'response': extra_data})
    except (ValueError, requests.RequestException) as e:
        return render_authentication_error(
            request,
            provider_id=PersonaProvider.id,
            exception=e)
    login = providers.registry \
        .by_id(PersonaProvider.id) \
        .sociallogin_from_response(request, extra_data)
    login.state = SocialLogin.state_from_request(request)
    return complete_social_login(request, login)
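# Configuration sketch (illustrative, not part of this module): the view above
# requires an AUDIENCE entry under SOCIALACCOUNT_PROVIDERS['persona'] in the
# Django settings, matching the error raised when it is missing. The URL below
# is a placeholder for the site that actually serves the login page.
#
#     SOCIALACCOUNT_PROVIDERS = {
#         'persona': {
#             'AUDIENCE': 'https://www.example.com',
#         },
#     }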
 | 
| 
	the-stack_106_32228 | 
	#! /usr/bin/env python3
'''
This module provides access to the BankTransaction class
Usage:
    t1 = BankTransaction()
    t1.info()
    t2 = BankTransaction(account=BankAccount(account_number=90101))
    t2.info()
    account_list = [BankAccount(subscriber_origin='nn_NO')]
    t3 = BankTransaction(account=account_list[0])
    t4 = BankTransaction(account=account_list[0])
    t3.info()
    t4.info()
    person_ca = Person()
    person_ca.create('fr_CA')
    ba = BankAccount(person=person_ca)
    transaction = BankTransaction(account=ba)
    transaction.info()
'''
import random
from datetime import datetime
from .BankAccount import BankAccount
actions = {
            'Deposit': +1,
            'Credit Transfer': +1,
            'Withdrawal': -1,
            'Debit Payment': -1
}
class BankTransaction:
    '''
    The BankTransaction object can be initialised empty or with arguments
    For each omitted argument, data will be randomly set
    Arguments:
        value (float), optional:
            float number as in `9999.10` or `-1000`
            Note: if a description is provided, a value should be provided as well
        description (str), optional:
            If not in actions (line 21) and the value is negative, it will be a debit
            If description is in actions, any sign on value will be ignored
            It's recommended to provide a value if the description is customised
        moment (str), optional:
            If provided, recommended `YYYY-mm-dd HH:MM:SS` format
            If omitted, current date and time will be provided
        account (obj), optional:
            send a constructed Account object to replace account_number
            Account has precedence over account_number argument
        account_number (int), optional:
            recommended to have 4-8 digits
            or use account argument to use an Account object
    Attributes:
        account_number (int): account number for the transaction
        account_balance (float): if a BankAccount object is provided
        description (str),
        moment (str),
        value (float)
    '''
    def __init__(self, value=None, description=None, moment=None, account=None, account_number=None):
        if account or not account_number:
            if not account or not isinstance(account, BankAccount):
                account = BankAccount(subscriber_name='Jane Doe')
            account_number = account.account_number
        operation = None
        if description:
            if description in actions:
                operation = actions[description]
            else:
                operation = -1 if value < 0 else +1
        else:
            description = random.choice(list(actions))
            operation = actions[description]
        if not isinstance(value, (float, int)):
            value = random.random() * (10 ** random.randrange(1,6))
        value = round( abs(float(value)), 2 ) * operation
        if not moment:
            moment = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.moment = moment
        self.account_number = account_number
        self.value = value
        self.description = description
        if account:
            account_balance = round( account.balance + value, 2 )
            self.account_balance = account.balance = account_balance
        else:
            self.account_balance = round( value, 2 )
    def info(self) -> dict:
        '''
        Return a dictionary with Bank Transaction object data
        '''
        return { 'moment': self.moment, 'account_number': self.account_number, 'value': self.value, 'description': self.description, 'account_balance': self.account_balance }
    def csv(self) -> str:
        '''
        Return a csv string with Bank Transaction object data
        '''
        return f'"{self.moment}",{self.account_number},{self.value},"{self.description}",{self.account_balance}'.replace('None', '').replace('""', '')
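# Minimal usage sketch (not part of the original module), mirroring the examples
# in the module docstring above: attach a couple of random transactions to one
# account and print them in dict and CSV form.
if __name__ == '__main__':
    account = BankAccount(account_number=90101)  # constructor call as shown in the docstring
    for _ in range(2):
        transaction = BankTransaction(account=account)
        print(transaction.info())
        print(transaction.csv())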
 | 
| 
	the-stack_106_32229 | 
	#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import contextlib
import glob
import os
import re
import shutil
import subprocess
import sys
import hashlib
import platform
from amd_hipify import amd_hipify
from distutils.version import LooseVersion
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.insert(0, os.path.join(REPO_DIR, "tools", "python"))
from util import (  # noqa: E402
    run,
    is_windows, is_macOS, is_linux,
    get_logger)
import util.android as android  # noqa: E402
log = get_logger("build")
class BaseError(Exception):
    """Base class for errors originating from build.py."""
    pass
class BuildError(BaseError):
    """Error from running build steps."""
    def __init__(self, *messages):
        super().__init__("\n".join(messages))
class UsageError(BaseError):
    """Usage related error."""
    def __init__(self, message):
        super().__init__(message)
def _check_python_version():
    # According to the BUILD.md, python 3.6+ is required:
    # Python 2 is definitely not supported and it should be safer to consider
    # it won't run with python 4:
    if sys.version_info[0] != 3:
        raise BuildError(
            "Bad python major version: expecting python 3, found version "
            "'{}'".format(sys.version))
    if sys.version_info[1] < 6:
        raise BuildError(
            "Bad python minor version: expecting python 3.6+, found version "
            "'{}'".format(sys.version))
def _str_to_bool(s):
    """Convert string to bool (in argparse context)."""
    if s.lower() not in ['true', 'false']:
        raise ValueError('Need bool; got %r' % s)
    return {'true': True, 'false': False}[s.lower()]
_check_python_version()
def _openvino_verify_device_type(device_read):
    choices = ["CPU_FP32", "GPU_FP32", "GPU_FP16", "VAD-M_FP16", "MYRIAD_FP16", "VAD-F_FP32"]
    status_hetero = True
    res = False
    if (device_read in choices):
        res = True
    elif (device_read.startswith("HETERO:") or device_read.startswith("MULTI:")):
        res = True
        comma_separated_devices = device_read.split(":")
        comma_separated_devices = comma_separated_devices[1].split(',')
        if (len(comma_separated_devices) < 2):
            print("At least two devices are required in Hetero Mode")
            status_hetero = False
        dev_options = ["CPU", "GPU", "MYRIAD", "FPGA", "HDDL"]
        for dev in comma_separated_devices:
            if (dev not in dev_options):
                status_hetero = False
                break
    def invalid_hetero_build():
        print("\n" + "If trying to build Hetero or Multi, specify the supported devices along with it." + "\n")
        print("specify the keyword HETERO or MULTI followed by the devices ")
        print("in the order of priority you want to build" + "\n")
        print("The different hardware devices that can be added in HETERO or MULTI")
        print("are ['CPU','GPU','MYRIAD','FPGA','HDDL']" + "\n")
        print("An example of how to specify the hetero build type. Ex: HETERO:GPU,CPU" + "\n")
        print("An example of how to specify the MULTI build type. Ex: MULTI:MYRIAD,CPU" + "\n")
        sys.exit("Wrong Build Type selected")
    if (res is False):
        print("\n" + "You have selected the wrong configuration for the build.")
        print("Pick the build type for a specific hardware device from the following options: ", choices)
        print("\n")
        if not (device_read.startswith("HETERO:") or device_read.startswith("MULTI:")):
            invalid_hetero_build()
        sys.exit("Wrong Build Type selected")
    if (status_hetero is False):
        invalid_hetero_build()
    return device_read
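# Illustrative inputs for _openvino_verify_device_type (derived from the checks
# above, not an exhaustive list):
#   "CPU_FP32"              -> accepted, single-device build
#   "HETERO:GPU,CPU"        -> accepted, heterogeneous build (two or more devices)
#   "MULTI:MYRIAD,CPU"      -> accepted, multi-device build
#   "HETERO:GPU"            -> rejected, HETERO/MULTI need at least two devices
# Any rejected value prints the guidance above and exits with
# "Wrong Build Type selected".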
def parse_arguments():
    parser = argparse.ArgumentParser(
        description="ONNXRuntime CI build driver.",
        usage="""  # noqa
        Default behavior is --update --build --test for native architecture builds.
        Default behavior is --update --build for cross-compiled builds.
        The Update phase will update git submodules, and run cmake to generate makefiles.
        The Build phase will build all projects.
        The Test phase will run all unit tests, and optionally the ONNX tests.
        Use the individual flags to only run the specified stages.
        """)
    # Main arguments
    parser.add_argument(
        "--build_dir", required=True, help="Path to the build directory.")
    parser.add_argument(
        "--config", nargs="+", default=["Debug"],
        choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
        help="Configuration(s) to build.")
    parser.add_argument(
        "--update", action='store_true', help="Update makefiles.")
    parser.add_argument("--build", action='store_true', help="Build.")
    parser.add_argument(
        "--clean", action='store_true',
        help="Run 'cmake --build --target clean' for the selected config/s.")
    parser.add_argument(
        "--parallel", nargs='?', const='0', default='1', type=int,
        help="Use parallel build. The optional value specifies the maximum number of parallel jobs. "
             "If the optional value is 0 or unspecified, it is interpreted as the number of CPUs.")
    parser.add_argument("--test", action='store_true', help="Run unit tests.")
    parser.add_argument(
        "--skip_tests", action='store_true', help="Skip all tests.")
    # Training options
    parser.add_argument(
        "--enable_nvtx_profile", action='store_true', help="Enable NVTX profile in ORT.")
    parser.add_argument(
        "--enable_memory_profile", action='store_true', help="Enable memory profile in ORT.")
    parser.add_argument(
        "--enable_training", action='store_true', help="Enable training in ORT.")
    parser.add_argument(
        "--disable_nccl", action='store_true', help="Disable Nccl.")
    parser.add_argument(
        "--mpi_home", help="Path to MPI installation dir")
    parser.add_argument(
        "--nccl_home", help="Path to NCCL installation dir")
    parser.add_argument(
        "--use_mpi", nargs='?', default=True, const=True, type=_str_to_bool)
    # enable ONNX tests
    parser.add_argument(
        "--enable_onnx_tests", action='store_true',
        help="""When running the Test phase, run onnx_test_runner against
        available test data directories.""")
    parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe.")
    parser.add_argument(
        "--fuzz_testing", action='store_true', help="Enable Fuzz testing of the onnxruntime.")
    parser.add_argument(
        "--enable_symbolic_shape_infer_tests", action='store_true',
        help="""When running the Test phase, run symbolic shape inference against
        available test data directories.""")
    # generate documentation
    parser.add_argument(
        "--gen_doc", action='store_true',
        help="Generate documentation on contrib ops")
    # CUDA related
    parser.add_argument("--use_cuda", action='store_true', help="Enable CUDA.")
    parser.add_argument(
        "--cuda_version", help="The version of CUDA toolkit to use. "
        "Auto-detect if not specified. e.g. 9.0")
    parser.add_argument(
        "--cuda_home", help="Path to CUDA home."
        "Read from CUDA_HOME environment variable if --use_cuda is true and "
        "--cuda_home is not specified.")
    parser.add_argument(
        "--cudnn_home", help="Path to CUDNN home. "
        "Read from CUDNN_HOME environment variable if --use_cuda is true and "
        "--cudnn_home is not specified.")
    # Python bindings
    parser.add_argument(
        "--enable_pybind", action='store_true', help="Enable Python Bindings.")
    parser.add_argument(
        "--build_wheel", action='store_true', help="Build Python Wheel.")
    parser.add_argument(
        "--wheel_name_suffix", help="Suffix to append to created wheel names. "
        "This value is currently only used for nightly builds.")
    parser.add_argument(
        "--numpy_version", help="Installs a specific version of numpy "
        "before building the python binding.")
    parser.add_argument(
        "--skip-keras-test", action='store_true',
        help="Skip tests with Keras if keras is installed")
    # C-Sharp bindings
    parser.add_argument(
        "--build_csharp", action='store_true',
        help="Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. "
        "For building C# bindings and packaging them into nuget package use --build_nuget arg.")
    parser.add_argument(
        "--build_nuget", action='store_true',
        help="Build C#.Net DLL and NuGet package on the local machine. "
        "Currently only Windows and Linux platforms are supported.")
    # Java bindings
    parser.add_argument(
        "--build_java", action='store_true', help="Build Java bindings.")
    # Node.js binding
    parser.add_argument(
        "--build_nodejs", action='store_true',
        help="Build Node.js binding and NPM package.")
    # Build a shared lib
    parser.add_argument(
        "--build_shared_lib", action='store_true',
        help="Build a shared library for the ONNXRuntime.")
    # Build options
    parser.add_argument(
        "--cmake_extra_defines", nargs="+",
        help="Extra definitions to pass to CMake during build system "
        "generation. These are just CMake -D options without the leading -D.")
    parser.add_argument(
        "--target",
        help="Build a specific target, e.g. winml_dll")
    parser.add_argument(
        "--x86", action='store_true',
        help="Create x86 makefiles. Requires --update and no existing cache "
        "CMake setup. Delete CMakeCache.txt if needed")
    parser.add_argument(
        "--arm", action='store_true',
        help="Create ARM makefiles. Requires --update and no existing cache "
        "CMake setup. Delete CMakeCache.txt if needed")
    parser.add_argument(
        "--arm64", action='store_true',
        help="Create ARM64 makefiles. Requires --update and no existing cache "
        "CMake setup. Delete CMakeCache.txt if needed")
    parser.add_argument(
        "--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
    parser.add_argument("--android", action='store_true', help='Build for Android')
    parser.add_argument(
        "--android_abi", default="arm64-v8a",
        choices=["armeabi-v7a", "arm64-v8a", "x86", "x86_64"],
        help="Specify the target Android Application Binary Interface (ABI)")
    parser.add_argument("--android_api", type=int, default=27, help='Android API Level, e.g. 21')
    parser.add_argument("--android_sdk_path", type=str, help='Path to the Android SDK')
    parser.add_argument("--android_ndk_path", default="", help="Path to the Android NDK")
    parser.add_argument("--android_cpp_shared", action="store_true",
                        help="Build with shared libc++ instead of the default static libc++.")
    parser.add_argument("--android_run_emulator", action="store_true",
                        help="Start up an Android emulator if needed.")
    parser.add_argument("--ios", action='store_true', help="build for ios")
    parser.add_argument(
        "--ios_sysroot", default="",
        help="Specify the location name of the macOS platform SDK to be used")
    parser.add_argument(
        "--ios_toolchain_dir", default="",
        help="Path to ios toolchain binaries")
    parser.add_argument(
        "--ios_toolchain_file", default="",
        help="Path to ios toolchain file, "
        "or cmake/onnxruntime_ios.toolchain.cmake will be used")
    parser.add_argument(
        "--xcode_code_signing_team_id", default="",
        help="The development team ID used for code signing in Xcode")
    parser.add_argument(
        "--use_xcode", action='store_true',
        help="Use Xcode as cmake generator, this is only supported on MacOS.")
    parser.add_argument(
        "--osx_arch",
        default="arm64" if platform.machine() == "arm64" else "x86_64",
        choices=["arm64", "x86_64"],
        help="Specify the Target specific architectures for macOS and iOS, This is only supported on MacOS")
    parser.add_argument(
        "--apple_deploy_target", type=str,
        help="Specify the minimum version of the target platform "
        "(e.g. macOS or iOS)"
        "This is only supported on MacOS")
    # Arguments needed by CI
    parser.add_argument(
        "--cmake_path", default="cmake", help="Path to the CMake program.")
    parser.add_argument(
        "--ctest_path", default="ctest", help="Path to the CTest program.")
    parser.add_argument(
        "--skip_submodule_sync", action='store_true', help="Don't do a "
        "'git submodule update'. Makes the Update phase faster.")
    parser.add_argument(
        "--use_vstest", action='store_true',
        help="Use vstest for running unit tests.")
    parser.add_argument(
        "--use_mimalloc", default=['none'],
        choices=['none', 'stl', 'arena', 'all'], help="Use mimalloc.")
    parser.add_argument(
        "--use_dnnl", action='store_true', help="Build with DNNL.")
    parser.add_argument(
        "--dnnl_gpu_runtime", action='store', default='', type=str.lower,
        help="e.g. --dnnl_gpu_runtime ocl")
    parser.add_argument(
        "--dnnl_opencl_root", action='store', default='',
        help="Path to OpenCL SDK. "
        "e.g. --dnnl_opencl_root \"C:/Program Files (x86)/IntelSWTools/sw_dev_tools/OpenCL/sdk\"")
    parser.add_argument(
        "--use_featurizers", action='store_true',
        help="Build with ML Featurizer support.")
    parser.add_argument(
        "--use_openvino", nargs="?", const="CPU_FP32",
        type=_openvino_verify_device_type,
        help="Build with OpenVINO for specific hardware.")
    parser.add_argument(
        "--use_coreml", action='store_true', help="Build with CoreML support.")
    parser.add_argument(
        "--use_nnapi", action='store_true', help="Build with NNAPI support.")
    parser.add_argument(
        "--nnapi_min_api", type=int,
        help="Minimum Android API level to enable NNAPI, should be no less than 27")
    parser.add_argument(
        "--use_rknpu", action='store_true', help="Build with RKNPU.")
    parser.add_argument(
        "--use_preinstalled_eigen", action='store_true',
        help="Use pre-installed Eigen.")
    parser.add_argument("--eigen_path", help="Path to pre-installed Eigen.")
    parser.add_argument(
        "--use_openmp", action='store_true', help="Build with OpenMP")
    parser.add_argument(
        "--enable_msinternal", action="store_true",
        help="Enable for Microsoft internal builds only.")
    parser.add_argument("--llvm_path", help="Path to llvm dir")
    parser.add_argument(
        "--use_vitisai", action='store_true', help="Build with Vitis-AI")
    parser.add_argument(
        "--use_nuphar", action='store_true', help="Build with nuphar")
    parser.add_argument(
        "--use_tensorrt", action='store_true', help="Build with TensorRT")
    parser.add_argument(
        "--tensorrt_home", help="Path to TensorRT installation dir")
    parser.add_argument(
        "--use_migraphx", action='store_true', help="Build with MIGraphX")
    parser.add_argument(
        "--migraphx_home", help="Path to MIGraphX installation dir")
    parser.add_argument(
        "--use_full_protobuf", action='store_true',
        help="Use the full protobuf library")
    parser.add_argument(
        "--skip_onnx_tests", action='store_true', help="Explicitly disable "
        "all onnx related tests. Note: Use --skip_tests to skip all tests.")
    parser.add_argument(
        "--skip_winml_tests", action='store_true',
        help="Explicitly disable all WinML related tests")
    parser.add_argument(
        "--skip_nodejs_tests", action='store_true',
        help="Explicitly disable all Node.js binding tests")
    parser.add_argument(
        "--enable_msvc_static_runtime", action='store_true',
        help="Enable static linking of MSVC runtimes.")
    parser.add_argument(
        "--enable_language_interop_ops", action='store_true',
        help="Enable operator implemented in language other than cpp")
    parser.add_argument(
        "--cmake_generator",
        choices=['Visual Studio 15 2017', 'Visual Studio 16 2019', 'Ninja'],
        default='Visual Studio 15 2017' if is_windows() else None,
        help="Specify the generator that CMake invokes. "
        "This is only supported on Windows")
    parser.add_argument(
        "--enable_multi_device_test", action='store_true',
        help="Test with multi-device. Mostly used for multi-device GPU")
    parser.add_argument(
        "--use_dml", action='store_true', help="Build with DirectML.")
    parser.add_argument(
        "--use_winml", action='store_true', help="Build with WinML.")
    parser.add_argument(
        "--winml_root_namespace_override", type=str,
        help="Specify the namespace that WinML builds into.")
    parser.add_argument(
        "--use_telemetry", action='store_true',
        help="Only official builds can set this flag to enable telemetry.")
    parser.add_argument(
        "--enable_wcos", action='store_true',
        help="Build for Windows Core OS.")
    parser.add_argument(
        "--enable_windows_store", action='store_true',
        help="Build for Windows Store")
    parser.add_argument(
        "--enable_lto", action='store_true',
        help="Enable Link Time Optimization")
    parser.add_argument(
        "--use_acl", nargs="?", const="ACL_1905",
        choices=["ACL_1902", "ACL_1905", "ACL_1908", "ACL_2002"],
        help="Build with ACL for ARM architectures.")
    parser.add_argument(
        "--acl_home", help="Path to ACL home dir")
    parser.add_argument(
        "--acl_libs", help="Path to ACL libraries")
    parser.add_argument(
        "--use_armnn", action='store_true',
        help="Enable ArmNN Execution Provider.")
    parser.add_argument(
        "--armnn_relu", action='store_true',
        help="Use the Relu operator implementation from the ArmNN EP.")
    parser.add_argument(
        "--armnn_bn", action='store_true',
        help="Use the Batch Normalization operator implementation from the ArmNN EP.")
    parser.add_argument(
        "--armnn_home", help="Path to ArmNN home dir")
    parser.add_argument(
        "--armnn_libs", help="Path to ArmNN libraries")
    parser.add_argument(
        "--build_micro_benchmarks", action='store_true',
        help="Build ONNXRuntime micro-benchmarks.")
    # options to reduce binary size
    parser.add_argument("--minimal_build", action='store',
                        const='on', default='off', nargs='?', type=str.lower,
                        help="Create a build that only supports ORT format models. "
                        "See /docs/ONNX_Runtime_Format_Model_Usage.md for more information. "
                        "RTTI is automatically disabled in a minimal build. "
                        "To enable execution providers that compile kernels at runtime (e.g. NNAPI) pass 'extended' "
                        "as a parameter. e.g. '--minimal_build extended'.")
    parser.add_argument("--include_ops_by_config", type=str,
                        help="include ops from config file. "
                        "See /docs/Reduced_Operator_Kernel_build.md for more information.")
    parser.add_argument("--enable_reduced_operator_type_support", action='store_true',
                        help='If --include_ops_by_config is specified, and the configuration file was created from ORT '
                             'format models with type reduction enabled, limit the types individual operators support '
                             'where possible to further reduce the build size. '
                             'See /docs/Reduced_Operator_Kernel_build.md for more information.')
    parser.add_argument("--disable_contrib_ops", action='store_true',
                        help="Disable contrib ops (reduces binary size)")
    parser.add_argument("--disable_ml_ops", action='store_true',
                        help="Disable traditional ML ops (reduces binary size)")
    parser.add_argument("--disable_rtti", action='store_true', help="Disable RTTI (reduces binary size)")
    parser.add_argument("--disable_exceptions", action='store_true',
                        help="Disable exceptions to reduce binary size. Requires --minimal_build.")
    parser.add_argument("--disable_ort_format_load", action='store_true',
                        help='Disable support for loading ORT format models in a non-minimal build.')
    parser.add_argument("--use_rocm", action='store_true', help="Build with ROCm")
    parser.add_argument("--rocm_home", help="Path to ROCm installation dir")
    # Code coverage
    parser.add_argument("--code_coverage", action='store_true',
                        help="Generate code coverage when targeting Android (only).")
    return parser.parse_args()
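# Example invocations (illustrative only; every flag is defined in the parser
# above, and the paths are placeholders):
#
#     python build.py --build_dir build --config Release --build_shared_lib --parallel
#     python build.py --build_dir build --config Debug --use_cuda \
#         --cuda_home /usr/local/cuda --cudnn_home /usr/local/cuda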
def resolve_executable_path(command_or_path):
    """Returns the absolute path of an executable."""
    executable_path = shutil.which(command_or_path)
    if executable_path is None:
        raise BuildError("Failed to resolve executable path for "
                         "'{}'.".format(command_or_path))
    return os.path.realpath(executable_path)
def get_linux_distro():
    try:
        with open('/etc/os-release', 'r') as f:
            dist_info = dict(
                line.strip().split('=', 1) for line in f.readlines())
        return dist_info.get('NAME', '').strip('"'), dist_info.get(
            'VERSION', '').strip('"')
    except (IOError, ValueError):
        return '', ''
def is_ubuntu_1604():
    dist, ver = get_linux_distro()
    return dist == 'Ubuntu' and ver.startswith('16.04')
def get_config_build_dir(build_dir, config):
    # build directory per configuration
    return os.path.join(build_dir, config)
def run_subprocess(args, cwd=None, capture_stdout=False, dll_path=None,
                   shell=False, env={}):
    if isinstance(args, str):
        raise ValueError("args should be a sequence of strings, not a string")
    my_env = os.environ.copy()
    if dll_path:
        if is_windows():
            my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
        else:
            if "LD_LIBRARY_PATH" in my_env:
                my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
            else:
                my_env["LD_LIBRARY_PATH"] = dll_path
    my_env.update(env)
    return run(*args, cwd=cwd, capture_stdout=capture_stdout, shell=shell, env=my_env)
def update_submodules(source_dir):
    run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
    run_subprocess(["git", "submodule", "update", "--init", "--recursive"],
                   cwd=source_dir)
def is_docker():
    path = '/proc/self/cgroup'
    return (
        os.path.exists('/.dockerenv') or
        os.path.isfile(path) and any('docker' in line for line in open(path))
    )
def install_python_deps(numpy_version=""):
    dep_packages = ['setuptools', 'wheel', 'pytest']
    dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version
                        else 'numpy>=1.16.6')
    dep_packages.append('sympy>=1.1')
    dep_packages.append('packaging')
    dep_packages.append('cerberus')
    run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',
                    'files.pythonhosted.org'] + dep_packages)
# We need to install Torch to test certain functionalities of the ORT Python package
def install_torch():
    # Command works for both Windows and Linux
    run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',
                    'files.pythonhosted.org', 'torch===1.5.1+cu101', 'torchvision===0.6.1+cu101',
                    '-f', 'https://download.pytorch.org/whl/torch_stable.html'])
def check_md5(filename, expected_md5):
    if not os.path.exists(filename):
        return False
    hash_md5 = hashlib.md5()
    BLOCKSIZE = 1024 * 64
    with open(filename, "rb") as f:
        buf = f.read(BLOCKSIZE)
        while len(buf) > 0:
            hash_md5.update(buf)
            buf = f.read(BLOCKSIZE)
    hex = hash_md5.hexdigest()
    if hex != expected_md5:
        log.info('md5 mismatch, expect %s, got %s' % (expected_md5, hex))
        os.remove(filename)
        return False
    return True
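# Usage sketch for check_md5 (illustrative; the file name and digest below are
# placeholders, not real artifacts): it returns True only when the file exists
# and its MD5 digest matches, and it deletes the file on a mismatch.
#
#     if not check_md5("protoc.zip", "0123456789abcdef0123456789abcdef"):
#         ...  # re-download the archive before continuing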
def setup_test_data(build_dir, configs):
    # create a shortcut for test models if there is a 'models'
    # folder in build_dir
    if is_windows():
        src_model_dir = os.path.join(build_dir, 'models')
        if os.path.exists('C:\\local\\models') and not os.path.exists(
                src_model_dir):
            log.debug("creating shortcut %s -> %s" % (
                'C:\\local\\models', src_model_dir))
            run_subprocess(['mklink', '/D', '/J', src_model_dir,
                            'C:\\local\\models'], shell=True)
        for config in configs:
            config_build_dir = get_config_build_dir(build_dir, config)
            os.makedirs(config_build_dir, exist_ok=True)
            dest_model_dir = os.path.join(config_build_dir, 'models')
            if os.path.exists('C:\\local\\models') and not os.path.exists(
                    dest_model_dir):
                log.debug("creating shortcut %s -> %s" % (
                    'C:\\local\\models', dest_model_dir))
                run_subprocess(['mklink', '/D', '/J', dest_model_dir,
                                'C:\\local\\models'], shell=True)
            elif os.path.exists(src_model_dir) and not os.path.exists(
                    dest_model_dir):
                log.debug("creating shortcut %s -> %s" % (
                    src_model_dir, dest_model_dir))
                run_subprocess(['mklink', '/D', '/J', dest_model_dir,
                                src_model_dir], shell=True)
def use_dev_mode(args):
    if args.use_acl:
        return 'OFF'
    if args.use_armnn:
        return 'OFF'
    if args.ios and is_macOS():
        return 'OFF'
    return 'ON'
def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home, rocm_home,
                        mpi_home, nccl_home, tensorrt_home, migraphx_home, acl_home, acl_libs, armnn_home, armnn_libs,
                        path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):
    log.info("Generating CMake build tree")
    cmake_dir = os.path.join(source_dir, "cmake")
    cmake_args = [
        cmake_path, cmake_dir,
        "-Donnxruntime_RUN_ONNX_TESTS=" + (
            "ON" if args.enable_onnx_tests else "OFF"),
        "-Donnxruntime_BUILD_WINML_TESTS=" + (
            "OFF" if args.skip_winml_tests else "ON"),
        "-Donnxruntime_GENERATE_TEST_REPORTS=ON",
        "-Donnxruntime_DEV_MODE=" + use_dev_mode(args),
        "-DPYTHON_EXECUTABLE=" + sys.executable,
        "-Donnxruntime_USE_CUDA=" + ("ON" if args.use_cuda else "OFF"),
        "-Donnxruntime_CUDNN_HOME=" + (cudnn_home if args.use_cuda else ""),
        "-Donnxruntime_USE_FEATURIZERS=" + (
            "ON" if args.use_featurizers else "OFF"),
        "-Donnxruntime_CUDA_HOME=" + (cuda_home if args.use_cuda else ""),
        "-Donnxruntime_USE_MIMALLOC_STL_ALLOCATOR=" + (
            "ON" if args.use_mimalloc == "stl" or
            args.use_mimalloc == "all" else "OFF"),
        "-Donnxruntime_USE_MIMALLOC_ARENA_ALLOCATOR=" + (
            "ON" if args.use_mimalloc == "arena" or
            args.use_mimalloc == "all" else "OFF"),
        "-Donnxruntime_ENABLE_PYTHON=" + (
            "ON" if args.enable_pybind else "OFF"),
        "-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
        "-Donnxruntime_BUILD_JAVA=" + ("ON" if args.build_java else "OFF"),
        "-Donnxruntime_BUILD_NODEJS=" + ("ON" if args.build_nodejs else "OFF"),
        "-Donnxruntime_BUILD_SHARED_LIB=" + (
            "ON" if args.build_shared_lib else "OFF"),
        "-Donnxruntime_USE_DNNL=" + ("ON" if args.use_dnnl else "OFF"),
        "-Donnxruntime_DNNL_GPU_RUNTIME=" + (args.dnnl_gpu_runtime if args.use_dnnl else ""),
        "-Donnxruntime_DNNL_OPENCL_ROOT=" + (args.dnnl_opencl_root if args.use_dnnl else ""),
        "-Donnxruntime_USE_NNAPI_BUILTIN=" + ("ON" if args.use_nnapi else "OFF"),
        "-Donnxruntime_USE_RKNPU=" + ("ON" if args.use_rknpu else "OFF"),
        "-Donnxruntime_USE_OPENMP=" + (
            "ON" if args.use_openmp and not (
                args.use_nnapi or
                args.android or (args.ios and is_macOS())
                or args.use_rknpu)
            else "OFF"),
        "-Donnxruntime_USE_TVM=" + ("ON" if args.use_nuphar else "OFF"),
        "-Donnxruntime_USE_LLVM=" + ("ON" if args.use_nuphar else "OFF"),
        "-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + (
            "ON" if args.enable_msinternal else "OFF"),
        "-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
        "-Donnxruntime_USE_NUPHAR=" + ("ON" if args.use_nuphar else "OFF"),
        "-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
        "-Donnxruntime_TENSORRT_HOME=" + (
            tensorrt_home if args.use_tensorrt else ""),
        # set vars for migraphx
        "-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
        "-Donnxruntime_MIGRAPHX_HOME=" + (migraphx_home if args.use_migraphx else ""),
        # By default - we currently support only cross compiling for
        # ARM/ARM64 (no native compilation supported through this
        # script).
        "-Donnxruntime_CROSS_COMPILING=" + (
            "ON" if args.arm64 or args.arm else "OFF"),
        "-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
        "-Donnxruntime_DISABLE_ML_OPS=" + ("ON" if args.disable_ml_ops else "OFF"),
        "-Donnxruntime_DISABLE_RTTI=" + ("ON" if args.disable_rtti else "OFF"),
        "-Donnxruntime_DISABLE_EXCEPTIONS=" + ("ON" if args.disable_exceptions else "OFF"),
        "-Donnxruntime_DISABLE_ORT_FORMAT_LOAD=" + ("ON" if args.disable_ort_format_load else "OFF"),
        "-Donnxruntime_MINIMAL_BUILD=" + ("ON" if args.minimal_build != 'off' else "OFF"),
        "-Donnxruntime_EXTENDED_MINIMAL_BUILD=" + ("ON" if args.minimal_build == 'extended' else "OFF"),
        "-Donnxruntime_REDUCED_OPS_BUILD=" + ("ON" if args.include_ops_by_config else "OFF"),
        "-Donnxruntime_MSVC_STATIC_RUNTIME=" + (
            "ON" if args.enable_msvc_static_runtime else "OFF"),
        # enable pyop if it is nightly build
        "-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=" + (
            "ON" if args.enable_language_interop_ops else "OFF"),
        "-Donnxruntime_USE_DML=" + ("ON" if args.use_dml else "OFF"),
        "-Donnxruntime_USE_WINML=" + ("ON" if args.use_winml else "OFF"),
        "-Donnxruntime_USE_TELEMETRY=" + (
            "ON" if args.use_telemetry else "OFF"),
        "-Donnxruntime_ENABLE_LTO=" + ("ON" if args.enable_lto else "OFF"),
        "-Donnxruntime_USE_ACL=" + ("ON" if args.use_acl else "OFF"),
        "-Donnxruntime_USE_ACL_1902=" + (
            "ON" if args.use_acl == "ACL_1902" else "OFF"),
        "-Donnxruntime_USE_ACL_1905=" + (
            "ON" if args.use_acl == "ACL_1905" else "OFF"),
        "-Donnxruntime_USE_ACL_1908=" + (
            "ON" if args.use_acl == "ACL_1908" else "OFF"),
        "-Donnxruntime_USE_ACL_2002=" + (
            "ON" if args.use_acl == "ACL_2002" else "OFF"),
        "-Donnxruntime_USE_ARMNN=" + (
            "ON" if args.use_armnn else "OFF"),
        "-Donnxruntime_ARMNN_RELU_USE_CPU=" + (
            "OFF" if args.armnn_relu else "ON"),
        "-Donnxruntime_ARMNN_BN_USE_CPU=" + (
            "OFF" if args.armnn_bn else "ON"),
        # Training related flags
        "-Donnxruntime_ENABLE_NVTX_PROFILE=" + (
            "ON" if args.enable_nvtx_profile else "OFF"),
        "-Donnxruntime_ENABLE_TRAINING=" + (
            "ON" if args.enable_training else "OFF"),
        # Enable advanced computations such as AVX for some training related ops.
        "-Donnxruntime_ENABLE_CPU_FP16_OPS=" + (
            "ON" if args.enable_training else "OFF"),
        "-Donnxruntime_USE_NCCL=" + (
            "OFF" if args.disable_nccl else "ON"),
        "-Donnxruntime_BUILD_BENCHMARKS=" + (
            "ON" if args.build_micro_benchmarks else "OFF"),
        "-Donnxruntime_USE_ROCM=" + ("ON" if args.use_rocm else "OFF"),
        "-Donnxruntime_ROCM_HOME=" + (rocm_home if args.use_rocm else ""),
        "-DOnnxruntime_GCOV_COVERAGE=" + ("ON" if args.code_coverage else "OFF"),
        "-Donnxruntime_USE_MPI=" + (
            "ON" if args.use_mpi else "OFF"),
        "-Donnxruntime_ENABLE_MEMORY_PROFILE=" + (
            "ON" if args.enable_memory_profile else "OFF"),
    ]
    if acl_home and os.path.exists(acl_home):
        cmake_args += ["-Donnxruntime_ACL_HOME=" + acl_home]
    if acl_libs and os.path.exists(acl_libs):
        cmake_args += ["-Donnxruntime_ACL_LIBS=" + acl_libs]
    if armnn_home and os.path.exists(armnn_home):
        cmake_args += ["-Donnxruntime_ARMNN_HOME=" + armnn_home]
    if armnn_libs and os.path.exists(armnn_libs):
        cmake_args += ["-Donnxruntime_ARMNN_LIBS=" + armnn_libs]
    if mpi_home and os.path.exists(mpi_home):
        if args.use_mpi:
            cmake_args += ["-Donnxruntime_MPI_HOME=" + mpi_home]
        else:
            log.warning("mpi_home is supplied but use_mpi is set to false."
                        " Build will continue without linking MPI libraries.")
    if nccl_home and os.path.exists(nccl_home):
        cmake_args += ["-Donnxruntime_NCCL_HOME=" + nccl_home]
    if args.winml_root_namespace_override:
        cmake_args += ["-Donnxruntime_WINML_NAMESPACE_OVERRIDE=" +
                       args.winml_root_namespace_override]
    if args.use_openvino:
        cmake_args += ["-Donnxruntime_USE_OPENVINO=ON",
                       "-Donnxruntime_USE_OPENVINO_MYRIAD=" + (
                           "ON" if args.use_openvino == "MYRIAD_FP16" else "OFF"),
                       "-Donnxruntime_USE_OPENVINO_GPU_FP32=" + (
                           "ON" if args.use_openvino == "GPU_FP32" else "OFF"),
                       "-Donnxruntime_USE_OPENVINO_GPU_FP16=" + (
                           "ON" if args.use_openvino == "GPU_FP16" else "OFF"),
                       "-Donnxruntime_USE_OPENVINO_CPU_FP32=" + (
                           "ON" if args.use_openvino == "CPU_FP32" else "OFF"),
                       "-Donnxruntime_USE_OPENVINO_VAD_M=" + (
                           "ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
                       "-Donnxruntime_USE_OPENVINO_VAD_F=" + (
                           "ON" if args.use_openvino == "VAD-F_FP32" else "OFF"),
                       "-Donnxruntime_USE_OPENVINO_HETERO=" + (
                           "ON" if args.use_openvino.startswith("HETERO") else "OFF"),
                       "-Donnxruntime_USE_OPENVINO_DEVICE=" + (args.use_openvino),
                       "-Donnxruntime_USE_OPENVINO_MULTI=" + (
                           "ON" if args.use_openvino.startswith("MULTI") else "OFF")]
    # TensorRT and OpenVINO providers currently only supports
    # full_protobuf option.
    if (args.use_full_protobuf or args.use_tensorrt or
            args.use_openvino or args.use_vitisai or args.gen_doc):
        cmake_args += [
            "-Donnxruntime_USE_FULL_PROTOBUF=ON",
            "-DProtobuf_USE_STATIC_LIBS=ON"
        ]
    if args.use_nuphar and args.llvm_path is not None:
        cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
    if args.use_cuda and not is_windows():
        nvml_stub_path = cuda_home + "/lib64/stubs"
        cmake_args += ["-DCUDA_CUDA_LIBRARY=" + nvml_stub_path]
    if args.use_preinstalled_eigen:
        cmake_args += ["-Donnxruntime_USE_PREINSTALLED_EIGEN=ON",
                       "-Deigen_SOURCE_PATH=" + args.eigen_path]
    if args.nnapi_min_api:
        cmake_args += ["-Donnxruntime_NNAPI_MIN_API=" + str(args.nnapi_min_api)]
    if args.android:
        cmake_args += [
            "-DCMAKE_TOOLCHAIN_FILE=" + args.android_ndk_path + "/build/cmake/android.toolchain.cmake",
            "-DANDROID_PLATFORM=android-" + str(args.android_api),
            "-DANDROID_ABI=" + str(args.android_abi)
        ]
        if args.android_cpp_shared:
            cmake_args += ["-DANDROID_STL=c++_shared"]
    if is_macOS() and not args.android:
        cmake_args += ["-DCMAKE_OSX_ARCHITECTURES=" + args.osx_arch]
        # Since CMake 3.19, the Xcode generator defaults to the new build system, which is not supported by this project.
        cmake_verstr = subprocess.check_output(['cmake', '--version']).decode('utf-8').split()[2]
        if args.use_xcode and LooseVersion(cmake_verstr) >= LooseVersion('3.19.0'):
            cmake_args += ["-T", "buildsystem=1"]
        if args.apple_deploy_target:
            cmake_args += ["-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target]
    if args.use_coreml:
        if not is_macOS():
            raise BuildError("Build CoreML EP requires macOS")
        cmake_args += ["-Donnxruntime_USE_COREML=ON"]
    if args.ios:
        if is_macOS():
            needed_args = [
                args.use_xcode,
                args.ios_sysroot,
                args.apple_deploy_target,
            ]
            arg_names = [
                "--use_xcode            " +
                "<need use xcode to cross build iOS on MacOS>",
                "--ios_sysroot          " +
                "<the location or name of the macOS platform SDK>",
                "--apple_deploy_target  " +
                "<the minimum version of the target platform>",
            ]
            if not all(needed_args):
                raise BuildError(
                    "iOS build on MacOS canceled due to missing arguments: " +
                    ', '.join(
                        val for val, cond in zip(arg_names, needed_args)
                        if not cond))
            cmake_args += [
                "-DCMAKE_SYSTEM_NAME=iOS",
                "-Donnxruntime_BUILD_SHARED_LIB=ON",
                "-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
                "-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target,
                # we do not need protoc binary for ios cross build
                "-Dprotobuf_BUILD_PROTOC_BINARIES=OFF",
                "-DCMAKE_TOOLCHAIN_FILE=" + (
                    args.ios_toolchain_file if args.ios_toolchain_file
                    else "../cmake/onnxruntime_ios.toolchain.cmake")
            ]
            # Code sign the binaries, if the code signing development team id is provided
            if args.xcode_code_signing_team_id:
                cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=" + args.xcode_code_signing_team_id]
        else:
            # TODO: the cross compiling on Linux is not officially supported by Apple
            #   and is already broken with the latest codebase, so it should be removed.
            # We are cross compiling on Linux
            needed_args = [
                args.ios_sysroot,
                args.arm64 or args.arm,
                args.ios_toolchain_dir
            ]
            arg_names = [
                "--ios_sysroot <path to sysroot>",
                "--arm or --arm64",
                "--ios_toolchain_dir <path to toolchain>"
            ]
            if not all(needed_args):
                raise BuildError(
                    "iOS build canceled due to missing arguments: " +
                    ', '.join(
                        val for val, cond in zip(arg_names, needed_args)
                        if not cond))
            compilers = sorted(
                glob.glob(args.ios_toolchain_dir + "/bin/*-clang*"))
            os.environ["PATH"] = os.path.join(
                args.ios_toolchain_dir, "bin") + os.pathsep + os.environ.get(
                    "PATH", "")
            os.environ["LD_LIBRARY_PATH"] = os.path.join(
                args.ios_toolchain_dir, "/lib") + os.pathsep + os.environ.get(
                    "LD_LIBRARY_PATH", "")
            if len(compilers) != 2:
                raise BuildError(
                    "error identifying compilers in ios_toolchain_dir")
            cmake_args += [
                "-DCMAKE_OSX_ARCHITECTURES=" +
                ("arm64" if args.arm64 else "arm"),
                "-DCMAKE_SYSTEM_NAME=iOSCross",
                "-Donnxruntime_BUILD_UNIT_TESTS=OFF",
                "-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
                "-DCMAKE_C_COMPILER=" + compilers[0],
                "-DCMAKE_CXX_COMPILER=" + compilers[1]
            ]
    if path_to_protoc_exe:
        cmake_args += [
            "-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s" % path_to_protoc_exe]
    if args.fuzz_testing:
        if not (args.build_shared_lib and
                is_windows() and
                args.cmake_generator == 'Visual Studio 16 2019' and
                args.use_full_protobuf):
            raise BuildError(
                "Fuzz testing has only been tested with the shared-lib build option using MSVC on Windows")
        cmake_args += [
            "-Donnxruntime_BUILD_UNIT_TESTS=ON",
            "-Donnxruntime_FUZZ_TEST=ON",
            "-Donnxruntime_USE_FULL_PROTOBUF=ON"]
    if args.gen_doc:
        cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=ON"]
    else:
        cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=OFF"]
    cmake_args += ["-D{}".format(define) for define in cmake_extra_defines]
    cmake_args += cmake_extra_args
    # ADO pipelines will store the pipeline build number
    # (e.g. 191101-2300.1.master) and source version in environment
    # variables. If present, use these values to define the
    # WinML/ORT DLL versions.
    build_number = os.getenv('Build_BuildNumber')
    source_version = os.getenv('Build_SourceVersion')
    if build_number and source_version:
        build_matches = re.fullmatch(
            r"(\d\d)(\d\d)(\d\d)(\d\d)\.(\d+)", build_number)
        if build_matches:
            YY = build_matches.group(2)
            MM = build_matches.group(3)
            DD = build_matches.group(4)
            # Get ORT major and minor number
            with open(os.path.join(source_dir, 'VERSION_NUMBER')) as f:
                first_line = f.readline()
                ort_version_matches = re.match(r"(\d+).(\d+)", first_line)
                if not ort_version_matches:
                    raise BuildError("Couldn't read version from VERSION_NUMBER")
                ort_major = ort_version_matches.group(1)
                ort_minor = ort_version_matches.group(2)
                # Example (BuildNumber: 191101-2300.1.master,
                # SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)
                # MajorPart = 1
                # MinorPart = 0
                # BuildPart = 1911
                # PrivatePart = 123
                # String = 191101-2300.1.master.0bce7ae
                cmake_args += [
                    "-DVERSION_MAJOR_PART={}".format(ort_major),
                    "-DVERSION_MINOR_PART={}".format(ort_minor),
                    "-DVERSION_BUILD_PART={}".format(YY),
                    "-DVERSION_PRIVATE_PART={}{}".format(MM, DD),
                    "-DVERSION_STRING={}.{}.{}.{}".format(
                        ort_major, ort_minor, build_number,
                        source_version[0:7])
                ]
    for config in configs:
        config_build_dir = get_config_build_dir(build_dir, config)
        os.makedirs(config_build_dir, exist_ok=True)
        if args.use_nuphar:
            os.environ["PATH"] = os.path.join(
                config_build_dir, "external", "tvm",
                config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
        run_subprocess(
            cmake_args + [
                "-Donnxruntime_ENABLE_MEMLEAK_CHECKER=" +
                ("ON" if config.lower() == 'debug' and not args.use_nuphar and not
                 args.use_openvino and not
                 args.enable_msvc_static_runtime
                 else "OFF"), "-DCMAKE_BUILD_TYPE={}".format(config)],
            cwd=config_build_dir)
def clean_targets(cmake_path, build_dir, configs):
    for config in configs:
        log.info("Cleaning targets for %s configuration", config)
        build_dir2 = get_config_build_dir(build_dir, config)
        cmd_args = [cmake_path,
                    "--build", build_dir2,
                    "--config", config,
                    "--target", "clean"]
        run_subprocess(cmd_args)
def build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, target=None):
    for config in configs:
        log.info("Building targets for %s configuration", config)
        build_dir2 = get_config_build_dir(build_dir, config)
        cmd_args = [cmake_path,
                    "--build", build_dir2,
                    "--config", config]
        if target:
            cmd_args.extend(['--target', target])
        build_tool_args = []
        if num_parallel_jobs != 1:
            if is_windows() and args.cmake_generator != 'Ninja':
                build_tool_args += [
                    "/maxcpucount:{}".format(num_parallel_jobs),
                    # if nodeReuse is true, msbuild processes will stay around for a bit after the build completes
                    "/nodeReuse:False",
                ]
            elif (is_macOS() and args.use_xcode):
                # CMake will generate correct build tool args for Xcode
                cmd_args += ["--parallel", str(num_parallel_jobs)]
            else:
                build_tool_args += ["-j{}".format(num_parallel_jobs)]
        if build_tool_args:
            cmd_args += ["--"]
            cmd_args += build_tool_args
        env = {}
        if args.android:
            env['ANDROID_SDK_ROOT'] = args.android_sdk_path
        run_subprocess(cmd_args, env=env)
def add_dir_if_exists(directory, dir_list):
    if os.path.isdir(directory):
        dir_list.append(directory)
def setup_cuda_vars(args):
    cuda_home = ""
    cudnn_home = ""
    if args.use_cuda:
        cuda_home = args.cuda_home if args.cuda_home else os.getenv(
            "CUDA_HOME")
        cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv(
            "CUDNN_HOME")
        cuda_home_valid = (cuda_home is not None and os.path.exists(cuda_home))
        cudnn_home_valid = (cudnn_home is not None and os.path.exists(
            cudnn_home))
        if not cuda_home_valid or not cudnn_home_valid:
            raise BuildError(
                "cuda_home and cudnn_home paths must be specified and valid.",
                "cuda_home='{}' valid={}. cudnn_home='{}' valid={}"
                .format(
                    cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))
    return cuda_home, cudnn_home
def setup_tensorrt_vars(args):
    tensorrt_home = ""
    if args.use_tensorrt:
        tensorrt_home = (args.tensorrt_home if args.tensorrt_home
                         else os.getenv("TENSORRT_HOME"))
        tensorrt_home_valid = (tensorrt_home is not None and
                               os.path.exists(tensorrt_home))
        if not tensorrt_home_valid:
            raise BuildError(
                "tensorrt_home paths must be specified and valid.",
                "tensorrt_home='{}' valid={}."
                .format(tensorrt_home, tensorrt_home_valid))
        # Set maximum workspace size in byte for
        # TensorRT (1GB = 1073741824 bytes).
        os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = "1073741824"
        # Set maximum number of iterations to detect unsupported nodes
        # and partition the models for TensorRT.
        os.environ["ORT_TENSORRT_MAX_PARTITION_ITERATIONS"] = "1000"
        # Set minimum subgraph node size in graph partitioning
        # for TensorRT.
        os.environ["ORT_TENSORRT_MIN_SUBGRAPH_SIZE"] = "1"
        # Set FP16 flag
        os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
    return tensorrt_home
def setup_migraphx_vars(args):
    migraphx_home = None
    if (args.use_migraphx):
        print("migraphx_home = {}".format(args.migraphx_home))
        migraphx_home = args.migraphx_home or os.getenv("MIGRAPHX_HOME") or None
        migraphx_home_not_valid = (migraphx_home and not os.path.exists(migraphx_home))
        if (migraphx_home_not_valid):
            raise BuildError("migraphx_home paths must be specified and valid.",
                             "migraphx_home='{}' valid={}."
                             .format(migraphx_home, migraphx_home_not_valid))
    return migraphx_home or ''
def setup_dml_build(args, cmake_path, build_dir, configs):
    if args.use_dml:
        for config in configs:
            # Run the RESTORE_PACKAGES target to perform the initial
            # NuGet setup.
            cmd_args = [cmake_path,
                        "--build", get_config_build_dir(build_dir, config),
                        "--config", config,
                        "--target", "RESTORE_PACKAGES"]
            run_subprocess(cmd_args)
def setup_rocm_build(args, configs):
    rocm_home = None
    if (args.use_rocm):
        print("rocm_home = {}".format(args.rocm_home))
        rocm_home = args.rocm_home or None
        rocm_home_not_valid = (rocm_home and not os.path.exists(rocm_home))
        if (rocm_home_not_valid):
            raise BuildError("rocm_home paths must be specified and valid.",
                             "rocm_home='{}' valid={}."
                             .format(rocm_home, rocm_home_not_valid))
        for config in configs:
            amd_hipify(get_config_build_dir(args.build_dir, config))
    return rocm_home or ''
def run_android_tests(args, source_dir, config, cwd):
    sdk_tool_paths = android.get_sdk_tool_paths(args.android_sdk_path)
    device_dir = '/data/local/tmp'
    def adb_push(src, dest, **kwargs):
        return run_subprocess([sdk_tool_paths.adb, 'push', src, dest], **kwargs)
    def adb_shell(*args, **kwargs):
        return run_subprocess([sdk_tool_paths.adb, 'shell', *args], **kwargs)
    def run_adb_shell(cmd):
        # GCOV_PREFIX_STRIP specifies the depth of the directory hierarchy to strip and
        # GCOV_PREFIX specifies the root directory
        # for creating the runtime code coverage files.
        if args.code_coverage:
            adb_shell(
                'cd {0} && GCOV_PREFIX={0} GCOV_PREFIX_STRIP={1} {2}'.format(
                    device_dir, cwd.count(os.sep) + 1, cmd))
        else:
            adb_shell('cd {} && {}'.format(device_dir, cmd))
    if args.android_abi == 'x86_64':
        with contextlib.ExitStack() as context_stack:
            if args.android_run_emulator:
                avd_name = "ort_android"
                system_image = "system-images;android-{};google_apis;{}".format(
                    args.android_api, args.android_abi)
                android.create_virtual_device(sdk_tool_paths, system_image, avd_name)
                emulator_proc = context_stack.enter_context(
                    android.start_emulator(
                        sdk_tool_paths=sdk_tool_paths,
                        avd_name=avd_name,
                        extra_args=[
                            "-partition-size", "2047",
                            "-wipe-data"]))
                context_stack.callback(android.stop_emulator, emulator_proc)
            adb_push('testdata', device_dir, cwd=cwd)
            adb_push(
                os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'),
                device_dir, cwd=cwd)
            adb_push('onnxruntime_test_all', device_dir, cwd=cwd)
            adb_shell('chmod +x {}/onnxruntime_test_all'.format(device_dir))
            adb_push('onnx_test_runner', device_dir, cwd=cwd)
            adb_shell('chmod +x {}/onnx_test_runner'.format(device_dir))
            run_adb_shell('{0}/onnxruntime_test_all'.format(device_dir))
            if args.use_nnapi:
                adb_shell('cd {0} && {0}/onnx_test_runner -e nnapi {0}/test'.format(device_dir))
            else:
                adb_shell('cd {0} && {0}/onnx_test_runner {0}/test'.format(device_dir))
            # run shared_lib_test if necessary
            if args.build_shared_lib:
                adb_push('libonnxruntime.so', device_dir, cwd=cwd)
                adb_push('onnxruntime_shared_lib_test', device_dir, cwd=cwd)
                adb_shell('chmod +x {}/onnxruntime_shared_lib_test'.format(device_dir))
                run_adb_shell(
                    'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} && {0}/onnxruntime_shared_lib_test'.format(
                        device_dir))
def run_ios_tests(args, source_dir, config, cwd):
    cpr = run_subprocess(["xcodebuild", "test", "-project", "./onnxruntime.xcodeproj",
                          "-configuration", config,
                          "-scheme",  "onnxruntime_test_all_xc", "-destination",
                          "platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
    if cpr.returncode == 0:
        cpr = run_subprocess(["xcodebuild", "test", "-project", "./onnxruntime.xcodeproj",
                              "-configuration", config,
                              "-scheme",  "onnxruntime_shared_lib_test_xc", "-destination",
                              "platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
    cpr.check_returncode()
def run_orttraining_test_orttrainer_frontend_separately(cwd):
    class TestNameCollecterPlugin:
        def __init__(self):
            self.collected = set()
        def pytest_collection_modifyitems(self, items):
            for item in items:
                print('item.name: ', item.name)
                test_name = item.name
                start = test_name.find('[')
                if start > 0:
                    test_name = test_name[:start]
                self.collected.add(test_name)
    import pytest
    plugin = TestNameCollecterPlugin()
    test_script_filename = os.path.join(cwd, "orttraining_test_orttrainer_frontend.py")
    pytest.main(['--collect-only', test_script_filename], plugins=[plugin])
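    # The --collect-only pass does not run any tests; it only triggers the plugin's
    # pytest_collection_modifyitems hook, which records the test names so that each
    # test can then be run in its own pytest process below.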
    for test_name in plugin.collected:
        run_subprocess([
            sys.executable, '-m', 'pytest',
            'orttraining_test_orttrainer_frontend.py', '-v', '-k', test_name], cwd=cwd)
def run_training_python_frontend_tests(cwd):
    run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer.py'], cwd=cwd)
    run_subprocess([sys.executable, 'onnxruntime_test_training_unit_tests.py'], cwd=cwd)
    run_subprocess([
        sys.executable, 'orttraining_test_transformers.py',
        'BertModelTest.test_for_pretraining_full_precision_list_input'], cwd=cwd)
    run_subprocess([
        sys.executable, 'orttraining_test_transformers.py',
        'BertModelTest.test_for_pretraining_full_precision_dict_input'], cwd=cwd)
    run_subprocess([
        sys.executable, 'orttraining_test_transformers.py',
        'BertModelTest.test_for_pretraining_full_precision_list_and_dict_input'], cwd=cwd)
    # TODO: use run_orttraining_test_orttrainer_frontend_separately to work around a sporadic segfault.
    # shall revert to run_subprocess call once the segfault issue is resolved.
    run_orttraining_test_orttrainer_frontend_separately(cwd)
    # run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_frontend.py'], cwd=cwd)
    run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_bert_toy_onnx.py'], cwd=cwd)
    run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_checkpoint_storage.py'], cwd=cwd)
    run_subprocess([
        sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_checkpoint_functions.py'], cwd=cwd)
def run_training_python_frontend_e2e_tests(cwd):
    # frontend tests are to be added here:
    log.info("Running python frontend e2e tests.")
    run_subprocess(
        [sys.executable, 'orttraining_run_frontend_batch_size_test.py', '-v'],
        cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
    import torch
    ngpus = torch.cuda.device_count()
    if ngpus > 1:
        bert_pretrain_script = 'orttraining_run_bert_pretrain.py'
        # TODO: this test will be replaced with convergence test ported from backend
        log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {} {}'.format(
            ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'))
        run_subprocess([
            'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
            bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'], cwd=cwd)
        log.debug('RUN: mpirun -n {} {} orttraining_run_glue.py'.format(ngpus, sys.executable))
        run_subprocess([
            'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable, 'orttraining_run_glue.py'], cwd=cwd)
    # Notes on the GLUE fine-tune tests run below with orttraining_run_glue.py:
    # 1. we force a single GPU (with CUDA_VISIBLE_DEVICES) for the fine-tune tests.
    # 2. each test needs to run separately (mixing fp16 and full precision runs
    #    in one process causes failures; this needs to be investigated).
    run_subprocess(
        [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_with_mrpc', '-v'],
        cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
    run_subprocess(
        [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_fp16_with_mrpc', '-v'],
        cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
    run_subprocess(
        [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_with_mrpc', '-v'],
        cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
    run_subprocess(
        [sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_fp16_with_mrpc', '-v'],
        cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
    run_subprocess(
        [sys.executable, 'orttraining_run_multiple_choice.py', 'ORTMultipleChoiceTest.test_bert_fp16_with_swag', '-v'],
        cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
    run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer_with_mixed_precision.py'], cwd=cwd)
    run_subprocess([
        sys.executable, 'orttraining_test_transformers.py',
        'BertModelTest.test_for_pretraining_mixed_precision'], cwd=cwd)
def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
    for config in configs:
        log.info("Running tests for %s configuration", config)
        cwd = get_config_build_dir(build_dir, config)
        cwd = os.path.abspath(cwd)
        if args.android:
            run_android_tests(args, source_dir, config, cwd)
            continue
        elif args.ios:
            run_ios_tests(args, source_dir, config, cwd)
            continue
        dll_path_list = []
        if args.use_nuphar:
            dll_path_list.append(os.path.join(
                build_dir, config, "external", "tvm", config))
        if args.use_tensorrt:
            dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))
        dll_path = None
        if len(dll_path_list) > 0:
            dll_path = os.pathsep.join(dll_path_list)
        if ctest_path is None:
            # Get the "Google Test Adapter" for vstest.
            if not os.path.exists(os.path.join(cwd,
                                               'googletestadapter.0.17.1')):
                run_subprocess(
                    ['nuget.exe', 'restore',
                     os.path.join(source_dir, 'packages.config'),
                     '-ConfigFile', os.path.join(source_dir, 'NuGet.config'),
                     '-PackagesDirectory', cwd])
            cwd2 = os.path.join(cwd, config)
            executables = ['onnxruntime_test_all.exe']
            if args.build_shared_lib:
                executables.append('onnxruntime_shared_lib_test.exe')
                executables.append('onnxruntime_global_thread_pools_test.exe')
            run_subprocess(
                ['vstest.console.exe', '--parallel',
                 '--TestAdapterPath:..\\googletestadapter.0.17.1\\build\\_common',  # noqa
                 '/Logger:trx', '/Enablecodecoverage', '/Platform:x64',
                 "/Settings:%s" % os.path.join(
                     source_dir, 'cmake\\codeconv.runsettings')] + executables,
                cwd=cwd2, dll_path=dll_path)
        else:
            ctest_cmd = [ctest_path, "--build-config", config, "--verbose", "--timeout", "3600"]
            run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)
        if args.enable_pybind:
            # Disable python tests for TensorRT because many tests are
            # not supported yet.
            if args.use_tensorrt:
                return
            # Disable python tests in a reduced build as we don't know which ops have been included and which
            # models can run
            if args.include_ops_by_config or args.minimal_build != 'off':
                return
            if is_windows():
                cwd = os.path.join(cwd, config)
            run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)
            if args.enable_symbolic_shape_infer_tests:
                run_subprocess([sys.executable, 'onnxruntime_test_python_symbolic_shape_infer.py'],
                               cwd=cwd, dll_path=dll_path)
            # For CUDA enabled builds test IOBinding feature
            if args.use_cuda:
                # We need to have Torch installed to test the IOBinding feature
                # which currently uses Torch's allocator to allocate GPU memory for testing
                log.info("Testing IOBinding feature")
                run_subprocess([sys.executable, 'onnxruntime_test_python_iobinding.py'], cwd=cwd, dll_path=dll_path)
            if not args.disable_ml_ops:
                run_subprocess([sys.executable, 'onnxruntime_test_python_mlops.py'], cwd=cwd, dll_path=dll_path)
            if args.enable_training and args.use_cuda:
                # run basic frontend tests
                run_training_python_frontend_tests(cwd=cwd)
            try:
                import onnx  # noqa
                onnx_test = True
            except ImportError as error:
                log.exception(error)
                log.warning("onnx is not installed. The ONNX tests will be skipped.")
                onnx_test = False
            if onnx_test:
                run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)
                run_subprocess([sys.executable, '-m', 'unittest', 'discover', '-s', 'quantization'],
                               cwd=cwd, dll_path=dll_path)
                if not args.disable_ml_ops:
                    run_subprocess([sys.executable, 'onnxruntime_test_python_backend_mlops.py'],
                                   cwd=cwd, dll_path=dll_path)
                run_subprocess([sys.executable,
                                os.path.join(source_dir, 'onnxruntime', 'test', 'onnx', 'gen_test_models.py'),
                                '--output_dir', 'test_models'], cwd=cwd)
                if not args.skip_onnx_tests:
                    run_subprocess([os.path.join(cwd, 'onnx_test_runner'), 'test_models'], cwd=cwd)
                    if config != 'Debug':
                        run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)
            if not args.skip_keras_test:
                try:
                    import onnxmltools  # noqa
                    import keras  # noqa
                    onnxml_test = True
                except ImportError:
                    log.warning(
                        "onnxmltools and keras are not installed. "
                        "The keras tests will be skipped.")
                    onnxml_test = False
                if onnxml_test:
                    run_subprocess(
                        [sys.executable, 'onnxruntime_test_python_keras.py'],
                        cwd=cwd, dll_path=dll_path)
def nuphar_run_python_tests(build_dir, configs):
    """nuphar temporary function for running python tests separately
    as it requires ONNX 1.5.0
    """
    for config in configs:
        if config == 'Debug':
            continue
        cwd = get_config_build_dir(build_dir, config)
        if is_windows():
            cwd = os.path.join(cwd, config)
        dll_path = os.path.join(build_dir, config, "external", "tvm", config)
        # install onnx for shape inference in testing Nuphar scripts
        # this needs to happen after onnx_test_data preparation which
        # uses onnx 1.3.0
        run_subprocess(
            [sys.executable, '-m', 'pip', 'install', '--user', 'onnx==1.5.0'])
        run_subprocess(
            [sys.executable, 'onnxruntime_test_python_nuphar.py'],
            cwd=cwd, dll_path=dll_path)
def run_nodejs_tests(nodejs_binding_dir):
    args = ['npm', 'test', '--', '--timeout=2000']
    if is_windows():
        args = ['cmd', '/c'] + args
    run_subprocess(args, cwd=nodejs_binding_dir)
def build_python_wheel(
        source_dir, build_dir, configs, use_cuda, use_dnnl,
        use_tensorrt, use_openvino, use_nuphar, use_vitisai, use_acl, use_armnn, use_dml,
        wheel_name_suffix, enable_training, nightly_build=False, featurizers_build=False, use_ninja=False):
    for config in configs:
        cwd = get_config_build_dir(build_dir, config)
        if is_windows() and not use_ninja:
            cwd = os.path.join(cwd, config)
        args = [sys.executable, os.path.join(source_dir, 'setup.py'),
                'bdist_wheel']
        # We explicitly override the platform tag in the name of the generated build wheel
        # so that we can install the wheel on Mac OS X versions 10.12+.
        # Without this explicit override, we will see a warning like this while building on MacOS 10.14 -
        # [WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value (10.12)
        # than the version on which the Python interpreter was compiled (10.14) and will be ignored.
        # Since we need to support 10.12+, we explicitly override the platform tag.
        # See PR #3626 for more details
        if is_macOS():
            args += ['-p', 'macosx_10_12_x86_64']
        # Any combination of the following arguments can be applied
        if nightly_build:
            args.append('--nightly_build')
        if featurizers_build:
            args.append("--use_featurizers")
        if wheel_name_suffix:
            args.append('--wheel_name_suffix={}'.format(wheel_name_suffix))
        if enable_training:
            args.append("--enable_training")
        # The following arguments are mutually exclusive
        if use_tensorrt:
            args.append('--use_tensorrt')
        elif use_cuda:
            args.append('--use_cuda')
        elif use_openvino:
            args.append('--use_openvino')
        elif use_dnnl:
            args.append('--use_dnnl')
        elif use_nuphar:
            args.append('--use_nuphar')
        elif use_vitisai:
            args.append('--use_vitisai')
        elif use_acl:
            args.append('--use_acl')
        elif use_armnn:
            args.append('--use_armnn')
        elif use_dml:
            args.append('--use_dml')
        run_subprocess(args, cwd=cwd)
def derive_linux_build_property():
    if is_windows():
        return "/p:IsLinuxBuild=\"false\""
    else:
        return "/p:IsLinuxBuild=\"true\""
def build_nuget_package(source_dir, build_dir, configs, use_cuda, use_openvino, use_tensorrt, use_dnnl):
    if not (is_windows() or is_linux()):
        raise BuildError(
            'Currently csharp builds and nuget package creation are only supported '
            'on Windows and Linux platforms.')
    csharp_build_dir = os.path.join(source_dir, 'csharp')
    is_linux_build = derive_linux_build_property()
    # derive package name and execution provider based on the build args
    execution_provider = "/p:ExecutionProvider=\"None\""
    package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime\""
    if use_openvino:
        execution_provider = "/p:ExecutionProvider=\"openvino\""
        package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.OpenVino\""
    elif use_tensorrt:
        execution_provider = "/p:ExecutionProvider=\"tensorrt\""
        package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.TensorRT\""
    elif use_dnnl:
        execution_provider = "/p:ExecutionProvider=\"dnnl\""
        package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.DNNL\""
    elif use_cuda:
        package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.Gpu\""
    else:
        pass
    # set build directory based on build_dir arg
    native_dir = os.path.normpath(os.path.join(source_dir, build_dir))
    ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_dir + "\""
    # dotnet restore
    cmd_args = ["dotnet", "restore", "OnnxRuntime.CSharp.sln", "--configfile", "Nuget.CSharp.config"]
    run_subprocess(cmd_args, cwd=csharp_build_dir)
    # build csharp bindings and create nuget package for each config
    for config in configs:
        if is_linux():
            native_build_dir = os.path.join(native_dir, config)
            cmd_args = ["make", "install", "DESTDIR=.//nuget-staging"]
            run_subprocess(cmd_args, cwd=native_build_dir)
        configuration = "/p:Configuration=\"" + config + "\""
        cmd_args = ["dotnet", "msbuild", "OnnxRuntime.CSharp.sln", configuration, package_name, is_linux_build,
                    ort_build_dir]
        run_subprocess(cmd_args, cwd=csharp_build_dir)
        cmd_args = [
            "dotnet", "msbuild", "OnnxRuntime.CSharp.proj", "/t:CreatePackage",
            package_name, configuration, execution_provider, is_linux_build, ort_build_dir]
        run_subprocess(cmd_args, cwd=csharp_build_dir)
def run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl):
    # Currently only running tests on windows.
    if not is_windows():
        return
    csharp_source_dir = os.path.join(source_dir, 'csharp')
    is_linux_build = derive_linux_build_property()
    # define macros based on build args
    macros = ""
    if use_openvino:
        macros += "USE_OPENVINO;"
    if use_tensorrt:
        macros += "USE_TENSORRT;"
    if use_dnnl:
        macros += "USE_DNNL;"
    if use_cuda:
        macros += "USE_CUDA;"
    define_constants = ""
    if macros != "":
        define_constants = "/p:DefineConstants=\"" + macros + "\""
    # set build directory based on build_dir arg
    native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))
    ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_build_dir + "\""
    # Skip pretrained models test. Only run unit tests as part of the build
    # add "--verbosity", "detailed" to this command if required
    cmd_args = ["dotnet", "test", "test\\Microsoft.ML.OnnxRuntime.Tests\\Microsoft.ML.OnnxRuntime.Tests.csproj",
                "--filter", "FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels",
                is_linux_build, define_constants, ort_build_dir]
    run_subprocess(cmd_args, cwd=csharp_source_dir)
def is_cross_compiling_on_apple(args):
    if not is_macOS():
        return False
    if args.ios:
        return True
    if args.osx_arch != platform.machine():
        return True
    return False
def build_protoc_for_host(cmake_path, source_dir, build_dir, args):
    if (args.arm or args.arm64 or args.enable_windows_store) and \
            not (is_windows() or is_cross_compiling_on_apple(args)):
        raise BuildError(
            'Building protoc for the host is currently only supported on a Windows '
            'host when cross-compiling for ARM/ARM64/Store, and on a macOS host '
            'when cross-compiling for iOS')
    log.info(
        "Building protoc for host to be used in cross-compiled build process")
    protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')
    os.makedirs(protoc_build_dir, exist_ok=True)
    # Generate step
    cmd_args = [
        cmake_path,
        os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),
        '-Dprotobuf_BUILD_TESTS=OFF',
        '-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',
        '-Dprotobuf_BUILD_SHARED_LIBS=OFF'
    ]
    is_ninja = args.cmake_generator == 'Ninja'
    if args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
        cmd_args += ['-G', args.cmake_generator]
    if is_windows():
        if not is_ninja:
            cmd_args += ['-T', 'host=x64']
    elif is_macOS():
        if args.use_xcode:
            cmd_args += ['-G', 'Xcode']
            # CMake < 3.18 has a bug setting system arch to arm64 (if not specified) for Xcode 12,
            # protoc for host should be built using host architecture
            # Explicitly specify the CMAKE_OSX_ARCHITECTURES for x86_64 Mac.
            cmd_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(
                'arm64' if platform.machine() == 'arm64' else 'x86_64')]
    run_subprocess(cmd_args, cwd=protoc_build_dir)
    # Build step
    cmd_args = [cmake_path,
                "--build", protoc_build_dir,
                "--config", "Release",
                "--target", "protoc"]
    run_subprocess(cmd_args)
    # Absolute protoc path is needed for cmake
    config_dir = ''
    suffix = ''
    if (is_windows() and not is_ninja) or (is_macOS() and args.use_xcode):
        config_dir = 'Release'
    if is_windows():
        suffix = '.exe'
    expected_protoc_path = os.path.join(protoc_build_dir, config_dir, 'protoc' + suffix)
    if not os.path.exists(expected_protoc_path):
        raise BuildError("Couldn't find {}. Host build of protoc failed.".format(expected_protoc_path))
    return expected_protoc_path
def generate_documentation(source_dir, build_dir, configs):
    operator_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')
    opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')
    for config in configs:
        # Copy the gen_contrib_doc.py.
        shutil.copy(
            os.path.join(source_dir, 'tools', 'python', 'gen_contrib_doc.py'),
            os.path.join(build_dir, config))
        shutil.copy(
            os.path.join(source_dir, 'tools', 'python', 'gen_opkernel_doc.py'),
            os.path.join(build_dir, config))
        run_subprocess(
            [sys.executable,
             'gen_contrib_doc.py',
             '--output_path', operator_doc_path],
            cwd=os.path.join(build_dir, config))
        run_subprocess(
            [sys.executable,
             'gen_opkernel_doc.py',
             '--output_path', opkernel_doc_path],
            cwd=os.path.join(build_dir, config))
    docdiff = ''
    try:
        docdiff = subprocess.check_output(['git', 'diff', opkernel_doc_path])
    except subprocess.CalledProcessError:
        print('git diff returned non-zero error code')
    if len(docdiff) > 0:
        # Show a warning instead of throwing an exception, because the generated
        # list depends on which execution providers were included in the build
        # configuration
        log.warning(
            'The updated opkernel document file ' + str(opkernel_doc_path) +
            ' is different from the checked in version. Consider '
            'regenerating the file with CPU, DNNL and CUDA providers enabled.')
        log.debug('diff:\n' + str(docdiff))
    docdiff = ''
    try:
        docdiff = subprocess.check_output(['git', 'diff', operator_doc_path])
    except subprocess.CalledProcessError:
        print('git diff returned non-zero error code')
    if len(docdiff) > 0:
        raise BuildError(
            'The updated operator document file ' +
            str(operator_doc_path) + ' must be checked in.\n diff:\n' +
            str(docdiff))
def main():
    args = parse_arguments()
    cmake_extra_defines = (args.cmake_extra_defines
                           if args.cmake_extra_defines else [])
    cross_compiling = args.arm or args.arm64 or args.android
    # If there was no explicit argument saying what to do, default
    # to update, build and test (for native builds).
    if not (args.update or args.clean or args.build or args.test):
        log.debug(
            "Defaulting to running update, build "
            "[and test for native builds].")
        args.update = True
        args.build = True
        if cross_compiling:
            args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'
        else:
            args.test = True
    if args.skip_tests:
        args.test = False
    if args.include_ops_by_config:
        from exclude_unused_ops_and_types import exclude_unused_ops_and_types
        exclude_unused_ops_and_types(args.include_ops_by_config,
                                     args.enable_reduced_operator_type_support,
                                     args.use_cuda)
    if args.use_tensorrt:
        args.use_cuda = True
    if args.build_wheel or args.gen_doc:
        args.enable_pybind = True
    if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
        args.build_shared_lib = True
    if args.build_nuget and cross_compiling:
        raise BuildError('Currently nuget package creation is not supported while cross-compiling')
    if args.enable_pybind and args.disable_exceptions:
        raise BuildError('Python bindings require exceptions to be enabled.')
    if args.minimal_build and args.disable_ort_format_load:
        raise BuildError('Minimal build requires loading ORT format models.')
    if args.nnapi_min_api:
        if not args.use_nnapi:
            raise BuildError("Using --nnapi_min_api requires --use_nnapi")
        if args.nnapi_min_api < 27:
            raise BuildError("--nnapi_min_api should be 27+")
    if args.code_coverage and not args.android:
        raise BuildError("Using --code_coverage requires --android")
    # Disabling unit tests for VAD-F as FPGA only supports
    # models with NCHW layout
    if args.use_openvino == "VAD-F_FP32":
        args.test = False
    configs = set(args.config)
    # setup paths and directories
    cmake_path = resolve_executable_path(args.cmake_path)
    ctest_path = None if args.use_vstest else resolve_executable_path(
        args.ctest_path)
    build_dir = args.build_dir
    script_dir = os.path.realpath(os.path.dirname(__file__))
    source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
    # if using cuda, setup cuda paths and env vars
    cuda_home, cudnn_home = setup_cuda_vars(args)
    mpi_home = args.mpi_home
    nccl_home = args.nccl_home
    acl_home = args.acl_home
    acl_libs = args.acl_libs
    armnn_home = args.armnn_home
    armnn_libs = args.armnn_libs
    # if using tensorrt, setup tensorrt paths
    tensorrt_home = setup_tensorrt_vars(args)
    # if using migraphx, setup migraphx paths
    migraphx_home = setup_migraphx_vars(args)
    # if using rocm, setup rocm paths
    rocm_home = setup_rocm_build(args, configs)
    os.makedirs(build_dir, exist_ok=True)
    log.info("Build started")
    if args.update:
        cmake_extra_args = []
        path_to_protoc_exe = args.path_to_protoc_exe
        if not args.skip_submodule_sync:
            update_submodules(source_dir)
        if is_windows():
            if args.cmake_generator == 'Ninja':
                if args.x86 or args.arm or args.arm64:
                    raise BuildError(
                        "To cross-compile with Ninja, load the toolset "
                        "environment for the target processor (e.g. Cross "
                        "Tools Command Prompt for VS)")
                cmake_extra_args = ['-G', args.cmake_generator]
            elif args.x86:
                cmake_extra_args = [
                    '-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator
                ]
            elif args.arm or args.arm64:
                # Cross-compiling for ARM(64) architecture
                # First build protoc for host to use during cross-compilation
                if path_to_protoc_exe is None:
                    path_to_protoc_exe = build_protoc_for_host(
                        cmake_path, source_dir, build_dir, args)
                if args.arm:
                    cmake_extra_args = ['-A', 'ARM']
                else:
                    cmake_extra_args = ['-A', 'ARM64']
                cmake_extra_args += ['-G', args.cmake_generator]
                # Cannot test on host build machine for cross-compiled
                # builds (Override any user-defined behaviour for test if any)
                if args.test:
                    log.warning(
                        "Cannot test on host build machine for cross-compiled "
                        "ARM(64) builds. Will skip test running after build.")
                    args.test = False
            else:
                if (args.msvc_toolset == '14.16' and
                        args.cmake_generator == 'Visual Studio 16 2019'):
                    # CUDA 10.0 requires _MSC_VER >= 1700 and
                    # _MSC_VER < 1920, i.e. a Visual Studio version
                    # in [2012, 2019). In VS2019 we therefore have to use a
                    # side-by-side minor-version MSVC toolset from
                    # Visual Studio 2017 (14.16 is the MSVC version,
                    # v141 is the MSVC toolset version).
                    # The CUDA VS extension should be installed to
                    # C:\Program Files (x86)\Microsoft Visual
                    # Studio\2019\Enterprise\MSBuild\Microsoft\VC\v160\BuildCustomizations  # noqa
                    toolset = 'v141,host=x64,version=' + args.msvc_toolset
                elif args.msvc_toolset:
                    toolset = 'host=x64,version=' + args.msvc_toolset
                else:
                    toolset = 'host=x64'
                if args.cuda_version:
                    toolset += ',cuda=' + args.cuda_version
                cmake_extra_args = [
                    '-A', 'x64', '-T', toolset, '-G', args.cmake_generator
                ]
            if args.enable_windows_store:
                cmake_extra_args.append(
                    '-DCMAKE_TOOLCHAIN_FILE=' + os.path.join(
                        source_dir, 'cmake', 'store_toolchain.cmake'))
            if args.enable_wcos:
                cmake_extra_args.append('-DCMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')
        elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
            cmake_extra_args += ['-G', args.cmake_generator]
        elif is_macOS():
            if args.use_xcode:
                cmake_extra_args += ['-G', 'Xcode']
            if not args.ios and not args.android and \
                    args.osx_arch == 'arm64' and platform.machine() == 'x86_64':
                if args.test:
                    log.warning(
                        "Cannot test ARM64 build on X86_64. Will skip test running after build.")
                    args.test = False
        if (args.android or args.ios or args.enable_windows_store
                or is_cross_compiling_on_apple(args)) and args.path_to_protoc_exe is None:
            # Cross-compiling for Android, iOS, Windows Store, or on an Apple host
            path_to_protoc_exe = build_protoc_for_host(
                cmake_path, source_dir, build_dir, args)
        if is_ubuntu_1604():
            if (args.arm or args.arm64):
                raise BuildError(
                    "Only Windows ARM(64) cross-compiled builds supported "
                    "currently through this script")
            if not is_docker() and not args.use_acl and not args.use_armnn:
                install_python_deps()
        if args.enable_pybind and is_windows():
            install_python_deps(args.numpy_version)
        if args.enable_onnx_tests:
            setup_test_data(build_dir, configs)
        generate_build_tree(
            cmake_path, source_dir, build_dir, cuda_home, cudnn_home, rocm_home, mpi_home, nccl_home,
            tensorrt_home, migraphx_home, acl_home, acl_libs, armnn_home, armnn_libs,
            path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args)
    if args.clean:
        clean_targets(cmake_path, build_dir, configs)
    # if using DML, perform initial nuget package restore
    setup_dml_build(args, cmake_path, build_dir, configs)
    if args.build:
        if args.parallel < 0:
            raise BuildError("Invalid parallel job count: {}".format(args.parallel))
        num_parallel_jobs = os.cpu_count() if args.parallel == 0 else args.parallel
        build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, args.target)
    if args.test:
        run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)
        # run nuphar python tests last, as it installs ONNX 1.5.0
        if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
            nuphar_run_python_tests(build_dir, configs)
        # run node.js binding tests
        if args.build_nodejs and not args.skip_nodejs_tests:
            nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "nodejs"))
            run_nodejs_tests(nodejs_binding_dir)
    if args.build:
        if args.build_wheel:
            nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')
            build_python_wheel(
                source_dir,
                build_dir,
                configs,
                args.use_cuda,
                args.use_dnnl,
                args.use_tensorrt,
                args.use_openvino,
                args.use_nuphar,
                args.use_vitisai,
                args.use_acl,
                args.use_armnn,
                args.use_dml,
                args.wheel_name_suffix,
                args.enable_training,
                nightly_build=nightly_build,
                featurizers_build=args.use_featurizers,
                use_ninja=(args.cmake_generator == 'Ninja')
            )
        if args.build_nuget:
            build_nuget_package(
                source_dir,
                build_dir,
                configs,
                args.use_cuda,
                args.use_openvino,
                args.use_tensorrt,
                args.use_dnnl
            )
    if args.test and args.build_nuget:
        run_csharp_tests(
            source_dir,
            build_dir,
            args.use_cuda,
            args.use_openvino,
            args.use_tensorrt,
            args.use_dnnl)
    if args.gen_doc and (args.build or args.test):
        generate_documentation(source_dir, build_dir, configs)
    log.info("Build complete")
if __name__ == "__main__":
    try:
        sys.exit(main())
    except BaseError as e:
        log.error(str(e))
        sys.exit(1)
 | 
| 
	the-stack_106_32230 | 
	import numpy as np
import ants
from cinemri.registration import Registrator
from cinemri.contour import Contour, get_anterior_wall_data, mask_to_contour
from cinemri.utils import numpy_2d_to_ants, average_by_vicinity, plot_vs_on_frame
from enum import Enum, unique
@unique
class VSNormType(Enum):
    none = 0
    # normalise by the average motion along the anterior abdominal wall
    average_anterior_wall = 1
    # normalize by average motion in the vicinity of each point
    contour_vicinity = 2
@unique
class VSNormField(Enum):
    # use the masked deformation field of the abdominal cavity surroundings
    rest = 0
    # use the entire deformation field between two frames
    complete = 1
@unique
class VSWarpingField(Enum):
    # use the masked deformation field of the abdominal cavity surroundings
    rest = 0
    # use the deformation field between abdominal cavity contours
    contours = 1
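# Note: the detector classes below default to VSNormType.none for normalisation,
# VSNormField.rest for the normalisation field and VSWarpingField.contours for
# warping, as can be seen in their get_visceral_slide signatures.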
class VisceralSlideDetector:
    @staticmethod
    def calculate_visceral_slide(deformation_field, contour):
        """
        Calculates visceral slide along the specified contour given the specified deformation field
        Parameters
        ----------
        deformation_field : ndarray
           The 2-channels deformation field
        contour : Contour
           The contour along which to calculate the visceral slide
        Returns
        -------
        visceral_slide : ndarray
           Visceral slide along the specified contour given the specified deformation field
        """
        # Get contour and normal neighbours
        uv = np.column_stack((contour.u, contour.v))
        inner_deformation = deformation_field[contour.y, contour.x]
        inner_slide = (uv * inner_deformation).sum(1)
        outer_deformation = deformation_field[contour.y_neighbour, contour.x_neighbour]
        outer_slide = (uv * outer_deformation).sum(1)
        # When (x, y) == (x_neighbour, y_neighbour) it means that (x, y) is located
        # at the border of a frame and a real (x_neighbour, y_neighbour) is outside
        # of the frame. In this case the inner and the outer deformation is the same
        # and the visceral slide will be underestimated.
        # Hence, to mitigate it, we set the outer slide to 0 for such points
        # TODO: think if an option better than this workaround is possible, e.g nearest-neighbour like approach
        inner_coord = np.column_stack((contour.x, contour.y))
        outer_coord = np.column_stack((contour.x_neighbour, contour.y_neighbour))
        coords_equal_inds = np.where((inner_coord == outer_coord).all(axis=1))[0]
        outer_slide[coords_equal_inds] = 0
        visceral_slide = inner_slide - outer_slide
        return visceral_slide
    @staticmethod
    def get_motion(deformation_field, x, y, u, v):
        """
        Gets motion along the (x, y) contour in direction specified by (u, v)
        Parameters
        ----------
        deformation_field : ndarray
            A deformation field describing the motion
        x, y : list of int
            The coordinates specifying the contour along which to calculate the motion
        u, v : list of int
            The components of the direction vector at each (x, y) along which to calculate the motion
        Returns
        -------
        motion : ndarray of float
            The motion along the (x, y) contour in the (u, v) direction
        """
        motion = []
        for i in range(len(x)):
            deformation = deformation_field[y[i], x[i]]
            current_motion = np.abs(deformation.dot([u[i], v[i]]))
            motion.append(current_motion)
        # Replace zeros with the smallest non-zero value
        zero_placeholder = np.min([value for value in motion if value > 0])
        motion = [value if value > 0 else zero_placeholder for value in motion]
        return np.array(motion)
    def normalize_vs_by_motion(self, visceral_slide, contour, normalization_type, normalization_df, norm_vicinity):
        """
        Normalises visceral slide by motion according to the specified normalisation type
        Parameters
        ----------
        visceral_slide : ndarray
           The visceral slide to normalise
        contour : Contour
           The contour for which the visceral slide was calculated
        normalization_type : VSNormType
           The type of normalisation to apply
        normalization_df : ndarray
           The 2-channels deformation field to use for normalisation
        norm_vicinity : int
           The size of vicinity for vicinity normalisation
        Returns
        -------
        visceral_slide : ndarray
           A normalised visceral slide
        """
        if normalization_type == VSNormType.average_anterior_wall:
            x_aw, y_aw, u_aw, v_aw = get_anterior_wall_data(contour.x_neighbour, contour.y_neighbour,
                                                            contour.u_normal, contour.v_normal)
            abdominal_wall_motion = self.get_motion(normalization_df, x_aw, y_aw, u_aw, v_aw)
            visceral_slide = visceral_slide / np.mean(abdominal_wall_motion)
        elif normalization_type == VSNormType.contour_vicinity:
            contour_motion = self.get_motion(normalization_df, contour.x_neighbour, contour.y_neighbour,
                                             contour.u_normal, contour.v_normal)
            contour_motion_averaged = average_by_vicinity(contour_motion, norm_vicinity)
            visceral_slide = visceral_slide / contour_motion_averaged
        return visceral_slide
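# The sketch below is not part of the original pipeline; it is a minimal,
# illustrative example of how VisceralSlideDetector.calculate_visceral_slide
# could be exercised on synthetic data. It assumes that Contour.from_mask
# (already used elsewhere in this module) accepts a binary uint8 mask. With a
# spatially uniform deformation field the inner and outer projections cancel,
# so the resulting visceral slide is expected to be (close to) zero.
def _example_visceral_slide_on_uniform_field(size=32):
    mask = np.zeros((size, size), dtype=np.uint8)
    mask[8:24, 8:24] = 1
    # 2-channel deformation field: one pixel of displacement along x, none along y
    deformation_field = np.zeros((size, size, 2))
    deformation_field[..., 0] = 1.0
    contour = Contour.from_mask(mask)
    visceral_slide = VisceralSlideDetector.calculate_visceral_slide(deformation_field, contour)
    return contour.x, contour.y, visceral_slide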
class VisceralSlideDetectorDF(VisceralSlideDetector):
    def get_visceral_slide(self,
                           df_cavity,
                           df_rest,
                           df_normalization,
                           moving_mask,
                           normalization_type=VSNormType.none,
                           norm_vicinity=15):
        """
        Calculates visceral slide based on the passed deformation field and abdominal cavity mask
        and normalises it with specified normalisation option
        Parameters
        ----------
        df_cavity : ndarray
           The 2-channels masked deformation field of abdominal cavity content
        df_rest : ndarray
           The 2-channels masked deformation field of abdominal cavity surroundings
        df_normalization : ndarray
           The 2-channels deformation field to use for normalisation
        moving_mask : ndarray
           The abdominal cavity segmentation on a moving frame
        normalization_type : VSNormType, default = VSNormType.none
           The type of normalisation to apply
        norm_vicinity : int, default = 15
           The size of vicinity for vicinity normalisation
        Returns
        -------
        x, y, visceral_slide : ndarray
            Coordinates of moving_mask contour and absolute values of visceral slide at these coordinates
        """
        df_full = df_cavity + df_rest
        contour = Contour.from_mask(moving_mask)
        visceral_slide = self.calculate_visceral_slide(df_full, contour)
        visceral_slide = np.abs(visceral_slide)
        # Normalize with the provided option
        visceral_slide = self.normalize_vs_by_motion(visceral_slide,
                                                     contour,
                                                     normalization_type,
                                                     df_normalization,
                                                     norm_vicinity)
        return contour.x, contour.y, visceral_slide
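# A hypothetical call of the detector above, assuming df_cavity, df_rest and the
# normalisation field are precomputed 2-channel deformation fields of shape
# (H, W, 2) and moving_mask is a binary abdominal cavity mask of shape (H, W):
#   detector = VisceralSlideDetectorDF()
#   x, y, vs = detector.get_visceral_slide(
#       df_cavity, df_rest, df_rest, moving_mask,
#       normalization_type=VSNormType.average_anterior_wall)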
class VisceralSlideDetectorReg(VisceralSlideDetector):
    def __init__(self):
        self.registrator = Registrator()
    def get_visceral_slide(self,
                           moving,
                           moving_mask,
                           fixed,
                           fixed_mask=None,
                           normalization_type=VSNormType.none,
                           normalization_field=VSNormField.rest,
                           norm_vicinity=15):
        """Calculates visceral slide based on the moving and fixed frames and their masks using image registration
        and normalises it with specified normalisation option
        Parameters
        ----------
        moving : ndarray
           Frame of cine MRI series to use as a moving image
        moving_mask : binary ndarray
           A binary segmentation of the abdominal cavity on moving frame
        fixed : ndarray
           Frame of cine MRI series to use as a fixed image
        fixed_mask : binary ndarray, optional
           A binary segmentation of the abdominal cavity on fixed frame
        normalization_type : VSNormType, default = VSNormType.none
           A type of visceral slide normalization to apply
        normalization_field : VSNormField, default=VSNormField.rest
           Specifies which deformation field to use for visceral slide normalization
        norm_vicinity : int, default = 15
           A vicinity to use for VSNormType.contour_vicinity normalization type
        Returns
        -------
        x, y, visceral_slide : ndarray
            Coordinates of moving_mask contour and absolute values of visceral slide at these coordinates
        """
        moving = moving.astype(np.uint32)
        fixed = fixed.astype(np.uint32)
        moving_mask = moving_mask.astype(np.uint8)
        # We need to calculate the transformation between the entire moving and fixed frames
        # if the fixed mask is missing or we want to normalise the VS by the entire deformation field
        complete_df = None
        complete_df_needed = (fixed_mask is None or
                             (normalization_type != VSNormType.none and normalization_field == VSNormField.complete))
        if complete_df_needed:
            # Register moving to fixed without mask
            transforms, complete_df = self.registrator.register(fixed, moving)
        if fixed_mask is None:
            # If the mask of the fixed image is not supplied, compute it through deformation of moving mask
            # Propagate moving mask to fixed
            fixed_mask = ants.apply_transforms(
                fixed=numpy_2d_to_ants(fixed),
                moving=numpy_2d_to_ants(moving_mask),
                transformlist=transforms["fwdtransforms"]
            ).numpy()
        fixed_mask = fixed_mask.astype(np.uint8)
        # Compute full deformation field as a sum of abdominal cavity and surroundings deformations
        deformation_field = self.get_full_deformation_field(fixed, moving, fixed_mask, moving_mask)
        contour = Contour.from_mask(moving_mask)
        visceral_slide = self.calculate_visceral_slide(deformation_field, contour)
        visceral_slide = np.abs(visceral_slide)
        normalization_df = self.rest_field if normalization_field == VSNormField.rest else complete_df
        visceral_slide = self.normalize_vs_by_motion(visceral_slide,
                                                     contour,
                                                     normalization_type,
                                                     normalization_df,
                                                     norm_vicinity)
        return contour.x, contour.y, visceral_slide
    def get_full_deformation_field(self, fixed, moving, fixed_cavity_mask, moving_cavity_mask):
        """
        Calculates the full deformation field
        Parameters
        ----------
        fixed, moving : ndarray
           A fixed and moving cine-MRI frame
        fixed_cavity_mask, moving_cavity_mask : ndarray
           Abdominal cavity segmentation masks on fixed and moving frames
        Returns
        -------
        deformation_field : ndarray
           The full 2-channel deformation field, computed as the sum of the masked
           abdominal cavity and surroundings deformation fields
        """
        # Get cavity deformation field
        self.cavity_field = self.registrator.get_masked_deformation_field(fixed,
                                                                          moving,
                                                                          fixed_cavity_mask,
                                                                          moving_cavity_mask)
        # Get rest deformation field
        self.rest_field = self.registrator.get_masked_deformation_field(fixed,
                                                                        moving,
                                                                        1 - fixed_cavity_mask,
                                                                        1 - moving_cavity_mask)
        # Combine deformation fields into one
        return self.cavity_field + self.rest_field
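# A hypothetical registration-based call, assuming two cine-MRI frames and their
# binary abdominal cavity masks as 2D ndarrays:
#   detector = VisceralSlideDetectorReg()
#   x, y, vs = detector.get_visceral_slide(
#       moving, moving_mask, fixed, fixed_mask,
#       normalization_type=VSNormType.contour_vicinity,
#       normalization_field=VSNormField.rest)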
class CumulativeVisceralSlideDetector:
    @staticmethod
    def warp_visceral_slide(x, y, visceral_slide, deformation_field):
        """
        Warps visceral slide by deformation field
        Parameters
        ----------
        x, y : ndarray
           The coordinates of visceral slide
        visceral_slide : ndarray
           The values of visceral slide
        deformation_field : ndarray
           A deformation field to perform warping
        Returns
        -------
        x, y, visceral_slide : list
           Visceral slide warped by deformation field
        """
        xy_warped = []
        for (current_x, current_y) in zip(x, y):
            u, v = deformation_field[current_y, current_x]
            xy_warped.append([round(current_x + u), round(current_y + v)])
        xy_warped = np.array(xy_warped)
        visceral_slide_warped = np.column_stack((xy_warped, visceral_slide))
        # Warped contour might have duplicated points
        # Find these points and take an average as VS slide value
        xy_warped_unique = np.unique(xy_warped, axis=0)
        visceral_slide_warped_unique = []
        for coord in xy_warped_unique:
            # Find all point with the current coordinate in the warped VS
            vs_at_coord = np.array([vs for vs in visceral_slide_warped if vs[0] == coord[0] and vs[1] == coord[1]])
            avg_vs = np.mean(vs_at_coord[..., 2])
            visceral_slide_warped_unique.append(avg_vs)
        return xy_warped_unique[:, 0], xy_warped_unique[:, 1], visceral_slide_warped_unique
    @staticmethod
    def add_visceral_slides(visceral_slide, visceral_slide_warped):
        """
        Adds two visceral slides. Possible coordinate mismatches are handled by matching each
        target point with the closest warped point by Euclidean distance
        Parameters
        ----------
        visceral_slide : tuple of list
           The coordinates and values of the first visceral slide
        visceral_slide_warped : tuple of list
           The coordinates and values of the second visceral slide warped to match the first one
        Returns
        -------
        visceral_slide : tuple of list
           The coordinates of the first visceral slide and the values obtained by adding
           the warped second visceral slide to the first one
        """
        x_target, y_target, vs_target = visceral_slide
        x_warped, y_warped, vs_warped = visceral_slide_warped
        # Since some coordinates might mismatch after deformation,
        # the closest point of the total VS is used to sum with the current VS value
        vs_added = []
        for (x, y, vs) in zip(x_target, y_target, vs_target):
            diff = np.sqrt((x - x_warped) ** 2 + (y - y_warped) ** 2)
            index = np.argmin(diff)
            vs_added.append(vs + vs_warped[index])
        return x_target, y_target, np.array(vs_added)
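    # For example, if a target contour point (12, 40) has no exact counterpart among
    # the warped points but (13, 41) is the nearest warped point, the warped value at
    # (13, 41) is added to the value at (12, 40). (Illustrative coordinates only.)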
    def compute_cumulative_visceral_slide(self, visceral_slides, frames, warping_dfs, plot=False):
        """
        Computes the cumulative visceral slide by adding the visceral slides between subsequent
        cine-MRI frame pairs and averaging the result
        Parameters
        ----------
        visceral_slides : list of tuple
           An ordered list of visceral slides for subsequent cine-MRI frame pairs
        frames : list of ndarray
           An ordered list of cine-MRI frames that correspond to visceral slides
        warping_dfs : list of ndarray
           An ordered list of deformation fields to use for warping the cumulative visceral slide
        plot : bool, default=False
           A boolean flag indicating whether to visualise computation by plotting a current visceral slide,
           a warped cumulative visceral slide and new cumulative visceral slide at each step
        Returns
        -------
        total_x, total_y, total_vs : ndarray
           Coordinates and values of cumulative visceral slide
        """
        total_x = total_y = None
        total_vs = None
        for i, (x, y, vs) in enumerate(visceral_slides):
            if plot:
                frame = frames[i]
                plot_vs_on_frame(frame, x, y, vs, "VS {}".format(i))
            # At the first step, the visceral slide between first two frames is total
            if total_vs is None:
                total_x, total_y = x, y
                total_vs = vs
            else:
                # We want to transform the cumulative visceral slide to the frame corresponding to the current
                # visceral slide. That is the current moving frame, hence we need the previous transformation
                # and we need to take the contour of the current moving image as the fixed image.
                warping_df = warping_dfs[i - 1]
                total_x_warped, total_y_warped, total_vs_warped = self.warp_visceral_slide(total_x,
                                                                                           total_y,
                                                                                           total_vs,
                                                                                           warping_df)
                if plot:
                    plot_vs_on_frame(frame, total_x_warped, total_y_warped, total_vs_warped,
                                       "Total VS warped {}".format(i - 1))
                total_x, total_y, total_vs = self.add_visceral_slides((x, y, vs),
                                                                      (total_x_warped, total_y_warped, total_vs_warped))
                if plot:
                    plot_vs_on_frame(frame, total_x, total_y, total_vs, "Total VS {}".format(i - 1))
        # Take the average of total visceral slide
        total_vs /= len(visceral_slides)
        return total_x, total_y, total_vs
class CumulativeVisceralSlideDetectorDF(CumulativeVisceralSlideDetector):
    def __init__(self):
        self.vs_detector = VisceralSlideDetectorDF()
    def get_visceral_slide(self,
                           moving_masks,
                           cavity_dfs,
                           rest_dfs,
                           warping_dfs,
                           normalization_dfs,
                           normalization_type=VSNormType.none,
                           norm_vicinity=15,
                           plot=False):
        """
        Computes cumulative visceral slide based on the specified deformation fields, moving masks
        and normalization parameters
        Parameters
        ----------
        moving_masks : list of ndarray
           An ordered list of abdominal cavity segmentation masks on the moving frame
           for each frame pair of the cine-MRI series
        cavity_dfs : list of ndarray
           An ordered list of masked abdominal cavity deformation fields
        rest_dfs : list of ndarray
           An ordered list of masked abdominal cavity surroundings deformation fields
        warping_dfs : list of ndarray
           An ordered list of deformation fields to use for warping of cumulative visceral slide during summation
        normalization_dfs : list of ndarray
           An ordered list of deformation fields to use for visceral slide normalization
        normalization_type : VSNormType, default=VSNormType.none
           A type of visceral slide normalization to apply
        norm_vicinity : int, default=15
           A vicinity to use for VSNormType.contour_vicinity normalization type
        plot : bool, default=False
           A boolean flag indicating whether to visualise computation by plotting a current visceral slide,
           a warped cumulative visceral slide and new cumulative visceral slide at each step
        Returns
        -------
        total_x, total_y, total_vs : ndarray
           Coordinates and values of cumulative visceral slide
        """
        visceral_slides = []
        for i in range(len(moving_masks)):
            moving_mask = moving_masks[i]
            cavity_df = cavity_dfs[i]
            rest_df = rest_dfs[i]
            normalization_df = normalization_dfs[i]
            x, y, visceral_slide = self.vs_detector.get_visceral_slide(cavity_df,
                                                                       rest_df,
                                                                       normalization_df,
                                                                       moving_mask,
                                                                       normalization_type,
                                                                       norm_vicinity)
            visceral_slides.append((x, y, visceral_slide))
        total_x, total_y, total_vs = self.compute_cumulative_visceral_slide(visceral_slides, moving_masks, warping_dfs, plot)
        return total_x, total_y, total_vs
class CumulativeVisceralSlideDetectorReg(CumulativeVisceralSlideDetector):
    def __init__(self):
        self.vs_detector = VisceralSlideDetectorReg()
        self.registrator = Registrator()
    def get_visceral_slide(self,
                           series,
                           masks,
                           warping_field=VSWarpingField.contours,
                           normalization_type=VSNormType.none,
                           normalization_field=VSNormField.rest,
                           norm_vicinity=15,
                           plot=False):
        """
        Computes the cumulative visceral slide across all frames of a cine-MRI slice.
        The cumulative visceral slide is aligned with the current one by warping it with a
        deformation field and matching each contour point to its closest counterpart.
        Parameters
        ----------
        series : ndarray
           A cine-MRI slice to compute the cumulative visceral slide
        masks : ndarray
           An abdominal cavity segmentation corresponding to the cine-MRI slice
        warping_field : VSWarpingField, default=VSWarpingField.contours
           Specifies which deformation field to use for visceral slide warping during addition
        normalization_type : VSNormType, default=VSNormType.none
           A type of visceral slide normalization to apply
        normalization_field : VSNormField, default=VSNormField.rest
           Specifies which deformation field to use for visceral slide normalization
        norm_vicinity : int, default=15
           A vicinity to use for VSNormType.contour_vicinity normalization type
        plot : bool, default=False
           A boolean flag indicating whether to visualise computation by plotting a current visceral slide,
           a warped cumulative visceral slide and the new cumulative visceral slide at each step
        Returns
        -------
        total_x, total_y, total_vs : ndarray
           Coordinates and values of cumulative visceral slide
        """
        # First, compute and save visceral slide for each subsequent pair of frames and contours transformation
        visceral_slides = []
        warping_fields = []
        for i in range(1, len(series)):
            print("Processing pair {}".format(i))
            # Taking previous frames as moving and the next one as fixed
            moving = series[i - 1].astype(np.uint32)
            moving_mask = masks[i - 1].astype(np.uint32)
            fixed = series[i].astype(np.uint32)
            fixed_mask = masks[i].astype(np.uint32)
            # Get visceral slide
            x, y, visceral_slide = self.vs_detector.get_visceral_slide(moving,
                                                                       moving_mask,
                                                                       fixed,
                                                                       fixed_mask,
                                                                       normalization_type,
                                                                       normalization_field,
                                                                       norm_vicinity)
            visceral_slides.append((x, y, visceral_slide))
            if warping_field == VSWarpingField.contours:
                _, deformation_field = self.__contour_transforms(fixed_mask, moving_mask, np.iinfo(np.uint16).max)
                warping_fields.append(deformation_field)
            else:
                warping_fields.append(self.vs_detector.rest_field)
        total_x, total_y, total_vs = self.compute_cumulative_visceral_slide(visceral_slides, series, warping_fields,
                                                                            plot)
        return total_x, total_y, total_vs
    def __contour_transforms(self, fixed_mask, moving_mask, contour_value):
        """
        Computes transformation between contours of two masks by converting masks images to contour images
        and performing image registration
        Parameters
        ----------
        fixed_mask : ndarray
           An image of a fixed mask
        moving_mask : ndarray
           An image of a moving mask
        contour_value : int
            A value to fill in at the contour coordinates
        Returns
        -------
        transforms, deformation_field
           The transformation parameters and the deformation field between the moving and fixed
           contours, computed with the ANTs toolkit
        """
        fixed_contour = mask_to_contour(fixed_mask, contour_value)
        moving_contour = mask_to_contour(moving_mask, contour_value)
        return self.registrator.register(fixed_contour, moving_contour)
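# A minimal usage sketch (illustrative only, not part of the original module); the
# cine-MRI series/masks arrays and the VSNormType/VSWarpingField options are assumed
# to be provided by the surrounding package:
#
#   detector = CumulativeVisceralSlideDetectorReg()
#   total_x, total_y, total_vs = detector.get_visceral_slide(
#       series, masks,
#       warping_field=VSWarpingField.contours,
#       normalization_type=VSNormType.none,
#   )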
 | 
| 
	the-stack_106_32232 | 
	from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    assert not name.startswith("_"), name
    s = msat_declare_function(menv, name, c_type)
    s = msat_make_constant(menv, s)
    x_s = msat_declare_function(menv, name_next(name), c_type)
    x_s = msat_make_constant(menv, x_s)
    return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
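# Illustrative note (added for clarity): for enum_size=3, make_enum allocates
# ceil(log2(3)) = 2 boolean constants b0, b1 and returns, for each value 0..2,
# the conjunction fixing those bits; e.g. value 2 = '10' in binary maps to
# (not b0) & b1, with the matching primed conjunction over the next-state copies.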
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    m_one = msat_make_number(menv, "-1")
    arg1 = msat_make_times(menv, arg1, m_one)
    return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    geq = msat_make_geq(menv, arg0, arg1)
    return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    leq = msat_make_leq(menv, arg0, arg1)
    return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    n_arg0 = msat_make_not(menv, arg0)
    return msat_make_or(menv, n_arg0, arg1)
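# The helpers above derive the remaining operators from the MathSAT primitives:
# a - b is a + (-1)*b, a < b is not(b <= a), a >= b is b <= a, a > b is not(a <= b),
# and (a -> b) is (not a) | b.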
def diverging_symbs(menv: msat_env) -> frozenset:
    real_type = msat_get_rational_type(menv)
    delta = msat_declare_function(menv, delta_name, real_type)
    delta = msat_make_constant(menv, delta)
    return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    r2s, x_r2s = decl_consts(menv, "r2s", int_type)
    s2r, x_s2r = decl_consts(menv, "s2r", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
    receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
    curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
    for comp in [sender, receiver]:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s
    zero = msat_make_number(menv, "0")
    init = msat_make_and(menv, receiver.init, sender.init)
    trans = msat_make_and(menv, receiver.trans, sender.trans)
    # invar delta >= 0
    init = msat_make_and(menv, init,
                         msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans,
                          msat_make_geq(menv, x_delta, zero))
    # delta > 0 -> (r2s' = r2s & s2r' = s2r)
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv,
                        msat_make_equal(menv, x_r2s, r2s),
                        msat_make_equal(menv, x_s2r, s2r))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # (G F !s.stutter) -> G (s.wait_ack -> F s.send)
    lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
    rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
                                    enc.make_F(sender.send)))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl
class Module:
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        self.symb2next = {}
        true = msat_make_true(menv)
        self.init = true
        self.trans = true
    def _symb(self, v_name, v_type):
        v_name = "{}_{}".format(self.name, v_name)
        return decl_consts(self.menv, v_name, v_type)
    def _enum(self, v_name: str, enum_size: int):
        c_name = "{}_{}".format(self.name, v_name)
        return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        evt, x_evt = self._symb("evt", bool_type)
        msg_id, x_msg_id = self._symb("msg_id", int_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        c, x_c = self._symb("c", real_type)
        self.move = evt
        self.stutter = msat_make_not(menv, evt)
        self.x_move = x_evt
        self.x_stutter = msat_make_not(menv, x_evt)
        self.send = loc
        self.wait_ack = msat_make_not(menv, loc)
        self.x_send = x_loc
        self.x_wait_ack = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
                          timeout: x_timeout, c: x_c}
        zero = msat_make_number(menv, "0")
        one = msat_make_number(menv, "1")
        base_timeout = one
        # send & c = 0 & msg_id = 0
        self.init = msat_make_and(menv,
                                  msat_make_and(menv, self.send,
                                                msat_make_equal(menv, c,
                                                                zero)),
                                  msat_make_equal(menv, msg_id, zero))
        # invar: wait_ack -> c <= timeout
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, self.wait_ack,
                           msat_make_leq(menv, c, timeout)))
        self.trans = msat_make_impl(menv, self.x_wait_ack,
                                    msat_make_leq(menv, x_c, x_timeout))
        # delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
        # c' = c + delta & out_c' = out_c
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_msg_id, msg_id)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_timeout, timeout),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        disc_t = msat_make_and(menv, self.move,
                               msat_make_equal(menv, delta, zero))
        # (send & send') ->
        # (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_send))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id, msg_id),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (send & wait_ack') ->
        # (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_wait_ack))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id,
                                          msat_make_plus(menv, msg_id, one)),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c >= timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs_iff = msat_make_and(menv,
                                msat_make_not(menv,
                                              msat_make_equal(menv, in_c,
                                                              msg_id)),
                                msat_make_geq(menv, c, timeout))
        rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
        rhs = msat_make_and(menv,
                            msat_make_and(menv,
                                          msat_make_equal(menv, x_c, zero),
                                          msat_make_equal(menv, x_out_c,
                                                          out_c)),
                            rhs_iff)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & wait_ack') -> (timeout' > timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack,
                                          self.x_wait_ack))
        rhs = msat_make_gt(menv, x_timeout, timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs = msat_make_iff(menv, self.x_send,
                            msat_make_and(menv,
                                          msat_make_equal(menv, in_c, msg_id),
                                          msat_make_lt(menv, c, timeout)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & send') -> (timeout' = base_timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack, self.x_send))
        rhs = msat_make_equal(menv, x_timeout, base_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        self.wait = loc
        self.work = msat_make_not(menv, loc)
        self.x_wait = x_loc
        self.x_work = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc}
        zero = msat_make_number(menv, "0")
        # wait
        self.init = self.wait
        # delta > 0 -> loc' = loc & out_c' = out_c
        lhs = msat_make_gt(menv, delta, zero)
        rhs = msat_make_and(menv,
                            msat_make_iff(menv, x_loc, loc),
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_impl(menv, lhs, rhs)
        disc_t = msat_make_equal(menv, delta, zero)
        # wait -> (wait' <-> in_c = out_c)
        lhs = msat_make_and(menv, disc_t, self.wait)
        rhs = msat_make_iff(menv, self.x_wait,
                            msat_make_equal(menv, in_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & wait') -> (out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_wait))
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & work') -> out_c' = in_c
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_work))
        rhs = msat_make_equal(menv, x_out_c, in_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # work -> out_c' = out_c
        lhs = msat_make_and(menv, disc_t, self.work)
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("r2s", types.INT)
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)
    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])
    x_delta = symb_to_next(mgr, delta)
    x_r2s = symb_to_next(mgr, r2s)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_msg_id = symb_to_next(mgr, s_msg_id)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)
    res = []
    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)
    loc0 = Location(env, mgr.Equals(r2s, i0))
    loc0.set_progress(0, mgr.Equals(x_r2s, i0))
    hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(s_msg_id, i0))
    loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
    hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, s_evt)
    loc0.set_progress(1, mgr.Not(x_s_evt))
    loc1 = Location(env, mgr.Not(s_evt))
    loc1.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    loc0 = Location(env, r_l)
    loc0.set_progress(1, mgr.Not(x_r_l))
    loc1 = Location(env, mgr.Not(r_l))
    loc1.set_progress(0, x_r_l)
    hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    return frozenset(res)
 | 
| 
	the-stack_106_32239 | 
	from pathlib import Path
import subprocess as sp
def get_model_path(model_name, model_type=None):
    """
    creates the path based on the model_name
    
    model_name: string value indicationg the <org>/<model>/<version>
    model_type: model type for braingen and kwyk model
    Returns
    -------
    model_path
    
    """
    # TO DO: model database can be a json file and be updated separately
    models = {"neuronets/ams/0.1.0": "meningioma_T1wc_128iso_v1.h5",
              "neuronets/braingen/0.1.0": ["generator_res_8",
                                 "generator_res_16",
                                 "generator_res_32",
                                 "generator_res_64",
                                 "generator_res_128",
                                 "generator_res_256"],
              "neuronets/brainy/0.1.0": "brain-extraction-unet-128iso-model.h5",
              "neuronets/kwyk/0.4.1": ["all_50_wn",
                                      "all_50_bwn_09_multi",
                                      "all_50_bvwn_multi_prior"],
              "UCL/SynthSeg/0.1": "SynthSeg.h5"
              }
    
    # model type should be given for braingen and kwyk
    if model_name in ["braingen","kwyk"] and model_type not in models[model_name]:
        raise Exception("Model type should be one of {} but it is {}".format(
          models[model_name], model_type))
        
    root_path = Path(__file__).resolve().parent / "trained-models"
    
    if model_name in ["neuronets/braingen/0.1.0", "neuronets/kwyk/0.4.1"]:
        raise Exception("This model has not added yet.")
        #model_file = os.path.join(models[model_name],model_type)
    else:
        model_file = models[model_name]
    # create the model path     
    model_path = root_path / model_name / model_file
    return model_path
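# Example (illustrative; any model id must be one of the keys in the models dict above):
#   get_model_path("neuronets/brainy/0.1.0")
#   -> <package dir>/trained-models/neuronets/brainy/0.1.0/brain-extraction-unet-128iso-model.h5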
def load_model(path):
    """ Returns the model object from file path"""
    
    if "kwyk" in path:
        from nobrainer.pediction import _get_pridictor
        model = _get_pridictor(path)
    else:
        from nobrainer.prediction import _get_model
        model = _get_model(path)
    return model
def get_repo(org, repo_url, repo_state):
    """
    Downloads the related repository into org/org_repo.
    org: str, organization name
    
    """
    repo_path = Path(__file__).resolve().parents[0] / org / "org_repo"
    if not repo_path.exists():
        p0 = sp.run(["git", "clone", repo_url, str(repo_path)], stdout=sp.PIPE,
                    stderr=sp.STDOUT ,text=True)
        print(p0.stdout)
        p1 = sp.run(["git", "checkout", repo_state], stdout=sp.PIPE,
                    stderr=sp.STDOUT ,text=True)
        print(p1.stdout)
        print(f"{org} repository is downloaded")
    else:
        print(f"{org} repository is available locally") | 
| 
	the-stack_106_32242 | 
	#!/usr/bin/env python
# encoding: utf-8
'''
@author: Jason Lee
@license: (C) Copyright @ Jason Lee
@contact: [email protected]
@file: quick_sort.py
@time: 2019/3/28 10:13
@desc:
'''
def quick_sort(data):
    if len(data) <= 1:
        return
    quick_sort_core(data, 0, len(data) - 1)
def quick_sort_core(data, start, end):
    if start >= end:
        return
    id = partition(data, start, end)
    if id > start:
        quick_sort_core(data, start, id - 1)
    if id < end:
        quick_sort_core(data, id + 1, end)
def partition(data, left, right):
    # compared with data[0], i starts from left, j starts from right
    i = left
    j = right
    while True:
        while data[i] <= data[left] and i < right:
            i += 1
        while data[j] > data[left] and j > left:
            j -= 1
        # stop when i meets j
        if i >= j:
            break
        data[i], data[j] = data[j], data[i]
    data[left], data[j] = data[j], data[left]
    return j
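# Illustrative walk-through (added for clarity): partition uses data[left] as the pivot.
# For data = [3, 4, 1, 2] with left=0, right=3 the loop swaps 4 and 2, and the final swap
# places the pivot 3 at index 2, leaving [1, 2, 3, 4] and returning 2.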
if __name__ == '__main__':
    data = [3,4,1,2,4,5,6]
    quick_sort(data)
    print(data) | 
| 
	the-stack_106_32243 | 
	# ipa.py - ipa transliteration module
# coding: utf-8
# The MIT License (MIT)
# Credit for IPA rules - Wikipedia, LionSlayer ...
# Copyright (c) 2018 Thura Hlaing
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import json
import pkgutil
class IPA():
    table = json.loads(
        pkgutil.get_data('myanmar', 'data/ipa.json').decode('utf-8')
    )
    vowels = 'aáeèioóôu'
    @classmethod
    def normalize(cls, ipa, prev):
        if prev:
            ipa = cls.add_ə(ipa, prev)
            ipa = cls.change_k_to_g(ipa, prev)
            ipa = cls.change_s_to_z(ipa, prev)
            ipa = cls.change_p_to_b(ipa, prev)
            ipa = cls.change_t_to_d(ipa, prev)
        return ipa
    @classmethod
    def add_ə(cls, ipa, prev):
        prev_len = 0
        if prev[-1] == 'ʰ' and len(prev) == 2:
            prev_len = 1
        if len(prev) == 1 or prev_len == 1:
            ipa = 'ə' + ipa
        return ipa
    @classmethod
    def change_k_to_g(cls, ipa, prev):
        # change k to g after vowel sound
        if ipa.startswith('k') and cls.ends_with_vowel(prev):
            ipa = 'g' + ipa[1:]
        return ipa
    @classmethod
    def change_s_to_z(cls, ipa, prev):
        # change s to z after vowel sound
        if ipa.startswith('s') and cls.ends_with_vowel(prev):
            ipa = 'z' + ipa[1:]
        return ipa
    @classmethod
    def change_p_to_b(cls, ipa, prev):
        # change pa to ba after vowel sound
        if ipa.startswith('p') and cls.has_vowel(ipa):
            ipa = 'b' + ipa[1:]
        return ipa
    @classmethod
    def change_t_to_d(cls, ipa, prev):
        # change t to d after vowel sound
        startswitht = ipa.startswith('t') and not ipa.startswith('th')
        if startswitht and cls.ends_with_vowel(prev):
            ipa = 'd' + ipa[1:]
        return ipa
    @classmethod
    def ends_with_vowel(cls, ipa):
        return ipa[-1] in 'aàeioun' or ipa.endswith('ng')
    @classmethod
    def has_vowel(cls, ipa):
        # if any of cls.vowels  exists in IPA
        return any(ipa.find(v) != -1 for v in cls.vowels)
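# Illustrative usage (values follow directly from the rules above):
#   IPA.normalize('ka', 'ma')  # -> 'ga' (k is voiced to g after a vowel-final syllable)
#   IPA.ends_with_vowel('ma')  # -> True
#   IPA.has_vowel('kʰ')        # -> False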
 | 
| 
	the-stack_106_32244 | 
	#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from airflow.timetables.simple import NullTimetable, OnceTimetable
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
from tests.test_utils.timetables import cron_timetable, delta_timetable
executor_config_pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="my-name"),
    spec=k8s.V1PodSpec(
        containers=[
            k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
        ]
    ),
)
serialized_simple_dag_ground_truth = {
    "__version": 1,
    "dag": {
        "default_args": {
            "__type": "dict",
            "__var": {
                "depends_on_past": False,
                "retries": 1,
                "retry_delay": {"__type": "timedelta", "__var": 300.0},
                "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                "sla": {"__type": "timedelta", "__var": 100.0},
            },
        },
        "start_date": 1564617600.0,
        '_task_group': {
            '_group_id': None,
            'prefix_group_id': True,
            'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
            'tooltip': '',
            'ui_color': 'CornflowerBlue',
            'ui_fgcolor': '#000',
            'upstream_group_ids': [],
            'downstream_group_ids': [],
            'upstream_task_ids': [],
            'downstream_task_ids': [],
        },
        "is_paused_upon_creation": False,
        "_dag_id": "simple_dag",
        "doc_md": "### DAG Tutorial Documentation",
        "fileloc": None,
        "tasks": [
            {
                "task_id": "bash_task",
                "owner": "airflow",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "ui_color": "#f0ede4",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command', 'env'],
                "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
                "bash_command": "echo {{ task.task_id }}",
                'label': 'bash_task',
                "_task_type": "BashOperator",
                "_task_module": "airflow.operators.bash",
                "pool": "default_pool",
                "executor_config": {
                    '__type': 'dict',
                    '__var': {
                        "pod_override": {
                            '__type': 'k8s.V1Pod',
                            '__var': PodGenerator.serialize_pod(executor_config_pod),
                        }
                    },
                },
                "doc_md": "### Task Tutorial Documentation",
            },
            {
                "task_id": "custom_task",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                "ui_color": "#fff",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command'],
                "template_fields_renderers": {},
                "_task_type": "CustomOperator",
                "_task_module": "tests.test_utils.mock_operators",
                "pool": "default_pool",
                'label': 'custom_task',
            },
        ],
        "timezone": "UTC",
        "_access_control": {
            "__type": "dict",
            "__var": {
                "test_role": {
                    "__type": "set",
                    "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
                }
            },
        },
        "edge_info": {},
        "dag_dependencies": [],
    },
}
ROOT_FOLDER = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
    """Loads DAGs from a module for test."""
    dagbag = DagBag(module_path)
    return dagbag.dags
def make_simple_dag():
    """Make very simple DAG to verify serialization result."""
    with DAG(
        dag_id='simple_dag',
        default_args={
            "retries": 1,
            "retry_delay": timedelta(minutes=5),
            "max_retry_delay": timedelta(minutes=10),
            "depends_on_past": False,
            "sla": timedelta(seconds=100),
        },
        start_date=datetime(2019, 8, 1),
        is_paused_upon_creation=False,
        access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
        doc_md="### DAG Tutorial Documentation",
    ) as dag:
        CustomOperator(task_id='custom_task')
        BashOperator(
            task_id='bash_task',
            bash_command='echo {{ task.task_id }}',
            owner='airflow',
            executor_config={"pod_override": executor_config_pod},
            doc_md="### Task Tutorial Documentation",
        )
        return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
    """Make DAGs with user defined macros and filters using locally defined methods.
    For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
    The examples here test:
        (1) functions can be successfully displayed on UI;
        (2) templates with function macros have been rendered before serialization.
    """
    def compute_next_execution_date(dag, execution_date):
        return dag.following_schedule(execution_date)
    default_args = {'start_date': datetime(2019, 7, 10)}
    dag = DAG(
        'user_defined_macro_filter_dag',
        default_args=default_args,
        user_defined_macros={
            'next_execution_date': compute_next_execution_date,
        },
        user_defined_filters={'hello': lambda name: f'Hello {name}'},
        catchup=False,
    )
    BashOperator(
        task_id='echo',
        bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
        dag=dag,
    )
    return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
    """Collects DAGs to test."""
    dags = {}
    dags.update(make_simple_dag())
    dags.update(make_user_defined_macro_filter_dag())
    if dag_folder:
        if isinstance(dag_folder, (list, tuple)):
            patterns = dag_folder
        else:
            patterns = [dag_folder]
    else:
        patterns = [
            "airflow/example_dags",
            "airflow/providers/*/example_dags",
            "airflow/providers/*/*/example_dags",
        ]
    for pattern in patterns:
        for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
            dags.update(make_example_dags(directory))
    # Filter subdags as they are stored in same row in Serialized Dag table
    dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
    return dags
def serialize_subprocess(queue, dag_folder):
    """Validate pickle in a subprocess."""
    dags = collect_dags(dag_folder)
    for dag in dags.values():
        queue.put(SerializedDAG.to_json(dag))
    queue.put(None)
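# Note (illustrative): the round trip exercised throughout these tests is simply
#   SerializedDAG.from_json(SerializedDAG.to_json(dag))
# together with the dict-based variant SerializedDAG.to_dict / SerializedDAG.from_dict.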
class TestStringifiedDAGs(unittest.TestCase):
    """Unit tests for stringified DAGs."""
    def setUp(self):
        super().setUp()
        BaseHook.get_connection = mock.Mock(
            return_value=Connection(
                extra=(
                    '{'
                    '"project_id": "mock", '
                    '"location": "mock", '
                    '"instance": "mock", '
                    '"database_type": "postgres", '
                    '"use_proxy": "False", '
                    '"use_ssl": "False"'
                    '}'
                )
            )
        )
        self.maxDiff = None
    def test_serialization(self):
        """Serialization and deserialization should work for every DAG and Operator."""
        dags = collect_dags()
        serialized_dags = {}
        for _, v in dags.items():
            dag = SerializedDAG.to_dict(v)
            SerializedDAG.validate_schema(dag)
            serialized_dags[v.dag_id] = dag
        # Compares with the ground truth of JSON string.
        self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
    def validate_serialized_dag(self, json_dag, ground_truth_dag):
        """Verify serialized DAGs match the ground truth."""
        assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
        json_dag['dag']['fileloc'] = None
        def sorted_serialized_dag(dag_dict: dict):
            """
            Sorts the "tasks" list and "access_control" permissions in the
            serialised dag python dictionary. This is needed as the order of
            items should not matter but assertEqual would fail if the order of
            items changes in the dag dictionary
            """
            dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
            dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
                dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
            )
            return dag_dict
        assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
    def test_deserialization_across_process(self):
        """A serialized DAG can be deserialized in another process."""
        # Since we need to parse the dags twice here (once in the subprocess,
        # and once here to get a DAG to compare to) we don't want to load all
        # dags.
        queue = multiprocessing.Queue()
        proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
        proc.daemon = True
        proc.start()
        stringified_dags = {}
        while True:
            v = queue.get()
            if v is None:
                break
            dag = SerializedDAG.from_json(v)
            assert isinstance(dag, DAG)
            stringified_dags[dag.dag_id] = dag
        dags = collect_dags("airflow/example_dags")
        assert set(stringified_dags.keys()) == set(dags.keys())
        # Verify deserialized DAGs.
        for dag_id in stringified_dags:
            self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
    def test_roundtrip_provider_example_dags(self):
        dags = collect_dags(
            [
                "airflow/providers/*/example_dags",
                "airflow/providers/*/*/example_dags",
            ]
        )
        # Verify deserialized DAGs.
        for dag in dags.values():
            serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
            self.validate_deserialized_dag(serialized_dag, dag)
    def validate_deserialized_dag(self, serialized_dag, dag):
        """
        Verify that all example DAGs work with DAG Serialization by
        checking fields between Serialized Dags & non-Serialized Dags
        """
        fields_to_check = dag.get_serialized_fields() - {
            # Doesn't implement __eq__ properly. Check manually
            'timezone',
            # Need to check fields in it, to exclude functions
            'default_args',
            "_task_group",
        }
        for field in fields_to_check:
            assert getattr(serialized_dag, field) == getattr(
                dag, field
            ), f'{dag.dag_id}.{field} does not match'
        if dag.default_args:
            for k, v in dag.default_args.items():
                if callable(v):
                    # Check we stored _something_.
                    assert k in serialized_dag.default_args
                else:
                    assert (
                        v == serialized_dag.default_args[k]
                    ), f'{dag.dag_id}.default_args[{k}] does not match'
        assert serialized_dag.timezone.name == dag.timezone.name
        for task_id in dag.task_ids:
            self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
        # Verify that the DAG object has 'full_filepath' attribute
        # and is equal to fileloc
        assert serialized_dag.full_filepath == dag.fileloc
    def validate_deserialized_task(
        self,
        serialized_task,
        task,
    ):
        """Verify non-airflow operators are casted to BaseOperator."""
        assert isinstance(serialized_task, SerializedBaseOperator)
        assert not isinstance(task, SerializedBaseOperator)
        assert isinstance(task, BaseOperator)
        fields_to_check = task.get_serialized_fields() - {
            # Checked separately
            '_task_type',
            'subdag',
            # Type is excluded, so don't check it
            '_log',
            # List vs tuple. Check separately
            'template_fields',
            # We store the string, real dag has the actual code
            'on_failure_callback',
            'on_success_callback',
            'on_retry_callback',
            # Checked separately
            'resources',
        }
        assert serialized_task.task_type == task.task_type
        assert set(serialized_task.template_fields) == set(task.template_fields)
        assert serialized_task.upstream_task_ids == task.upstream_task_ids
        assert serialized_task.downstream_task_ids == task.downstream_task_ids
        for field in fields_to_check:
            assert getattr(serialized_task, field) == getattr(
                task, field
            ), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
        if serialized_task.resources is None:
            assert task.resources is None or task.resources == []
        else:
            assert serialized_task.resources == task.resources
        # Check that for Deserialised task, task.subdag is None for all other Operators
        # except for the SubDagOperator where task.subdag is an instance of DAG object
        if task.task_type == "SubDagOperator":
            assert serialized_task.subdag is not None
            assert isinstance(serialized_task.subdag, DAG)
        else:
            assert serialized_task.subdag is None
    @parameterized.expand(
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
            (pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
        ]
    )
    def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
        dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
        BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_start_date or dag_start_date >= task_start_date:
            # If dag.start_date > task.start_date -> task.start_date=dag.start_date
            # because of the logic in dag.add_task()
            assert "start_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "start_date" in serialized_dag["dag"]["tasks"][0]
        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.start_date == expected_task_start_date
    def test_deserialization_with_dag_context(self):
        with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
            BaseOperator(task_id='simple_task')
            # should not raise RuntimeError: dictionary changed size during iteration
            SerializedDAG.to_dict(dag)
    @parameterized.expand(
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
            ),
        ]
    )
    def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
        dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
        BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_end_date or dag_end_date <= task_end_date:
            # If dag.end_date < task.end_date -> task.end_date=dag.end_date
            # because of the logic in dag.add_task()
            assert "end_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "end_date" in serialized_dag["dag"]["tasks"][0]
        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.end_date == expected_task_end_date
    @parameterized.expand(
        [
            (None, None, NullTimetable()),
            ("@weekly", "@weekly", cron_timetable("0 0 * * 0")),
            ("@once", "@once", OnceTimetable()),
            (
                {"__type": "timedelta", "__var": 86400.0},
                timedelta(days=1),
                delta_timetable(timedelta(days=1)),
            ),
        ]
    )
    def test_deserialization_schedule_interval(
        self,
        serialized_schedule_interval,
        expected_schedule_interval,
        expected_timetable,
    ):
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "schedule_interval": serialized_schedule_interval,
            },
        }
        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)
        assert dag.schedule_interval == expected_schedule_interval
        assert dag.timetable == expected_timetable
    @parameterized.expand(
        [
            (relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
            (relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
            # Every friday
            (relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
            # Every second friday
            (relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
        ]
    )
    def test_roundtrip_relativedelta(self, val, expected):
        serialized = SerializedDAG._serialize(val)
        assert serialized == expected
        round_tripped = SerializedDAG._deserialize(serialized)
        assert val == round_tripped
    @parameterized.expand(
        [
            (None, {}),
            ({"param_1": "value_1"}, {"param_1": "value_1"}),
        ]
    )
    def test_dag_params_roundtrip(self, val, expected_val):
        """
        Test that params work both on Serialized DAGs & Tasks
        """
        dag = DAG(dag_id='simple_dag', params=val)
        BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
        serialized_dag = SerializedDAG.to_dict(dag)
        if val:
            assert "params" in serialized_dag["dag"]
        else:
            assert "params" not in serialized_dag["dag"]
        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
        assert expected_val == deserialized_dag.params
        assert expected_val == deserialized_simple_task.params
    @parameterized.expand(
        [
            (None, {}),
            ({"param_1": "value_1"}, {"param_1": "value_1"}),
        ]
    )
    def test_task_params_roundtrip(self, val, expected_val):
        """
        Test that params work both on Serialized DAGs & Tasks
        """
        dag = DAG(dag_id='simple_dag')
        BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
        serialized_dag = SerializedDAG.to_dict(dag)
        if val:
            assert "params" in serialized_dag["dag"]["tasks"][0]
        else:
            assert "params" not in serialized_dag["dag"]["tasks"][0]
        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
        assert expected_val == deserialized_simple_task.params
    def test_extra_serialized_field_and_operator_links(self):
        """
        Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
        This test also depends on GoogleLink() being registered as a plugin
        in tests/plugins/test_plugin.py
        The function tests that if extra operator links are registered in a plugin
        via ``operator_extra_links`` and the same link is also defined on
        the Operator in ``BaseOperator.operator_extra_links``, the deserialized task
        exposes the correct extra link.
        """
        test_date = datetime(2019, 8, 1)
        dag = DAG(dag_id='simple_dag', start_date=test_date)
        CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
        serialized_dag = SerializedDAG.to_dict(dag)
        assert "bash_command" in serialized_dag["dag"]["tasks"][0]
        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert getattr(simple_task, "bash_command") == "true"
        #########################################################
        # Verify Operator Links work with Serialized Operator
        #########################################################
        # Check Serialized version of operator link only contains the inbuilt Op Link
        assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
            {'tests.test_utils.mock_operators.CustomOpLink': {}}
        ]
        # Test all the extra_links are set
        assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}
        ti = TaskInstance(task=simple_task, execution_date=test_date)
        ti.xcom_push('search_query', "dummy_value_1")
        # Test Deserialized inbuilt link
        custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
        assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link
        # Test Deserialized link registered via Airflow Plugin
        google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
        assert "https://www.google.com" == google_link_from_plugin
    def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
        """
        Assert that if an OperatorLink is neither registered via plugins nor an inbuilt Operator Link,
        the DAG can still be deserialized (no error is raised) and an error is only logged
        """
        class TaskStateLink(BaseOperatorLink):
            """OperatorLink not registered via Plugins nor a built-in OperatorLink"""
            name = 'My Link'
            def get_link(self, operator, dttm):
                return 'https://www.google.com'
        class MyOperator(BaseOperator):
            """Just a DummyOperator using above defined Extra Operator Link"""
            operator_extra_links = [TaskStateLink()]
            def execute(self, context):
                pass
        with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
            MyOperator(task_id='blah')
        serialized_dag = SerializedDAG.to_dict(dag)
        with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
            SerializedDAG.from_dict(serialized_dag)
            received_logs = log_output.output[0]
            expected_err_msg = (
                "Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
                "not registered"
            )
            assert expected_err_msg in received_logs
    def test_extra_serialized_field_and_multiple_operator_links(self):
        """
        Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
        This test also depends on GoogleLink() being registered as a plugin
        in tests/plugins/test_plugin.py
        The function tests that if extra operator links are registered in a plugin
        via ``operator_extra_links`` and the same link is also defined on
        the Operator in ``BaseOperator.operator_extra_links``, the deserialized task
        exposes the correct extra link.
        """
        test_date = datetime(2019, 8, 1)
        dag = DAG(dag_id='simple_dag', start_date=test_date)
        CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
        serialized_dag = SerializedDAG.to_dict(dag)
        assert "bash_command" in serialized_dag["dag"]["tasks"][0]
        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert getattr(simple_task, "bash_command") == ["echo", "true"]
        #########################################################
        # Verify Operator Links work with Serialized Operator
        #########################################################
        # Check Serialized version of operator link only contains the inbuilt Op Link
        assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
            {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
            {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
        ]
        # Test all the extra_links are set
        assert set(simple_task.extra_links) == {
            'BigQuery Console #1',
            'BigQuery Console #2',
            'airflow',
            'github',
            'google',
        }
        ti = TaskInstance(task=simple_task, execution_date=test_date)
        ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
        # Test Deserialized inbuilt link #1
        custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
        assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link
        # Test Deserialized inbuilt link #2
        custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
        assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link
        # Test Deserialized link registered via Airflow Plugin
        google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
        assert "https://www.google.com" == google_link_from_plugin
    class ClassWithCustomAttributes:
        """
        Class for testing purposes: allows creating objects with custom attributes in a single statement.
        """
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)
        def __str__(self):
            return f"{self.__class__.__name__}({str(self.__dict__)})"
        def __repr__(self):
            return self.__str__()
        def __eq__(self, other):
            return self.__dict__ == other.__dict__
        def __ne__(self, other):
            return not self.__eq__(other)
    @parameterized.expand(
        [
            (None, None),
            ([], []),
            ({}, {}),
            ("{{ task.task_id }}", "{{ task.task_id }}"),
            (["{{ task.task_id }}", "{{ task.task_id }}"]),
            ({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
            ({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
            (
                [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
                [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
            ),
            (
                {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
                {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
            ),
            (
                ClassWithCustomAttributes(
                    att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
                ),
                "ClassWithCustomAttributes("
                "{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
            ),
            (
                ClassWithCustomAttributes(
                    nested1=ClassWithCustomAttributes(
                        att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
                    ),
                    nested2=ClassWithCustomAttributes(
                        att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
                    ),
                    template_fields=["nested1"],
                ),
                "ClassWithCustomAttributes("
                "{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
                "'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
                "'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
                "'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
            ),
        ]
    )
    def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
        """
        Test that templated_fields exists for all Operators in Serialized DAG
        Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk, etc.)
        we want to check that non-"basic" objects are turned into strings after deserializing.
        """
        dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
        with dag:
            BashOperator(task_id="test", bash_command=templated_field)
        serialized_dag = SerializedDAG.to_dict(dag)
        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        deserialized_test_task = deserialized_dag.task_dict["test"]
        assert expected_field == getattr(deserialized_test_task, "bash_command")
    def test_dag_serialized_fields_with_schema(self):
        """
        Additional Properties are disabled on DAGs. This test verifies that all the
        keys in DAG.get_serialized_fields are listed in Schema definition.
        """
        dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
        # The parameters we add manually in Serialization needs to be ignored
        ignored_keys: set = {
            "is_subdag",
            "tasks",
            "has_on_success_callback",
            "has_on_failure_callback",
            "dag_dependencies",
        }
        keys_for_backwards_compat: set = {
            "_concurrency",
        }
        dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat
        assert set(DAG.get_serialized_fields()) == dag_params
    def test_operator_subclass_changing_base_defaults(self):
        assert (
            BaseOperator(task_id='dummy').do_xcom_push is True
        ), "Precondition check! If this fails the test won't make sense"
        class MyOperator(BaseOperator):
            def __init__(self, do_xcom_push=False, **kwargs):
                super().__init__(**kwargs)
                self.do_xcom_push = do_xcom_push
        op = MyOperator(task_id='dummy')
        assert op.do_xcom_push is False
        blob = SerializedBaseOperator.serialize_operator(op)
        serialized_op = SerializedBaseOperator.deserialize_operator(blob)
        assert serialized_op.do_xcom_push is False
    def test_no_new_fields_added_to_base_operator(self):
        """
        This test verifies that no new fields have been added to BaseOperator, and serves as a reminder
        that serialization support (and tests) must be added for any new field.
        """
        base_operator = BaseOperator(task_id="10")
        fields = base_operator.__dict__
        assert {
            '_BaseOperator__instantiated': True,
            '_dag': None,
            '_downstream_task_ids': set(),
            '_inlets': [],
            '_log': base_operator.log,
            '_outlets': [],
            '_upstream_task_ids': set(),
            'depends_on_past': False,
            'do_xcom_push': True,
            'doc': None,
            'doc_json': None,
            'doc_md': None,
            'doc_rst': None,
            'doc_yaml': None,
            'email': None,
            'email_on_failure': True,
            'email_on_retry': True,
            'end_date': None,
            'execution_timeout': None,
            'executor_config': {},
            'inlets': [],
            'label': '10',
            'max_retry_delay': None,
            'on_execute_callback': None,
            'on_failure_callback': None,
            'on_retry_callback': None,
            'on_success_callback': None,
            'outlets': [],
            'owner': 'airflow',
            'params': {},
            'pool': 'default_pool',
            'pool_slots': 1,
            'priority_weight': 1,
            'queue': 'default',
            'resources': None,
            'retries': 0,
            'retry_delay': timedelta(0, 300),
            'retry_exponential_backoff': False,
            'run_as_user': None,
            'sla': None,
            'start_date': None,
            'subdag': None,
            'task_concurrency': None,
            'task_id': '10',
            'trigger_rule': 'all_success',
            'wait_for_downstream': False,
            'weight_rule': 'downstream',
        } == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
 Some fields were added to the BaseOperator! Please add them to the list above and make sure that
 you add support for DAG serialization - you should add the field to
 `airflow/serialization/schema.json` - they should have correct type defined there.
 Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                         """
    def test_task_group_serialization(self):
        """
        Test TaskGroup serialization/deserialization.
        """
        from airflow.operators.dummy import DummyOperator
        from airflow.utils.task_group import TaskGroup
        execution_date = datetime(2020, 1, 1)
        with DAG("test_task_group_serialization", start_date=execution_date) as dag:
            task1 = DummyOperator(task_id="task1")
            with TaskGroup("group234") as group234:
                _ = DummyOperator(task_id="task2")
                with TaskGroup("group34") as group34:
                    _ = DummyOperator(task_id="task3")
                    _ = DummyOperator(task_id="task4")
            task5 = DummyOperator(task_id="task5")
            task1 >> group234
            group34 >> task5
        dag_dict = SerializedDAG.to_dict(dag)
        SerializedDAG.validate_schema(dag_dict)
        json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
        self.validate_deserialized_dag(json_dag, dag)
        serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
        assert serialized_dag.task_group.children
        assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
        def check_task_group(node):
            try:
                children = node.children.values()
            except AttributeError:
                # Round-trip serialization and check the result
                expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
                expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
                expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
                assert node
                assert SerializedBaseOperator.serialize_operator(node) == expected_dict
                return
            for child in children:
                check_task_group(child)
        check_task_group(serialized_dag.task_group)
    def test_edge_info_serialization(self):
        """
        Tests edge_info serialization/deserialization.
        """
        from airflow.operators.dummy import DummyOperator
        from airflow.utils.edgemodifier import Label
        with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
            task1 = DummyOperator(task_id="task1")
            task2 = DummyOperator(task_id="task2")
            task1 >> Label("test label") >> task2
        dag_dict = SerializedDAG.to_dict(dag)
        SerializedDAG.validate_schema(dag_dict)
        json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
        self.validate_deserialized_dag(json_dag, dag)
        serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
        assert serialized_dag.edge_info == dag.edge_info
    @parameterized.expand(
        [
            ("poke", False),
            ("reschedule", True),
        ]
    )
    def test_serialize_sensor(self, mode, expect_custom_deps):
        from airflow.sensors.base import BaseSensorOperator
        class DummySensor(BaseSensorOperator):
            def poke(self, context):
                return False
        op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
        blob = SerializedBaseOperator.serialize_operator(op)
        if expect_custom_deps:
            assert "deps" in blob
        else:
            assert "deps" not in blob
        serialized_op = SerializedBaseOperator.deserialize_operator(blob)
        assert op.deps == serialized_op.deps
    @parameterized.expand(
        [
            ({"on_success_callback": lambda x: print("hi")}, True),
            ({}, False),
        ]
    )
    def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
        """
        Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
        in the serialized JSON blob, and that dag.has_on_success_callback is set to True on deserialization.
        When the callback is not set, has_on_success_callback should not be stored in the serialized blob
        and so defaults to False on deserialization.
        """
        dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
        BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
        serialized_dag = SerializedDAG.to_dict(dag)
        if expected_value:
            assert "has_on_success_callback" in serialized_dag["dag"]
        else:
            assert "has_on_success_callback" not in serialized_dag["dag"]
        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        assert deserialized_dag.has_on_success_callback is expected_value
    @parameterized.expand(
        [
            ({"on_failure_callback": lambda x: print("hi")}, True),
            ({}, False),
        ]
    )
    def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
        """
        Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
        in the serialized JSON blob, and that dag.has_on_failure_callback is set to True on deserialization.
        When the callback is not set, has_on_failure_callback should not be stored in the serialized blob
        and so defaults to False on deserialization.
        """
        dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
        BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
        serialized_dag = SerializedDAG.to_dict(dag)
        if expected_value:
            assert "has_on_failure_callback" in serialized_dag["dag"]
        else:
            assert "has_on_failure_callback" not in serialized_dag["dag"]
        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        assert deserialized_dag.has_on_failure_callback is expected_value
    @parameterized.expand(
        [
            (
                ['task_1', 'task_5', 'task_2', 'task_4'],
                ['task_1', 'task_5', 'task_2', 'task_4'],
            ),
            (
                {'task_1', 'task_5', 'task_2', 'task_4'},
                ['task_1', 'task_2', 'task_4', 'task_5'],
            ),
            (
                ('task_1', 'task_5', 'task_2', 'task_4'),
                ['task_1', 'task_5', 'task_2', 'task_4'],
            ),
            (
                {
                    "staging_schema": [
                        {"key:": "foo", "value": "bar"},
                        {"key:": "this", "value": "that"},
                        "test_conf",
                    ]
                },
                {
                    "staging_schema": [
                        {"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
                        {
                            "__type": "dict",
                            "__var": {"key:": "this", "value": "that"},
                        },
                        "test_conf",
                    ]
                },
            ),
            (
                {"task3": "test3", "task2": "test2", "task1": "test1"},
                {"task1": "test1", "task2": "test2", "task3": "test3"},
            ),
            (
                ('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
                ['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
            ),
        ]
    )
    def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
        """Test Serialized Sets are sorted while list and tuple preserve order"""
        serialized_obj = SerializedDAG._serialize(object_to_serialized)
        if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
            serialized_obj = serialized_obj["__var"]
        assert serialized_obj == expected_output
def test_kubernetes_optional():
    """Serialisation / deserialisation continues to work without kubernetes installed"""
    def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
        if level == 0 and name.partition('.')[0] == 'kubernetes':
            raise ImportError("No module named 'kubernetes'")
        return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
    with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
        # load module from scratch, this does not replace any already imported
        # airflow.serialization.serialized_objects module in sys.modules
        spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        # if we got this far, the module did not try to load kubernetes, but
        # did it try to access airflow.kubernetes.*?
        imported_airflow = {
            c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
        }
        assert "kubernetes" not in imported_airflow
        # pod loading is not supported when kubernetes is not available
        pod_override = {
            '__type': 'k8s.V1Pod',
            '__var': PodGenerator.serialize_pod(executor_config_pod),
        }
        with pytest.raises(RuntimeError):
            module.BaseSerialization.from_dict(pod_override)
        # basic serialization should succeed
        module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
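
# These tests live in tests/serialization/test_dag_serialization.py (the module
# path appears above in the operator-link assertions). A typical local run in a
# standard Airflow dev checkout would look roughly like:
#
#   pytest tests/serialization/test_dag_serialization.py -k "operator_links or kubernetes_optional"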
 | 
| 
	the-stack_106_32245 | 
	try:
    from ub.modules.sql_helper import SESSION, BASE
except ImportError:
    raise AttributeError
from sqlalchemy import Column, String, UnicodeText
class bot_pm_ban(BASE):
    __tablename__ = "bot_pm_ban_sql"
    sender = Column(String(14), primary_key=True)
    def __init__(self, sender):
        self.sender = str(sender)
bot_pm_ban.__table__.create(checkfirst=True)
def is_botpmbanned(sender_id):
    try:
        return SESSION.query(bot_pm_ban).all()
    except BaseException:
        return None
    finally:
        SESSION.close()
def botban(sender):
    adder = bot_pm_ban(str(sender))
    SESSION.add(adder)
    SESSION.commit()
def botunban(sender):
    rem = SESSION.query(bot_pm_ban).get((str(sender)))
    if rem:
        SESSION.delete(rem)
        SESSION.commit()
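
# Minimal usage sketch (hypothetical handler code, not part of this module).
# is_botpmbanned() returns every ban row (or None on error), so callers are
# expected to check membership themselves:
#
#   rows = is_botpmbanned(event.sender_id) or []
#   if any(row.sender == str(event.sender_id) for row in rows):
#       return  # drop PMs from banned senders
#   botban(event.sender_id)    # add a ban
#   botunban(event.sender_id)  # lift it again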
 | 
| 
	the-stack_106_32247 | 
	import json
import tensorflow as tf
from tensorflow import keras
model = keras.models.load_model('model.h5')
with open('dictionary.txt', 'r') as file:
    diction = json.load(file)
def review_encode(s):
    encoded = []
    for word in s:
        if word.lower() in diction:
            encoded.append(diction[word.lower()])
        else:
            encoded.append(0)
    return encoded
with open('text.txt', encoding='utf-8') as f:
    for line in f.readlines():
        nline = line.replace(',', '').replace('.', '').replace('/', '').strip().split(' ')
        encode = review_encode(nline)
        encode = keras.preprocessing.sequence.pad_sequences([encode], padding='post', maxlen=200)
        predict = model.predict(encode)
        # print(line)
        # print(encode)
        for i in predict:
            for t in i:
                answer = t * 100
                print('%.3f' % answer + '%') | 
| 
	the-stack_106_32248 | 
	import threading,time
import queue
# li=[1,2,3,4,5]
# def pri():
#     while li:
#         a=li[-1]
#         print(a)
#         time.sleep(1)
#         try:
#             li.remove(a)
#         except Exception as e:
#             print('----',a,e)
#
# if __name__ == '__main__':
#     t1 = threading.Thread(target=pri, args=())
#     t1.start()
#     t2 = threading.Thread(target=pri, args=())
#     t2.start()
# q=queue.LifoQueue()
#
# q.put(34)
# q.put(56)
# q.put(12)
#
# # priority queue (the lowest-valued entry is retrieved first)
# q=queue.PriorityQueue()
# q.put([5,100])
# q.put([7,200])
# q.put([3,"hello"])
# q.put([4,{"name":"alex"}])
# if __name__ == '__main__':
#     while 1:
#         data = q.get()
#         print(data)
# from contextlib import contextmanager
#
#
# @contextmanager
# def make_context():
#     print('enter')
#     try:
#         yield "ok"
#     except RuntimeError as err:
#         print( 'error', err)
#     finally:
#         print('exit')
# if __name__ == '__main__':
#     with make_context() as value:
#      print(value)
# def consumer(name):
#     print("--->ready to eat baozi...")
#     while True:
#         new_baozi = yield
#         print("[%s] is eating baozi %s" % (name,new_baozi))
#         #time.sleep(1)
#
# def producer():
#
#     r = con.__next__()
#     r = con2.__next__()
#     n = 0
#     while 1:
#         time.sleep(1)
#         print("\033[32;1m[producer]\033[0m is making baozi %s and %s" %(n,n+1) )
#         con.send(n)
#         con2.send(n+1)
#
#         n +=2
#
#
# if __name__ == '__main__':
#     con = consumer("c1")
#     con2 = consumer("c2")
#     p = producer()
# from  greenlet import greenlet
#
#
# def test1():
#     print(12)
#     gr2.switch()
#     print(34)
#     gr2.switch()
#
#
# def test2():
#     print(56)
#     gr1.switch()
#     print(78)
#
#
# if __name__ == '__main__':
#     gr1 = greenlet(test1)
#     gr2 = greenlet(test2)
#     gr1.switch()
import gevent
from gevent import monkey

# Patch the standard library so requests' blocking sockets yield to the gevent
# hub; without this the spawned greenlets effectively run one after another.
# (In a real program monkey.patch_all() should be called before other imports.)
monkey.patch_all()

import requests, time
start=time.time()
def f(url):
    print('GET: %s' % url)
    resp =requests.get(url)
    data = resp.text
    print('%d bytes received from %s.' % (len(data), url))
gevent.joinall([
        gevent.spawn(f, 'https://www.python.org/'),
        gevent.spawn(f, 'https://www.yahoo.com/'),
        gevent.spawn(f, 'https://www.baidu.com/'),
        gevent.spawn(f, 'https://www.sina.com.cn/'),
])
print("cost time:",time.time()-start)
 | 
| 
	the-stack_106_32249 | 
	import pygame as pg
import random
from settings import *
from termcolor import colored
class Player(pg.sprite.Sprite):
    def __init__(self, game, x, y, 
                 health = PLAYER_HEALTH, 
                 damage = PLAYER_DAMAGE, 
                 armor = PLAYER_ARMOR, 
                 weapon = 'weapon1', 
                 keys = 0, 
                 potions = 0, 
                 books = 0, 
                 health_upgrade = 0, 
                 armor_upgrade = 0,
                 moves = 0,
                 max_health = PLAYER_HEALTH,
                 max_armor = PLAYER_ARMOR,
                 score = 0):
        self._layer = PLAYER_LAYER
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.player_img
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.rect.x = self.x
        self.rect.y = self.y
        try:
            self.weapon_img = game.item_images[weapon]
        except KeyError:
            self.weapon_img = weapon
        self.coins = int(self.game.load_data[0])
        self.health_upgrade = health_upgrade
        self.max_health = (health_upgrade * 20) + PLAYER_HEALTH
        self.max_armor = armor_upgrade + PLAYER_ARMOR
        self.armor_upgrade = armor_upgrade
        self.health = health
        self.damage = damage
        self.armor = armor
        self.keys = keys
        self.potions = potions
        self.books = books
        self.moves = moves
        self.score = score
    
    def move(self, dx = 0, dy = 0):
        if not self.collide(dx * TILESIZE, dy * TILESIZE):
            self.x += dx * TILESIZE
            self.y += dy * TILESIZE
            self.game.footstep.play()
            self.moves += 1
    def collide(self, dx = 0, dy = 0):
        for wall in self.game.walls:
            if wall.x == self.x + dx and wall.y == self.y + dy:
                self.game.sound.play()
                return True
        for mob in self.game.mobs:
            if mob.x == self.x + dx and mob.y == self.y + dy:
                mob.health -= self.damage
                return True
        for chest in self.game.chests:
            if chest.x == self.x + dx and chest.y == self.y + dy:
                if self.keys > 0:
                    chest.kill()
                    self.random_item(self.game, chest.x, chest.y)
                    self.keys -= 1
                    self.score += 6
                    return True
                self.game.sound.play()
                return True
        for interact in self.game.interacts:
            if interact.x == self.x + dx and interact.y == self.y + dy:
                print(colored(interact.text, 'blue'))
                self.game.sound.play()
                return True
        for travel in self.game.travels:
            if travel.x == self.x + dx and travel.y == self.y + dy:
                if travel.name == 'travel':
                    random_travel = random.choice(TRAVEL_LIST)
                    if random_travel == self.game.map_name[:-4]:
                        self.game.new(f'map.tmx', 
                                        self.health, 
                                        self.damage, 
                                        self.armor, 
                                        self.weapon_img, 
                                        self.keys, 
                                        self.potions, 
                                        self.books, 
                                        self.health_upgrade, 
                                        self.armor_upgrade, 
                                        self.moves,
                                        self.max_health,
                                        self.max_armor,
                                        self.score)
                    else:
                        self.game.new(f'{random_travel}.tmx', 
                                        self.health, 
                                        self.damage, 
                                        self.armor, 
                                        self.weapon_img, 
                                        self.keys, 
                                        self.potions, 
                                        self.books, 
                                        self.health_upgrade, 
                                        self.armor_upgrade,
                                        self.moves,
                                        self.max_health,
                                        self.max_armor,
                                        self.score)
                else:
                    self.game.new(f'{travel.name}.tmx', 
                                    self.health, self.damage, 
                                    self.armor, 
                                    self.weapon_img, 
                                    self.keys, 
                                    self.potions, 
                                    self.books, 
                                    self.health_upgrade, 
                                    self.armor_upgrade,
                                    self.moves,
                                    self.max_health,
                                    self.max_armor,
                                    self.score)
                self.game.run()
                return True
        return False
    def item_ontop(self, x, y):
        for item in self.game.items:
            if item.x == self.x and item.y == self.y:
                if item.type == 'heart' and self.health < self.max_health:
                    item.kill()
                    self.score += 2
                    self.add_health(ITEMS['heart_amount'])
                if item.type == 'coin':
                    item.kill()
                    self.score += 2
                    self.coins += 1
                if item.type == 'key':
                    item.kill()
                    self.score += 2
                    self.keys += 1
                if item.type == 'potion':
                    item.kill()
                    self.score += 2
                    self.potions += 1
                if item.type == 'book':
                    item.kill()
                    self.score += 2
                    self.books += 1
                if 'weapon' in item.type:
                    item.kill()
                    self.score += 2
                    x = str(item.type) + '_amount'
                    self.better_damage(item.type, ITEMS[x])
                if 'armor' in item.type:
                    item.kill()
                    self.score += 2
                    x = str(item.type) + '_amount'
                    self.add_armor(ITEMS[x])
    def random_item(self, game, x, y):
        item = random.choice(list(self.game.item_images))
        Item(self.game, x, y, item)
    def better_damage(self, img, amount):
        if self.damage < PLAYER_DAMAGE + amount:
            self.weapon_img = self.game.item_images[img]
            self.damage = PLAYER_DAMAGE + amount
    def add_armor(self, amount):
        self.armor += amount
        if self.armor > self.max_armor:
            self.armor = self.max_armor
    def add_health(self, amount):
        self.health += amount
        if self.health > self.max_health:
            self.health = self.max_health
    def update(self):
        self.rect.x = self.x
        self.rect.y = self.y
        # Pick up any item on the player's tile. item_ontop() returns None and
        # dx/dy are not defined in update(), so the old conditional move was dead code.
        self.item_ontop(self.x, self.y)
class Mob(pg.sprite.Sprite):
    def __init__(self, game, x, y, type):
        self._layer = MOB_LAYER
        self.groups = game.all_sprites, game.mobs
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.mob_images[type]
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.rect.x = x
        self.rect.y = y
        self.type = type
        if 'mob' in self.type:
            self.health = MOBS[str(self.type) + '_health']
            self.damage = MOBS[str(self.type) + '_damage']
    def collide(self, dx = 0, dy = 0):
        for wall in self.game.walls:
            if wall.x == self.x + dx and wall.y == self.y + dy:
                return True
        for chest in self.game.chests:
            if chest.x == self.x + dx and chest.y == self.y + dy:
                return True
        for mob in self.game.mobs:
            if mob.x == self.x + dx and mob.y == self.y + dy:
                return True
        for interact in self.game.interacts:
            if interact.x == self.x + dx and interact.y == self.y + dy:
                return True
        for travel in self.game.travels:
            if travel.x == self.x + dx and travel.y == self.y + dy:
                return True
        return False
    def player_collide(self, dx = 0, dy = 0):
        if self.game.player.x == self.x + dx and self.game.player.y == self.y + dy:
            if self.game.player.armor > 0:
                self.game.player.armor -= 1
            else:
                self.game.player.health -= self.damage
            return True
        return False
    def move_toward(self):
        dx = self.game.player.x - self.x
        dy = self.game.player.y - self.y
        d = abs(dx) + abs(dy)
        if dy < 0:
            if not self.move(dy = -1):
                self.move(dx = random.randint(-1, 1))
        elif dx < 0:
            if not self.move(dx = -1):
                self.move(dy = random.randint(-1, 1))
        elif dy > 0:
            if not self.move(dy = 1):
                self.move(dx = random.randint(-1, 1))
        elif dx > 0:
            if not self.move(dx = 1):
                self.move(dy = random.randint(-1, 1))
    def move(self, dx = 0, dy = 0):
        if not self.collide(dx * TILESIZE, dy * TILESIZE) and not self.player_collide(dx * TILESIZE, dy * TILESIZE):
            self.x += dx * TILESIZE
            self.y += dy * TILESIZE
            return True
        return False
    def check_item(self, x, y):
        for item in self.game.items:
            if item.x == self.x and item.y == self.y:
                return True
        return False
    def update(self):
        self.rect.x = self.x
        self.rect.y = self.y
        if self.health <= 0:
            self.kill()
            self.game.player.coins += 1
            self.game.player.score += 4
            rand = random.randint(1, 20)
            if rand == 1:
                Player.random_item(self, self.game, self.x, self.y)
            if not self.check_item(self.x, self.y):
                self.game.map_img.blit(self.game.skull_img, (self.x, self.y))
class Wall(pg.sprite.Sprite):
    def __init__(self, game, x, y):
        self.groups = game.all_sprites, game.walls
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.wall_img
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.rect.x = x
        self.rect.y = y
class Obstacle(pg.sprite.Sprite):
    def __init__(self, game, x, y, w, h):
        self.groups = game.walls
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.rect = pg.Rect(x, y, w, h)
        self.x = x
        self.y = y
class Item(pg.sprite.Sprite):
    def __init__(self, game, x, y, type):
        self._layer = ITEMS_LAYER
        self.groups = game.all_sprites, game.items
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.item_images[type]
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.rect.x = x
        self.rect.y = y
        self.type = type
class Chest(pg.sprite.Sprite):
    def __init__(self, game, x, y):
        self._layer = PLAYER_LAYER
        self.groups = game.all_sprites, game.chests
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.chest_img
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.rect.x = x
        self.rect.y = y
class Interact(pg.sprite.Sprite):
    def __init__(self, game, x, y, text):
        self._layer = WALL_LAYER
        self.groups = game.interacts
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.rect = pg.Rect(x, y, TILESIZE, TILESIZE)
        self.x = x
        self.y = y
        self.text = text
class Travel(pg.sprite.Sprite):
    def __init__(self, game, x, y, w, h, name):
        self.groups = game.travels
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.rect = pg.Rect(x, y, w, h)
        self.x = x
        self.y = y
        self.name = name | 
| 
	the-stack_106_32251 | 
	import joblib
import numpy as np
import seldon_core
from seldon_core.user_model import SeldonComponent
from typing import Dict, List, Union, Iterable
import os
import logging
import yaml
logger = logging.getLogger(__name__)
JOBLIB_FILE = "model.joblib"
class SKLearnServer(SeldonComponent):
    def __init__(self, model_uri: str = None, method: str = "predict_proba"):
        super().__init__()
        self.model_uri = model_uri
        self.method = method
        self.ready = False
        logger.info(f"Model uri: {self.model_uri}")
        logger.info(f"method: {self.method}")
        self.load()
    def load(self):
        logger.info("load")
        model_file = os.path.join(
            seldon_core.Storage.download(self.model_uri), JOBLIB_FILE
        )
        logger.info(f"model file: {model_file}")
        self._joblib = joblib.load(model_file)
        self.ready = True
    def predict(
        self, X: np.ndarray, names: Iterable[str], meta: Dict = None
    ) -> Union[np.ndarray, List, str, bytes]:
        try:
            if not self.ready:
                self.load()
            if self.method == "predict_proba":
                logger.info("Calling predict_proba")
                result = self._joblib.predict_proba(X)
            elif self.method == "decision_function":
                logger.info("Calling decision_function")
                result = self._joblib.decision_function(X)
            else:
                logger.info("Calling predict")
                result = self._joblib.predict(X)
            return result
        except Exception as ex:
            logging.exception("Exception during predict")
    def init_metadata(self):
        file_path = os.path.join(self.model_uri, "metadata.yaml")
        try:
            with open(file_path, "r") as f:
                return yaml.safe_load(f.read())
        except FileNotFoundError:
            logger.debug(f"metadata file {file_path} does not exist")
            return {}
        except yaml.YAMLError:
            logger.error(
                f"metadata file {file_path} present but does not contain valid yaml"
            )
            return {}
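
# Local smoke-test sketch (hypothetical model_uri; in a real deployment the
# seldon-core wrapper normally constructs this class from the server's
# parameters rather than by hand):
#
#   if __name__ == "__main__":
#       server = SKLearnServer(model_uri="gs://my-bucket/sklearn-iris", method="predict")
#       print(server.predict(np.array([[5.1, 3.5, 1.4, 0.2]]), names=[]))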
 | 
| 
	the-stack_106_32256 | 
	import os
import torch
import argparse
from dataset import Dataset
from utils import compute_F1, compute_exact_match
from torch.utils.data import DataLoader
from transformers import AdamW
from tqdm import tqdm
from trainer import train, valid
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, AdamW
from torch.utils.tensorboard import SummaryWriter
from knockknock import email_sender
import datetime
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
writer = SummaryWriter()
parser = argparse.ArgumentParser()
parser.add_argument('--patience' ,  type = int, default=3)
parser.add_argument('--batch_size' , type = int, default=8)
parser.add_argument('--max_epoch' ,  type = int, default=2)
parser.add_argument('--base_trained_model', type = str, default = 'bert-base-uncased', help = "pretrained model from 🤗")
parser.add_argument('--pretrained_model' , type = str,  help = 'pretrained model')
parser.add_argument('--gpu_number' , type = int,  default = 0, help = 'which GPU will you use?')
parser.add_argument('--debugging' , type = bool,  default = False, help = "Don't save file")
parser.add_argument('--log_file' , type = str,  default = f'logs/log_{now_time}.txt',)
parser.add_argument('--dev_path' ,  type = str,  default = '../data/MultiWOZ_2.1/dev_data.json')
parser.add_argument('--train_path' , type = str,  default = '../data/MultiWOZ_2.1/train_data.json')
parser.add_argument('--do_train' , default = True, help = 'do train or not', action=argparse.BooleanOptionalAction)
args = parser.parse_args()
def makedirs(path):
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
       
args = parser.parse_args()
# @email_sender(recipient_emails=["[email protected]"], sender_email="[email protected]")
def main():
    makedirs("./data"); makedirs("./logs"); makedirs("./model");
    
    
    # import pdb; pdb.set_trace()
    tokenizer = AutoTokenizer.from_pretrained(args.base_trained_model, use_fast=True)
    model = AutoModelForQuestionAnswering.from_pretrained(args.base_trained_model)
    train_dataset = Dataset(args.train_path, 'train', tokenizer, False)
    val_dataset = Dataset(args.dev_path, 'dev', tokenizer, False)
    train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True)
    dev_loader = DataLoader(val_dataset, args.batch_size, shuffle=True)
    optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01)
    log_file = open(args.log_file, 'w')
    device = torch.device(f'cuda:{args.gpu_number}' if torch.cuda.is_available() else 'cpu')
    torch.cuda.set_device(device) # change allocation of current GPU
    torch.cuda.empty_cache()
    if args.pretrained_model:
        print("use trained model")
        log_file.write("use trained model")
        model.load_state_dict(torch.load(args.pretrained_model))
    
    log_file.write(str(args))
    model.to(device)
    penalty = 0
    min_loss = float('inf')
    for epoch in range(args.max_epoch):
        print(f"Epoch : {epoch}")
        # if args.do_train:
            # train(model, train_loader, optimizer, device)
        pred_texts, ans_texts, loss = valid(model, dev_loader, device, tokenizer,log_file)
        
        EM, F1 = 0, 0
        for iter, (pred_text, ans_text) in enumerate(zip(pred_texts, ans_texts)):
            EM += compute_exact_match(pred_text, ans_text)
            F1 += compute_F1(pred_text, ans_text)
        
        print("Epoch : %d, EM : %.04f, F1 : %.04f, Loss : %.04f" % (epoch, EM/iter, F1/iter, loss))
        log_file.writelines("Epoch : %d, EM : %.04f, F1 : %.04f, Loss : %.04f" % (epoch, EM/iter, F1/iter, loss))
        writer.add_scalar("EM", EM/iter, epoch)
        writer.add_scalar("F1", F1/iter, epoch)
        writer.add_scalar("loss",loss, epoch)
        if loss < min_loss:
            print("New best")
            min_loss = loss
            penalty = 0
            if not args.debugging:
                torch.save(model.state_dict(), f"model/{now_time}.pt")  # now_time is module-level, not an arg
        else:
            penalty +=1
            if penalty>args.patience:
                print(f"early stopping at epoch {epoch}")
                break
    writer.close()
    log_file.close()
    
    
    return {'EM' : EM/iter, 'F1' : F1/iter}
    
if __name__ =="__main__":
    main()
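
# Example launch (the script name and data paths are placeholders; the argument
# names match the parser defined above):
#
#   python train_qa.py --batch_size 8 --max_epoch 2 --gpu_number 0 \
#       --train_path ../data/MultiWOZ_2.1/train_data.json \
#       --dev_path ../data/MultiWOZ_2.1/dev_data.json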
     | 
| 
	the-stack_106_32259 | 
	#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
)
from test_framework.messages import (
    COIN,
    tx_from_hex,
)
class TxnMallTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 3
        self.supports_cli = False
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def add_options(self, parser):
        parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
                            help="Test double-spend of 1-confirmed transaction")
        parser.add_argument("--segwit", dest="segwit", default=False, action="store_true",
                            help="Test behaviour with SegWit txn (which should fail)")
    def setup_network(self):
        # Start with split network:
        super().setup_network()
        self.disconnect_nodes(1, 2)
    def run_test(self):
        if self.options.segwit:
            output_type = "p2sh-segwit"
        else:
            output_type = "legacy"
        # All nodes should start with 1,250 CPU:
        starting_balance = 1250
        for i in range(3):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
        self.nodes[0].settxfee(.001)
        node0_address1 = self.nodes[0].getnewaddress(address_type=output_type)
        node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 1219)
        node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
        node0_address2 = self.nodes[0].getnewaddress(address_type=output_type)
        node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 29)
        node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
        assert_equal(self.nodes[0].getbalance(),
                     starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress()
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
        txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1, 1)
        clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"], "sequence": rawtx1["vin"][0]["sequence"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["address"]: rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["address"]: rawtx1["vout"][1]["value"]}
        clone_locktime = rawtx1["locktime"]
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
        # createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        clone_tx = tx_from_hex(clone_raw)
        if (rawtx1["vout"][0]["value"] == 40 and clone_tx.vout[0].nValue != 40*COIN or rawtx1["vout"][0]["value"] != 40 and clone_tx.vout[0].nValue == 40*COIN):
            (clone_tx.vout[0], clone_tx.vout[1]) = (clone_tx.vout[1], clone_tx.vout[0])
        # Use a different signature hash type to sign.  This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_tx.serialize().hex(), None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            self.sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50CPU for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
        if self.options.mine_block:
            expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(node0_tx1["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        if self.options.segwit:
            assert_equal(txid1, txid1_clone)
            return
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        self.connect_nodes(1, 2)
        self.nodes[2].sendrawtransaction(node0_tx2["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        self.sync_blocks()
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)
        # Check node0's total balance; should be same as before the clone, + 100 CPU for 2 matured,
        # less possible orphaned matured subsidy
        expected += 100
        if (self.options.mine_block):
            expected -= 50
        assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
    TxnMallTest().main()
 | 
| 
	the-stack_106_32260 | 
	import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import slicing.slicer as slicer
file_name = 'insurance.csv'
dataset = pd.read_csv(file_name)
attributes_amount = len(dataset.values[0])
# for now working with regression datasets, assuming that target attribute is the last one
# currently non-categorical features are not supported and should be binned
y = dataset.iloc[:, attributes_amount - 1:attributes_amount].values
# feature matrix: all columns except the target
x = dataset.iloc[:, 0:attributes_amount - 1].values
# list of numerical columns
non_categorical = [1, 3]
for row in x:
    for attribute in non_categorical:
        # <attribute - 2> as we already excluded from x id column
        row[attribute - 1] = int(row[attribute - 1] / 5)
# hot encoding of categorical features
enc = OneHotEncoder(handle_unknown='ignore')
x = enc.fit_transform(x).toarray()
complete_x = []
complete_y = []
counter = 0
all_features = enc.get_feature_names()
# train model on a whole dataset
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
for item in x_test:
    complete_x.append((counter, item))
    complete_y.append((counter, y_test[counter]))
    counter = counter + 1
x_size = counter
model = LinearRegression()
model.fit(x_train, y_train)
preds = (model.predict(x_test) - y_test) ** 2
f_l2 = sum(preds)/x_size
errors = []
counter = 0
for pred in preds:
    errors.append((counter, pred))
    counter = counter + 1
# alpha is size significance coefficient
# verbose option is for returning debug info while creating slices and printing it
# k is number of top-slices we want
# w is a weight of error function significance (1 - w) is a size significance propagated into optimization function
slicer.process(all_features, model, complete_x, f_l2, x_size, y_test, errors, debug=True, alpha=5, k=10,
               w=0.5, loss_type=0)
 | 
| 
	the-stack_106_32261 | 
	import tensorflow as tf
import numpy as np
import os
import json
# Spectral band names to read related GeoTIFF files
band_names = ['B01', 'B02', 'B03', 'B04', 'B05',
              'B06', 'B07', 'B08', 'B8A', 'B09', 'B11', 'B12']
def prep_example(bands, BigEarthNet_19_labels, BigEarthNet_19_labels_multi_hot, patch_name):
    return tf.train.Example(
            features=tf.train.Features(
                feature={
                    'B01': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B01']))),
                    'B02': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B02']))),
                    'B03': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B03']))),
                    'B04': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B04']))),
                    'B05': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B05']))),
                    'B06': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B06']))),
                    'B07': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B07']))),
                    'B08': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B08']))),
                    'B8A': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B8A']))),
                    'B09': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B09']))),
                    'B11': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B11']))),
                    'B12': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=np.ravel(bands['B12']))),
                    'BigEarthNet-19_labels': tf.train.Feature(
                        bytes_list=tf.train.BytesList(
                            value=[i.encode('utf-8') for i in BigEarthNet_19_labels])),
                    'BigEarthNet-19_labels_multi_hot': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=BigEarthNet_19_labels_multi_hot)),
                    'patch_name': tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[patch_name.encode('utf-8')]))
                }))
    
def create_split(root_folder, patch_names, TFRecord_writer, label_indices, GDAL_EXISTED, RASTERIO_EXISTED, UPDATE_JSON):
    label_conversion = label_indices['label_conversion']
    BigEarthNet_19_label_idx = {v: k for k, v in label_indices['BigEarthNet-19_labels'].items()}
    if GDAL_EXISTED:
        import gdal
    elif RASTERIO_EXISTED:
        import rasterio
    progress_bar = tf.contrib.keras.utils.Progbar(target = len(patch_names))
    for patch_idx, patch_name in enumerate(patch_names):
        patch_folder_path = os.path.join(root_folder, patch_name)
        bands = {}
        for band_name in band_names:
            # First finds related GeoTIFF path and reads values as an array
            band_path = os.path.join(
                patch_folder_path, patch_name + '_' + band_name + '.tif')
            if GDAL_EXISTED:
                band_ds = gdal.Open(band_path,  gdal.GA_ReadOnly)
                raster_band = band_ds.GetRasterBand(1)
                band_data = raster_band.ReadAsArray()
                bands[band_name] = np.array(band_data)
            elif RASTERIO_EXISTED:
                band_ds = rasterio.open(band_path)
                band_data = np.array(band_ds.read(1))
                bands[band_name] = np.array(band_data)
        
        original_labels_multi_hot = np.zeros(
            len(label_indices['original_labels'].keys()), dtype=int)
        BigEarthNet_19_labels_multi_hot = np.zeros(len(label_conversion),dtype=int)
        patch_json_path = os.path.join(
            patch_folder_path, patch_name + '_labels_metadata.json')
        with open(patch_json_path, 'rb') as f:
            patch_json = json.load(f)
        original_labels = patch_json['labels']
        for label in original_labels:
            original_labels_multi_hot[label_indices['original_labels'][label]] = 1
        for i in range(len(label_conversion)):
            BigEarthNet_19_labels_multi_hot[i] = (
                    np.sum(original_labels_multi_hot[label_conversion[i]]) > 0
                ).astype(int)
        BigEarthNet_19_labels = []
        for i in np.where(BigEarthNet_19_labels_multi_hot == 1)[0]:
            BigEarthNet_19_labels.append(BigEarthNet_19_label_idx[i])
        if UPDATE_JSON:
            patch_json['BigEarthNet_19_labels'] = BigEarthNet_19_labels
            with open(patch_json_path, 'w') as f:
                json.dump(patch_json, f)
        example = prep_example(
            bands, 
            BigEarthNet_19_labels,
            BigEarthNet_19_labels_multi_hot,
            patch_name
        )
        TFRecord_writer.write(example.SerializeToString())
        progress_bar.update(patch_idx)
def prep_tf_record_files(root_folder, out_folder, split_names, patch_names_list, label_indices, GDAL_EXISTED, RASTERIO_EXISTED, UPDATE_JSON):
    try:
        writer_list = []
        for split_name in split_names:
            writer_list.append(
                    tf.python_io.TFRecordWriter(os.path.join(
                        out_folder, split_name + '.tfrecord'))
                )
    except:
        print('ERROR: TFRecord writer is not able to write files')
        exit()
    for split_idx in range(len(patch_names_list)):
        print('INFO: creating the split of', split_names[split_idx], 'is started')
        create_split(
            root_folder, 
            patch_names_list[split_idx], 
            writer_list[split_idx],
            label_indices,
            GDAL_EXISTED, 
            RASTERIO_EXISTED, 
            UPDATE_JSON
            )
        writer_list[split_idx].close()
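
# Sketch of a typical invocation (values are placeholders; in the BigEarthNet
# preparation tooling, patch_names_list comes from the official train/val/test
# splits and label_indices from a label-indices JSON file):
#
#   prep_tf_record_files(
#       root_folder='/data/BigEarthNet-v1.0',
#       out_folder='/data/tfrecords',
#       split_names=['train', 'val', 'test'],
#       patch_names_list=[train_patches, val_patches, test_patches],
#       label_indices=label_indices,
#       GDAL_EXISTED=True, RASTERIO_EXISTED=False, UPDATE_JSON=False,
#   )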
         | 
| 
	the-stack_106_32262 | 
	import numpy as np
import pytest
from numpy.testing import assert_allclose
from robogym.envs.dactyl.full_perpendicular import make_env, make_simple_env
from robogym.utils import rotation
def test_cube_mass():
    env = make_env(constants=dict(randomize=False))
    sim = env.unwrapped.sim
    cube_id = sim.model.body_name2id("cube:middle")
    # The mass of the giiker cube is 90g
    assert_allclose(sim.model.body_subtreemass[cube_id], 0.09, atol=0.005)
@pytest.mark.parametrize(
    "goal_generation",
    [
        "face_curr",
        "face_free",
        "face_cube_solver",
        "unconstrained_cube_solver",
        "full_unconstrained",
        "release_cube_solver",
    ],
)
def test_goal_info(goal_generation):
    constants = {
        "goal_generation": goal_generation,
        "randomize_face_angles": False,
        "randomize": False,
    }
    # There is some small chance that cube can get into invalid state in simulation
    # which will cause cube solver to fail. Fixing the seed here to mitigate this
    # issue.
    env = make_env(constants=constants, starting_seed=12312)
    env.reset()
    _, _, goal_info = env.unwrapped.goal_info()
    assert "goal" in goal_info
    assert "goal_type" in goal_info["goal"]
def test_make_simple_env():
    env = make_simple_env(
        parameters={
            "simulation_params": dict(cube_appearance="vision", hide_target=True)
        }
    )
    env.reset()
    sim = env.sim  # there is no wrapper.
    sticker_geoms = [g for g in sim.model.geom_names if g.startswith("cube:sticker:")]
    assert len(sticker_geoms) == 9 * 6
def test_observe():
    # Test observation matches simulation state.
    env = make_simple_env()
    env.reset()
    simulation = env.mujoco_simulation
    obs = env.observe()
    qpos = simulation.qpos
    qpos[simulation.qpos_idxs["target_all_joints"]] = 0.0
    qvel = simulation.qvel
    qvel[simulation.qvel_idxs["target_all_joints"]] = 0.0
    true_obs = {
        "cube_pos": simulation.get_qpos("cube_position"),
        "cube_quat": rotation.quat_normalize(simulation.get_qpos("cube_rotation")),
        "hand_angle": simulation.get_qpos("hand_angle"),
        "fingertip_pos": simulation.shadow_hand.observe()
        .fingertip_positions()
        .flatten(),
        "qpos": qpos,
        "qvel": qvel,
    }
    for obs_key, true_val in true_obs.items():
        assert np.allclose(
            obs[obs_key], true_val
        ), f"Value for obs {obs_key} {obs[obs_key]} doesn't match true value {true_val}."
def test_informative_obs():
    WHITELIST = [
        # The position of the goal is zeroed
        "relative_goal_pos",
        "noisy_relative_goal_pos",
        "goal_pos",
        # Not all episodes end with a fall, i.e. it might be all zeros
        "fell_down",
    ]
    env = make_env(constants=dict(randomize=False, max_timesteps_per_goal=50))
    obs = env.reset()
    done = False
    all_obs = [obs]
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
        all_obs.append(obs)
    all_obs.append(env.reset())  # one more reset at the end
    # Collect all obs and index by key.
    keys = set(all_obs[0].keys())
    assert len(keys) > 0
    combined_obs_by_keys = {key: [] for key in keys}
    for obs in all_obs:
        assert set(obs.keys()) == keys
        for key in keys:
            combined_obs_by_keys[key].append(obs[key])
    # Make sure that none of the keys has all-constant obs.
    for key, obs in combined_obs_by_keys.items():
        assert len(obs) == len(all_obs)
        if key in WHITELIST:
            continue
        obs0 = obs[0]
        equals = [np.array_equal(obs0, obs_i) for obs_i in obs]
        # If obs0 is equal to all other obs, all obs are equal, i.e. the observation
        # contains no information whatsoever. This is usually bad (e.g. we had an issue
        # in the past where qpos was always set to all-zeros).
        assert not np.all(equals), "observations for {} are all equal to {}".format(
            key, obs0
        )
def test_min_episode_length():
    min_steps = 1000
    env_1 = make_env(constants=dict(min_episode_length=min_steps), starting_seed=12312)
    env_1.reset()
    # fix seed to avoid stochastic tests
    num_fallen = 0
    for _ in range(min_steps):
        o, r, d, i = env_1.step(env_1.action_space.sample())
        assert not d
        if i["fell_down"]:
            num_fallen += 1
    assert num_fallen > 0
    env_2 = make_env(constants=dict(min_episode_length=-1), starting_seed=12312)
    env_2.reset()
    # fix seed to avoid stochastic tests
    for t in range(min_steps):
        o, r, d, i = env_2.step(env_2.action_space.sample())
        if d:
            break
    assert t < min_steps - 1
 | 
| 
	the-stack_106_32263 | 
	import time
import electrumx.lib.util as util
def sessions_lines(data):
    '''A generator returning lines for a list of sessions.
    data is the return value of rpc_sessions().'''
    fmt = ('{:<6} {:<5} {:>17} {:>5} '
           '{:>7} {:>7} {:>5} {:>5} {:>7} '
           '{:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
    yield fmt.format('ID', 'Flags', 'Client', 'Proto',
                     'Cost', 'XCost', 'Reqs', 'Txs', 'Subs',
                     'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer')
    for (id_, flags, peer, client, proto, cost, extra_cost, reqs, txs_sent, subs,
         recv_count, recv_size, send_count, send_size, conn_time) in data:
        yield fmt.format(id_, flags, client, proto,
                         f'{int(cost):,d}',
                         f'{int(extra_cost):,d}',
                         f'{reqs:,d}',
                         f'{txs_sent:,d}',
                         f'{subs:,d}',
                         f'{recv_count:,d}',
                         f'{recv_size // 1024:,d}',
                         f'{send_count:,d}',
                         f'{send_size // 1024:,d}',
                         util.formatted_time(conn_time, sep=''), peer)
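# Hedged demo (never called): feeding sessions_lines a single hypothetical session
# tuple, in the field order unpacked by the loop above. All values are made up.
def _demo_sessions_lines():
    example_row = (
        1, 'S', '203.0.113.5:50001', 'electrum/4.0.9', '1.4',  # id, flags, peer, client, proto
        12.0, 0.0, 3, 1, 2,                                     # cost, extra cost, reqs, txs sent, subs
        40, 2048, 38, 4096, 65.0,                               # recv count/bytes, send count/bytes, conn time (s)
    )
    for line in sessions_lines([example_row]):
        print(line)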
def groups_lines(data):
    '''A generator returning lines for a list of groups.
    data is the return value of rpc_groups().'''
    fmt = ('{:<14} {:>9} {:>8} {:>8} {:>6} {:>6} {:>8} '
           '{:>7} {:>9} {:>7} {:>9}')
    yield fmt.format('Name', 'Sessions', 'Cost', 'Retained', 'Reqs', 'Txs', 'Subs',
                     'Recv', 'Recv KB', 'Sent', 'Sent KB')
    for (name, session_count, cost, retained_cost, reqs, txs_sent, subs,
         recv_count, recv_size, send_count, send_size) in data:
        yield fmt.format(name,
                         f'{session_count:,d}',
                         f'{int(cost):,d}',
                         f'{int(retained_cost):,d}',
                         f'{reqs:,d}',
                         f'{txs_sent:,d}',
                         f'{subs:,d}',
                         f'{recv_count:,d}',
                         f'{recv_size // 1024:,d}',
                         f'{send_count:,d}',
                         f'{send_size // 1024:,d}')
def peers_lines(data):
    '''A generator returning lines for a list of peers.
    data is the return value of rpc_peers().'''
    def time_fmt(t):
        if not t:
            return 'Never'
        return util.formatted_time(now - t)
    now = time.time()
    fmt = ('{:<62} {:<6} {:>5} {:>5} {:<17} {:>4} '
           '{:>4} {:>8} {:>11} {:>11} {:>5} {:>20} {:<15}')
    yield fmt.format('Host', 'Status', 'TCP', 'SSL', 'Server', 'Min',
                     'Max', 'Pruning', 'Last Good', 'Last Try',
                     'Tries', 'Source', 'IP Address')
    for item in data:
        features = item['features']
        hostname = item['host']
        host = features['hosts'][hostname]
        yield fmt.format(hostname[:62],
                         item['status'],
                         host.get('tcp_port') or '',
                         host.get('ssl_port') or '',
                         features['server_version'] or 'unknown',
                         features['protocol_min'],
                         features['protocol_max'],
                         features['pruning'] or '',
                         time_fmt(item['last_good']),
                         time_fmt(item['last_try']),
                         item['try_count'],
                         item['source'][:20],
                         item['ip_addr'] or '')
 | 
| 
	the-stack_106_32265 | 
	# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from contextlib import contextmanager
import copy
import enum
from functools import partial
import re
import unittest
import types
import warnings
import weakref
import functools
import itertools as it
from absl import logging
from absl.testing import absltest, parameterized
import numpy as np
import concurrent.futures
import jax
import jax.numpy as jnp
from jax import float0, jit, grad, device_put, jacfwd, jacrev, hessian
from jax import api, core, lax
from jax.core import Primitive
from jax.interpreters import ad
from jax.interpreters import xla
from jax.interpreters.sharded_jit import PartitionSpec as P
from jax.lib import xla_bridge as xb
from jax import test_util as jtu
from jax import tree_util
from jax import linear_util as lu
import jax._src.util
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class CPPJitTest(jtu.BufferDonationTestCase):
  """Shared tests between the Python and the C++ jax,jit implementations.
  Because the Python implementation supports more features, we need to have the
  Python tests that extend the C++ tests (and not the other way around).
  """
  @property
  def jit(self):
    # Right now, the CPP tests also test the Python code-path when jaxlib is
    # too old.
    # TODO(jblespiau,phawkins): Remove this when jaxlib has been released.
    # This is in the future, because we are making a breaking change to
    # Tensorflow.
    return jax.api._cpp_jit
  def test_jit_of_noncallable(self):
    self.assertRaisesRegex(TypeError, "Expected a callable value.*",
                           lambda: self.jit(3))
  def test_jit_of_generator(self):
    def gen(x):
      yield x
    self.assertRaisesRegex(TypeError,
                           "Expected a function, got a generator function.*",
                           lambda: self.jit(gen))
  @parameterized.parameters([
      # Integer support
      (1, 2, 3, 4, 5),
      # Numpy array support
      (
          np.asarray(1, np.int32),
          np.asarray(2, np.int32),
          np.asarray(3, np.int32),
          np.asarray(4, np.int32),
          np.asarray(5, np.int32),
      ),
  ])
  def test_jit_static_args(self, one, two, three, four, five):
    side = []
    # For the CPP jit, we need to clear the cache to prevent cache hits between
    # parameterized tests.
    if hasattr(self.jit, "cache_clear"):
      self.jit.cache_clear()
    def f(x, y, z, flag=False, flag2=False):
      del flag2  # unused
      assert flag
      side.append(None)
      return 100 * x + 10 * y + z
    f1 = self.jit(f, static_argnums=(3, 4))
    assert f1(one, two, three, True, False) == 123
    assert len(side) == 1
    assert f1(one, two, three, True, False) == 123
    assert len(side) == 1  # Obvious cache hit.
    assert f1(two, one, three, True, False) == 213
    assert len(side) == 1  # Should cache hit because same signature.
    assert f1(two, one, three, True, True) == 213
    assert len(side) == 2
    side[:] = []
    f2 = self.jit(f, static_argnums=(0, 2, 3, 4))
    assert f2(1, 2, 3, True, False) == 123
    assert len(side) == 1
    assert f2(1, 3, 3, True, False) == 133
    assert len(side) == 1
    assert f2(2, 2, 3, True, False) == 223
    assert len(side) == 2
    assert f2(2, 4, 3, True, False) == 243
    assert len(side) == 2
    assert f2(2, 4, 3, True, True) == 243
    assert len(side) == 3
    assert f2(2, 5, 3, True, True) == 253
    assert len(side) == 3
  def test_static_args_equality(self):
    class A():
      def __hash__(self):
        return 1
      def __eq__(self, other):
        return isinstance(other, A)
    side = []
    def f(x, static_arg):
      del static_arg
      side.append(None)
      return x * 100
    f1 = self.jit(f, static_argnums=(1,))
    self.assertEqual(f1(1, A()), 100)
    self.assertLen(side, 1)
    self.assertEqual(f1(1, A()), 100)
    self.assertLen(side, 1)
    if self.jit == jax.api._cpp_jit:
      self.assertEqual(f1._cpp_jitted_f._cache_size(), 1)
  @parameterized.parameters([
      (1, 2, 3),
      (
          np.asarray(1, np.int32),
          np.asarray(2, np.int32),
          np.asarray(3, np.int32),
      ),
  ])
  def test_jit_kwargs(self, one, two, three):
    side = []
    # For the CPP jit, we need to clear the cache to prevent cache hits between
    # parameterized tests.
    if hasattr(self.jit, "cache_clear"):
      self.jit.cache_clear()
    def f(x, y, z):
      print(x, y, z)
      side.append(None)
      return 100 * x + 10 * y + z
    f = self.jit(f)
    assert f(one, two, three) == 123
    assert len(side) == 1
    assert f(one, two, three) == 123
    assert len(side) == 1
    assert f(one, two, z=three) == 123
    assert len(side) == 2  # actually recompiles from kwarg
    assert f(one, two, z=three) == 123
    assert len(side) == 2  # but should still cache
    f(one, two, z=np.zeros(3))  # doesn't crash
    if config.x64_enabled:
      # In the above call, three is of a new type (int64), thus it should
      # trigger a new compilation.
      assert len(side) == 3
  def test_jit_device(self):
    device = xb.devices()[-1]
    x = self.jit(lambda x: x, device=device)(3.)
    self.assertIsInstance(x, xla.DeviceArray)
    self.assertEqual(x.device_buffer.device(), device)
  def test_complex_support(self):
    self.assertEqual(self.jit(lambda x: x + 1)(1 + 1j), 2 + 1j)
  def test_jit_with_many_args_works(self):
    @self.jit
    def f(args_list):
      return sum(args_list)
    self.assertEqual(f(list(range(500))), sum(range(500)))
  # Jit and Donate arguments
  def test_jit_donate_argnums_warning_raised(self):
    x = jnp.array([1.0, 2.0], jnp.float32)
    y = jnp.array([1, 2], jnp.int32)
    f = self.jit(lambda x, y: x.sum() + y.sum(), donate_argnums=(0, 1))
    with warnings.catch_warnings(record=True) as w:
      warnings.simplefilter("always")
      f(x, y)
      self.assertLen(w, 1)
      self.assertTrue(issubclass(w[-1].category, UserWarning))
      self.assertIn(
          "Some donated buffers were not usable: f32[2]{0}, s32[2]{0}",
          str(w[-1].message))
  @jtu.skip_on_devices("cpu")  # In/out aliasing not supported on CPU.
  def test_jit_donate_argnums_invalidates_input(self):
    # We can't just use `lambda x: x` because JAX simplifies this away to an
    # empty XLA computation.
    move = self.jit(lambda x: x + x - x, donate_argnums=0)
    x = jnp.ones([])
    y = move(x)
    self.assertDeleted(x)
    self.assertEqual(y, 1.)
  @jtu.skip_on_devices("cpu")  # In/out aliasing not supported on CPU.
  def test_jit_donate_argnums_static_argnums(self):
    jit_fun = self.jit(
        lambda a, b, c, d: ((a + b + c), (a + b + d)),
        static_argnums=(0, 1),
        donate_argnums=(2, 3))
    c = jax.device_put(jnp.array([1., 1.]))
    d = jax.device_put(jnp.array([1., 1., 1.]))
    e, f = jit_fun(1, 2, c, d)
    np.testing.assert_allclose(e, jnp.array([4., 4.]))
    np.testing.assert_allclose(f, jnp.array([4., 4., 4.]))
    self.assertDeleted(c)
    self.assertDeleted(d)
  @jtu.skip_on_devices("cpu")  # In/out aliasing not supported on CPU.
  def test_jnp_array_copy(self):
    # https://github.com/google/jax/issues/3412
    @partial(self.jit, donate_argnums=(0,))
    def _test(array):
      return array.at[0].set(77)
    x = jnp.asarray([0, 1])
    x_copy = jnp.array(x, copy=True)
    with warnings.catch_warnings():
      warnings.simplefilter("ignore")
      _test(x)  # donation
    # Gives: RuntimeError: Invalid argument: CopyToHostAsync() called on invalid buffer.
    print(x_copy)  # doesn't crash
  def test_jit_global_cache(self):
    def f(x):
      assert python_should_be_executing
      return x
    python_should_be_executing = True
    self.jit(f)(2)
    python_should_be_executing = False
    self.jit(f)(3)
  def test_jit_shallow_copy(self):
    def f(x):
      return copy.copy(x)
    self.jit(f)(1)
  def test_jit_deep_copy(self):
    def f(x):
      return copy.deepcopy(x)
    self.jit(f)(1)
  def test_disable_jit(self):
    effects = []
    @self.jit
    def f(x):
      effects.append(1)
      return x
    with api.disable_jit():
      f(2)
      f(2)
    assert len(effects) == 2
    f(2)
    f(2)
    assert len(effects) == 3
  def test_static_argnum_errors_on_keyword_arguments(self):
    f = self.jit(lambda x: x, static_argnums=0)
    msg = ("jitted function has static_argnums=(0,), donate_argnums=() but was "
           "called with only 0 positional arguments.")
    with self.assertRaisesRegex(ValueError, re.escape(msg)):
      f(x=4)
  def test_static_argnum_on_method(self):
    class A:
      @functools.partial(self.jit, static_argnums=(0,))
      def my_func_jit(self, x):
        return x+2
    A().my_func_jit(3)
  def test_static_argnum_on_static_method_is_not_supported(self):
    with self.assertRaisesRegex(TypeError, "Expected a callable value"):
      class A:
        @functools.partial(self.jit, static_argnums=(0,))
        @classmethod
        def my_classmethod_jit(cls, x):
          return x+2
  def test_classmethod_is_not_supported(self):
    with self.assertRaisesRegex(TypeError, "Expected a callable value"):
      class A:
        @functools.partial(self.jit)
        @staticmethod
        def my_staticmethod_jit(x):
          return x + 2
  def test_concurrent_jit(self):
    @self.jit
    def f(x):
      return x + x - 3.
    xs = [np.random.randn(i) for i in range(10)]
    with concurrent.futures.ThreadPoolExecutor() as executor:
      futures = [executor.submit(partial(f, x)) for x in xs]
      ys = [f.result() for f in futures]
    for x, y in zip(xs, ys):
      self.assertAllClose(x * 2 - 3., y)
  def test_trivial_computations(self):
    x = jnp.array([1, 2, 3])
    y = self.jit(lambda x: x)(x)
    self.assertIs(x, y)
    z1, z2 = self.jit(lambda x: (x, x))(x)
    self.assertIs(z1, z2)
    x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
    z1, z2, z3 = self.jit(lambda x, y: (y, 1, x))(x1, x2)
    self.assertIs(z1, x2)
    self.assertIs(z3, x1)
    self.assertEqual(z2, 1)
  def test_jit_bad_input(self):
    def f(x):
      return x
    self.assertRaisesRegex(
        TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
        lambda: self.jit(f)("foo"))
  def test_jit_on_all_devices(self):
    # Verifies we can run the same computation on every device present, even
    # if they are, for example, different models of GPU.
    data = np.random.rand(1000).astype(np.float32)
    f = self.jit(jnp.negative)
    for device in jax.local_devices():
      x = device_put(data, device=device)
      np.testing.assert_array_equal(-data, f(x))
  def test_jit_nested_donate_ignored(self):
    jit_fun = self.jit(lambda x: self.jit(lambda y: y**2, donate_argnums=0)(x))
    a = jax.device_put(jnp.array(1))
    # NOTE(mattjj): stopped raising error here and instead just ignored
    # with self.assertRaisesRegex(ValueError, "nested.*not supported"):
    #   jit_fun(a)
    jit_fun(a)  # doesn't crash
  def test_jit_reference_dropping(self):
    x = jnp.ones(10)
    f = (lambda x: lambda: x)(x)  # reference to x in f's closure
    g = self.jit(f)
    x = weakref.ref(x)      # no more strong ref to x in this scope
    assert x() is not None  # x is still around
    f()                     # f runs
    g()                     # g runs
    g()                     # g runs a second time
    del f                   # delete the raw callable
    assert x() is not None  # x is still around
    g()                     # g still runs
    del g                   # no more references to x
    assert x() is None      # x is gone
  def test_jit_raises_on_first_invocation_on_non_hashable_static_argnum(self):
    if self.jit != jax.api._python_jit:
      raise unittest.SkipTest("this test only applies to _python_jit")
    f = lambda x, y: x + 3
    jitted_f = self.jit(f, static_argnums=(1,))
    msg = ("Non-hashable static arguments are not supported, as this can lead "
           "to unexpected cache-misses. Static argument (index 1) of type "
           "<class 'numpy.ndarray'> for function <lambda> is non-hashable.")
    with self.assertRaisesRegex(ValueError, re.escape(msg)):
      jitted_f(1, np.asarray(1))
  def test_cpp_jit_raises_on_non_hashable_static_argnum(self):
    if self.jit != jax.api._cpp_jit:
      raise unittest.SkipTest("this test only applies to _cpp_jit")
    f = lambda x, y: x + 3
    jitted_f = jax.api._cpp_jit(f, static_argnums=[1])
    jitted_f(1, 1)
    msg = ("Non-hashable static arguments are not supported. An error occured "
           "while trying to hash an object of type <class 'numpy.ndarray'>, 1. "
           "The error was:\nTypeError: unhashable type: 'numpy.ndarray'")
    with self.assertRaisesRegex(ValueError, re.escape(msg)):
      jitted_f(1, np.asarray(1))
    class HashableWithoutEq:
      def __hash__(self):
        return 1
      def __eq__(self, other):
        raise NotImplementedError(
            "A Python error is as is, without stack trace")
    with self.assertRaisesRegex(
        ValueError,
        re.escape("static arguments should be comparable using __eq__")):
      jitted_f(1, HashableWithoutEq())
  def test_cpp_jitted_function_returns_PyBuffer(self):
    if self.jit != jax.api._cpp_jit:
      raise unittest.SkipTest("this test only applies to _cpp_jit")
    jitted_f = self.jit(lambda a: a + 1)
    jitted_f(1)
    self.assertIsInstance(jitted_f(2), xla._CppDeviceArray)
  @jtu.skip_on_devices("cpu")
  def test_explicit_backend(self):
    f = lambda x: x + 1
    jitted_f = jit(f, backend=jtu.device_under_test())
    jitted_f_cpu = jit(f, backend="cpu")
    result = jitted_f(1.)
    result_cpu = jitted_f_cpu(1.)
    self.assertEqual(result.device_buffer.platform(), jtu.device_under_test())
    self.assertEqual(result_cpu.device_buffer.platform(), "cpu")
  @jtu.skip_on_devices("cpu")
  def test_mismatched_nested_backends(self):
    @partial(jit, backend=jtu.device_under_test())
    def f(x):
      return jit(lambda x: x + 1, backend="cpu")(x)
    with self.assertRaisesRegex(
        ValueError,
        f"Outer-jit backend specification {jtu.device_under_test()} must match "
        f"explicit inner-jit backend specification cpu."):
      f(1.)
  def test_omnistaging(self):
    # See https://github.com/google/jax/issues/5206
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    key_list = [None]
    def init():
      key, subkey = jax.random.split(key_list[0])
      key_list[0] = key
      return jax.random.normal(subkey, ())
    key_list[0] = np.array([2384771982, 3928867769], dtype=np.uint32)
    init()
    self.jit(init)()
    self.assertIsInstance(key_list[0], core.Tracer)
  def test_jit_wrapped_attributes(self):
    def f(x: int) -> int:
      """docstring of f."""
      return x + 1
    f.some_value = 4
    jf = self.jit(f)
    for attr in ["doc", "name", "module", "qualname", "annotations"]:
      self.assertEqual(
        {attr: getattr(f, f"__{attr}__")},
        {attr: getattr(jf, f"__{attr}__")})
    self.assertEqual(f.some_value, jf.some_value)
class PythonJitTest(CPPJitTest):
  @property
  def jit(self):
    return jax.api._python_jit
class APITest(jtu.JaxTestCase):
  def test_grad_bad_input(self):
    def f(x):
      return x
    self.assertRaisesRegex(
        TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
        lambda: grad(f)("foo"))
  def test_grad_argnums(self):
    def f(x, y, z, flag=False):
      assert flag
      return 1.0 * x + 2.0 * y + 3.0 * z
    assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0
    assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0
    assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)
  def test_value_and_grad_argnums(self):
    def f(x, y, z, flag=False):
      assert flag
      return 1.0 * x + 2.0 * y + 3.0 * z
    y = f(1.0, 1.0, 1.0, flag=True)
    assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)
    assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)
    assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))
  def test_grad_of_jit(self):
    side = []
    @jit
    def f(x):
      side.append(None)
      return x * x
    assert grad(f)(1.0) == 2.0
    assert len(side) == 1
    assert grad(f)(2.0) == 4.0
    assert len(side) == 1
  def test_jit_of_grad(self):
    side = []
    @jit
    def f(x):
      side.append(None)
      return x * x
    g = jit(grad(f))
    assert g(1.0) == 2.0
    assert len(side) == 1
    assert g(2.0) == 4.0
    assert len(side) == 1
  def test_bad_input(self):
    def f(x):
      return x
    self.assertRaisesRegex(
      TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
      lambda: grad(f)("foo"))
    self.assertRaisesRegex(
      TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
      lambda: jit(f)("foo"))
  def test_grad_tuple_output(self):
    jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_grad_unit_output(self):
    jtu.check_raises(lambda: grad(lambda x: ())(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_grad_nonscalar_output(self):
    jtu.check_raises(lambda: grad(lambda x: x)(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_unwrapped_numpy(self):
    def f(x):
      return np.exp(x)
    with self.assertRaisesRegex(Exception, "The numpy.ndarray conversion .*"):
      grad(f)(np.zeros(3))
  def test_binop_mismatch(self):
    def f(x, y):
      return x + y
    jtu.check_raises(
        lambda: f(jnp.zeros(3), jnp.zeros(4)),
        TypeError,
        "add got incompatible shapes for broadcasting: (3,), (4,).")
    jtu.check_raises(
        lambda: grad(f)(np.zeros(3), np.zeros(4)),
        TypeError,
        "add got incompatible shapes for broadcasting: (3,), (4,).")
  def test_dot_mismatch(self):
    def f(x, y):
      return jnp.dot(x, y)
    self.assertRaisesRegex(
      TypeError, "Incompatible shapes for dot: got \\(3L?,\\) and \\(4L?,\\).",
      lambda: grad(f)(np.zeros(3), np.zeros(4)))
  def test_abstract_error_message(self):
    for castfun in [float, complex, int]:
      def f(x):
        return castfun(x)
      self.assertRaisesRegex(
          TypeError,
          f"[Tt]ry using `x.astype\\({castfun.__name__}\\)`",
          lambda: jit(f)(1.0))
  def test_switch_value_jit(self):
    def f(x):
      y = x > 0
      if y:
        return x
      else:
        return -x
    assert grad(f)(1.0) == 1.0
    assert grad(f)(-1.0) == -1.0
    with self.assertRaisesRegex(core.ConcretizationTypeError,
                                "Abstract tracer value"):
      jit(f)(1)
  def test_list_index_err(self):
    L = [1, 2, 3]
    def f(n):
      return L[n]
    assert jit(f, static_argnums=(0,))(0) == L[0]
    self.assertRaisesRegex(
        TypeError,
        r"The __index__\(\) method was called on the JAX Tracer object.*",
        lambda: jit(f)(0))
  def test_range_err(self):
    def f(x, n):
      for i in range(n):
        x = x + i
      return x
    assert jit(f, static_argnums=(1,))(0, 5) == 10
    self.assertRaisesRegex(
        TypeError,
        r"The __index__\(\) method was called on the JAX Tracer object.*",
        lambda: jit(f)(0, 5))
  def test_cast_int(self):
    f = lambda x: int(x)
    self.assertRaisesRegex(
        TypeError,
        "('(?:JaxprTracer|DynamicJaxprTracer)' object cannot be interpreted as an integer"
        "|Abstract tracer value encountered where concrete value is expected.*)", lambda: jit(f)(0))
  def test_casts(self):
    for castfun in [hex, oct]:
      f = lambda x: castfun(x)
      self.assertRaisesRegex(
          TypeError,
          r"The __index__\(\) method was called on the JAX Tracer object.*", lambda: jit(f)(0))
  def test_unimplemented_interpreter_rules(self):
    foo_p = Primitive('foo')
    def foo(x):
      return foo_p.bind(x)
    jtu.check_raises(lambda: foo(1.0), NotImplementedError,
                     "Evaluation rule for 'foo' not implemented")
    jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
                     "Abstract evaluation for 'foo' not implemented")
    jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
                     "Differentiation rule for 'foo' not implemented")
    foo_p.def_abstract_eval(lambda x: x)
    jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
                     "XLA translation rule for primitive 'foo' not found")
    foo_p.def_impl(lambda x: x)
    ad.defjvp(foo_p, lambda g, x: foo(g))
    jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
                     "Transpose rule (for reverse-mode differentiation) for 'foo' not implemented")
  def test_device_put_and_get(self):
    x = np.arange(12.).reshape((3, 4)).astype("float32")
    dx = api.device_put(x)
    self.assertIsInstance(dx, xla.DeviceArray)
    x2 = api.device_get(dx)
    self.assertIsInstance(x2, np.ndarray)
    assert np.all(x == x2)
    y = [x, (2 * x, 3 * x)]
    dy = api.device_put(y)
    y2 = api.device_get(dy)
    self.assertIsInstance(y2, list)
    self.assertIsInstance(y2[0], np.ndarray)
    assert np.all(y2[0] == x)
    self.assertIsInstance(y2[1], tuple)
    self.assertIsInstance(y2[1][0], np.ndarray)
    assert np.all(y2[1][0] == 2 * x)
    self.assertIsInstance(y2[1][1], np.ndarray)
    assert np.all(y2[1][1] == 3 * x)
  def test_device_get_scalar(self):
    x = np.arange(12.).reshape((3, 4)).astype("float32")
    x = api.device_put(x)
    self.assertIsInstance(x, xla.DeviceArray)
    y = [x, 2]
    y2 = api.device_get(y)
    self.assertIsInstance(y2, list)
    self.assertIsInstance(y2[0], np.ndarray)
    assert np.all(y2[0] == x)
    self.assertIsInstance(y2[1], int)
    self.assertEqual(y2[1], 2)
  @parameterized.parameters([(3,)], [(2, 0)])
  def test_device_put_across_devices(self, shape):
    if len(api.local_devices()) < 2:
      raise unittest.SkipTest("this test requires multiple devices")
    d1, d2 = api.local_devices()[:2]
    data = np.random.randn(*shape).astype(np.float32)
    x = api.device_put(data, device=d1)
    self.assertEqual(x.device_buffer.device(), d1)
    y = api.device_put(x, device=d2)
    self.assertEqual(y.device_buffer.device(), d2)
    np.testing.assert_array_equal(data, np.array(y))
    # Make sure these don't crash
    api.device_put(x)
    api.device_put(y)
  @jtu.skip_on_devices("cpu")
  def test_device_put_across_platforms(self):
    default_device = jax.devices()[0]
    cpu_device = jax.devices("cpu")[0]
    np_arr = np.array([1,2,3])
    scalar = 1
    device_arr = jnp.array([1,2,3])
    assert device_arr.device_buffer.device() is default_device
    for val in [np_arr, device_arr, scalar]:
      x = api.device_put(val, device=cpu_device)
      self.assertEqual(x.device_buffer.device(), cpu_device)
  @jtu.skip_on_devices("tpu")
  def test_jacobian(self):
    R = np.random.RandomState(0).randn
    A = R(4, 3)
    x = R(3)
    f = lambda x: jnp.dot(A, x)
    assert np.allclose(jacfwd(f)(x), A)
    assert np.allclose(jacrev(f)(x), A)
    f = lambda x: jnp.tanh(jnp.dot(A, x))
    assert np.allclose(jacfwd(f)(x), jacrev(f)(x))
  @jtu.skip_on_devices("tpu")
  def test_hessian(self):
    R = np.random.RandomState(0).randn
    A = R(4, 4)
    x = R(4)
    f = lambda x: jnp.dot(x, jnp.dot(A, x))
    assert np.allclose(hessian(f)(x), A + A.T)
  def test_std_basis(self):
    basis = api._std_basis(jnp.zeros(3))
    assert getattr(basis, "shape", None) == (3, 3)
    assert np.allclose(basis, np.eye(3))
    basis = api._std_basis(jnp.zeros((3, 3)))
    assert getattr(basis, "shape", None) == (9, 3, 3)
    assert np.allclose(basis, np.eye(9).reshape(9, 3, 3))
    basis = api._std_basis([0., (jnp.zeros(3), jnp.zeros((3, 4)))])
    assert isinstance(basis, list) and len(basis) == 2
    assert getattr(basis[0], "shape", None) == (16,)
    assert isinstance(basis[1], tuple) and len(basis[1]) == 2
    assert getattr(basis[1][0], "shape", None) == (16, 3)
    assert getattr(basis[1][1], "shape", None) == (16, 3, 4)
  @jtu.skip_on_devices("tpu")
  def test_jacobian_on_pytrees(self):
    for jacfun in [jacfwd, jacrev]:
      ans = jacfun(lambda x, y: (x, y))(0., 1.)
      expected = (1., 0.)
      self.assertAllClose(ans, expected, check_dtypes=False)
      ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)
      expected = (0., 1.)
      self.assertAllClose(ans, expected, check_dtypes=False)
      ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)
      expected = ((1., 0.),
                  (0., 1.),)
      self.assertAllClose(ans, expected, check_dtypes=False)
      ans = jacfun(lambda x: x[:2])((1., 2., 3.))
      expected = ((1., 0., 0.),
                  (0., 1., 0.))
      self.assertAllClose(ans, expected, check_dtypes=False)
      R = np.random.RandomState(0).randn
      x = R(2)
      y = R(3)
      ans = jacfun(lambda x, y: {'x': x, 'xy': jnp.outer(x, y)})(x, y)
      expected = {'x': np.eye(2),
                  'xy': np.kron(np.eye(2), y[:, None]).reshape(2, 3, 2)}
      self.assertAllClose(ans, expected, check_dtypes=False)
  @jtu.skip_on_devices("tpu")
  def test_hessian_on_pytrees(self):
    ans = hessian(lambda x: jnp.array(x)**2)((1., 2.))
    expected = ((np.array([2., 0.]), np.array([0., 0.])),
                (np.array([0., 0.]), np.array([0., 2.])))
    self.assertAllClose(ans, expected, check_dtypes=False)
  @jtu.skip_on_devices("tpu")
  def test_issue1372(self):
    def quad(x):
      return jnp.dot(x, x)
    def f(x, u):
      return quad(x) + quad(u)
    x, u = jnp.ones(5), jnp.ones(2)
    rev = jacrev
    fwd = jacfwd
    # Diagonal entries
    self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))
    # Off-diagonal entries by reverse-mode on the outside
    self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))
    self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))
    # Off-diagonal entries by forward-mode on the outside
    self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))
    self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))
  def test_large_device_constant(self):
    ans = jit(lambda x: 2 * x)(jnp.ones(int(2e6)))  # doesn't crash
    self.assertAllClose(ans, np.ones(int(2e6)) * 2., check_dtypes=False)
  def test_grad_and_aux_basic(self):
    g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.)
    self.assertAllClose(g, grad(lambda x: x**3)(3.))
    self.assertAllClose(aux, [9.], check_dtypes=False)
  def test_grad_and_aux_error(self):
    with self.assertRaisesRegex(TypeError, "two-element tuple"):
      grad(lambda x: (1, 2, 3), has_aux=True)(1.)
    with self.assertRaisesRegex(TypeError, "two-element tuple"):
      grad(lambda x: x, has_aux=True)(1.)
    with self.assertRaisesRegex(TypeError, "two-element tuple"):
      grad(lambda x: (x,), has_aux=True)(1.)
  def test_grad_and_aux_nested(self):
    def f(x):
      g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0]
    f2 = lambda x: x**3
    self.assertEqual(grad(f)(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
    def f(x):
      g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0] * jnp.sin(x)
    f2 = lambda x: x**3 * jnp.sin(x)
    self.assertEqual(grad(f)(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
  def test_grad_and_aux_constant(self):
    g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.)
    self.assertEqual(g, grad(lambda x: x**3)(4.))
    self.assertEqual(aux, [4.])
    g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.)
    self.assertEqual(g, grad(lambda x: x**3)(4.))
    self.assertEqual(aux, [4.**2, 4.])
  def test_grad_and_aux_no_tracers(self):
    # see https://github.com/google/jax/issues/1950
    def f(x):
      aux = dict(identity=x, p1=x+1)
      return x ** 2, aux
    _, aux = jax.grad(f, has_aux=True)(3.)
    self.assertIsInstance(aux, dict)
    for val in aux.values():
      self.assertNotIsInstance(val, core.Tracer)
  def test_jvp_mismatched_arguments(self):
    self.assertRaisesRegex(
      TypeError,
      ("primal and tangent arguments to jax.jvp must have the same tree "
       "structure"),
      lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), ()))
    # Primals and tangents must both be tuples or both be lists.
    self.assertRaisesRegex(
      TypeError,
      ("primal and tangent arguments to jax.jvp must have the same tree "
       "structure"),
      lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), [np.float32(2)]))
    self.assertRaisesRegex(
      TypeError,
      "primal and tangent arguments to jax.jvp do not match.",
      lambda: api.jvp(lambda x: -x, (np.float16(2),), (np.float32(4),)))
    # If primals and tangents are not of the same shape then raise error
    fun = lambda x: x+1
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (jnp.array([1.,2.,3.,4.]),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.float32(10.),), (jnp.array([1.,2.,3.], dtype=jnp.float32),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.], dtype=jnp.float32),), (jnp.float32(20.),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (20.,))
  def test_jvp_non_tuple_arguments(self):
    def f(x, y): return x + y
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.",
        lambda: api.jvp(f, 0., (1.,)))
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.",
        lambda: api.jvp(f, (0.,), np.array([1., 2.])))
  def test_vjp_mismatched_arguments(self):
    _, pullback = api.vjp(lambda x, y: x * y, np.float32(3), np.float32(4))
    self.assertRaisesRegex(
      TypeError,
      "Tree structure of cotangent input.*does not match",
      lambda: pullback((np.float32(7), np.float32(100))))
    self.assertRaisesRegex(
      TypeError,
      "Type of cotangent input to vjp pullback.*is not the expected tangent type",
      lambda: pullback((np.float16(42))))
  def test_jvp_jit_cached(self):
    """Bug in caching in presence of JVP and JIT."""
    def func(x):
      def inner(y):
        return y * x
      # Must have two calls to the inner jit (the second one hits the cache)
      res1 = api.jit(inner)(4.)
      res2 = api.jit(inner)(5.)
      return res1 + res2
    self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,)))
  def test_linear_transpose_abstract(self):
    x = types.SimpleNamespace(shape=(3,), dtype=np.float32)
    y = jnp.arange(3, dtype=np.float32)
    transpose_fun = api.linear_transpose(lambda x: 2 * x, x)
    z, = transpose_fun(y)
    self.assertArraysEqual(2 * y, z, check_dtypes=True)
  def test_linear_transpose_error(self):
    with self.assertRaisesRegex(
        TypeError, "linear_transpose only supports float and complex inputs"):
      api.linear_transpose(lambda x: x, 1)
    transpose_fun = api.linear_transpose(lambda x: [x, x], 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent tree does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: jnp.stack([x, x]), 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: 1j * x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1j)
  def test_linear_transpose_complex(self):
    f = lambda x: (1 + 2j) * x
    transpose = api.linear_transpose(f, 1j)
    actual, = transpose(3 + 4j)
    expected = -5 + 10j
    self.assertEqual(actual, expected)
  def test_complex_grad_raises_error(self):
    self.assertRaises(TypeError, lambda: grad(lambda x: jnp.sin(x))(1 + 2j))
  def test_holomorphic_grad(self):
    out = grad(lambda x: jnp.sin(x), holomorphic=True)(1 + 2j)
    expected = 2.0327230070196656 - 3.0518977991518j
    self.assertAllClose(out, expected, check_dtypes=False)
  def test_nonholomorphic_grad(self):
    zs = 0.5j * np.arange(5) + np.arange(5)
    def f(z):
      return jnp.sum(jnp.cos(jnp.abs(z)))
    ans = grad(f)(zs)
    expected = np.array([ 0.        +0.j,
                          -0.80430663+0.40215331j,
                          -0.70368982+0.35184491j,
                           0.1886467 -0.09432335j,
                           0.86873727-0.43436864j])
    self.assertAllClose(ans, expected, check_dtypes=False,
                        atol=jtu.default_gradient_tolerance,
                        rtol=jtu.default_gradient_tolerance)
  def test_complex_output_jacrev_raises_error(self):
    self.assertRaises(TypeError, lambda: jacrev(lambda x: jnp.sin(x))(1 + 2j))
  def test_nonholomorphic_jacrev(self):
    # code based on https://github.com/google/jax/issues/603
    zs = 0.5j * np.arange(5) + np.arange(5)
    def f(z):
      return jnp.cos(jnp.linalg.norm(2 * z))
    ans = jacrev(f)(zs)
    expected = grad(f)(zs)
    self.assertAllClose(ans, expected)
  def test_complex_input_jacfwd_raises_error(self):
    self.assertRaises(TypeError, lambda: jacfwd(lambda x: jnp.sin(x))(1 + 2j))
  def test_legacy_devicearray_repr(self):
    dx = device_put(3.)
    str(dx.item())  # doesn't crash
  def test_devicearray_repr(self):
    x = device_put(jnp.zeros(3))
    self.assertIsInstance(x, xla.DeviceArray)
    repr(x)  # doesn't crash
    x = device_put(jnp.ones(3) + 1j * jnp.ones(3))
    self.assertIsInstance(x, xla.DeviceArray)
    repr(x)  # doesn't crash
  def test_devicearray_delete(self):
    x = device_put(1.)
    x.delete()
    self.assertRaisesRegex(RuntimeError, "DeviceArray has been deleted.",
                           lambda: repr(x))
  def test_devicearray_block_until_ready(self):
    x = device_put(1.)
    y = x.block_until_ready()
    # Tests mostly that block_until_ready() does not produce an error.
    self.assertTrue(y is x)
  def test_devicearray_weakref_friendly(self):
    x = device_put(1.)
    y = weakref.ref(x)
    self.assertEqual(y(), 1.)
    del x
    self.assertIsNone(y())
  def test_namedtuple_transparency(self):
    # See https://github.com/google/jax/issues/446
    Point = collections.namedtuple("Point", ["x", "y"])
    def f(pt):
      return jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    pt = Point(1., 2.)
    f(pt)  # doesn't crash
    g = api.grad(f)(pt)
    self.assertIsInstance(g, Point)
    f_jit = api.jit(f)
    self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)
  def test_namedtuple_subclass_transparency(self):
    # See https://github.com/google/jax/issues/806
    Point = collections.namedtuple("Point", ["x", "y"])
    class ZeroPoint(Point):
      def is_zero(self):
        return (self.x == 0) and (self.y == 0)
    pt = ZeroPoint(0., 0.)
    def f(pt):
      return 0. if pt.is_zero() else jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    f(pt)  # doesn't crash
    _ = api.grad(f)(pt)
    self.assertIsInstance(pt, ZeroPoint)
  @parameterized.parameters(1, 2, 3)
  def test_shape_dtype_struct(self, i):
    s = api.ShapeDtypeStruct(shape=(i, 2, 3), dtype=jnp.float32)
    self.assertEqual(s.shape, (i, 2, 3))
    self.assertEqual(s.dtype, jnp.float32)
    self.assertEqual(s.ndim, 3)
    self.assertEqual(s.size, i * 2 * 3)
    self.assertLen(s, i)
    for f in (str, repr):
      self.assertEqual(
          f(s), "ShapeDtypeStruct(shape=({}, 2, 3), dtype=float32)".format(i))
  def test_shape_dtype_struct_scalar(self):
    s = api.ShapeDtypeStruct(shape=(), dtype=jnp.float32)
    self.assertEmpty(s.shape)
    self.assertEqual(s.size, 1)
    self.assertEqual(s.ndim, 0)
    with self.assertRaisesRegex(TypeError, "len[(][)] of unsized object"):
      _ = len(s)
  def test_eval_shape(self):
    def fun(x, y):
      return jnp.tanh(jnp.dot(x, y) + 3.)
    x = jnp.ones((2, 3))
    y = jnp.ones((3, 4))
    out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.shape, (2, 4))
  def test_eval_shape_constants(self):
    def fun():
      x = jnp.ones((2, 3))
      y = jnp.ones((3, 4))
      return jnp.tanh(jnp.dot(x, y) + 3.)
    out_shape = api.eval_shape(fun)
    self.assertEqual(out_shape.shape, (2, 4))
  def test_eval_shape_tuple_unpacking(self):
    def fun(x, y):
      a, b = x
      return a + b + y
    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.shape, (2,))
  def test_eval_shape_tuple_itemgetting(self):
    def fun(x, y):
      return x[0] + x[1] + y
    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.shape, (2,))
  def test_eval_shape_output_dict(self):
    def fun(x, y):
      return {'hi': x[0] + x[1] + y}
    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)
    out_shape = tree_util.tree_map(np.shape, out_shape)
    self.assertEqual(out_shape, {'hi': (2,)})
  def test_eval_shape_shape_error(self):
    def fun(x, y):
      return jnp.tanh(jnp.dot(x, y) + 3.)
    x = jnp.ones((3, 3))
    y = jnp.ones((4, 4))
    self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))
  def test_eval_shape_duck_typing(self):
    def fun(A, b, x):
      return jnp.dot(A, x) + b
    class MyArgArray(object):
      def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype
    A = MyArgArray((3, 4), jnp.float32)
    b = MyArgArray((5,), jnp.float32)
    x = MyArgArray((4, 5), jnp.float32)
    out_shape = api.eval_shape(fun, A, b, x)
    self.assertEqual(out_shape.shape, (3, 5))
  def test_eval_shape_duck_typing2(self):
    # https://github.com/google/jax/issues/5683
    class EasyDict(dict):
      def __init__(self, *args, **kwargs):
          super().__init__(*args, **kwargs)
          self.__dict__ = self
    x = EasyDict(shape=(3,), dtype=np.dtype('float32'))
    out_shape = api.eval_shape(lambda x: x, x)  # doesn't crash
    self.assertEqual(out_shape.shape, (3,))
  def test_eval_shape_names(self):
    def fun(x, y):
      return lax.psum(x, 'i') + y
    class MyArgArray(object):
      def __init__(self, shape, dtype, named_shape):
        self.shape = shape
        self.dtype = dtype
        self.named_shape = named_shape
    x = MyArgArray((3, 2), jnp.float32, {'i': 10})
    y = MyArgArray((3, 2), jnp.float32, {'j': 5})
    with core.extend_axis_env('i', 10, None):
      with core.extend_axis_env('j', 5, None):
        out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.named_shape, {'j': 5})
  def test_issue_871(self):
    T = jnp.array([[1., 2.], [3., 4.], [5., 6.]])
    x = jnp.array([1, 2, 3])
    msg = ("linearized function called on tangent values inconsistent with "
           "the original primal values")
    y, f_jvp = api.linearize(jnp.sum, x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
    y, f_jvp = api.linearize(api.jit(jnp.sum), x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
  def test_partial_eval_lower(self):
    # this is a simplified model of a bug that arose when we first used @jit in
    # a jvp rule. it's in this file because we want to use make_jaxpr.
    # NOTE(mattjj): I no longer understand what this was meant to test. My guess
    # is it was related to staging out the broadcast into a jaxpr to be
    # transposed, but after #1749 that's no longer a problem. After changing
    # make_jaxpr (and jit) to stage out sub-calls fully, this test started to
    # fail; I left it in as skipped because deleting tests feels wrong.
    raise unittest.SkipTest("obsolete test")
    @api.jit
    def f(a, b, c):
      a = lax.broadcast(a, (2,))
      return lax.select(a, b, c)
    a = np.ones((3, 3), dtype=np.bool_)
    b = np.ones((2, 3, 3))
    c = np.ones((2, 3, 3))
    jaxpr = api.make_jaxpr(lambda b, c: f(a, b, c))(b, c)
    subjaxpr = next(eqn.params["call_jaxpr"] for eqn in jaxpr.jaxpr.eqns
                    if "call_jaxpr" in eqn.params)
    self.assertEqual(len(subjaxpr.eqns), 1)
  def test_grad_of_int_errors(self):
    # Errors without allow_int=True
    dfn = grad(lambda x: x ** 2)
    self.assertRaisesRegex(
      TypeError,
      (r"grad requires real- or complex-valued inputs \(input dtype that is a "
       r"sub-dtype of np.floating or np.complexfloating\), but got int.*."),
      lambda: dfn(3))
  def test_jvp_of_int_identity(self):
    primals = (1,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out = api.jvp(lambda x: x, primals, tangents)
    self.assertEqual(out, np.zeros(shape=(), dtype=float0))
  def test_jvp_of_int_add(self):
    primals = (2,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out_tangent = api.jvp(lambda x: x+1, primals, tangents)
    self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
  def test_jit_jvp_of_int(self):
    primals = (2,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out_tangent = api.jvp(jax.jit(lambda x: x+1), primals, tangents)
    self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
  def test_vjp_of_int_index(self):
    primal, fn_vjp = api.vjp(lambda x, i: x[i], np.ones(2)*2, 1)
    tangent_x, tangent_i = fn_vjp(1.)
    self.assertEqual(primal, 2.)
    self.assertAllClose(tangent_x, jnp.array([0., 1.]))
    self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  def test_vjp_of_int_shapes(self):
    out, fn_vjp = api.vjp(lambda x: lax.reshape(x, (2, 2)), np.ones((4, 1),
                                                                    dtype=int))
    tangent, = fn_vjp(out)
    self.assertArraysEqual(tangent, np.zeros(shape=(4, 1), dtype=float0))
  def test_jit_vjp_of_int(self):
    primal, fn_vjp = api.vjp(lambda x, y: x+y, 2, 1)
    tangent_x, tangent_i = jax.jit(fn_vjp)(1)
    self.assertEqual(primal, 3)
    self.assertEqual(tangent_x, np.zeros(shape=(), dtype=float0))
    self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  def test_vjp_of_int_fulllike(self):
    # Regression test for tangent and cotangent mismatch in convert_element_type
    # transpose rule wrt a ConstVar
    f = lax.full_like
    out, vjp = api.vjp(f, np.zeros((2, 2)), 1)
    self.assertAllClose(out, jnp.ones((2, 2)))
    tangent_x, tangent_y = vjp(out)
    self.assertAllClose(tangent_x, jnp.zeros((2, 2)))
    self.assertEqual(tangent_y, np.zeros(shape=(), dtype=float0))
  def test_grad_of_int(self):
    # Need real-valued output, but testing integer input.
    out = api.grad(lambda x: x+0., allow_int=True)(1)
    self.assertEqual(out, np.zeros(shape=(), dtype=float0))
  def test_grad_of_bool(self):
    def cond(pred):
      return lax.cond(pred, lambda _: 1., lambda _: 2., 1.)
    value, grd = api.value_and_grad(cond, allow_int=True)(True)
    self.assertEqual(value, 1.)
    self.assertEqual(grd, np.zeros(shape=(), dtype=float0))
  def test_grad_of_int_index(self):
    grad_x, grad_i = api.grad(lambda x, i: x[i], argnums=(0, 1),
                              allow_int=True)(np.ones(2), 1)
    self.assertAllClose(grad_x, jnp.array([0., 1.]))
    self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
  def test_jit_grad_of_int(self):
    grad_f = api.grad(lambda x, i: x[i], argnums=(0, 1), allow_int=True)
    grad_x, grad_i = jax.jit(grad_f)(np.ones(2), 1)
    self.assertAllClose(grad_x, jnp.array([0., 1.]))
    self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
  def test_float0_reshape(self):
    # dtype-agnostic operations are supported
    float0_array = jax.grad(lambda x: jnp.sum(x+0.),
                            allow_int=True)(np.ones((2, 4), dtype=int))
    self.assertArraysEqual(float0_array.reshape((4, 2)),
                           np.zeros((4, 2), dtype=float0))
    self.assertArraysEqual(float0_array.transpose(),
                           np.zeros((4, 2), dtype=float0))
  def test_float0_error(self):
    # float0 is incompatible with other dtypes
    float0_array = jax.grad(lambda x: x+0., allow_int=True)(1)
    error_text = "float0s do not support any operations by design"
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via DeviceArray
      _ = float0_array + jnp.zeros(())
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via lax
      _ = lax.add(float0_array, jnp.zeros(()))
  def test_grad_complex_result_errors(self):
    dfn = grad(lambda x: x ** 2 + 1j)
    self.assertRaisesRegex(
      TypeError,
      (r"grad requires real-valued outputs \(output dtype that is a "
       r"sub-dtype of np.floating\), but got complex.*"),
      lambda: dfn(3.))
  def test_holomorphic_grad_of_float_errors(self):
    dfn = grad(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"grad with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_holomorphic_jacrev_of_float_errors(self):
    dfn = jacrev(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"jacrev with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_holomorphic_jacfwd_of_float_errors(self):
    dfn = jacfwd(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"jacfwd with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_jacfwd_of_complex_errors(self):
    dfn = jacfwd(lambda x: x ** 2)
    self.assertRaisesRegex(
      TypeError,
      (r"jacfwd requires real-valued inputs \(input dtype that is a "
       r"sub-dtype of np.floating\), but got complex.*"),
      lambda: dfn(3. + 1j))
  def test_xla_computation(self):
    # these tests basically check the examples in the xla_computation docstring
    def e(x):
      return jnp.sin(jnp.cos(x))
    c = api.xla_computation(e)(2.)
    self.assertIn('cosine', c.as_hlo_text())
    self.assertIn('sine', c.as_hlo_text())
    def f(x):
      return x - lax.psum(x, 'i')
    axis_env = [('i', 4)]
    c = api.xla_computation(f, axis_env=axis_env)(2)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3}}', c.as_hlo_text())
    def g(x):
      rowsum = lax.psum(x, 'i')
      colsum = lax.psum(x, 'j')
      allsum = lax.psum(x, ('i', 'j'))
      return rowsum, colsum, allsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(g, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2,4,6},{1,3,5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3,4,5,6,7}}', c.as_hlo_text())
    def h(x):
      rowsum = lax.psum(x, 'i', axis_index_groups=[[0, 1], [2, 3]])
      colsum = lax.psum(x, 'j')
      return rowsum, colsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(h, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2},{4,6},{1,3},{5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
  def test_xla_computation_args(self):
    def foo(x, y, z):
      return x + y + z
    c = api.xla_computation(foo)(1., 2., 3.)
    self.assertEqual(len(c.program_shape().parameter_shapes()), 3)
    c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
    param_shapes = c.program_shape().parameter_shapes()
    self.assertEqual(len(param_shapes), 1)
    self.assertEqual(param_shapes[0].xla_element_type(),
                     xb.xla_client.PrimitiveType.TUPLE)
  def test_xla_computation_duck_typing(self):
    def foo(x, y, z):
      return x + y + z
    x = jax.ShapeDtypeStruct((), np.float32)
    y = jax.ShapeDtypeStruct((), np.float32)
    z = jax.ShapeDtypeStruct((), np.float32)
    c = api.xla_computation(foo)(x, y, z)
    self.assertEqual(len(c.program_shape().parameter_shapes()), 3)
    c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
    param_shapes = c.program_shape().parameter_shapes()
    self.assertEqual(len(param_shapes), 1)
    self.assertEqual(param_shapes[0].xla_element_type(),
                     xb.xla_client.PrimitiveType.TUPLE)
  def test_staging_out_multi_replica(self):
    def f(x):
      return api.pmap(jnp.mean)(x)
    xla_comp = api.xla_computation(f)
    xla_comp(jnp.arange(8)).as_hlo_text()  # doesn't crash
  def test_xla_computation_instantiate_constant_outputs(self):
    def f():
      return jnp.zeros((3, 4))
    if config.omnistaging_enabled:
      xla_comp = api.xla_computation(f)()
    else:
      xla_comp = api.xla_computation(f, instantiate_const_outputs=True)()
    out_shape, = xla_comp.program_shape().result_shape().tuple_shapes()
    self.assertEqual(out_shape.dimensions(), (3, 4))
  def test_xla_computation_static_argnums(self):
    def f(x, y):
      return x + y
    xla_comp = api.xla_computation(f, static_argnums=(1,))(2, 3)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn("constant(3)", hlo_text)
    # The static arguments should be removed from the function being compiled,
    # thus the function should have only a single argument.
    self.assertIn("parameter.1", hlo_text)
    self.assertNotIn("parameter.2", hlo_text)
  def test_xla_computation_return_shape(self):
    _, shape_tree = api.xla_computation(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                        return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)
  def test_xla_computation_partitioned(self):
    def f(x, y):
      return jnp.dot(x, y) + 1
    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    xla_comp = api.xla_computation(f, in_parts=(P(2, 2), None),
                                   out_parts=P(4, 1))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}}', hlo_text)
  def test_xla_computation_replicated_and_partitioned(self):
    def f(x, y):
      return jnp.dot(x, y), lax.psum(x, 'i')
    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    axis_env = [('i', 4)]
    xla_comp = api.xla_computation(f, axis_env=axis_env,
                                   in_parts=(P(2, 2), None),
                                   out_parts=(P(4, 1), None))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn('all-reduce', hlo_text)
    self.assertIn('replica_groups={{0,1,2,3}}', hlo_text)
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}, {replicated}}', hlo_text)
  def test_xla_computation_psum_constant(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test requires omnistaging")
    f = lambda: jax.lax.psum(1, "i")
    api.xla_computation(f, axis_env=[("i", 2)])()  # doesn't crash
  @jtu.skip_on_devices("cpu", "gpu")
  @jtu.ignore_warning(message="Some donated buffers were not usable")
  def test_xla_computation_donate_argnums(self):
    api.xla_computation(lambda x: None, donate_argnums=(0,))(3)  # doesn't crash
  def test_concurrent_device_get_and_put(self):
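    # device_put/device_get round-trips should be thread-safe and preserve values.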
    def f(x):
      for _ in range(100):
        y = jax.device_put(x)
        x = jax.device_get(y)
      return x
    xs = [np.random.randn(i) for i in range(10)]
    with concurrent.futures.ThreadPoolExecutor() as executor:
      futures = [executor.submit(partial(f, x)) for x in xs]
      ys = [f.result() for f in futures]
    for x, y in zip(xs, ys):
      self.assertAllClose(x, y)
  def test_dtype_warning(self):
    # cf. issue #1230
    if config.x64_enabled:
      raise unittest.SkipTest("test only applies when x64 is disabled")
    def check_warning(warn, nowarn):
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        nowarn()  # get rid of extra startup warning
        prev_len = len(w)
        nowarn()
        assert len(w) == prev_len
        warn()
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_prefix = "Explicitly requested dtype "
        self.assertEqual(expected_prefix, msg[:len(expected_prefix)])
        prev_len = len(w)
        nowarn()
        assert len(w) == prev_len
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype="float32"))
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype=float))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3, dtype=float))
    check_warning(lambda: jnp.ones_like(3, dtype=np.int64),
                  lambda: jnp.ones_like(3, dtype=np.int32))
    check_warning(lambda: jnp.zeros(3, dtype="int64"),
                  lambda: jnp.zeros(3, dtype="int32"))
    check_warning(lambda: jnp.zeros_like(3, dtype="float64"),
                  lambda: jnp.zeros_like(3, dtype="float32"))
    check_warning(lambda: jnp.full((2, 3), 1, dtype="int64"),
                  lambda: jnp.full((2, 3), 1))
    check_warning(lambda: jnp.ones(3).astype("float64"),
                  lambda: jnp.ones(3).astype("float32"))
    check_warning(lambda: jnp.eye(3, dtype=np.float64),
                  lambda: jnp.eye(3))
    check_warning(lambda: jnp.arange(3, dtype=np.float64),
                  lambda: jnp.arange(3, dtype=np.float32))
    check_warning(lambda: jnp.linspace(0, 3, dtype=np.float64),
                  lambda: jnp.linspace(0, 3, dtype=np.float32))
    check_warning(lambda: jnp.tri(2, dtype="float64"),
                  lambda: jnp.tri(2, dtype="float32"))
    check_warning(lambda: jnp.arange(1).astype("float64"),
                  lambda: jnp.arange(1).astype(float))
    check_warning(lambda: jnp.arange(1.0).astype("int64"),
                  lambda: jnp.arange(1.0).astype(int))
  def test_vmap_preserves_docstr(self):
    def superfun(a):
      """Does things with stuff."""
      pass
    self.assertRegex(api.vmap(superfun).__doc__, "\n".join([
        "Vectorized version of superfun.*",
        "",
        "Original documentation:",
        "",
        superfun.__doc__,
    ]))
  def test_vmap_in_axes_list(self):
    # https://github.com/google/jax/issues/2367
    dictionary = {'a': 5., 'b': jnp.ones(2)}
    x = jnp.zeros(3)
    y = jnp.arange(3.)
    def f(dct, x, y):
      return dct['a'] + dct['b'] + x + y
    out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y)
    out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y)
    self.assertAllClose(out1, out2)
  def test_vmap_in_axes_tree_prefix_error(self):
    # https://github.com/google/jax/issues/795
    self.assertRaisesRegex(
        ValueError,
        "vmap in_axes specification must be a tree prefix of the corresponding "
        r"value, got specification \(0, 0\) for value tree "
        r"PyTreeDef\(tuple, \[\*\]\).",
        lambda: api.vmap(lambda x: x, in_axes=(0, 0))(jnp.ones(3))
    )
  def test_vmap_in_axes_leaf_types(self):
    with self.assertRaisesRegex(
        TypeError, r"vmap in_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, in_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
  def test_vmap_out_axes_leaf_types(self):
    with self.assertRaisesRegex(
        TypeError, r"vmap out_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, out_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
  def test_vmap_unbatched_object_passthrough_issue_183(self):
    # https://github.com/google/jax/issues/183
    fun = lambda f, x: f(x)
    vfun = api.vmap(fun, (None, 0))
    ans = vfun(lambda x: x + 1, jnp.arange(3))
    self.assertAllClose(ans, np.arange(1, 4), check_dtypes=False)
  def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):
    # https://github.com/google/jax/issues/705
    def h(a, b):
      return jnp.sum(a) + jnp.sum(b)
    X = np.random.randn(10, 4)
    U = np.random.randn(10, 2)
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
        "so\n"
        "arg 0 has an axis to be mapped of size 10\n"
        "arg 1 has an axis to be mapped of size 2"):
      api.vmap(h, in_axes=(0, 1))(X, U)
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
        r"arg 2 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        "so\n"
        "args 0, 2 have axes to be mapped of size 10\n"
        "arg 1 has an axis to be mapped of size 2"):
      api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X)
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        "the tree of axis sizes is:\n"
        r"\(10, \[2, 2\]\)"):
      api.vmap(h, in_axes=(0, 1))(X, [U, U])
    with self.assertRaisesRegex(
        ValueError, "vmap got arg 0 of rank 0 but axis to be mapped 0"):
      # The mapped inputs cannot be scalars
      api.vmap(lambda x: x)(1.)
    with self.assertRaisesRegex(
        ValueError, "vmap must have at least one non-None value in in_axes"):
      # If the output is mapped, there must be a non-None in_axes
      api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.]))
    with self.assertRaisesRegex(
        ValueError, "vmap got arg 0 of rank 1 but axis to be mapped 1"):
      api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.]))
    # Without the friendlier vmap check, the underlying error here would be:
    # TypeError: only integer scalar arrays can be converted to a scalar index
    with self.assertRaisesRegex(
        ValueError,
        "vmap out_axes specification must be a tree prefix of the "
        "corresponding value.*"):
      api.vmap(lambda x: x, in_axes=0, out_axes=(2, 3))(jnp.array([1., 2.]))
    with self.assertRaisesRegex(
        ValueError, "vmap has mapped output but out_axes is None"):
      # If the output is mapped, then there must be some out_axes specified
      api.vmap(lambda x: x, out_axes=None)(jnp.array([1., 2.]))
  def test_vmap_structured_in_axes(self):
    A, B, C, D = 2, 3, 4, 5
    K = 6  # batch size
    x = np.ones((K, A, B))  # batch axis in different locations
    y = np.ones((B, K, C))
    z = np.ones((C, D, K))
    def foo(tree_arg):
      x, (y, z) = tree_arg
      return jnp.dot(x, jnp.dot(y, z))
    tree = (x, (y, z))
    vfoo = api.vmap(foo, in_axes=((0, (1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    Point = collections.namedtuple("Point", ["x", "y"])
    tree = (x, Point(y, z))
    vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    def foo(tree_arg):
      x, dct = tree_arg
      y, z = dct['a'], dct['b']
      return jnp.dot(x, jnp.dot(y, z))
    tree = (x, {'a': y, 'b': z})
    vfoo = api.vmap(foo, in_axes=((0, {'a': 1, 'b': 2}),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    tree = (x, collections.OrderedDict([('a', y), ('b', z)]))
    vfoo = api.vmap(
        foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
  def test_pmap_global_cache(self):
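    # Repeated pmap (and jit) calls with identical functions and arguments should
    # reuse the compilation cache rather than recompiling.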
    def f(x, y):
      return x, y
    x = np.ones((1, 1, 1))
    # All defaults
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f)(x, x)
    # With axis name
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f, 'i')(x, x)
    # With in_axes and out_axes
    if config.omnistaging_enabled:
      for x_in, y_in, x_out, y_out in it.product(*((0, 1, 2) for _ in range(4))):
        with jtu.assert_num_jit_and_pmap_compilations(1):
          for _ in range(2):
            api.pmap(f, 'i', in_axes=(x_in, y_in), out_axes=(x_out, y_out))(x, x)
    # Forward-mode AD on the outside
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.jvp(api.pmap(f), (x, x), (x, x))
    # Reverse-mode AD on the outside. One compilation for forward, one for backward.
    with jtu.assert_num_jit_and_pmap_compilations(2):
      for _ in range(2):
        api.vjp(api.pmap(f), x, x)[1]((x, x))
  def test_device_array_repr(self):
    rep = jnp.ones(()) + 1.
    self.assertStartsWith(repr(rep), "DeviceArray")
  def test_device_array_hash(self):
    rep = jnp.ones(()) + 1.
    self.assertIsInstance(rep, jax.interpreters.xla.DeviceArray)
    msg = "JAX DeviceArray, like numpy.ndarray, is not hashable."
    with self.assertRaisesRegex(TypeError, msg):
      hash(rep)
    with self.assertRaisesRegex(TypeError, msg):
      hash(rep.device_buffer)
  def test_grad_without_enough_args_error_message(self):
    # https://github.com/google/jax/issues/1696
    def f(x, y): return x + y
    df = api.grad(f, argnums=0)
    self.assertRaisesRegex(
        TypeError,
        "differentiating with respect to argnums=0 requires at least 1 "
        "positional arguments to be passed by the caller, but got only 0 "
        "positional arguments.",
        lambda: partial(df, x=0.)(y=1.))
  def test_grad_of_jit_compilation_caching(self):
    if not hasattr(self, "assertLogs"):
      raise unittest.SkipTest("test requires assertLogs (python 3)")
    lax.add(1, 2)  # make sure some initial warnings are already printed
    sin = api.jit(jnp.sin)
    prev_level = logging.get_verbosity()
    try:
      logging.set_verbosity('DEBUG')
      with self.assertLogs(level=logging.DEBUG) as l:
        ans1 = api.grad(sin)(2.)
        ans2 = api.grad(sin)(3.)
    finally:
      logging.set_verbosity(prev_level)
    self.assertLen(l.output, 2)
    self.assertAllClose(ans1, np.cos(2.), check_dtypes=False)
    self.assertAllClose(ans2, np.cos(3.), check_dtypes=False)
  def test_trivial_computations(self):
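    # jit should recognize trivial (identity-like) outputs and return the original
    # input buffers rather than copies.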
    x = jnp.array([1, 2, 3])
    y = api.jit(lambda x: x)(x)
    self.assertIs(x, y)
    z1, z2 = api.jit(lambda x: (x, x))(x)
    self.assertIs(z1, z2)
    x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
    z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)
    self.assertIs(z1, x2)
    self.assertIs(z3, x1)
    self.assertEqual(z2, 1)
  def test_nested_jit_hoisting(self):
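    # An inner jit call should be staged out as a single xla_call equation in the
    # outer jaxpr instead of being inlined.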
    @api.jit
    def f(x, y):
      z = 2 * x
      return y + z, 3
    @api.jit
    def g(x):
      return f(2, x)
    jaxpr_subcomp = xla.jaxpr_subcomp
    jaxprs = []
    def jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs):
      jaxprs.append(jaxpr)
      return jaxpr_subcomp(c, jaxpr, *args, **kwargs)
    try:
      xla.jaxpr_subcomp = jaxpr_subcomp_and_collect
      ans = g(3)
    finally:
      xla.jaxpr_subcomp = jaxpr_subcomp
    self.assertEqual(ans, (7, 3))
    self.assertLen(jaxprs, 2)
    outer_jaxpr, inner_jaxpr = jaxprs
    self.assertLen(outer_jaxpr.eqns, 1)
    self.assertEqual(outer_jaxpr.eqns[0].primitive.name, 'xla_call')
    subjaxpr_1 = outer_jaxpr.eqns[0].params["call_jaxpr"]
    self.assertEqual(str(subjaxpr_1), str(inner_jaxpr))
    self.assertLen(inner_jaxpr.eqns, 2 if config.omnistaging_enabled else 3)
    self.assertEqual(inner_jaxpr.eqns[-2].primitive.name, 'mul')
    self.assertEqual(inner_jaxpr.eqns[-1].primitive.name, 'add')
  def test_primitive_compilation_cache(self):
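    # Two applications of the same primitive with same-shaped arguments should
    # compile only once.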
    with jtu.count_primitive_compiles() as count:
      lax.add(1, 2)
      lax.add(2, 3)
    self.assertEqual(count[0], 1)
  def test_arange_jit(self):
    # see https://github.com/google/jax/issues/553
    def fun(x):
      r = jnp.arange(x.shape[0])[x]
      return r
    jit(fun)(jnp.array([0, 1, 2], dtype=jnp.int32))  # doesn't crash
  def helper_save_tracer(self, x):
    self._saved_tracer = x
    return x
  def test_escaped_tracers_different_top_level_traces(self):
    api.jit(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        core.UnexpectedTracerError, "Encountered an unexpected tracer"):
      api.jit(lambda x: self._saved_tracer)(0.)
  def test_escaped_tracers_cant_lift_sublevels(self):
    api.jit(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        core.UnexpectedTracerError,
        re.compile(
          "Encountered an unexpected tracer",
          re.DOTALL)):
      api.jit(lambda x: x)(self._saved_tracer)
  def test_escaped_tracers_tracer_from_higher_level(self):
    api.grad(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        core.UnexpectedTracerError,
        re.compile(
          "Encountered an unexpected tracer.*Tracer from a higher level",
          re.DOTALL)):
      api.grad(lambda x: x)(self._saved_tracer)
  def test_escaped_tracers_incompatible_sublevel(self):
    def func1(x):
      api.jit(self.helper_save_tracer)(0.)
      # Use the tracer
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        core.UnexpectedTracerError,
        re.compile("Encountered an unexpected tracer",
                   re.DOTALL)):
      api.jit(func1)(2.)
  def test_escaped_tracers_cant_lift(self):
    def func1(x):
      api.grad(self.helper_save_tracer)(0.)
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        core.UnexpectedTracerError,
        re.compile("Encountered an unexpected tracer.*Can't lift",
                   re.DOTALL)):
      api.grad(func1)(2.)
  def test_escaped_tracers_not_among_input_tracers(self):
    def func1(x):
      api.grad(self.helper_save_tracer)(x)
      # Use the tracer
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        core.UnexpectedTracerError,
        re.compile(
          "Encountered an unexpected tracer.*Tracer not among input tracers",
          re.DOTALL)):
      api.jit(func1)(2.)
  def test_escaped_tracer_omnistaging(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test is omnistaging-specific")
    count = 1
    @jit
    def f():
      nonlocal count
      count = jnp.add(count, 1)
    f()  # leaked a tracer! but currently undetected
    def f(x, c):
      jnp.add(count, 1)
      return None, None
    @jit
    def g():
      lax.scan(f, None, None, length=2)
    with self.assertRaisesRegex(core.UnexpectedTracerError,
                                "was created on line"):
      g()
  def test_escaped_tracer_omnistaging_top_trace(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test is omnistaging-specific")
    count = 1
    def f(_, __):
      nonlocal count
      count = jnp.add(count, 1)
      return None, None
    lax.scan(f, None, None, length=2)  # leaked a tracer! (of level 1!)
    with self.assertRaisesRegex(core.UnexpectedTracerError,
                                "was created on line"):
      # The following call will try to raise the ones array to the count tracer's
      # level, which is no longer live.
      jax.jit(jnp.add)(jnp.ones(()), count)
  def test_pmap_static_kwarg_error_message(self):
    # https://github.com/google/jax/issues/3007
    def f(a, b):
      return a + b
    g = jax.pmap(f, static_broadcasted_argnums=(1,))
    msg = (r"pmapped function has static_broadcasted_argnums=\(1,\) but was "
           r"called with only 1 positional argument. All static broadcasted "
           r"arguments must be passed positionally.")
    with self.assertRaisesRegex(ValueError, msg):
      g(jnp.ones((1, 1)), b=1)
  def test_vmap_unmapped_last(self):
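    # vmap with out_axes=-1 should handle an output that does not depend on the
    # mapped input.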
    @partial(jax.vmap, out_axes=-1)
    def f(x):
      return np.zeros((2,))
    f(np.zeros((5,)))
  # TODO(jakevdp): re-enable this if possible.
  @unittest.skipIf(True, "broken by convert_element_type change.")
  def test_xla_constant_dedup(self):
    y = np.array([7, 14], dtype=np.float32)
    def f(x):
      return x + y + y
    x = np.array([1, 2], dtype=np.float32)
    hlo_lines = jax.xla_computation(f)(x).as_hlo_text().split('\n')
    hlo_lines = set([s.strip() for s in hlo_lines])
    self.assertIn('constant.1 = f32[2]{0} constant({7, 14})', hlo_lines)
    self.assertNotIn('constant.2 = f32[2]{0} constant({7, 14})', hlo_lines)
  def test_omnistaging_flag(self):
    if FLAGS.jax_omnistaging:
      jaxpr = api.make_jaxpr(lambda: jnp.add(1, 1))()
      self.assertLen(jaxpr.jaxpr.eqns, 1)
    else:
      # omnistaging can be enabled programmatically without setting the flag,
      # but that shouldn't happen in tests
      jaxpr = api.make_jaxpr(lambda: jnp.add(1, 1))()
      self.assertLen(jaxpr.jaxpr.eqns, 0)
  def test_eval_context(self):
    @jit
    def f():
      with core.eval_context():
        assert jnp.add(1, 1) == 2
    f()  # doesn't crash
  def test_concrete_error_because_arg(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test is omnistaging-specific")
    @jax.jit
    def f(x, y):
      if x > y:
        return x
      else:
        return y
    msg = r"at flattened positions \[0, 1\]"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2)
  def test_concrete_error_because_const(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test is omnistaging-specific")
    @jax.jit
    def f():
      assert jnp.add(1, 1) > 0
    msg = "on these lines"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f()
  def test_xla_computation_zeros_doesnt_device_put(self):
    raise unittest.SkipTest("broken test")  # TODO(mattjj): fix
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test is omnistaging-specific")
    with jtu.count_device_put() as count:
      api.xla_computation(lambda: jnp.zeros(3))()
    self.assertEqual(count[0], 0)
  def test_join_concrete_arrays_with_omnistaging(self):
    # https://github.com/google/jax/issues/4622
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test is omnistaging-specific")
    x = jnp.array([1., 2., 3.])
    y = jnp.array([1., 2., 4.])
    @jit
    def f():
      core.lattice_join(core.ConcreteArray(x), core.ConcreteArray(y))
    f()  # doesn't crash
  def test_linearize_aval_error(self):
    # https://github.com/google/jax/issues/4622
    f = lambda x: x
    # these should not error
    _, f_jvp = api.linearize(f, 1.)
    f_jvp(1.)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    f_jvp(np.zeros(2, float0))
    # these should error
    _, f_jvp = api.linearize(f, 1.)
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
      f_jvp(1)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
      f_jvp(np.ones(2, np.int32))
  def test_grad_of_token_consuming_primitive(self):
    # https://github.com/google/jax/issues/5463
    tokentest_p = core.Primitive("tokentest")
    tokentest_p.def_impl(partial(xla.apply_primitive, tokentest_p))
    tokentest_p.def_abstract_eval(lambda x, y: x)
    xla.translations[tokentest_p] = lambda c, x, y: x
    ad.defjvp(tokentest_p, (lambda g, x, token: x), None)
    token = jax.lax.create_token(123)
    arr = jnp.ones((3, 2))
    res, vjp_fun = jax.vjp(lambda x: tokentest_p.bind(x, token), arr)
    # Should not crash.
    vjp_fun(arr)
  def test_jit_returning_token(self):
    x = jax.jit(jax.lax.create_token)(1.0)
    self.assertIsInstance(x, jax.interpreters.xla.Token)
  def test_leak_checker_catches_a_jit_leak(self):
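    # With core.checking_leaks() enabled, a tracer that escapes its jit (here by
    # being appended to an outer list) should raise a leak error.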
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      lst = []
      @jit
      def f(x):
        lst.append(x)
        return x
      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        f(3)
  def test_leak_checker_catches_a_pmap_leak(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      lst = []
      @api.pmap
      def f(x):
        lst.append(x)
        return x
      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        f(np.ones(1))
  def test_leak_checker_catches_a_grad_leak(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      lst = []
      def f(x):
        lst.append(x)
        return x
      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        api.grad(f)(3.)
  def test_leak_checker_avoids_false_positives(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      @jit
      def f(x):
        return x
      f(3)  # doesn't crash
      api.vmap(f)(np.arange(3))  # doesn't crash
      api.grad(f)(3.)  # doesn't crash
      @api.pmap
      def f(x):
        return x
      f(np.ones(1))  # doesn't crash
      api.vmap(f)(np.ones((1, 1)))  # doesn't crash
  def test_leak_checker_catches_a_scan_leak(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      lst = []
      to_scan = lambda c, x: (lst.append(c) or jnp.sin(c), None)
      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        lax.scan(to_scan, 1., np.arange(3.))
  def test_leak_checker_avoids_false_positives_scan(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      to_scan = lambda c, x: (jnp.sin(c), None)
      lax.scan(to_scan, 1., np.arange(3.))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_jvp(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      to_scan = lambda c, x: (c, None)
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      api.jvp(f, (3.,), (1.,))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_vmap(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      to_scan = lambda c, _: (1., None)
      @api.vmap
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      f(np.arange(5.))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_vmap_2(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      to_scan = lambda c, _: (c, None)
      @api.vmap
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      f(np.arange(5.))  # doesn't crash
  def test_leak_checker_catches_a_sublevel_leak(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    with core.checking_leaks():
      @jit
      def f(x):
        lst = []
        @jit
        def g(x):
          lst.append(x)
          return x
        x = g(x)
        return x
      with self.assertRaisesRegex(Exception, r"Leaked sublevel"):
        f(3)
  def test_default_backend(self):
    first_local_device = api.local_devices()[0]
    self.assertEqual(first_local_device.platform, api.default_backend())
  def test_dunder_jax_array(self):
    # https://github.com/google/jax/pull/4725
    class AlexArray:
      def __init__(self, jax_val):
        self.jax_val = jax_val
      def __jax_array__(self):
        return self.jax_val
      dtype = property(lambda self: self.jax_val.dtype)
      shape = property(lambda self: self.jax_val.shape)
    x = AlexArray(jnp.array([1., 2., 3.]))
    y = jnp.sin(x)
    self.assertAllClose(y, jnp.sin(jnp.array([1., 2., 3.])))
    y = api.grad(api.jit(lambda x: jnp.sin(x).sum()))(x)
    self.assertAllClose(y, jnp.cos(jnp.array([1., 2., 3.])))
    x = AlexArray(jnp.array([[1., 2., 3.]]))
    y = api.pmap(jnp.sin)(x)
    self.assertAllClose(y, jnp.sin(jnp.array([[1., 2., 3.]])))
    x = jnp.array(1)
    a = AlexArray(x)
    for f in [jnp.isscalar, jnp.size, jnp.shape, jnp.dtype]:
      self.assertEqual(f(x), f(a))
  def test_constant_handler_mro(self):
    # https://github.com/google/jax/issues/6129
    class Foo(enum.IntEnum):
      bar = 1
    @api.pmap
    def f(_):
      return Foo.bar
    ans = f(jnp.arange(1))  # doesn't crash
    expected = jnp.arange(1) + 1
    self.assertAllClose(ans, expected)
class RematTest(jtu.JaxTestCase):
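  # jax.remat (rematerialization, a.k.a. gradient checkpointing) avoids storing
  # intermediates from the forward pass and instead recomputes them on the
  # backward pass, trading compute for memory.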
  def test_remat_basic(self):
    @api.remat
    def g(x):
      return lax.sin(lax.sin(x)), 3.
    def f(x):
      x, _ = g(x)
      return x
    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans, f_lin = api.linearize(f, 2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = f_lin(3.)
    expected = np.cos(np.sin(2.)) * np.cos(2.) * 3.
    self.assertAllClose(ans, expected, check_dtypes=False)
    sin_calls = []
    cos_calls = []
    sin_impl = lax.sin_p.impl
    cos_impl = lax.cos_p.impl
    try:
      lax.sin_p.def_impl(lambda x: sin_calls.append(1) or sin_impl(x))
      lax.cos_p.def_impl(lambda x: cos_calls.append(1) or cos_impl(x))
      f_lin(3.)
    finally:
      lax.sin_p.def_impl(sin_impl)
      lax.cos_p.def_impl(cos_impl)
    self.assertEqual(len(sin_calls), 1)
    self.assertEqual(len(cos_calls), 2)
  def test_remat_freevars(self):
    def f1(x):
      y = 2 * jnp.sin(x)
      z = jnp.cos(x) * jnp.sin(y)
      return z
    def f2(x):
      y = 2 * jnp.sin(x)
      z = api.remat(lambda x: jnp.cos(x) * jnp.sin(y))(x)
      return z
    ans, f_lin = api.linearize(f2, 2.)
    expected, f_lin_expected = api.linearize(f1, 2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = f_lin(3.)
    expected = f_lin_expected(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_grad_python_control_flow(self):
    @partial(api.remat, concrete=True)
    def g(x):
      if x > 0:
        return lax.sin(x), 3.
      else:
        return lax.cos(x), 4.
    def f(x):
      x, _ = g(x)
      return x
    ans = f(2.)
    expected = np.sin(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(f)(2.)
    expected = np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_jit(self):
    @api.remat
    def g(x):
      return lax.sin(lax.sin(x))
    def f_(x):
      return g(x)
    f = api.jit(f_)
    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(f)(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.grad(f_))(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_vmap(self):
    @api.remat
    def g(x):
      return lax.sin(lax.sin(x))
    x = np.arange(3.)
    ans = api.vmap(g)(x)
    expected = np.sin(np.sin(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jacfwd(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jacrev(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_higher_order_autodiff(self):
    def f(x):
      return lax.cos(lax.sin(x))
    g = api.remat(f)
    ans = api.grad(api.grad(g))(3.)
    expected = api.grad(api.grad(f))(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_scan(self):
    to_scan = lambda c, x: (jnp.sin(c), None)
    def f_noremat(x):
      y, _ = lax.scan(to_scan, x, np.arange(3.))
      return y
    def f_yesremat(x):
      y, _ = lax.scan(api.remat(to_scan), x, np.arange(3.))
      return y
    ans = f_yesremat(4.)
    expected = f_noremat(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(f_yesremat)(4.)
    expected = api.grad(f_noremat)(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jaxpr = api.make_jaxpr(api.linearize(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
    jaxpr = api.make_jaxpr(api.vjp(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
  def test_remat_no_redundant_flops(self):
    # see https://github.com/google/jax/pull/1749#issuecomment-558267584
    @api.jit
    def g(x):
      return f(2., x)
    @api.remat
    def f(x, y):
      return jnp.sin(x) * y
    # We swap out sin_p's impl rule to count how many times it's invoked
    called = []
    sin_impl = lax.sin_p.impl
    try:
      lax.sin_p.def_impl(lambda x: called.append(1) or sin_impl(x))
      api.grad(g)(3.)
    finally:
      lax.sin_p.def_impl(sin_impl)
    num_calls = len(called)
    self.assertLessEqual(num_calls, 1)
  def test_remat_binomial_checkpointing(self):
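    # Binomial checkpointing: recursively remat half of the function chain so
    # that memory grows logarithmically in the number of composed functions.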
    def binom_checkpoint(funs):
      if len(funs) == 1:
        return funs[0]
      else:
        f1 = binom_checkpoint(funs[:len(funs)//2])
        f2 = binom_checkpoint(funs[len(funs)//2:])
        return api.remat(lambda x: f1(f2(x)))
    f1 = binom_checkpoint([jnp.sin, jnp.sin, jnp.sin, jnp.sin])
    f2 = lambda x: jnp.sin(jnp.sin(jnp.sin(jnp.sin(x))))
    x = 4.
    self.assertAllClose(f1(x), f2(x), check_dtypes=False)
    self.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False)
  def test_remat_symbolic_zeros(self):
    # code from https://github.com/google/jax/issues/1907
    key = jax.random.PRNGKey(0)
    key, split = jax.random.split(key)
    n = 5
    def func(D0):
      def shift(R, dR, **unused_kwargs):
        return R + dR
      def apply_fn(R):
        return D0 * R
      Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,
                                 dtype=jnp.float32)
      def move(R,i):
        F = apply_fn(R)
        return shift(R, 0.001 * F), jnp.array([0.])
      move = api.remat(move)
      R, temp = lax.scan(move, Rinit, jnp.arange(2))
      return R[0, 0]
    api.grad(func)(5.0)  # doesn't crash
  def test_remat_jit2(self):
    @api.jit
    def f(x):
      y = 2 * x
      @api.remat
      def g():
        return y
      return g()
    self.assertAllClose(f(3), 6, check_dtypes=False)
  def test_remat_nontrivial_env(self):
    # simplified from https://github.com/google/jax/issues/2030
    @api.remat
    def foo(state, dt=0.5, c=1):
      u, u_t = state
      u_tt = c**2 * u
      u_t = u_t + u_tt * dt
      return (u, u_t)
    @partial(api.jit, static_argnums=(1,))
    def _multi_step(state, count, dt, c):
      f = lambda s, _: (foo(s, dt, c), _)
      return lax.scan(f, state, None, count)
    def multi_step(state, count, dt=1/jnp.sqrt(2), c=1):
      return _multi_step(state, count, dt, c)
    def loss(u0, target, steps, dt=1/jnp.sqrt(2), c=1):
      init = (u0, jnp.zeros_like(u0))
      (uf, _), _ = multi_step(init, steps, dt, c)
      return ((uf - target) ** 2).mean()
    target = jnp.zeros((128, 128))
    u0 = jnp.ones_like(target)
    loss(u0, target, 10)  # doesn't crash
  def test_remat_jit3(self):
    # https://github.com/google/jax/issues/2180
    def f(w, x):
      a = jnp.dot(x, w)
      b = jnp.einsum("btd,bTd->btT", a, a)
      c = jnp.einsum("btT,btd->btd", b, a)
      return jnp.sum(c)
    w = jnp.ones([1, 1])
    x = jnp.ones([1, 1, 1])
    f = api.remat(f)
    api.grad(f)(w, x)  # doesn't crash
    @api.jit
    def mul(a, b):
      return a * b
    def f(w, x):
      a = mul(w, x)
      b = mul(a, a)
      return b
    w = 1.
    x = 1.
    f = api.remat(f)
    api.grad(f)(w, x)  # doesn't crash
  def test_remat_scan2(self):
    # https://github.com/google/jax/issues/1963
    def scan_bug(x0):
      f = lambda x, _: (x + 1, None)
      def scanned_f(x, _):
        return lax.scan(f, x, xs=None, length=1)[0], None
      x, _ = jax.remat(scanned_f)(x0, None)
      return x
    jax.grad(scan_bug)(1.0)  # doesn't crash
  def test_remat_jit_static_argnum(self):
    # https://github.com/google/jax/issues/2833
    if config.omnistaging_enabled:
      raise unittest.SkipTest("test only works without omnistaging")  # see next test
    def f(a_bool, y):
      if a_bool:
        return y + 1
      else:
        return y
    api.jit(api.remat(f, concrete=True), static_argnums=0)(True, 1)  # no crash
  def test_remat_jit_static_argnum_omnistaging(self):
    # https://github.com/google/jax/issues/2833
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")  # see previous test
    def named_call(f):
      def named_f(*args):
        f_ = lu.wrap_init(lambda: (f(*args),))
        out, = core.call_p.bind(f_)
        return out
      return named_f
    def f(a_bool, y):
      if a_bool:
        return y + 1
      else:
        return y
    api.jit(named_call(f), static_argnums=0)(True, 1)  # no crash
  def test_remat_eval_counter(self):
    # https://github.com/google/jax/issues/2737
    add_one_p = Primitive('add_one')
    add_one = add_one_p.bind
    num_evals = 0
    @contextmanager
    def assertEvals(n):
      start = num_evals
      yield
      assert num_evals - start == n
    def add_one_impl(x):
      nonlocal num_evals
      num_evals += 1
      return x + 1
    add_one_p.def_impl(add_one_impl)
    def add_one_jvp(pin, tin):
      pout = add_one(pin[0])
      return pout, pout * tin[0]
    ad.primitive_jvps[add_one_p] = add_one_jvp
    add_one_p.def_abstract_eval(lambda x: x)
    v = np.zeros((1,))
    f = jax.remat(add_one)
    g = jax.remat(lambda x: add_one(f(x)))
    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, 1 call made while transposing f
    with assertEvals(3):
      vjp(v)
    @jax._src.util.curry
    def call(f, *args):
      return jax.core.call(
          jax.linear_util.wrap_init(lambda *args: [f(*args)]),
          *args, name='foo')[0]
    f = call(add_one)
    g = jax.remat(lambda x: add_one(f(x)))
    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, no reevaluation for transposition of f
    with assertEvals(2):
      vjp(v)
  def test_escaped_tracer_remat(self):
    # b/169779185
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    def f():
      seq = [jnp.zeros([])]
      def g():
        seq[0] += 1  # this is line 7 btw
        return seq[0]
      api.remat(g)()
      api.remat(g)()
    with self.assertRaisesRegex(core.UnexpectedTracerError, "global state"):
      api.jit(f)()
class JaxprTest(jtu.JaxTestCase):
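  # api.make_jaxpr traces a Python function with abstract values and returns its
  # jaxpr, JAX's intermediate representation, without running the computation.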
  def test_scalar_literals(self):
    jaxpr = api.make_jaxpr(lambda x: x + 2)(42)
    self.assertLen(jaxpr.jaxpr.constvars, 0)
  def test_abstract_inputs(self):
    jaxpr = api.make_jaxpr(lambda x: x + 2.)(
        types.SimpleNamespace(shape=(), dtype=np.float32))
    self.assertEqual(jaxpr.in_avals[0].shape, ())
    self.assertEqual(jaxpr.in_avals[0].dtype, np.float32)
  def test_const(self):
    def fun(x):
      return (x, 1., np.zeros(1))
    if config.omnistaging_enabled:
      expected = """
      { lambda a ; b.
      let
      in (b, 1.0, a) }
      """
    else:
      expected = """
      { lambda b ; a.
      let
      in (a, 1.0, b) }
      """
    jaxpr = api.make_jaxpr(fun)(0.)
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))
  def test_cond(self):
    def f(x):
      return lax.cond(x >= 0.,
                      x + 1.,
                      lambda xt: xt + x,
                      x + 2.,
                      lambda xf: xf - x)
    if config.omnistaging_enabled:
      expected = """
      { lambda  ; a.
        let b = ge a 0.0
            c = add a 1.0
            d = add a 2.0
            e = convert_element_type[ new_dtype=int32
                                      weak_type=False ] b
            f = cond[ branches=( { lambda  ; e_ a b c.
                                   let d = sub c a
                                   in (d,) }
                                 { lambda  ; a f_ b c.
                                   let d = add b a
                                   in (d,) } )
                      linear=(False, False, False, False) ] e a a c d
        in (f,) }
        """
    else:
      expected = """
      { lambda  ; a.
        let b = ge a 0.0
            c = convert_element_type[ new_dtype=int32
                                      weak_type=False ] b
            d = convert_element_type[ new_dtype=float32
                                      weak_type=False ] a
            e = convert_element_type[ new_dtype=float32
                                      weak_type=False ] a
            f = add a 1.0
            g = add a 2.0
            h = cond[ branches=( { lambda  ; e_ c a b.
                                   let d = sub b c
                                   in (d,) }
                                 { lambda  ; c f_ a b.
                                   let d = add a c
                                   in (d,) } )
                      linear=(False, False, False, False) ] c d e f g
        in (h,) }
      """
    jaxpr = api.make_jaxpr(f)(3.)
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))
  def test_make_jaxpr_static_argnums(self):
    def f(x, y):
      return x + y
    jaxpr = api.make_jaxpr(f, static_argnums=(1,))(2, 3)
    self.assertIn('3', str(jaxpr))
  def test_make_jaxpr_return_shape(self):
    _, shape_tree = api.make_jaxpr(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                   return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)
  def test_make_jaxpr_axis_env(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    def f(x):
      return x - lax.psum(x, 'i')
    jaxpr = api.make_jaxpr(f, axis_env=[('i', 4)])(2)
    self.assertIn('psum', str(jaxpr))
  def test_make_jaxpr_named(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    def f(x):
      return x - lax.psum(x, 'i')
    x = types.SimpleNamespace(
        shape=(2, 3), dtype=jnp.float32, named_shape={'i': 10})
    jaxpr = api.make_jaxpr(f, axis_env=[('i', 10)])(x)
    named_shapes = [v.aval.named_shape for v in jaxpr.jaxpr.eqns[1].invars]
    self.assertEqual(named_shapes, [{'i': 10}, {}])
class CustomJVPTest(jtu.JaxTestCase):
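  # jax.custom_jvp lets a function define its own forward-mode (JVP) rule via
  # f.defjvp, which JAX then uses in place of the automatically derived rule.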
  def test_basic(self):
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    x = 3.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(api.jvp(f, (x,), (1.,)),
                        (jnp.sin(x), 2 * jnp.cos(x)))
    self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
  def test_invariance(self):
    @api.custom_jvp
    def f(x):
      return jnp.cos(2 * x) / 2.
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return (f(x), 3 * g)
    f.defjvp(f_jvp)
    def f2(x):
      y, _ = api.jvp(f, (x,), (x,))
      return y
    def f3(x):
      y, _ = api.jvp(f2, (x,), (x,))
      return y
    x = 1.
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f2, (x,), (x,)),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f3, (x,), (x,)),
                        check_dtypes=False)
  def test_python_control_flow(self):
    @api.custom_jvp
    def f(x):
      if x > 0:
        return jnp.sin(x)
      else:
        return jnp.cos(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      if x > 0:
        return f(x), 2 * g
      else:
        return f(x), 3 * g
    f.defjvp(f_jvp)
    x = 2.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(f(-x), jnp.cos(-x))
    self.assertAllClose(api.jvp(f, (x,), (1.,)),
                        (jnp.sin(x), 2.),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (-x,), (1.,)),
                        (jnp.cos(-x), 3.),
                        check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), 2., check_dtypes=False)
    self.assertAllClose(api.grad(f)(-x), 3., check_dtypes=False)
  def test_vmap(self):
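    # The custom JVP rule should compose with vmap: batching is applied to both
    # the primal function and its user-defined JVP rule.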
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      assert jnp.ndim(x) == jnp.ndim(g) == 0
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    x = jnp.arange(3.)
    xx = jnp.arange(6.).reshape(2, 3)
    # vmap of f
    self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
    self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
    # vmap of jvp of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(f, (x,), (x,)))(x),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.vmap(api.vmap(lambda x: api.jvp(f, (x,), (x,))))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))
    # jvp of vmap of f
    self.assertAllClose(api.jvp(api.vmap(f), (x,), (x,)),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.jvp(api.vmap(api.vmap(f)), (xx,), (xx,)),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))
    # vmap of jvp of vmap of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(api.vmap(f), (x,), (x,)))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))
  def test_jit(self):
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    x = 3.
    # jit
    self.assertAllClose(api.jit(f)(x), jnp.sin(x))
    self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
    # jit of jvp
    self.assertAllClose(api.jit(lambda x: api.jvp(f, (x,), (x,)))(x),
                        (jnp.sin(x), 2 * jnp.cos(x) * x),
                        check_dtypes=False)
    # jvp of jit
    self.assertAllClose(api.jvp(api.jit(f), (x,), (x,)),
                        (jnp.sin(x), 2 * jnp.cos(x) * x),
                        check_dtypes=False)
  def test_pytrees(self):
    @api.custom_jvp
    def f(x):
      return {'b': jnp.sin(x['a'])}
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), {'b': 2 * jnp.cos(x['a']) * g['a']}
    f.defjvp(f_jvp)
    x = {'a': 3.}
    self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        ({'b': jnp.sin(x['a'])},
                         {'b': 2 * jnp.cos(x['a']) * x['a']}),
                        check_dtypes=False)
  def test_kwargs(self):
    # from https://github.com/google/jax/issues/1938
    @api.custom_jvp
    def my_fun(x, y, c=1.):
      return c * (x + y)
    def my_jvp(primals, tangents):
      x, y, c = primals
      t_x, t_y, t_c = tangents
      return my_fun(x, y, c), t_c
    my_fun.defjvp(my_jvp)
    f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
    f(10., 5.)  # doesn't crash
    api.jvp(f, (10., 5.), (1., 1.))  # doesn't crash
  def test_initial_style(self):
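    # "Initial style" refers to calling the custom_jvp function under a
    # control-flow primitive like lax.scan, which stages it out to a jaxpr
    # before differentiation.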
    @api.custom_jvp
    def f(x):
      return 3 * x
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)
    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out
    ans = api.grad(foo)(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.jit(foo))(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.grad(foo))(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.grad(foo))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.grad(api.jit(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.jit(api.grad(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.grad(api.grad(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_initial_style_vmap(self):
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0
      return 3 * x
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)
    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out
    ans = api.vmap(foo)(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.vmap(api.jit(foo))(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.vmap(foo))(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_closed_over_tracers_error_message(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    def f(x):
      @api.custom_jvp
      def g(y):
        return x + y
      def g_jvp(primals, tangents):
        return g(x), 2 * primals[0]
      g.defjvp(g_jvp)
      return g(1.)
    self.assertRaises(ad.CustomJVPException, lambda: api.jvp(f, (3.,), (1.,)))
    self.assertRaises(ad.CustomJVPException, lambda: api.grad(f)(3.))
  def test_nondiff_arg(self):
    @partial(api.custom_jvp, nondiff_argnums=(0,))
    def app(f, x):
      return f(x)
    def app_jvp(f, primals, tangents):
      (x,), (t,) = primals, tangents
      return app(f, x), 3 * t
    app.defjvp(app_jvp)
    ans = app(lambda x: 2 * x, 1)
    expected = 2
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jvp(lambda x: app(lambda y: 2 * y, x), (1.,), (1.,))
    expected = (2., 3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg_jit_tracer(self):
    @partial(api.custom_jvp, nondiff_argnums=(0,))
    def f(x, y):
      return x * y
    def f_jvp(x, primals, tangents):
      (y,), (t_y,) = primals, tangents
      return f(x, y), 5 * t_y
    f.defjvp(f_jvp)
    @jit
    def g(x, y):
      return f(x, y)
    ans = api.jvp(lambda y: g(2., y), (3.,), (1.,))
    expected = (6., 5.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg_hiding_jvp_tracer(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    def f(x):
      @partial(api.custom_jvp, nondiff_argnums=(0,))
      def g(h, x):
        return h(x)
      @g.defjvp
      def g_jvp(h, primals, tangents):
        x, = primals
        t, = tangents
        return g(h, x), 2. * t
      h = lambda y: x + y  # capture x
      return g(h, x)
    with self.assertRaisesRegex(ad.CustomJVPException, "Detected differentiation"):
      api.jvp(f, (2.,), (1.,))
  def test_vmap_axes(self):
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
  def test_pmap(self):
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
  def test_missing_jvp_rule_error_message(self):
    @api.custom_jvp
    def foo(x):
      return x ** 2
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: foo(2))
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: api.jvp(foo, (2.,), (1.,)))
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: api.grad(foo)(2.))
  def test_jvp_rule_inconsistent_pytree_structures_error_message(self):
    @api.custom_jvp
    def f(x):
      return (x**2,)
    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), [2 * x * t, x]
    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce primal and tangent outputs "
            "with equal container (pytree) structures, but got "
            "{} and {} respectively.".format(
                tree_util.tree_structure((1,)),
                tree_util.tree_structure([1, 2]))
        ),
        lambda: api.jvp(f, (2.,), (1.,)))
  def test_primal_tangent_aval_disagreement_error_message(self):
    @api.custom_jvp
    def f(x):
      return x ** 2
    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), jnp.reshape(t, (1,))
    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce primal and tangent outputs "
            "with equal shapes and dtypes, but got float32[] and float32[1] "
            "respectively."),
        lambda: api.jvp(f, (jnp.float32(2.),), (jnp.float32(1.),)))
  def test_jvp_rule_doesnt_return_pair_error_message(self):
    # https://github.com/google/jax/issues/2516
    @api.custom_jvp
    def f(x):
      return x ** 2
    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return t
    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce a pair (list or tuple of length two) "
            "representing primal and tangent outputs, got 1.0"),
        lambda: api.jvp(f, (2.,), (1.,)))
  def test_multiple_rule_invocations(self):
    @jax.custom_jvp
    def expit(x):
      return 1 / (1 + lax.exp(-x))
    @expit.defjvp
    def _expit_jvp(primals, tangents):
      (x,), (t,) = primals, tangents
      ans = expit(x)
      t_out = t * ans * (1 - ans)
      return ans, t_out
    def scanned_fun(c, _):
      return [expit(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None
    def foo(x):
      c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)
      return c[-1]
    # just make sure these don't crash
    foo(3.)
    grad(foo)(3.)
    grad(lambda x: jax.vmap(foo)(x).sum())(jnp.arange(3.))
  def test_hard_stuff(self):
    arr = jnp.ones((5, 2, 2))
    api.jit(jax.vmap(jnp.linalg.det))(arr)  # doesn't crash
  def test_hard_stuff2(self):
    @jax.custom_jvp
    def f(x):
      return lax.tie_in(x, np.zeros(x.shape, x.dtype))
    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), t
    # don't crash
    jax.jit(jax.vmap(f))(jnp.arange(3.))
    jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
    jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
    jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
    jax.jvp(jax.vmap(f), (jnp.arange(3.),), (jnp.ones(3),))
  def test_hard_stuff3(self):
    @jax.custom_jvp
    def relu(x):
      return jnp.maximum(x, 0)
    @relu.defjvp
    def _relu_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return relu(x), lax.select(x > 0, t, lax.full_like(t, 0))
    def scanned_fun(c, _):
      return [relu(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None
    def f(x):
      c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)
      return c[-1]
    # don't crash
    jax.jit(jax.vmap(f))(jnp.arange(3.))
    jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
    jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
    jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
    jax.jvp(jax.jit(jax.vmap(f)), (jnp.arange(3.),), (jnp.ones(3),))
  def test_eval_shape(self):
    @jax.custom_jvp
    def expit(x):
      return 1 / (1 + lax.exp(-x))
    @expit.defjvp
    def _expit_jvp(primals, tangents):
      (x,), (t,) = primals, tangents
      ans = expit(x)
      t_out = t * ans * (1 - ans)
      return ans, t_out
    # don't crash
    api.eval_shape(expit, jnp.ones((2, 3)))
    api.eval_shape(api.grad(lambda x: expit(x).sum()), jnp.ones((2, 3)))
  def test_jaxpr_zeros(self):
    # from https://github.com/google/jax/issues/2657
    @api.custom_jvp
    def f(A, b):
      return A @ b
    def f_jvp(primals, tangents):
      A, b = primals
      dA, db = tangents
      z = f(A, b)
      dz = A @ db + dA @ b
      return z, dz
    f.defjvp(f_jvp)
    def experiment(theta):
      def step(q, _):
        z = f(jnp.eye(3), jnp.ones(3) * theta)
        q += z[0]
        return q, q
      q = 0.
      q, _ = lax.scan(step, q, None, 4)
      return q
    grad(experiment)(1.)  # doesn't crash
  def test_linear_in_scan(self):
    @api.custom_jvp
    def f(x):
      return -x
    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      x_dot, = tangents
      return f(x), f(x_dot)
    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out
    ans = api.grad(foo)(3.)
    expected = -1.
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_custom_jvps_first_rule_is_none(self):
    # https://github.com/google/jax/issues/3389
    @api.custom_jvp
    def f(x, y):
      return x ** 2 * y
    f.defjvps(None, lambda x_dot, primal_out, x, y: 2 * x * y * x_dot)
    ans = grad(f, 1)(2., 3.)  # doesn't crash
    expected = 12.
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_concurrent_initial_style(self):
    # https://github.com/google/jax/issues/3843
    def unroll(param, sequence):
      def scan_f(prev_state, inputs):
        return prev_state, jax.nn.sigmoid(param * inputs)
      return jnp.sum(jax.lax.scan(scan_f, None, sequence)[1])
    def run():
      return jax.grad(unroll)(jnp.array(1.0), jnp.array([1.0]))
    expected = run()
    # we just don't want this to crash
    n_workers = 2
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers) as e:
      futures = []
      for _ in range(n_workers):
        futures.append(e.submit(run))
      results = [f.result() for f in futures]
    for ans in results:
      self.assertAllClose(ans, expected)
  def test_nondiff_argnums_vmap_tracer(self):
    # https://github.com/google/jax/issues/3964
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    @partial(jax.custom_jvp, nondiff_argnums=(0, 2))
    def sample(shape, param, seed):
      return jax.random.uniform(key=seed, shape=shape, minval=param)
    @sample.defjvp
    def sample_jvp(shape, seed, primals, tangents):
      param, = primals
      dparam, = tangents
      dparam = jnp.broadcast_to(dparam, shape)
      samples = sample(shape, param, seed)
      return samples, samples * dparam  # dummy jvp for proof of concept
    # check these don't crash
    jax.vmap(lambda seed: sample((2,3), 1., seed))(
        jax.random.split(jax.random.PRNGKey(1), 10))
    jax.jvp(lambda x: sample((2, 3), x, jax.random.PRNGKey(1)),
            (1.,), (1.,))
  def test_fun_with_nested_calls_2(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    def call(f, *args):
      f = api.custom_jvp(f)
      f.defjvp(lambda primals, tangents: (f(*primals), sum(tangents)))
      return f(*args)
    def fun_with_nested_calls_2(x):
      def bar(y):
        def baz(w):
          q = call(lambda x: y, x)
          q = q + call(lambda: y)
          q = q + call(lambda y: w + y, y)
          q = call(lambda w: call(jnp.sin, x) * y, 1.0) + q
          return q
        return api.jit(baz)(x)
      return call(bar, x)
    # test these don't crash
    self.assertAllClose(api.jit(fun_with_nested_calls_2)(3.),
                        fun_with_nested_calls_2(3.))
    api.vmap(fun_with_nested_calls_2)(jnp.arange(3.))
  def test_closure_with_vmap(self):
    if not config.omnistaging_enabled:
      raise unittest.SkipTest("test only works with omnistaging")
    # https://github.com/google/jax/issues/3822
    alpha = np.float32(2.)
    def sample(seed):
      @api.custom_jvp
      def f(alpha):
        return jax.random.gamma(seed, alpha, shape=[])
      @f.defjvp
      def f_jvp(primal, tangent):
        alpha = primal
        dalpha = tangent
        sample = f(alpha)
        partial_alpha = lax.random_gamma_grad(alpha, sample)
        return sample, partial_alpha * dalpha
      return f(alpha)
    api.vmap(sample)(jax.random.split(jax.random.PRNGKey(1), 3))  # don't crash
  def test_float0(self):
    @api.custom_jvp
    def f(x, y):
      return x, y
    def f_jvp(primals, _):
      # we need a defined (non-float0) tangent to trigger the rule
      return primals, (2., 1)
    f.defjvp(f_jvp)
    primals = (2., 3)
    tangents = (np.ones(()), np.zeros((), float0),)
    expected_tangents = (2., np.zeros((), float0))
    self.assertArraysEqual(api.jvp(f, primals, tangents),
                           (primals, expected_tangents))
  def test_float0_initial_style(self):
    @api.custom_jvp
    def f(x, y):
      return x, y
    def f_jvp(primals, _):
      x, y = primals
      return (x, y), (2., 1)
    f.defjvp(f_jvp)
    def foo(x, y):
      out, _ = lax.scan(lambda c, _: (f(*c), None), (x, y), None, length=1)
      return out
    primals = (2., 3)
    tangents = (np.ones(()), np.zeros((), float0),)
    expected_tangents = (2., np.zeros((), float0))
    self.assertArraysEqual(api.jvp(foo, primals, tangents),
                           (primals, expected_tangents))
  def test_remat(self):
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    @api.remat
    def g(x):
      return f(f(x))
    ans = g(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(g)(2.)
    expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_higher_order(self):
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    def g(x):
      return f(f(x))
    ans = api.grad(api.grad(api.remat(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.remat(api.grad(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)
    expected = api.grad(api.grad(api.grad(g)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_initial_style_vmap_2(self):
    # This is like test_initial_style_vmap except the primal function closes
    # over an array constant.
    y = jnp.array([1., 2., 3.])
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0
      return 3 * x * jnp.sum(y)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)
    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
class CustomVJPTest(jtu.JaxTestCase):
  def test_basic(self):
    @api.custom_vjp
    def f(x):
      return jnp.sin(x)
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    x = 3.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
    self.assertAllClose(api.value_and_grad(f)(x),
                        (jnp.sin(x), 2 * jnp.cos(x)))
  def test_invariance(self):
    @api.custom_vjp
    def f(x):
      return jnp.cos(2 * x) / 2.
    def f_fwd(x):
      return (f(x), x)
    def f_rev(x, g):
      return (g * 3,)
    f.defvjp(f_fwd, f_rev)
    def f2(x):
      y, _ = api.value_and_grad(f)(x)
      return y
    def f3(x):
      y, _ = api.value_and_grad(f2)(x)
      return y
    x = 1.
    self.assertAllClose(f(x), f2(x), check_dtypes=False)
    self.assertAllClose(f(x), f3(x), check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), api.grad(f2)(x),
                        check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), api.grad(f3)(x),
                        check_dtypes=False)
  def test_python_control_flow(self):
    @api.custom_vjp
    def f(x):
      if x > 0:
        return jnp.sin(x)
      else:
        return jnp.cos(x)
    def f_fwd(x):
      if x > 0:
        return f(x), x
      else:
        return f(x), x
    def f_rev(x, g):
      if x > 0:
        return (2 * g,)
      else:
        return (3 * g,)
    f.defvjp(f_fwd, f_rev)
    x = 2.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(f(-x), jnp.cos(-x))
    self.assertAllClose(api.value_and_grad(f)(x), (jnp.sin(x), 2.),
                        check_dtypes=False)
    self.assertAllClose(api.value_and_grad(f)(-x), (jnp.cos(-x), 3.),
                        check_dtypes=False)
  def test_vmap(self):
    @api.custom_vjp
    def f(x):
      assert jnp.ndim(x) == 0
      return jnp.sin(x)
    def f_fwd(x):
      assert jnp.ndim(x) == 0
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    x = jnp.arange(3.)
    xx = jnp.arange(6.).reshape(2, 3)
    # vmap of f
    self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
    self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
    # vmap of grad of f
    self.assertAllClose(api.vmap(api.grad(f))(x), 2 * jnp.cos(x))
    self.assertAllClose(api.vmap(api.value_and_grad(f))(x),
                        (jnp.sin(x), 2 * jnp.cos(x)))
    self.assertAllClose(api.vmap(api.vmap(api.grad(f)))(xx), 2 * jnp.cos(xx))
    self.assertAllClose(api.vmap(api.vmap(api.value_and_grad(f)))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx)))
    # grad of vmap of f
    self.assertAllClose(api.grad(lambda x: api.vmap(f)(x).sum())(x),
                        2 * jnp.cos(x))
    self.assertAllClose(api.grad(lambda x: api.vmap(api.vmap(f))(x).sum())(xx),
                        2 * jnp.cos(xx))
    # vmap of grad of vmap of f
    self.assertAllClose(api.vmap(api.grad(lambda x: api.vmap(f)(x).sum()))(xx),
                        2 * jnp.cos(xx))
  def test_jit(self):
    @api.custom_vjp
    def f(x):
      return jnp.sin(x)
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    x = 3.
    # jit
    self.assertAllClose(api.jit(f)(x), jnp.sin(x))
    self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
    # jit of grad
    self.assertAllClose(api.jit(api.grad(f))(x), 2 * jnp.cos(x),
                        check_dtypes=False)
    # grad of jit
    self.assertAllClose(api.grad(api.jit(f))(x), 2 * jnp.cos(x),
                        check_dtypes=False)
  def test_pytrees(self):
    @api.custom_vjp
    def f(x):
      return {'b': jnp.sin(x['a'])}
    def f_fwd(x):
      return f(x), {'r': jnp.cos(x['a'])}
    def f_bwd(res, g):
      cos_x = res['r']
      return ({'a': 2 * cos_x * g['b']},)
    f.defvjp(f_fwd, f_bwd)
    x = {'a': 3.}
    self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
    self.assertAllClose(api.grad(lambda x: f(x)['b'])(x),
                        {'a': 2 * jnp.cos(x['a'])})
  def test_jvp_error(self):
    @api.custom_vjp
    def f(x):
      return jnp.sin(x)
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    self.assertRaisesRegex(
        TypeError,
        r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
        lambda: api.jvp(f, (3.,), (1.,)))
    self.assertRaisesRegex(
        TypeError,
        r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
        lambda: api.jvp(api.vmap(f), (jnp.arange(3.),), (jnp.ones(3),)))
    self.assertRaisesRegex(
        TypeError,
        r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
        lambda: api.jvp(jit(f), (3.,), (1.,)))
  def test_kwargs(self):
    # from https://github.com/google/jax/issues/1938
    @api.custom_vjp
    def my_fun(x, y, c=1.):
      return c * (x + y)
    my_fun.defvjp(lambda x, y, c=1.: (my_fun(c, y, c), None),
                  lambda _, g: (g, g, g))
    f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
    f(10., 5.)  # doesn't crash
    api.grad(f)(10., 5.)  # doesn't crash
  def test_initial_style(self):
    @api.custom_vjp
    def f(x):
      return jnp.sin(x)
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out
    ans = api.grad(foo)(3.)
    expected = 2. * jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.grad(foo))(3.)
    expected = -2. * jnp.sin(3.)
    self.assertAllClose(ans, expected)
  def test_initial_style_vmap(self):
    @api.custom_vjp
    def f(x):
      assert jnp.ndim(x) == 0
      return 3 * x
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out
    ans = api.vmap(foo)(jnp.arange(3.))
    expected = 3. * jnp.arange(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
    expected = 2. * jnp.cos(jnp.arange(3.))
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg(self):
    @partial(api.custom_vjp, nondiff_argnums=(0,))
    def app(f, x):
      return f(x)
    def app_fwd(f, x):
      return app(f, x), jnp.cos(x)
    def app_rev(f, cos_x, g):
      return (cos_x * g,)
    app.defvjp(app_fwd, app_rev)
    ans = app(lambda x: 2 * x, 1)
    expected = 2
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.value_and_grad(lambda x: app(lambda y: 2 * y, x))(1.)
    expected = (2., jnp.cos(1.))
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg_tracer(self):
    # This test is now skipped because we decided not to support this behavior
    # anymore (namely, nondiff args can't be tracers), but
    # test_closed_over_tracer is a replacement test for analogous behavior that
    # we do support
    raise unittest.SkipTest("removed support for tracers in nondiff args")
    @partial(api.custom_vjp, nondiff_argnums=(0,))
    def f(x, y):
      return x * y
    def f_fwd(x, y):
      return f(x, y), jnp.cos(y)
    def f_rev(x, cos_y, g):
      return (cos_y * g,)
    f.defvjp(f_fwd, f_rev)
    @jit
    def g(x, y):
      return f(x, y)
    ans = g(2, 3.)
    expected = 6.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(g, 1)(2., 3.)
    expected = jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_closed_over_tracer(self):
    # This test is similar to test_nondiff_arg_tracer except it uses lexical
    # closure rather than the nondiff_argnums mechanism. We decided to disallow
    # tracers in nondiff_argnums to greatly simplify bookkeeping while still
    # supporting the cases for which it is necessary.
    def outer(x):
      @api.custom_vjp
      def f(y):
        return x * y
      def f_fwd(y):
        return f(y), jnp.cos(y)
      def f_rev(cos_y, g):
        return (cos_y * g,)
      f.defvjp(f_fwd, f_rev)
      return f
    @jit
    def g(x, y):
      return outer(x)(y)
    ans = g(2, 3.)
    expected = 6.
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(g, 1)(2., 3.)
    expected = jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_closed_over_tracer2(self):
    def outer(x):
      @api.custom_vjp
      def f(y):
        return x * y
      def f_fwd(y):
        return f(y), jnp.cos(y)
      def f_rev(cos_y, g):
        return (cos_y * g,)
      f.defvjp(f_fwd, f_rev)
      return f
    @api.vmap
    def g(x):
      return outer(x)(3.)
    ans = g(np.arange(3.))
    expected = np.arange(3.) * 3
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_closed_over_tracer3(self):
    def outer(x):
      @api.custom_vjp
      def f(y):
        return x * y
      def f_fwd(y):
        return f(y), (x, jnp.cos(y))
      def f_rev(res, g):
        x, cos_y = res
        return (cos_y * g * x,)
      f.defvjp(f_fwd, f_rev)
      return api.grad(f)
    @api.vmap
    def g(x):
      return outer(x)(3.)
    ans = g(np.arange(3.))
    expected = np.cos(3.) * np.arange(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg_tracer_error(self):
    # This is similar to the old (now skipped) test_nondiff_arg_tracer, except
    # we're testing for the error message that that usage pattern now raises.
    @partial(api.custom_vjp, nondiff_argnums=(0,))
    def f(x, y):
      return x * y
    def f_fwd(x, y):
      return f(x, y), jnp.cos(y)
    def f_rev(x, cos_y, g):
      return (cos_y * g,)
    f.defvjp(f_fwd, f_rev)
    @jit
    def g(x, y):
      return f(x, y)
    with self.assertRaisesRegex(core.UnexpectedTracerError, "custom_vjp"):
      _ = g(2, 3.)
    with self.assertRaisesRegex(core.UnexpectedTracerError, "custom_vjp"):
      _ = api.grad(g, 1)(2., 3.)
  def test_vmap_axes(self):
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
  def test_pmap(self):
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test
  def test_missing_vjp_rule_error(self):
    @api.custom_vjp
    def foo(x):
      return x ** 2
    self.assertRaisesRegex(
        AttributeError,
        r"No VJP defined for custom_vjp function foo using defvjp.",
        lambda: foo(2))
    self.assertRaisesRegex(
        AttributeError,
        r"No VJP defined for custom_vjp function foo using defvjp.",
        lambda: api.grad(foo)(2.))
  def test_vjp_rule_inconsistent_pytree_structures_error(self):
    @api.custom_vjp
    def f(x):
      return x
    def foo_fwd(x):
      return x, None
    def foo_bwd(_, g):
      return (g, g)
    f.defvjp(foo_fwd, foo_bwd)
    f(2)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom VJP rule must produce an output with the same container "
            "(pytree) structure as the args tuple of the primal function, "
            "and in particular must produce a tuple of length equal to the "
            "number of arguments to the primal function, but got VJP output "
            "structure {} for primal input structure {}.".format(
                tree_util.tree_structure((1, 1)),
                tree_util.tree_structure((1,)))
        ),
        lambda: api.grad(f)(2.))
  def test_vjp_bwd_returns_non_tuple_error(self):
    @api.custom_vjp
    def f(x):
      return x
    def foo_fwd(x):
      return x, None
    def foo_bwd(_, g):
      return 2. * g  # Should be a tuple
    f.defvjp(foo_fwd, foo_bwd)
    with self.assertRaisesRegex(TypeError, "Custom VJP rule .* must produce a tuple"):
      api.grad(f)(3.)
  def test_issue2511(self):
    arr = jnp.ones((5, 2, 2))
    foo = lambda x: api.vmap(jnp.linalg.det, (0,))(x)
    api.jit(foo)(arr)  # doesn't crash
  def test_lowering_out_of_traces(self):
    # https://github.com/google/jax/issues/2578
    class F(collections.namedtuple("F", ["a"])):
      def __call__(self, x):
        return jax.nn.relu(self.a) * x
    @jax.jit
    def g(f, x):
      return f(x)
    jax.grad(g, argnums=(1,))(F(2.0), 0.)  # doesn't crash
  def test_nondiff_argnums_stop_gradient(self):
    # This test is now skipped because we decided not to support this behavior
    # anymore (namely, nondiff args can't be tracers), but test_clip_gradient is
    # a replacement showing behavior we do support.
    raise unittest.SkipTest("removed support for tracers in nondiff args")
    # https://github.com/google/jax/issues/2784
    @partial(api.custom_vjp, nondiff_argnums=(0, 1))
    def _clip_gradient(lo, hi, x):
      return x  # identity function
    def clip_gradient_fwd(lo, hi, x):
      # return x, None
      return x, (hi, )
    def clip_gradient_bwd(lo, hi, _, g):
      return (jnp.clip(g, lo, hi),)
    _clip_gradient.defvjp(clip_gradient_fwd, clip_gradient_bwd)
    def clip_gradient(x):
      lo = -1
      hi = x + 1  # causes things to break
      return _clip_gradient(lo, hi, x)
    jax.grad(clip_gradient)(1.)  # doesn't crash
  def test_clip_gradient(self):
    # https://github.com/google/jax/issues/2784
    @api.custom_vjp
    def _clip_gradient(lo, hi, x):
      return x  # identity function when not differentiating
    def clip_gradient_fwd(lo, hi, x):
      return x, (lo, hi,)
    def clip_gradient_bwd(res, g):
      lo, hi = res
      return (None, None, jnp.clip(g, lo, hi),)
    _clip_gradient.defvjp(clip_gradient_fwd, clip_gradient_bwd)
    def clip_gradient(x):
      lo = -0.1
      hi = x + 0.1
      return _clip_gradient(lo, hi, x)
    g = jax.grad(clip_gradient)(0.1)  # doesn't crash
    self.assertAllClose(g, jnp.array(0.2))
  def test_nestable_vjp(self):
    # Verify that https://github.com/google/jax/issues/3667 is resolved.
    def f(x):
      return x ** 2
    @api.custom_vjp
    def g(x):
      return f(x)
    def g_fwd(x):
      y, f_vjp = api.vjp(f, x)
      return y, f_vjp
    def g_bwd(f_vjp, y_bar):
      return f_vjp(y_bar)
    g.defvjp(g_fwd, g_bwd)
    # Check that VJP can be nested in simple situations.  For this to pass,
    # vjp has to return a PyTree.
    _, g_vjp = api.vjp(g, 1.0)
    y, = g_vjp(1.0)
    self.assertAllClose(y, jnp.array(2.0))
    # Check that VJP can be nested in complex situations.  For this to pass,
    # vjp can't treat the closed-over tracer x as a static argument.
    @jit
    def z(x):
      _, g_vjp = api.vjp(g, x)
      return g_vjp
    y, = z(1.0)(3.0)
    self.assertAllClose(y, jnp.array(6.0))
  def test_initial_style_vmap_2(self):
    # https://github.com/google/jax/issues/4173
    x = jnp.ones((10, 3))
    # Create the custom function
    @api.custom_vjp
    def custom_fun(x):
      return x.sum()
    def forward(x):
      return x.sum(), (jnp.ones_like(x),)
    def backward(res, g):
      return g * res[0],
    custom_fun.defvjp(forward, backward)
    def train_fun(x):
      def summed_fun(x):
        return api.vmap(custom_fun)(x).sum()
      return api.grad(summed_fun)(x)
    def scan_body(carry, inputs):
      x = carry
      return carry, train_fun(x)
    scan_range = jnp.arange(4)
    lax.scan(scan_body, x, scan_range)  # don't crash
  def test_initial_style_vmap_3(self):
    # This is like test_initial_style_vmap except the primal function closes
    # over an array constant.
    y = jnp.array([1., 2., 3.])
    @api.custom_vjp
    def f(x):
      assert jnp.ndim(x) == 0
      return 3 * x * jnp.sum(y)
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out
    ans = api.vmap(foo)(jnp.arange(3.))
    expected = 3. * jnp.arange(3.) * 6
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
    expected = 2. * jnp.cos(jnp.arange(3.))
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_bwd_closes_over_tracer(self):
    def f(y):
      @jax.custom_vjp
      def f(x):
        return 2. * jnp.sin(x)
      def fwd(x):
        return f(x), ()
      def bwd(_, g):
        return (2. * jnp.cos(y) * g,)  # capture!
      f.defvjp(fwd, bwd)
      return jax.grad(f)(1.)
    ans = jax.jit(f)(2.)
    self.assertAllClose(ans, 2. * jnp.cos(2.))
    ans = jax.vmap(f)(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.grad(f)(4.)
    self.assertAllClose(ans, -2. * jnp.sin(4.))
  def test_fwd_closes_over_tracer(self):
    def f(y):
      @jax.custom_vjp
      def f(x):
        return 2. * jnp.sin(x)
      def fwd(x):
        return f(x), y
      def bwd(y, g):
        return (2. * jnp.cos(y) * g,)  # capture!
      f.defvjp(fwd, bwd)
      return jax.grad(f)(1.)
    ans = jax.jit(f)(2.)
    self.assertAllClose(ans, 2. * jnp.cos(2.))
    ans = jax.vmap(f)(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
    self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
    ans = jax.grad(f)(4.)
    self.assertAllClose(ans, -2. * jnp.sin(4.))
  def test_float0(self):
    @api.custom_vjp
    def f(x, _):
      return x
    def f_fwd(x, _):
      # we need a defined (non-float0) tangent to trigger the rule
      return x, (2., 1)
    def f_rev(*_):
      return (2., 1)
    f.defvjp(f_fwd, f_rev)
    x = 2.
    y = 3
    self.assertEqual(api.grad(f, allow_int=True, argnums=(0, 1))(x, y),
                     (2., np.zeros(shape=(), dtype=float0)))
  def test_float0_initial_style(self):
    @api.custom_vjp
    def f(x):
      return x
    def f_fwd(x):
      return x, (2., x)
    def f_rev(*_):
      return ((2., 1),)
    f.defvjp(f_fwd, f_rev)
    def foo(x, y):
      out, _ = lax.scan(lambda c, _: (f(c), None), (x, y), None, length=1)
      return out[0]
    x = 2.
    y = 3
    self.assertEqual(api.grad(foo, allow_int=True, argnums=(0, 1))(x, y),
                     (2., np.zeros(shape=(), dtype=float0)))
  def test_remat(self):
    @api.custom_vjp
    def f(x):
      return jnp.sin(x)
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    @api.remat
    def g(x):
      return f(f(x))
    ans = g(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(g)(2.)
    expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_higher_order(self):
    @api.custom_vjp
    def f(x):
      return jnp.sin(x)
    def f_fwd(x):
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    def g(x):
      return f(f(x))
    ans = api.grad(api.grad(api.remat(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.remat(api.grad(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)
    expected = api.grad(api.grad(api.grad(g)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_bwd_nones(self):
    @api.custom_vjp
    def f(x, y):
      return x * jnp.sin(y)
    def f_fwd(x, y):
      return f(x, y), jnp.cos(y)
    def f_rev(cos, g):
      return (None, 2 * cos * g)
    f.defvjp(f_fwd, f_rev)
    ans = api.grad(lambda x: f(x, x))(3.)
    expected = 2 * jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_bwd_nones_vmap(self):
    @api.custom_vjp
    def f(x, y):
      return x * jnp.sin(y)
    def f_fwd(x, y):
      return f(x, y), jnp.cos(y)
    def f_rev(cos, g):
      return (None, 2 * cos * g)
    f.defvjp(f_fwd, f_rev)
    ans = api.grad(lambda x: api.vmap(f)(x, x).sum())(jnp.arange(3.))
    expected = 2 * jnp.cos(jnp.arange(3.))
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_bwd_nones_pytree(self):
    @api.custom_vjp
    def f(xs, y):
      x1, x2 = xs
      return x1 * x2 * jnp.sin(y)
    def f_fwd(xs, y):
      return f(xs, y), jnp.cos(y)
    def f_rev(cos, g):
      return (None, 2 * cos * g)
    f.defvjp(f_fwd, f_rev)
    ans = api.grad(lambda x: f((x, x), x))(3.)
    expected = 2 * jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_custom_vjp_closure_4521(self):
    # https://github.com/google/jax/issues/4521
    @api.custom_vjp
    def g(x, y):
      return None
    def g_fwd(x, y):
      return None, y
    def g_bwd(residuals, z_bar):
      assert False
    g.defvjp(g_fwd, g_bwd)
    def f(xs, y):
      v_g = api.vmap(g, in_axes=(0, None), out_axes=None)
      v_g(xs, y)
    def scan_body(xs, _):
      y = jnp.zeros(1)
      _, vjp_f = api.vjp(f, xs, y)
      vjp_f(None)
      return xs, None
    lax.scan(scan_body, jnp.ones(5), None, 100)  # doesn't crash
  def test_float0_bwd_none(self):
    @api.custom_vjp
    def f(i, x):
      return jnp.sin(x)
    def f_fwd(i, x):
      return f(i, x), jnp.cos(x)
    def f_rev(cos_x, g):
      return (None, 2 * cos_x * g)
    f.defvjp(f_fwd, f_rev)
    ans = api.grad(f, 1)(jnp.array([1, 2]), 3.)  # doesn't crash
    expected = 2 * jnp.cos(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_custom_gradient(self):
    @api.custom_gradient
    def f(x):
      return x ** 2, lambda g: (g * x,)
    self.assertAllClose(f(3.), 9., check_dtypes=False)
    self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
    self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
  def test_custom_gradient_2(self):
    @api.custom_gradient
    def f(x, y):
      return x * y, lambda g: (y, x)
    self.assertAllClose(f(3., 4.), 12., check_dtypes=False)
    self.assertAllClose(api.grad(f, argnums=(0, 1))(3., 4.), (4., 3.),
                        check_dtypes=False)
  def test_custom_gradient_3(self):
    @api.custom_gradient
    def f(x):
      vjp = lambda g: (jnp.cos(x) * jnp.array([3., 4., 5.]),)
      return jnp.sum(jnp.sin(x)), vjp
    self.assertAllClose(f(jnp.arange(3)), jnp.sum(jnp.sin(jnp.arange(3.))),
                        check_dtypes=False)
    self.assertAllClose(
        api.grad(f)(jnp.arange(3.)),
        api.grad(lambda x: jnp.sum(jnp.sin(x)))(jnp.arange(3.)) * jnp.array([3., 4., 5.]),
        check_dtypes=False)
  def test_custom_gradient_can_return_singleton_value_in_vjp(self):
    @api.custom_gradient
    def f(x):
      return x ** 2, lambda g: g * x
    self.assertAllClose(f(3.), 9., check_dtypes=False)
    self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
    self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
  def test_closure_convert(self):
    def minimize(objective_fn, x0):
      converted_fn, aux_args = api.closure_convert(objective_fn, x0)
      return _minimize(converted_fn, x0, *aux_args)
    @partial(api.custom_vjp, nondiff_argnums=(0,))
    def _minimize(objective_fn, x0, *args):
      _ = objective_fn(x0, *args)
      return jnp.cos(x0)
    def fwd(objective_fn, x0, *args):
      y = _minimize(objective_fn, x0, *args)
      return y, (y, args)
    def rev(objective_fn, res, g):
      y, args = res
      x0_bar = 17. * y
      args_bars = [42. * a for a in args]
      return (x0_bar, *args_bars)
    _minimize.defvjp(fwd, rev)
    def obj(c, x):
      return jnp.sum((x - c) ** 2.)
    def solve(c, x):
      def closure(x):
        return obj(c, x)
      return jnp.sum(minimize(closure, x))
    c, x = jnp.ones(2), jnp.zeros(2)
    self.assertAllClose(solve(c, x), 2.0, check_dtypes=False)
    g_c, g_x = api.grad(solve, argnums=(0, 1))(c, x)
    self.assertAllClose(g_c, 42. * jnp.ones(2), check_dtypes=False)
    self.assertAllClose(g_x, 17. * jnp.ones(2), check_dtypes=False)
class CustomTransposeTest(jtu.JaxTestCase):
  def transpose(self, f, x_example):
    def transposed(y):
      x, = api.linear_transpose(f, x_example)(y)
      return x
    return transposed
  def test_linear_call(self):
    def f(x, y):
      def fn(r, x): return x / r
      def tp(r, t): return t / r
      return x + api.linear_call(fn, tp, y, x)
    def f_ref(x, y):
      return x + x / y
    x = jnp.ones(2) * 6.
    y = jnp.ones(2) * 3.
    self.assertAllClose(f(x, y), f_ref(x, y))
    f1     = lambda x: f(x, y)
    f1_ref = lambda x: f_ref(x, y)
    self.assertAllClose(self.transpose(f1,     x)(x),
                        self.transpose(f1_ref, x)(x))
  def test_linear_call_incorrect_transpose(self):
    def f(x, y):
      def fn(r, x): return x / r
      def tp(r, t): return t / (2. * r)  # nb: not the true transpose
      return x + api.linear_call(fn, tp, y, x)
    def f_ref(x, y):
      return x + x / y
    x = jnp.ones(2) * 6.
    y = jnp.ones(2) * 3.
    self.assertAllClose(f(x, y), f_ref(x, y))
    f1     = lambda x: f(x, y)
    f1_ref = lambda x: f_ref(x, 2. * y)  # nb: double the reference divisor
    self.assertAllClose(self.transpose(f1,     x)(x),
                        self.transpose(f1_ref, x)(x))
  def test_linear_call_transpose_transpose_transpose(self):
    def fn(r, x): return x / r
    def tp(r, t): return t / (2. * r)  # nb: untrue transpose
    def f_(x, y):
      return x + api.linear_call(fn, tp, y, x)
    x = jnp.ones(2) * 6.
    y = jnp.ones(2) * 3.
    f = lambda x: f_(x, y)
    ft   = self.transpose(f,   x)
    ftt  = self.transpose(ft,  x)
    fttt = self.transpose(ftt, x)
    self.assertAllClose(ft(x), x + tp(y, x))
    self.assertAllClose(f(x),  ftt(x))
    self.assertAllClose(ft(x), fttt(x))
  def test_linear_call_scalar_to_vector(self):
    def f(c, x):
      def fn(_, x):
        return [x, x]
      def tp(_, t):
        t1, t2 = t
        return t1 + t2
      return api.linear_call(fn, tp, (), c * x)
    def f_ref(c, x):
      return [c * x, c * x]
    c, x = 2., 3.
    t = [4., 5.]
    self.assertAllClose(f(c, x), f_ref(c, x))
    self.assertAllClose(self.transpose(partial(f,     c), x)(t),
                        self.transpose(partial(f_ref, c), x)(t))
  def test_linear_call_nested(self):
    # identity function with an untrue transpose of 0
    def id_(x):
      def f(_, x): return x
      def t(_, t): return 0.
      return api.linear_call(f, t, (), x)
    # identity function with an untrue transpose of 7, and where both
    # forward and transpose have custom transpositions that should
    # never end up invoked.
    def f(x):
      def f_(_, x): return id_(x)
      def t_(_, t): return id_(7.)
      return api.linear_call(f_, t_, (), x)
    x = 5.
    id_t  = self.transpose(id_,  x)
    id_tt = self.transpose(id_t, x)
    ft   = self.transpose(f,    x)
    ftt  = self.transpose(ft,   x)
    fttt = self.transpose(ftt,  x)
    self.assertAllClose(id_(x),   x)
    self.assertAllClose(id_t(x),  0.)
    self.assertAllClose(id_tt(x), x)
    self.assertAllClose(f(x),    x)
    self.assertAllClose(ft(x),   7.)
    self.assertAllClose(ftt(x),  x)
    self.assertAllClose(fttt(x), 7.)
class InvertibleADTest(jtu.JaxTestCase):
  def test_invertible_basic(self):
    def f(x):
      return (jnp.exp(x) * 4) * x
    finv = jax.invertible(f)
    x = jnp.ones((5,))
    if config.omnistaging_enabled:
      expected = """
      { lambda  ; a b.
        let c = exp a
            d = mul c 4.0
            e = mul d a
            f = mul b a
            g = div e a
            h = mul b g
            i = div g 4.0
            j = mul f 4.0
            _ = log i
            k = mul j i
            l = add_any h k
        in (l,) }
      """
    else:
      expected = """
      { lambda  ; a b.
        let c = exp a
            d = mul c 4.0
            e = mul d a
            f = div e a
            g = mul b f
            h = mul b a
            i = mul h 4.0
            j = div f 4.0
            k = mul i j
            l = add_any g k
        in (l,) }
      """
    jaxpr = jax.make_jaxpr(lambda p, ct: jax.vjp(finv, p)[1](ct))(x, x)
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))
    self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x)))(x),
                        jax.value_and_grad(lambda x: np.sum(finv(x)))(x),
                        check_dtypes=True)
  def test_invertible_blocks(self):
    # NB: This is the reversible ResNet block
    def mk_reversible_block(f, g):
      @jax.custom_ivjp
      def rev_block(x1, x2):
        y1 = f(x2) + x1
        y2 = g(y1) + x2
        return y1, y2
      @rev_block.defivjp
      def rev_block_ivjp(xs, ys, dys):
        (y1, y2) = ys
        (dy1, dy2) = dys
        dgo, dx2 = dy2, dy2
        go, gvjp = jax.vjp(g, y1)
        dy1 += gvjp(dgo)[0]
        del gvjp
        x2 = y2 - go
        dfo, dx1 = dy1, dy1
        fo, fvjp = jax.vjp(f, x2)
        dx2 += fvjp(dfo)[0]
        del fvjp
        x1 = y1 - fo
        return (x1, x2), (dx1, dx2)
      return rev_block
    rev_block = mk_reversible_block(jnp.sin, jnp.cos)
    def g(x1, x2):
      for i in range(2):
        x1, x2 = rev_block(x1, x2)
      return x1, x2
    def reduce(f, x1, x2):
      y1, y2 = f(x1, x2)
      return np.sum(y1) + np.sum(y2)
    x = np.ones((1,))
    # FIXME: This breaks when argnums is left as default (i.e. 0), because JVP prunes
    #        zero tangents from call primitives.
    self.assertAllClose(jax.value_and_grad(partial(reduce, jax.invertible(g)), argnums=(0, 1))(x, x + 2),
                        jax.value_and_grad(partial(reduce, g), argnums=(0, 1))(x, x + 2),
                        check_dtypes=True)
  def test_invertible_partial_diff(self):
    # Check that we don't have to differentiate with respect to inputs
    # of the invertible function.
    def f(x, y):
      return (jnp.exp(x) * 4) * x, y + 4
    finv = jax.invertible(f)
    o = np.ones((5,))
    self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x, o)[0]))(o),
                        jax.value_and_grad(lambda x: np.sum(finv(x, o)[0]))(o),
                        check_dtypes=True)
  def test_invertible_pytree(self):
    def f(x, y):
      return jnp.exp(x[0]) * x[1] + y
    finv = jax.invertible(f)
    o = np.ones((5,))
    self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f((x, x), x)[0]))(o),
                        jax.value_and_grad(lambda x: np.sum(finv((x, x), x)[0]))(o),
                        check_dtypes=True)
class DeprecatedCustomTransformsTest(jtu.JaxTestCase):
  def test_defvjp_all(self):
    foo_p = Primitive('foo')
    def foo(x): return 2. * foo_p.bind(x)
    ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (4 * g * jnp.sin(x),)))
    val_ans, grad_ans = api.value_and_grad(foo)(3.)
    self.assertAllClose(val_ans, 2 * 3.**2, check_dtypes=False)
    self.assertAllClose(grad_ans, 4 * 2 * np.sin(3.), check_dtypes=False)
  def test_defvjp_all_const(self):
    foo_p = Primitive('foo')
    def foo(x): return foo_p.bind(x)
    ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (12.,)))
    val_ans, grad_ans = api.value_and_grad(foo)(3.)
    self.assertAllClose(val_ans, 9., check_dtypes=False)
    self.assertAllClose(grad_ans, 12.)
  def test_defvjp_all_higher_order_revmode(self):
    foo_p = Primitive('foo')
    def foo(x): return 2. * foo_p.bind(x)
    ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (g * x ** 2,)))
    ans = api.grad(api.grad(foo))(3.)
    self.assertAllClose(ans, 2 * 2 * 3., check_dtypes=False)
  def test_defvjp_all_multiple_arguments(self):
    # also tests passing in symbolic zero tangents b/c we differentiate wrt only
    # the first argument in one case
    foo_p = Primitive('foo')
    def foo(x, y): return foo_p.bind(x, y)
    def vjpfun(x, y):
      out = x**2 + y**3
      vjp = lambda g: (g + x + y, g * x * 9.)
      return out, vjp
    ad.defvjp_all(foo_p, vjpfun)
    val_ans, grad_ans = api.value_and_grad(foo)(3., 4.)
    self.assertAllClose(val_ans, 3.**2 + 4.**3, check_dtypes=False)
    self.assertAllClose(grad_ans, 1. + 3. + 4., check_dtypes=False)
    ans = api.grad(foo, (0, 1))(3., 4.)
    self.assertAllClose(ans, (1. + 3. + 4., 1. * 3. * 9.), check_dtypes=False)
  def test_defvjp_all_custom_transforms(self):
    @api.custom_transforms
    def foo(x):
      return jnp.sin(x)
    api.defvjp_all(foo, lambda x: (jnp.sin(x), lambda g: (g * x,)))
    val_ans, grad_ans = api.value_and_grad(foo)(3.)
    self.assertAllClose(val_ans, np.sin(3.), check_dtypes=False)
    self.assertAllClose(grad_ans, 3., check_dtypes=False)
  # TODO(mattjj): add defvjp_all test with pytree arguments
  def test_defvjp(self):
    @api.custom_transforms
    def foo(x, y):
      return jnp.sin(x * y)
    api.defvjp(foo, None, lambda g, _, x, y: g * x * y)
    val_ans, grad_ans = api.value_and_grad(foo)(3., 4.)
    self.assertAllClose(val_ans, np.sin(3. * 4.), check_dtypes=False)
    self.assertAllClose(grad_ans, 0., check_dtypes=False)
    ans_0, ans_1 = api.grad(foo, (0, 1))(3., 4.)
    self.assertAllClose(ans_0, 0., check_dtypes=False)
    self.assertAllClose(ans_1, 3. * 4., check_dtypes=False)
  def test_defvjp_higher_order(self):
    @api.custom_transforms
    def foo(x):
      return jnp.sin(2. * x)
    api.defvjp(foo, lambda g, _, x: g * jnp.cos(x))
    ans = api.grad(api.grad(foo))(2.)
    expected = api.grad(api.grad(jnp.sin))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_defvjp_use_ans(self):
    @api.custom_transforms
    def foo(x, y):
      return jnp.sin(x * y)
    api.defvjp(foo, None, lambda g, ans, x, y: g * x * y + jnp.cos(ans))
    val_ans, grad_ans = api.value_and_grad(foo, 1)(3., 4.)
    self.assertAllClose(val_ans, np.sin(3. * 4.), check_dtypes=False)
    self.assertAllClose(grad_ans, 3. * 4. + np.cos(np.sin(3. * 4)),
                        check_dtypes=False)
  def test_custom_transforms_eval_with_pytrees(self):
    @api.custom_transforms
    def f(x):
      a, b = x[0], x[1]
      return {'hi': 2 * a, 'bye': 2 * b}
    ans = f((1, 2))
    self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})
  def test_custom_transforms_jit_with_pytrees(self):
    @api.custom_transforms
    def f(x):
      a, b = x[0], x[1]
      return {'hi': 2 * a, 'bye': 2 * b}
    ans = jit(f)((1, 2))
    self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})
  def test_custom_transforms_jit_with_pytrees_consts(self):
    # The purpose of this test is to exercise the custom_transforms default
    # translation rule in how it deals with constants that are too large to be
    # treated as literals (at the time of writing).
    z = np.arange(10.)
    @api.custom_transforms
    def f(x):
      a, b = x[0], x[1]
      return {'hi': 2 * a, 'bye': z * b}
    ans = jit(f)((1, 2))
    self.assertAllClose(ans, {'hi': 2 * 1, 'bye': z * 2}, check_dtypes=False)
  def test_custom_transforms_jvp_with_pytrees(self):
    @api.custom_transforms
    def f(x):
      a, b = x[0], x[1]
      return {'hi': 2 * a, 'bye': 2 * b}
    ans, out_tangent = api.jvp(f, ((1., 2.),), ((3., 4.),))
    self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})
    self.assertEqual(out_tangent, {'hi': 2 * 3, 'bye': 2 * 4})
  def test_custom_transforms_vmap_with_pytrees(self):
    raise unittest.SkipTest("Test deprecated custom_transforms")
    @api.custom_transforms
    def f(x):
      a, b = x[0], x[1]
      return {'hi': 2 * a, 'bye': 2 * b}
    ans = api.vmap(f)((np.arange(3), np.ones((3, 2))))
    expected = {'hi': 2 * np.arange(3), 'bye': 2 * np.ones((3, 2))}
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_custom_transforms_jvp_with_closure(self):
    def f(x):
      @api.custom_transforms
      def g(y):
        return x * y
      return g(x)
    ans = api.grad(f)(1.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_custom_vjp_zeros(self):
    @api.custom_transforms
    def f(x, y):
      return 2 * x, 3 * y
    def f_vjp(x, y):
      return (2 * x, 3 * y), lambda ts: (4 * ts[0], 5 * ts[1])
    api.defvjp_all(f, f_vjp, )
    api.grad(lambda x, y: f(x, y)[0])(1., 2.)  # doesn't crash
  def test_custom_transforms_vjp_nones(self):
    core.skip_checks = True  # Fails with checks
    # issue raised by jsnoek@ and jumper@
    @jax.custom_transforms
    def solve(a, b):
      return jnp.dot(jnp.linalg.inv(a), b)
    # print(solve(a, b))
    def solve_vjp(a, b):
      x = solve(a, b)
      def vjp(x_tangent):
        dx = jnp.dot(solve(a, x_tangent), x.T)
        out = (dx, b * 0.)
        return out
      return x, vjp
    jax.defvjp_all(solve, solve_vjp)
    gf = grad(lambda a,b: jnp.sum(solve(a, b)))
    n = 3
    a_in = jnp.linspace(0, 1, n)[:, None]
    a = jnp.dot(a_in, a_in.T) + jnp.eye(n) * 0.1
    real_x = np.random.RandomState(0).randn(n)
    b = jnp.dot(a + jnp.eye(a.shape[0]), real_x)
    print(gf(a, b))  # doesn't crash
class BufferDonationTest(jtu.BufferDonationTestCase):
  @jtu.skip_on_devices("cpu")  # In/out aliasing not supported on CPU.
  def test_pmap_donate_argnums_invalidates_input(self):
    move = api.pmap(lambda x: x + x - x, donate_argnums=0)
    n = jax.local_device_count()
    x = api.pmap(lambda x: x)(jnp.ones([n]))
    y = move(x)
    self.assertDeleted(x)
    np.testing.assert_allclose(y, [1.] * n)
  def test_pmap_nested_donate_ignored(self):
    pmap_fun = jit(lambda x: api.pmap(lambda y: y ** 2, donate_argnums=0)(x))
    a = api.pmap(lambda x: x)(jnp.array([1]))
    # NOTE(mattjj): stopped raising error here and instead just ignored
    # with self.assertRaisesRegex(ValueError, "nested.*not supported"):
    #   pmap_fun(a)
    pmap_fun(a)  # doesn't crash
class NamedCallTest(jtu.JaxTestCase):
  def test_default_name(self):
    @api.named_call
    def my_test_function(x):
      return x**2
    @jax.jit
    def f(x):
      return my_test_function(x)
    c = jax.xla_computation(f)(2)
    self.assertIn("my_test_function", c.as_hlo_text())
  def test_non_jaxtype_arg(self):
    # For the test to fail without the invalid JaxType filter we need to pass
    # in a valid JaxType that forces the invalid Jaxtype to be raised to an
    # abstract value.
    def f(not_a_jaxtype, a_jaxtype):
      # then Jax needs to try and evaluate the abstractified non-JaxType
      if not_a_jaxtype:
        return a_jaxtype
      return 0
    f = api.named_call(f, name="test")
    out = jax.jit(f, static_argnums=(0,))("not a Jaxtype", 1)
    self.assertEqual(out, 1)
  @parameterized.parameters(jax.jit, jax.grad, jax.vmap, jax.remat)
  def test_jax_transforms(self, transform):
    f = jnp.sum
    x = jnp.array([1.])
    unnamed_out = transform(f)(x)
    named_out = transform(api.named_call(f, name="test"))(x)
    self.assertEqual(unnamed_out, named_out)
  def test_static_argnums(self):
    f = api.named_call(lambda x, y: y if x else None, name="test")
    f = jax.jit(f, static_argnums=(0,))
    out = f(True, 5)
    self.assertEqual(out, 5)
  def test_partial_eval(self):
    f = api.named_call(lambda x, y: y if x else None, name="test")
    f = jax.jit(functools.partial(f, True))
    out = f(5)
    self.assertEqual(out, 5)
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
 | 
| 
	the-stack_106_32267 | 
	#!/usr/bin/env python3
# Copyright (c) 2015-2016 The XRJV1 Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import XRJV1TestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE
class PrioritiseTransactionTest(XRJV1TestFramework):
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.txouts = gen_return_txouts()
    def setup_network(self):
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
    def run_test(self):
        utxo_count = 90
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
        base_fee = self.relayfee*100 # our transactions are smaller than 100kb
        txids = []
        # Create 3 batches of transactions at 3 different fee rate levels
        range_size = utxo_count // 3
        for i in range(3):
            txids.append([])
            start_range = i * range_size
            end_range = start_range + range_size
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
        # Make sure that the size of each group of transactions exceeds
        # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
        # more transactions.
        mempool = self.nodes[0].getrawmempool(True)
        sizes = [0, 0, 0]
        for i in range(3):
            for j in txids[i]:
                assert(j in mempool)
                sizes[i] += mempool[j]['size']
            assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
        # add a fee delta to something in the cheapest bucket and make sure it gets mined
        # also check that a different entry in the cheapest bucket is NOT mined (lower
        # the priority to ensure its not mined due to priority)
        self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
        self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
        self.nodes[0].generate(1)
        mempool = self.nodes[0].getrawmempool()
        print("Assert that prioritised transaction was mined")
        assert(txids[0][0] not in mempool)
        assert(txids[0][1] in mempool)
        high_fee_tx = None
        for x in txids[2]:
            if x not in mempool:
                high_fee_tx = x
        # Something high-fee should have been mined!
        assert(high_fee_tx != None)
        # Add a prioritisation before a tx is in the mempool (de-prioritising a
        # high-fee transaction so that it's now low fee).
        self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
        # Add everything back to mempool
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Check to make sure our high fee rate tx is back in the mempool
        mempool = self.nodes[0].getrawmempool()
        assert(high_fee_tx in mempool)
        # Now verify the modified-high feerate transaction isn't mined before
        # the other high fee transactions. Keep mining until our mempool has
        # decreased by all the high fee size that we calculated above.
        while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
            self.nodes[0].generate(1)
        # High fee transaction should not have been mined, but other high fee rate
        # transactions should have been.
        mempool = self.nodes[0].getrawmempool()
        print("Assert that de-prioritised transaction is still in mempool")
        assert(high_fee_tx in mempool)
        for x in txids[2]:
            if (x != high_fee_tx):
                assert(x not in mempool)
        # Create a free, low priority transaction.  Should be rejected.
        utxo_list = self.nodes[0].listunspent()
        assert(len(utxo_list) > 0)
        utxo = utxo_list[0]
        inputs = []
        outputs = {}
        inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
        tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
        txid = self.nodes[0].sendrawtransaction(tx_hex)
        # A tx that spends an in-mempool tx has 0 priority, so we can use it to
        # test the effect of using prioritise transaction for mempool acceptance
        inputs = []
        inputs.append({"txid": txid, "vout": 0})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
        tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
        try:
            self.nodes[0].sendrawtransaction(tx2_hex)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
            assert(tx2_id not in self.nodes[0].getrawmempool())
        else:
            assert(False)
        # This is a less than 1000-byte transaction, so just set the fee
        # to be the minimum for a 1000 byte transaction and check that it is
        # accepted.
        self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
        print("Assert that prioritised free transaction is accepted to mempool")
        assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
        assert(tx2_id in self.nodes[0].getrawmempool())
if __name__ == '__main__':
    PrioritiseTransactionTest().main()
 | 
| 
	the-stack_106_32268 | 
	#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A TaskGroup is a collection of closely related tasks on the same DAG that should be grouped
together when the DAG is displayed graphically.
"""
import functools
from inspect import signature
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, TypeVar, overload
from airflow.utils.task_group import TaskGroup
if TYPE_CHECKING:
    from airflow.models import DAG
F = TypeVar("F", bound=Callable[..., Any])
task_group_sig = signature(TaskGroup.__init__)
# This covers the @task_group() case. Annotations are copied from the TaskGroup
# class, only providing a default to 'group_id' (this is optional for the
# decorator and defaults to the decorated function's name). Please keep them in
# sync with TaskGroup when you can! Note that since this is an overload, these
# argument defaults aren't actually used at runtime--the real implementation
# does not use them, and simply rely on TaskGroup's defaults, so it's not
# disastrous if they go out of sync with TaskGroup.
@overload
def task_group(
    group_id: Optional[str] = None,
    prefix_group_id: bool = True,
    parent_group: Optional["TaskGroup"] = None,
    dag: Optional["DAG"] = None,
    default_args: Optional[Dict[str, Any]] = None,
    tooltip: str = "",
    ui_color: str = "CornflowerBlue",
    ui_fgcolor: str = "#000",
    add_suffix_on_collision: bool = False,
) -> Callable[[F], F]:
    ...
# This covers the @task_group case (no parentheses).
@overload
def task_group(python_callable: F) -> F:
    ...
def task_group(python_callable=None, *tg_args, **tg_kwargs):
    """
    Python TaskGroup decorator.
    This wraps a function into an Airflow TaskGroup. When used as the
    ``@task_group()`` form, all arguments are forwarded to the underlying
    TaskGroup class. Can be used to parametrize TaskGroup.
    :param python_callable: Function to decorate.
    :param tg_args: Positional arguments for the TaskGroup object.
    :param tg_kwargs: Keyword arguments for the TaskGroup object.
    """
    def wrapper(f):
        # Setting group_id as function name if not given in kwarg group_id
        if not tg_args and 'group_id' not in tg_kwargs:
            tg_kwargs['group_id'] = f.__name__
        task_group_bound_args = task_group_sig.bind_partial(*tg_args, **tg_kwargs)
        @functools.wraps(f)
        def factory(*args, **kwargs):
            # Initialize the TaskGroup with the arguments bound above; any tasks
            # created by the decorated function while this context manager is
            # active are attached to the group.
            with TaskGroup(
                *task_group_bound_args.args,
                add_suffix_on_collision=True,
                **task_group_bound_args.kwargs,
            ):
                # Invoke function to run Tasks inside the TaskGroup
                return f(*args, **kwargs)
        return factory
    if callable(python_callable):
        return wrapper(python_callable)
    return wrapper
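# Usage sketch (illustrative, not part of this module): a hypothetical DAG that
# groups two tasks with the decorator above. The @task decorator and the task
# names are assumptions made for this example only.
#
#     from datetime import datetime
#     from airflow.decorators import task
#     from airflow.models import DAG
#
#     with DAG(dag_id="example_dag", start_date=datetime(2021, 1, 1)) as dag:
#         @task_group(tooltip="extract then load")
#         def extract_load():
#             @task
#             def extract():
#                 return {"rows": 42}
#
#             @task
#             def load(payload):
#                 print(payload)
#
#             load(extract())
#
#         extract_load()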
 | 
| 
	the-stack_106_32269 | 
	#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import libs
import pandas as pd
# # Dataset
# [SP500 Since 1950](https://www.kaggle.com/datasets/benjibb/sp500-since-1950?resource=download)
# In[2]:
df = pd.read_csv('GSPC.csv')
# In[3]:
df
# In[4]:
df.info()
# In[5]:
df.drop('Date', axis=1, inplace=True)
# In[10]:
df.sample(5)
# In[11]:
df[-5::]
# In[ ]:
# separate the last row of the dataframe
futuro = df[-1::]
# In[14]:
futuro
# In[15]:
presente = df.drop(df[-1::].index, axis=0)
# In[16]:
presente.tail()
# In[17]:
presente['target'] = presente['Close'][1:len(presente)].reset_index(drop=True)
# In[20]:
presente.tail()
# In[19]:
presente.head()
# In[22]:
prev = presente[-1::].drop('target', axis =1)
# In[23]:
prev
# In[24]:
treino = presente.drop(presente[-1::].index, axis=0)
# In[26]:
treino
# In[31]:
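# Encode the target as a binary label: 1 when the next day's Close (copied into
# 'target' above) is higher than today's Close, 0 otherwise.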
treino.loc[treino['target'] > treino['Close'], 'target'] = 1
treino.loc[treino['target'] != 1, 'target'] = 0
treino['target'] = treino['target'].astype(int)
# In[32]:
treino
# In[37]:
y = treino['target']
X = treino.drop('target', axis=1)
# In[34]:
y
# In[38]:
X
# In[39]:
from sklearn.model_selection import train_test_split
X_tr, X_ts, y_tr, y_ts = train_test_split(X,y, test_size = 0.3)
from sklearn.ensemble import ExtraTreesClassifier
modelo = ExtraTreesClassifier()
modelo.fit(X_tr, y_tr)
resultado = modelo.score(X_ts, y_ts)
print(f'Accuracy: {resultado}')
# In[40]:
prev
# In[43]:
modelo.predict(prev)
# In[42]:
futuro
# In[44]:
prev['target'] = modelo.predict(prev)
# In[45]:
prev
# In[46]:
futuro
# In[47]:
presente
# In[48]:
presente = presente.append(futuro, sort=True)
# In[49]:
presente.tail()
# In[ ]:
 | 
| 
	the-stack_106_32270 | 
	text = input()
result_string = []
digits = [int(i) for i in text if i.isdigit()]
non_digits = [i for i in text if not i.isdigit()]
take_list = [num for idx, num in enumerate(digits) if idx % 2 == 0]
skip_list = [num for idx, num in enumerate(digits) if idx % 2 != 0]
take_skip_list = list(zip(take_list, skip_list))
start = 0
end = 0
for step in take_skip_list:
    take = step[0]
    skip = step[1]
    if take != 0:
        end += take
        result_string += non_digits[start: end]
        start += take
    if skip != 0:
        end = start + skip
        start += skip
print("".join(result_string))
 | 
| 
	the-stack_106_32272 | 
	import torch
from esm import pretrained, ProteinBertModel
class FBModel(object):
    def __init__(self, name, repr_layer=[-1], random_init=False):
        self.name_ = name
        self.repr_layer_ = repr_layer
        model, alphabet = pretrained.load_model_and_alphabet(name)
        if random_init:
            # ESM-1b with random initialization, for computational control.
            model = ProteinBertModel(
                args=model.args,
                alphabet=alphabet,
            )
        
        model.eval()
        if torch.cuda.is_available():
            model = model.cuda()
        self.model_ = model
        self.alphabet_ = alphabet
        self.unk_idx_ = alphabet.tok_to_idx['<unk>']
        assert(all(
            -(model.num_layers + 1) <= i <= model.num_layers
            for i in [ -1 ]
        ))
        self.repr_layers_ = [
            (i + model.num_layers + 1) % (model.num_layers + 1)
            for i in [ -1 ]
        ]
        self.vocabulary_ = {
            tok: self.alphabet_.tok_to_idx[tok]
            for tok in self.alphabet_.tok_to_idx
            if '<' not in tok and tok != '.' and tok != '-'
        }
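# Usage sketch (hedged): loading one of the pretrained ESM checkpoints by name.
# 'esm1b_t33_650M_UR50S' is assumed to be a valid argument for
# esm.pretrained.load_model_and_alphabet; adjust to whichever checkpoint is
# available locally.
#
#     if __name__ == '__main__':
#         fb = FBModel('esm1b_t33_650M_UR50S', repr_layer=[-1])
#         print('{} tokens in vocabulary'.format(len(fb.vocabulary_)))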
 | 
| 
	the-stack_106_32273 | 
	#!/usr/bin/python
# -*- coding: utf-8 -*-
# examples of setting your local data directory:
# 'local_data_dir': '/Users/eric/data/mms',        # macOS
# 'local_data_dir': 'c:\users\eric\data\mms',      # Windows
import os
CONFIG = {'local_data_dir': 'pydata',
          'debug_mode': False,
          'download_only': False,
          'no_download': False}
# override local data directory with environment variables
if os.environ.get('ROOT_DATA_DIR'):
    CONFIG['local_data_dir'] = \
        os.sep.join([os.environ['ROOT_DATA_DIR'], 'mms'])
if os.environ.get('MMS_DATA_DIR'):
    CONFIG['local_data_dir'] = os.environ['MMS_DATA_DIR']
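# Example (illustrative): overriding the data directory from the shell before
# running a script that imports this module (paths and script name below are
# placeholders):
#     MMS_DATA_DIR=/tmp/mms python load_data.py
#     ROOT_DATA_DIR=/tmp python load_data.py    # resolves to /tmp/mms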
 | 
| 
	the-stack_106_32274 | 
	"""
This module implements runtime logic to orchestrate the following steps
  1. Attach to a target application
  2. Resolve and activate probes
  3. Resolve and program pmc events
  4. Collect profile data for reporting
Author: Manikandan Dhamodharan, Morgan Stanley
"""
import logging
from xpedite.txn.classifier       import DefaultClassifier
from xpedite.types            import ResultOrder
LOGGER = logging.getLogger(__name__)
class AbstractRuntime(object):
  """
  Base class for xpedite runtime to orchestrate a profile session.
  The abstract runtime provides the implementation to resolve/enable probes and pmu events
  """
  def __init__(self, app, probes):
    """
    Creates a new instance of Abstract runtime
    The constructor also instantiates caches for pmu events and topdown metrics
    :param app: an instance of xpedite app, to interact with target application
    :type app: xpedite.profiler.app.XpediteApp
    :param probes: List of probes to be enabled for the current profile session
    """
    from xpedite.profiler.resolver import ProbeResolver
    from xpedite.pmu.eventsDb import EventsDbCache
    from xpedite.pmu.topdown import TopdownCache
    from xpedite.profiler.app import pingApp
    pingApp(app)
    self.app = app
    self.probes = probes
    self.probeResolver = ProbeResolver()
    self.cpuInfo = None
    self.eventsDbCache = EventsDbCache()
    self.topdownCache = TopdownCache(self.eventsDbCache)
    self.topdownMetrics = None
    self.eventSet = None
  @staticmethod
  def formatProbes(probes):
    """
    Formats a list of probes to string
    :param probes: List of probes to be enabled for the current profile session
    """
    probeStr = ''
    for probe in probes:
      probeStr = probeStr + '\n\t{}'.format(probe)
    return probeStr
  def enableProbes(self, probes):
    """
    Enables the list of given probes
    :param probes: List of probes to be enabled for the current profile session
    """
    from xpedite.profiler.probeAdmin import ProbeAdmin
    LOGGER.debug('Enabling probes %s', self.formatProbes(probes))
    if probes:
      if self.eventSet:
        errMsg = ProbeAdmin.enablePMU(self.app, self.eventSet)
        if errMsg:
          msg = 'failed to enable PMU ({})'.format(errMsg)
          LOGGER.error(msg)
          raise Exception(msg)
      (errCount, errMsg) = ProbeAdmin.updateProbes(self.app, probes, targetState=True)
      if errCount > 0:
        msg = 'failed to enable probes ({} error(s))\n{}'.format(errCount, errMsg)
        LOGGER.error(msg)
        raise Exception(msg)
    else:
      LOGGER.warn('failed to enable probes - Invalid or empty probes argument')
  def resolveProbes(self, probes):
    """
    Checks the validity of the list of given probes
    :param probes: List of probes to be enabled for the current profile session
    """
    from xpedite.types.probe  import AnchoredProbe
    anchoredProbes = []
    LOGGER.debug('Resolving probes %s', self.formatProbes(probes))
    for probe in probes:
      if not probe.isAnchored():
        resolvedProbes = self.probeResolver.resolveAnchoredProbe(self.app, probe)
        if resolvedProbes:
          for rp in resolvedProbes:
            anchoredProbe = AnchoredProbe(
              probe.name, filePath=rp.filePath, lineNo=rp.lineNo, attributes=rp.attributes,
                isActive=rp.isActive, sysName=rp.sysName
            )
            anchoredProbes.append(anchoredProbe)
            LOGGER.debug('Resolved probe %s to anchored probe %s', probe, anchoredProbe)
        else:
          raise Exception('probe {} cannot be located in app. Please check if it\'s a valid probe'.format(
            probe.sysName
          ))
      else:
        anchoredProbes.append(probe)
    return anchoredProbes
  @staticmethod
  def resolveEvents(eventsDb, cpuSet, events):
    """
    Resolves a list of given pmu events from events database
    :param eventsDb: Handle to database of PMU events for the target cpu
    :param events: List of PMU events to be enabled for the current profile session
    :param cpuSet: List of cpu, where the userspace pmu collection will be enabled
    """
    from xpedite.pmu.pmuctrl import PMUCtrl
    return PMUCtrl.resolveEvents(eventsDb, cpuSet, events)
  @staticmethod
  def aggregatePmc(pmc):
    """
    Aggregates given pmu events to create a unique list of events
    :param pmc: PMU events to be enabled for the current profile session
    """
    from collections import OrderedDict
    events = OrderedDict()
    for counter in pmc:
      if isinstance(counter, list):
        for event in counter:
          events.update({event:0})
      else:
        events.update({counter:0})
    return events.keys()
  def resolveTopdownMetrics(self, pmc):
    """
    Resolves pmu events for given topdown metrics.
    The method resolves a list of pmu events, for one or more nodes in the topdown hierarchy
    :param pmc: PMU events to be enabled for the current profile session
    """
    import copy
    from xpedite.pmu.event import TopdownMetrics
    from xpedite.pmu.event import Event, TopdownNode, Metric
    pmc = copy.copy(pmc)
    topdownNodes = [i for i, counter in enumerate(pmc) if isinstance(counter, (TopdownNode, Metric))]
    if topdownNodes:
      topdown = self.topdownCache.get(self.cpuInfo.cpuId)
      self.topdownMetrics = TopdownMetrics()
      for index in topdownNodes:
        node = pmc[index]
        topdownNode = self.topdownMetrics.add(topdown, node)
        pmc[index] = [Event(
          event.name.title().replace('_', '').replace(' ', ''), event.name
        ) for event in topdownNode.events]
    return pmc
class Runtime(AbstractRuntime):
  """Xpedite suite runtime to orchestrate profile session"""
  def __init__(self, app, probes, pmc=None, cpuSet=None, pollInterval=4, samplesFileSize=None, benchmarkProbes=None):
    """
    Creates a new profiler runtime
    Construction of the runtime will execute the following steps
    1. Starts the xpedite app to attach to profiling target
    2. Queries and resolves location of probes in profile info
    3. Load events and topdown database for the target cpu's micro architecture
    4. Resolves pmu events and topdown metrics from events database/topdown hierarchy
    5. Opens xpedite device driver to program pmu events and enable userspace pmc collection
    6. Activates resolved probes and begins sample collection in the target process
    :param app: an instance of xpedite app, to interact with target application
    :type app: xpedite.profiler.app.XpediteApp
    :param probes: List of probes to be enabled for the current profile session
    :param pmc: PMU events to be enabled for the current profile session
    :param cpuSet: List of cpu, where the userspace pmu collection will be enabled
    :type cpuSet: int
    :param pollInterval: Sample collection period in milli seconds
    :type pollInterval: int
    :param samplesFileSize: Max size of data files used to store samples
    :type samplesFileSize: int
    :param benchmarkProbes: optional map to override probes used for benchmarks,
                            defaults to active probes of the current profile session
    """
    from xpedite.dependencies     import Package, DEPENDENCY_LOADER
    DEPENDENCY_LOADER.load(Package.Numpy, Package.FuncTools)
    if len(probes) < 2:
      raise Exception('invalid request - profiling needs at least two named probes to be enabled. Found only {}'.format(
        probes
      ))
    try:
      AbstractRuntime.__init__(self, app, probes)
      self.benchmarkProbes = benchmarkProbes
      self.cpuInfo = app.getCpuInfo()
      eventsDb = self.eventsDbCache.get(self.cpuInfo.cpuId) if pmc else None
      if pmc:
        LOGGER.debug('detected %s', eventsDb.uarchSpec)
        pmc = self.resolveTopdownMetrics(pmc)
        pmc = self.aggregatePmc(pmc)
      if not self.app.dryRun:
        if pmc:
          self.eventSet = self.app.enablePMU(eventsDb, cpuSet, pmc)
        anchoredProbes = self.resolveProbes(probes)
        self.enableProbes(anchoredProbes)
        self.app.beginProfile(pollInterval, samplesFileSize)
      else:
        if pmc:
          self.eventSet = self.resolveEvents(eventsDb, cpuSet, pmc)
        LOGGER.warn('DRY Run selected - xpedite won\'t enable probes')
    except Exception as ex:
      LOGGER.exception('failed to start profiling')
      raise ex
  def report(self, reportName=None, benchmarkPaths=None, classifier=DefaultClassifier(), txnFilter=None,
      reportThreshold=3000, resultOrder=ResultOrder.WorstToBest):
    """
    Ends active profile session and generates reports.
    This method executes the following steps
    1. Ends samples collection and disconnects tcp connection to target
    2. Gathers sample files for the current profile session and loads elapsed time and pmu counters
    3. Groups related counters to build transactions and timelines
    4. Generates html report and stores results
    :param reportName: Name of the profile report (Default value = None)
    :type reportName: str
    :param benchmarkPaths: List of stored reports from previous runs, for benchmarking (Default value = None)
    :param classifier: Predicate to classify transactions into different categories (Default value = DefaultClassifier())
    :type classifier: xpedite.txn.classifier.ProbeDataClassifier
    :param txnFilter: Lambda to filter transactions prior to report generation
    :type txnFilter: callable accepting a txn instance and returns a bool
    :param reportThreshold: Threshold for number of transactions rendered in html reports (Default value = 3000)
    :type reportThreshold: int
    :param resultOrder: Default sort order of transactions in latency constituent reports
    :type resultOrder: xpedite.pmu.ResultOrder
    """
    from xpedite.profiler.reportgenerator import ReportGenerator
    from xpedite.txn.repo import TxnRepoFactory
    from xpedite.pmu.event       import Event
    try:
      if not self.app.dryRun:
        try:
          self.app.endProfile()
        except Exception:
          pass
        if self.eventSet:
          self.app.disablePMU()
      repoFactory = TxnRepoFactory()
      pmc = [Event(req.name, req.uarchName) for req in self.eventSet.requests()] if self.eventSet  else []
      repo = repoFactory.buildTxnRepo(
        self.app, self.cpuInfo, self.probes, self.topdownCache, self.topdownMetrics,
        pmc, self.benchmarkProbes, benchmarkPaths
      )
      reportName = reportName if reportName else self.app.name
      reportGenerator = ReportGenerator(reportName)
      return reportGenerator.generateReport(
        self.app, repo, classifier, resultOrder, reportThreshold, txnFilter, benchmarkPaths
      )
    except Exception as ex:
      LOGGER.exception('failed to generate report')
      raise ex
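# Usage sketch (hedged): driving a profile session with the Runtime above. How
# `app` and `probes` are constructed is an assumption for illustration; in
# practice they come from the profile info setup described in the constructor
# docstring.
#
#     runtime = Runtime(app, probes)              # attach, enable probes, start sampling
#     ...                                         # exercise the target workload
#     report = runtime.report(reportName='latency-report')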
 | 
| 
	the-stack_106_32275 | 
	from datetime import datetime
import flask
from config import Config
import requests
class OlhoVivoClient():
    """ An Client that centralizes the comunication with OlhoVivo API """
    def __init__(self):
        self.url =  Config.OLHOVIVO_URL
        self.token = Config.OLHOVIVO_KEY
        self.session = requests.session()
        self.is_connected = False
        self._connect()
        
    def _connect(self):
        """ Initialize the authentication with OlhoVivo API"""
        endpoint = self.url + '/Login/Autenticar?token=' + self.token
        response = self.session.post(endpoint)
        self.is_connected = response.status_code == 200
        return self.is_connected
    def _get(self, endpoint):
        """ Default get request for all methods"""
        
        url = self.url + endpoint
        response = self.session.get(url)
        return response
    def lines(self, term):
        """ Search lines by a term in OlhoVivo API"""
        # Needs improvement
        if self.is_connected: 
            endpoint = f'/Linha/Buscar?termosBusca={term}'
            response = self._get(endpoint)
            lines = response.json()
            line_objects = []
            for line in lines:
                line_objects.append(Line.fromdict(line))
            return line_objects
    
    def busstops_of_line(self, code):
        if self.is_connected:
            endpoint = f'/Parada/BuscarParadasPorLinha?codigoLinha={code}'
            response = self._get(endpoint)
            busstops = response.json()
            busstop_objects = []
            for busstop in busstops:
                busstop_objects.append(BusStop.fromdict(busstop))
            return busstop_objects
    def preview(self, line, busstop):
        if self.is_connected:
            endpoint = f'/Previsao?codigoParada={busstop}&codigoLinha={line}'
            response = self._get(endpoint)
            prevision = response.json()
            try:
                buses_on_the_way = prevision['p']['l'][0]['vs'] # list of vehicles and their arrival predictions
            except TypeError:
                buses_on_the_way = []
            finally:
                previsions = []
                for p in buses_on_the_way:
                    previsions.append(Prevision.fromdict(p))
                return previsions
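# Usage sketch (hedged): the search term, line code and stop code below are
# placeholders, not real SPTrans identifiers.
#
#     client = OlhoVivoClient()
#     if client.is_connected:
#         lines = client.lines('Lapa')                # search lines by a term
#         stops = client.busstops_of_line(1273)       # hypothetical line code
#         arrivals = client.preview(1273, 340015329)  # hypothetical stop code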
class Line():
    """ Bus Lines of SPTrans """
    # Needs improvement
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
    def __repr__(self):
        """ Define how a Line object will be printed """
        return f"{self.lt}-{self.tl} - From {self.get_origin()} To {self.get_destiny()}"
    def get_origin(self):
        if self.sl == 1:
            return self.tp
        else:
            return self.ts
    
    def get_destiny(self):
        if self.sl == 2:
            return self.tp
        else:
            return self.ts
        
    @classmethod
    def fromdict(cls, attr : dict):
        """ Instance a Line object from a dictionary """
        
        # Implement rules here 
        return Line(**attr)
class BusStop():
    """ Bus Stops """
    # Needs improvement
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
    @classmethod
    def fromdict(cls, attr : dict):
        """ Instance a BusStop object from a dictionary """
        
        # Implement rules here 
        return cls(**attr)
    def __str__(self):
        return f"Parada {self.cp} | Nome {self.np} | Endereço: {self.ed}"
        
class Prevision():
    """ Prevision for a bus """
    # Needs improvement
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
    @classmethod
    def fromdict(cls, attr : dict):
        """ Instance a prevision object from a dictionary """
        
        # Implement rules here 
        return cls(**attr)
    
    def __str__(self):
        return f"Bus {self.p} | Prevision {self.t} | Localization - Lat:{self.py} x Long: {self.px}" | 
| 
	the-stack_106_32276 | 
	import sys
if len(sys.argv) != 3:
    print('Usage: ex09-08.py WIDTH LINES')
else:
    try:
        LARGURA = int(sys.argv[1])
        LINHA = int(sys.argv[2])
    except:
        print('You entered invalid values')
    else:
        nome_do_arquivo = 'mobydick.txt'
        def verifica_pagina(arquivo, linha, pagina):
            if linha == LINHA:  # LINHA lines per page (footer included)
                rodape = f'= {nome_do_arquivo} - Pagina: {pagina} ='
                arquivo.write(rodape.center(LARGURA - 1) + '\n')
                pagina += 1
                linha = 1
            return linha, pagina
        def escreve(arquivo, linha, nlinhas, pagina):
            arquivo.write(linha+'\n')
            return verifica_pagina(arquivo, nlinhas + 1, pagina)
        entrada = open(nome_do_arquivo, encoding='utf-8')
        saida = open('saida_paginada.txt', 'w', encoding='utf-8')
        pagina = 1
        linhas = 1
        for linha in entrada.readlines():
            palavras = linha.rstrip().split(" ")
            linha = ''
            for p in palavras:
                p = p.strip()
                if len(linha) + len(p) + 1 > LARGURA:
                    linhas, pagina = escreve(saida, linha, linhas, pagina)
                    linha = ''
                linha += p + ' '
            if(linha != ''):
                linhas, pagina = escreve(saida, linha, linhas, pagina)
        while linhas != 1:
            linhas, pagina = escreve(saida, '', linhas, pagina)
        entrada.close()
        saida.close() | 
| 
	the-stack_106_32277 | 
	#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
class ActionProperty(object):
    def __init__(self):
        """
        Attributes:
          swaggerTypes (dict): The key is attribute name and the value is attribute type.
          attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        self.swaggerTypes = {
            
            'bandwidthProfileId': 'str',
            
            
            'priorityLevel': 'str',
            
            
            'experienceLevel': 'str',
            
            
            'destinations': 'list[str]',
            
            
            'pathPreference': 'str',
            
            
            'maintainExperience': 'str',
            
            
            'trustLevel': 'str',
            
            
            'pathControlFlag': 'bool',
            
            
            'pathPreferenceFlag': 'bool',
            
            
            'PrimaryPathPref': 'list[str]',
            
            
            'SecondaryPathPref': 'list[str]',
            
            
            'PathOfLastResort': 'str',
            
            
            'TertiaryPathPref': 'list[str]',
            
            
            'relevanceLevel': 'str'
            
        }
        self.attributeMap = {
            
            'bandwidthProfileId': 'bandwidthProfileId',
            
            'priorityLevel': 'priorityLevel',
            
            'experienceLevel': 'experienceLevel',
            
            'destinations': 'destinations',
            
            'pathPreference': 'pathPreference',
            
            'maintainExperience': 'maintainExperience',
            
            'trustLevel': 'trustLevel',
            
            'pathControlFlag': 'pathControlFlag',
            
            'pathPreferenceFlag': 'pathPreferenceFlag',
            
            'PrimaryPathPref': 'PrimaryPathPref',
            
            'SecondaryPathPref': 'SecondaryPathPref',
            
            'PathOfLastResort': 'PathOfLastResort',
            
            'TertiaryPathPref': 'TertiaryPathPref',
            
            'relevanceLevel': 'relevanceLevel'
            
        }       
        
        #ID of the bandwidth profile
        
        self.bandwidthProfileId = None # str
        
        #priority level for a policy
        
        self.priorityLevel = None # str
        
        #experience level for a policy
        
        self.experienceLevel = None # str
        
        
        self.destinations = None # list[str]
        
        #path preference for a policy
        
        self.pathPreference = None # str
        
        
        self.maintainExperience = None # str
        
        #trust level for a policy
        
        self.trustLevel = None # str
        
        #path control flag
        
        self.pathControlFlag = None # bool
        
        #path preference flag
        
        self.pathPreferenceFlag = None # bool
        
        
        self.PrimaryPathPref = None # list[str]
        
        
        self.SecondaryPathPref = None # list[str]
        
        
        self.PathOfLastResort = None # str
        
        
        self.TertiaryPathPref = None # list[str]
        
        #relevance level for a policy
        
        self.relevanceLevel = None # str
        
 | 
| 
	the-stack_106_32278 | 
	import re
import subprocess
import sys
import pytest
import ray
from ray.test_utils import run_string_as_driver_nonblocking
def test_worker_stdout():
    script = """
import ray
import sys
ray.init(num_cpus=2)
@ray.remote
def foo(out_str, err_str):
    print(out_str)
    print(err_str, file=sys.stderr)
ray.get(foo.remote("abc", "def"))
    """
    proc = run_string_as_driver_nonblocking(script)
    out_str = proc.stdout.read().decode("ascii")
    err_str = proc.stderr.read().decode("ascii")
    assert out_str.endswith("abc\n")
    assert err_str.split("\n")[-2].endswith("def")
def test_output():
    # Use subprocess to execute the __main__ below.
    outputs = subprocess.check_output(
        [sys.executable, __file__, "_ray_instance"],
        stderr=subprocess.STDOUT).decode()
    lines = outputs.split("\n")
    for line in lines:
        print(line)
    assert len(lines) == 3, lines
    logging_header = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}\sINFO\s"
    assert re.match(
        logging_header + r"resource_spec.py:\d+ -- Starting Ray with [0-9\.]+ "
        r"GiB memory available for workers and up to [0-9\.]+ GiB "
        r"for objects. You can adjust these settings with .*?.", lines[0])
    assert re.match(
        logging_header +
        r"services.py:\d+ -- View the Ray dashboard at .*?localhost:\d+?.*",
        lines[1])
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "_ray_instance":
        ray.init(num_cpus=1)
        ray.shutdown()
    else:
        sys.exit(pytest.main(["-v", __file__]))
 | 
| 
	the-stack_106_32280 | 
	# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import absolute_import, unicode_literals
import hashlib
import os
import random
import re
import string
import tempfile
import time
import warnings
import pickle
from django.conf import settings
from django.core import management
from django.core.cache import get_cache
from django.core.cache.backends.base import (CacheKeyWarning,
    InvalidCacheBackendError)
from django.core.context_processors import csrf
from django.db import router, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import (HttpResponse, HttpRequest, StreamingHttpResponse,
    QueryDict)
from django.middleware.cache import (FetchFromCacheMiddleware,
    UpdateCacheMiddleware, CacheMiddleware)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory
from django.test.utils import override_settings, IgnorePendingDeprecationWarningsMixin
from django.utils import six, timezone, translation, unittest
from django.utils.cache import (patch_vary_headers, get_cache_key,
    learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
    return 42
class C:
    def m(n):
        return 24
class DummyCacheTests(unittest.TestCase):
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has different test requirements.
    backend_name = 'django.core.cache.backends.dummy.DummyCache'
    def setUp(self):
        self.cache = get_cache(self.backend_name)
    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), None)
    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        self.cache.add("addkey1", "value")
        result = self.cache.add("addkey1", "newvalue")
        self.assertEqual(result, True)
        self.assertEqual(self.cache.get("addkey1"), None)
    def test_non_existent(self):
        "Non-existent keys aren't found in the dummy cache backend"
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {})
    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), None)
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), False)
        self.assertEqual(self.cache.has_key("goodbye1"), False)
    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, False)
        self.assertEqual("goodbye2" in self.cache, False)
    def test_incr(self):
        "Dummy cache values can't be incremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.incr, 'answer')
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
    def test_decr(self):
        "Dummy cache values can't be decremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.decr, 'answer')
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string'    : 'this is a string',
            'int'       : 42,
            'list'      : [1, 2, 3, 4],
            'tuple'     : (1, 2, 3, 4),
            'dict'      : {'A': 1, 'B' : 2},
            'function'  : f,
            'class'     : C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), None)
    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), None)
        self.assertEqual(self.cache.has_key("expire3"), False)
    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x' : 1 }
            }
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), None)
    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        self.cache.set_many({'a': 1, 'b': 2})
        self.cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        self.cache.delete_many(['a', 'b'])
    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        self.cache.clear()
    def test_incr_version(self):
        "Dummy cache versions can't be incremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.incr_version, 'answer')
        self.assertRaises(ValueError, self.cache.incr_version, 'does_not_exist')
    def test_decr_version(self):
        "Dummy cache versions can't be decremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.decr_version, 'answer')
        self.assertRaises(ValueError, self.cache.decr_version, 'does_not_exist')
class BaseCacheTests(object):
    # A common set of tests to apply to all cache backends
    def _get_request_cache(self, path):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = path
        request._cache_update_cache = True
        request.method = 'GET'
        return request
    def test_simple(self):
        # Simple cache set/get works
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), "value")
    def test_add(self):
        # A key can be added to a cache
        self.cache.add("addkey1", "value")
        result = self.cache.add("addkey1", "newvalue")
        self.assertEqual(result, False)
        self.assertEqual(self.cache.get("addkey1"), "value")
    def test_prefix(self):
        # Test for same cache key conflicts between shared backend
        self.cache.set('somekey', 'value')
        # should not be set in the prefixed cache
        self.assertFalse(self.prefix_cache.has_key('somekey'))
        self.prefix_cache.set('somekey', 'value2')
        self.assertEqual(self.cache.get('somekey'), 'value')
        self.assertEqual(self.prefix_cache.get('somekey'), 'value2')
    def test_non_existent(self):
        # Non-existent cache keys return as None/default
        # get with non-existent keys
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
    def test_get_many(self):
        # Multiple cache keys can be returned using get_many
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {'a' : 'a', 'c' : 'c', 'd' : 'd'})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {'a' : 'a', 'b' : 'b'})
    def test_delete(self):
        # Cache keys can be deleted
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), "spam")
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), "eggs")
    def test_has_key(self):
        # The cache can be inspected for cache keys
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), True)
        self.assertEqual(self.cache.has_key("goodbye1"), False)
    def test_in(self):
        # The in operator can be used to inspect cache contents
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, True)
        self.assertEqual("goodbye2" in self.cache, False)
    def test_incr(self):
        # Cache values can be incremented
        self.cache.set('answer', 41)
        self.assertEqual(self.cache.incr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.incr('answer', 10), 52)
        self.assertEqual(self.cache.get('answer'), 52)
        self.assertEqual(self.cache.incr('answer', -10), 42)
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
    def test_decr(self):
        # Cache values can be decremented
        self.cache.set('answer', 43)
        self.assertEqual(self.cache.decr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.decr('answer', 10), 32)
        self.assertEqual(self.cache.get('answer'), 32)
        self.assertEqual(self.cache.decr('answer', -10), 42)
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
    def test_close(self):
        self.assertTrue(hasattr(self.cache, 'close'))
        self.cache.close()
    def test_data_types(self):
        # Many different data types can be cached
        stuff = {
            'string'    : 'this is a string',
            'int'       : 42,
            'list'      : [1, 2, 3, 4],
            'tuple'     : (1, 2, 3, 4),
            'dict'      : {'A': 1, 'B' : 2},
            'function'  : f,
            'class'     : C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), stuff)
    def test_cache_read_for_model_instance(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="Well?")
        self.assertEqual(Poll.objects.count(), 1)
        pub_date = my_poll.pub_date
        self.cache.set('question', my_poll)
        cached_poll = self.cache.get('question')
        self.assertEqual(cached_poll.pub_date, pub_date)
        # We only want the default expensive calculation run once
        self.assertEqual(expensive_calculation.num_runs, 1)
    def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache write
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.assertEqual(expensive_calculation.num_runs, 1)
        self.cache.set('deferred_queryset', defer_qs)
        # cache set should not re-evaluate default functions
        self.assertEqual(expensive_calculation.num_runs, 1)
    def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.cache.set('deferred_queryset', defer_qs)
        self.assertEqual(expensive_calculation.num_runs, 1)
        runs_before_cache_read = expensive_calculation.num_runs
        cached_polls = self.cache.get('deferred_queryset')
        # We only want the default expensive calculation run on creation and set
        self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
    def test_expiration(self):
        # Cache values can be set to expire
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), "newvalue")
        self.assertEqual(self.cache.has_key("expire3"), False)
    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x' : 1 }
            }
        # Test `set`
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), value)
        # Test `add`
        for (key, value) in stuff.items():
            self.cache.delete(key)
            self.cache.add(key, value)
            self.assertEqual(self.cache.get(key), value)
        # Test `set_many`
        for (key, value) in stuff.items():
            self.cache.delete(key)
        self.cache.set_many(stuff)
        for (key, value) in stuff.items():
            self.assertEqual(self.cache.get(key), value)
    def test_binary_string(self):
        # Binary strings should be cacheable
        from zlib import compress, decompress
        value = 'value_to_be_compressed'
        compressed_value = compress(value.encode())
        # Test set
        self.cache.set('binary1', compressed_value)
        compressed_result = self.cache.get('binary1')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())
        # Test add
        self.cache.add('binary1-add', compressed_value)
        compressed_result = self.cache.get('binary1-add')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())
        # Test set_many
        self.cache.set_many({'binary1-set_many': compressed_value})
        compressed_result = self.cache.get('binary1-set_many')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())
    def test_set_many(self):
        # Multiple keys can be set using set_many
        self.cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(self.cache.get("key1"), "spam")
        self.assertEqual(self.cache.get("key2"), "eggs")
    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        self.cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
    def test_delete_many(self):
        # Multiple keys can be deleted using delete_many
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.set("key3", "ham")
        self.cache.delete_many(["key1", "key2"])
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
        self.assertEqual(self.cache.get("key3"), "ham")
    def test_clear(self):
        # The cache can be emptied using clear
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.clear()
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
    def test_long_timeout(self):
        '''
        Using a timeout greater than 30 days makes memcached think
        it is an absolute expiration timestamp instead of a relative
        offset. Test that we honour this convention. Refs #12399.
        '''
        self.cache.set('key1', 'eggs', 60*60*24*30 + 1) #30 days + 1 second
        self.assertEqual(self.cache.get('key1'), 'eggs')
        self.cache.add('key2', 'ham', 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key2'), 'ham')
        self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key3'), 'sausage')
        self.assertEqual(self.cache.get('key4'), 'lobster bisque')
    def test_forever_timeout(self):
        '''
        Passing in None into timeout results in a value that is cached forever
        '''
        self.cache.set('key1', 'eggs', None)
        self.assertEqual(self.cache.get('key1'), 'eggs')
        self.cache.add('key2', 'ham', None)
        self.assertEqual(self.cache.get('key2'), 'ham')
        self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
        self.assertEqual(self.cache.get('key3'), 'sausage')
        self.assertEqual(self.cache.get('key4'), 'lobster bisque')
    def test_zero_timeout(self):
        '''
        Passing in zero into timeout results in a value that is not cached
        '''
        self.cache.set('key1', 'eggs', 0)
        self.assertEqual(self.cache.get('key1'), None)
        self.cache.add('key2', 'ham', 0)
        self.assertEqual(self.cache.get('key2'), None)
        self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
        self.assertEqual(self.cache.get('key3'), None)
        self.assertEqual(self.cache.get('key4'), None)
    def test_float_timeout(self):
        # Make sure a timeout given as a float doesn't crash anything.
        self.cache.set("key1", "spam", 100.2)
        self.assertEqual(self.cache.get("key1"), "spam")
    def perform_cull_test(self, initial_count, final_count):
        """This is implemented as a utility method, because only some of the backends
        implement culling. The culling algorithm also varies slightly, so the final
        number of entries will vary between backends"""
        # Create initial cache key entries. This will overflow the cache, causing a cull
        for i in range(1, initial_count):
            self.cache.set('cull%d' % i, 'value', 1000)
        count = 0
        # Count how many keys are left in the cache.
        for i in range(1, initial_count):
            if self.cache.has_key('cull%d' % i):
                count = count + 1
        self.assertEqual(count, final_count)
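    # Backend-specific subclasses that implement culling call this helper with
    # their own expected counts, e.g. self.perform_cull_test(50, 29); the exact
    # final count depends on the backend's cull strategy (the numbers here are
    # illustrative).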
    def test_invalid_keys(self):
        """
        All the builtin backends (except memcached, see below) should warn on
        keys that would be refused by memcached. This encourages portable
        caching code without making it too difficult to use production backends
        with more liberal key rules. Refs #6447.
        """
        # mimic custom ``make_key`` method being defined since the default will
        # never show the below warnings
        def func(key, *args):
            return key
        old_func = self.cache.key_func
        self.cache.key_func = func
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                # memcached does not allow whitespace or control characters in keys
                self.cache.set('key with spaces', 'value')
                self.assertEqual(len(w), 2)
                self.assertIsInstance(w[0].message, CacheKeyWarning)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                # memcached limits key length to 250
                self.cache.set('a' * 251, 'value')
                self.assertEqual(len(w), 1)
                self.assertIsInstance(w[0].message, CacheKeyWarning)
        finally:
            self.cache.key_func = old_func
    def test_cache_versioning_get_set(self):
        # set, using default version = 1
        self.cache.set('answer1', 42)
        self.assertEqual(self.cache.get('answer1'), 42)
        self.assertEqual(self.cache.get('answer1', version=1), 42)
        self.assertEqual(self.cache.get('answer1', version=2), None)
        self.assertEqual(self.v2_cache.get('answer1'), None)
        self.assertEqual(self.v2_cache.get('answer1', version=1), 42)
        self.assertEqual(self.v2_cache.get('answer1', version=2), None)
        # set, default version = 1, but manually override version = 2
        self.cache.set('answer2', 42, version=2)
        self.assertEqual(self.cache.get('answer2'), None)
        self.assertEqual(self.cache.get('answer2', version=1), None)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.assertEqual(self.v2_cache.get('answer2'), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
        # v2 set, using default version = 2
        self.v2_cache.set('answer3', 42)
        self.assertEqual(self.cache.get('answer3'), None)
        self.assertEqual(self.cache.get('answer3', version=1), None)
        self.assertEqual(self.cache.get('answer3', version=2), 42)
        self.assertEqual(self.v2_cache.get('answer3'), 42)
        self.assertEqual(self.v2_cache.get('answer3', version=1), None)
        self.assertEqual(self.v2_cache.get('answer3', version=2), 42)
        # v2 set, default version = 2, but manually override version = 1
        self.v2_cache.set('answer4', 42, version=1)
        self.assertEqual(self.cache.get('answer4'), 42)
        self.assertEqual(self.cache.get('answer4', version=1), 42)
        self.assertEqual(self.cache.get('answer4', version=2), None)
        self.assertEqual(self.v2_cache.get('answer4'), None)
        self.assertEqual(self.v2_cache.get('answer4', version=1), 42)
        self.assertEqual(self.v2_cache.get('answer4', version=2), None)
    def test_cache_versioning_add(self):
        # add, default version = 1, but manually override version = 2
        self.cache.add('answer1', 42, version=2)
        self.assertEqual(self.cache.get('answer1', version=1), None)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.add('answer1', 37, version=2)
        self.assertEqual(self.cache.get('answer1', version=1), None)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.add('answer1', 37, version=1)
        self.assertEqual(self.cache.get('answer1', version=1), 37)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        # v2 add, using default version = 2
        self.v2_cache.add('answer2', 42)
        self.assertEqual(self.cache.get('answer2', version=1), None)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.v2_cache.add('answer2', 37)
        self.assertEqual(self.cache.get('answer2', version=1), None)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.v2_cache.add('answer2', 37, version=1)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        # v2 add, default version = 2, but manually override version = 1
        self.v2_cache.add('answer3', 42, version=1)
        self.assertEqual(self.cache.get('answer3', version=1), 42)
        self.assertEqual(self.cache.get('answer3', version=2), None)
        self.v2_cache.add('answer3', 37, version=1)
        self.assertEqual(self.cache.get('answer3', version=1), 42)
        self.assertEqual(self.cache.get('answer3', version=2), None)
        self.v2_cache.add('answer3', 37)
        self.assertEqual(self.cache.get('answer3', version=1), 42)
        self.assertEqual(self.cache.get('answer3', version=2), 37)
    def test_cache_versioning_has_key(self):
        self.cache.set('answer1', 42)
        # has_key
        self.assertTrue(self.cache.has_key('answer1'))
        self.assertTrue(self.cache.has_key('answer1', version=1))
        self.assertFalse(self.cache.has_key('answer1', version=2))
        self.assertFalse(self.v2_cache.has_key('answer1'))
        self.assertTrue(self.v2_cache.has_key('answer1', version=1))
        self.assertFalse(self.v2_cache.has_key('answer1', version=2))
    def test_cache_versioning_delete(self):
        self.cache.set('answer1', 37, version=1)
        self.cache.set('answer1', 42, version=2)
        self.cache.delete('answer1')
        self.assertEqual(self.cache.get('answer1', version=1), None)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.set('answer2', 37, version=1)
        self.cache.set('answer2', 42, version=2)
        self.cache.delete('answer2', version=2)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), None)
        self.cache.set('answer3', 37, version=1)
        self.cache.set('answer3', 42, version=2)
        self.v2_cache.delete('answer3')
        self.assertEqual(self.cache.get('answer3', version=1), 37)
        self.assertEqual(self.cache.get('answer3', version=2), None)
        self.cache.set('answer4', 37, version=1)
        self.cache.set('answer4', 42, version=2)
        self.v2_cache.delete('answer4', version=1)
        self.assertEqual(self.cache.get('answer4', version=1), None)
        self.assertEqual(self.cache.get('answer4', version=2), 42)
    def test_cache_versioning_incr_decr(self):
        self.cache.set('answer1', 37, version=1)
        self.cache.set('answer1', 42, version=2)
        self.cache.incr('answer1')
        self.assertEqual(self.cache.get('answer1', version=1), 38)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.decr('answer1')
        self.assertEqual(self.cache.get('answer1', version=1), 37)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.set('answer2', 37, version=1)
        self.cache.set('answer2', 42, version=2)
        self.cache.incr('answer2', version=2)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), 43)
        self.cache.decr('answer2', version=2)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.cache.set('answer3', 37, version=1)
        self.cache.set('answer3', 42, version=2)
        self.v2_cache.incr('answer3')
        self.assertEqual(self.cache.get('answer3', version=1), 37)
        self.assertEqual(self.cache.get('answer3', version=2), 43)
        self.v2_cache.decr('answer3')
        self.assertEqual(self.cache.get('answer3', version=1), 37)
        self.assertEqual(self.cache.get('answer3', version=2), 42)
        self.cache.set('answer4', 37, version=1)
        self.cache.set('answer4', 42, version=2)
        self.v2_cache.incr('answer4', version=1)
        self.assertEqual(self.cache.get('answer4', version=1), 38)
        self.assertEqual(self.cache.get('answer4', version=2), 42)
        self.v2_cache.decr('answer4', version=1)
        self.assertEqual(self.cache.get('answer4', version=1), 37)
        self.assertEqual(self.cache.get('answer4', version=2), 42)
    def test_cache_versioning_get_set_many(self):
        # set, using default version = 1
        self.cache.set_many({'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.cache.get_many(['ford1','arthur1']),
                         {'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.cache.get_many(['ford1','arthur1'], version=1),
                         {'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.cache.get_many(['ford1','arthur1'], version=2), {})
        self.assertEqual(self.v2_cache.get_many(['ford1','arthur1']), {})
        self.assertEqual(self.v2_cache.get_many(['ford1','arthur1'], version=1),
                         {'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.v2_cache.get_many(['ford1','arthur1'], version=2), {})
        # set, default version = 1, but manually override version = 2
        self.cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
        self.assertEqual(self.cache.get_many(['ford2','arthur2']), {})
        self.assertEqual(self.cache.get_many(['ford2','arthur2'], version=1), {})
        self.assertEqual(self.cache.get_many(['ford2','arthur2'], version=2),
                         {'ford2': 37, 'arthur2': 42})
        self.assertEqual(self.v2_cache.get_many(['ford2','arthur2']),
                         {'ford2': 37, 'arthur2': 42})
        self.assertEqual(self.v2_cache.get_many(['ford2','arthur2'], version=1), {})
        self.assertEqual(self.v2_cache.get_many(['ford2','arthur2'], version=2),
                         {'ford2': 37, 'arthur2': 42})
        # v2 set, using default version = 2
        self.v2_cache.set_many({'ford3': 37, 'arthur3': 42})
        self.assertEqual(self.cache.get_many(['ford3','arthur3']), {})
        self.assertEqual(self.cache.get_many(['ford3','arthur3'], version=1), {})
        self.assertEqual(self.cache.get_many(['ford3','arthur3'], version=2),
                         {'ford3': 37, 'arthur3': 42})
        self.assertEqual(self.v2_cache.get_many(['ford3','arthur3']),
                         {'ford3': 37, 'arthur3': 42})
        self.assertEqual(self.v2_cache.get_many(['ford3','arthur3'], version=1), {})
        self.assertEqual(self.v2_cache.get_many(['ford3','arthur3'], version=2),
                         {'ford3': 37, 'arthur3': 42})
        # v2 set, default version = 2, but manually override version = 1
        self.v2_cache.set_many({'ford4': 37, 'arthur4': 42}, version=1)
        self.assertEqual(self.cache.get_many(['ford4','arthur4']),
                         {'ford4': 37, 'arthur4': 42})
        self.assertEqual(self.cache.get_many(['ford4','arthur4'], version=1),
                         {'ford4': 37, 'arthur4': 42})
        self.assertEqual(self.cache.get_many(['ford4','arthur4'], version=2), {})
        self.assertEqual(self.v2_cache.get_many(['ford4','arthur4']), {})
        self.assertEqual(self.v2_cache.get_many(['ford4','arthur4'], version=1),
                         {'ford4': 37, 'arthur4': 42})
        self.assertEqual(self.v2_cache.get_many(['ford4','arthur4'], version=2), {})
    def test_incr_version(self):
        self.cache.set('answer', 42, version=2)
        self.assertEqual(self.cache.get('answer'), None)
        self.assertEqual(self.cache.get('answer', version=1), None)
        self.assertEqual(self.cache.get('answer', version=2), 42)
        self.assertEqual(self.cache.get('answer', version=3), None)
        self.assertEqual(self.cache.incr_version('answer', version=2), 3)
        self.assertEqual(self.cache.get('answer'), None)
        self.assertEqual(self.cache.get('answer', version=1), None)
        self.assertEqual(self.cache.get('answer', version=2), None)
        self.assertEqual(self.cache.get('answer', version=3), 42)
        self.v2_cache.set('answer2', 42)
        self.assertEqual(self.v2_cache.get('answer2'), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=3), None)
        self.assertEqual(self.v2_cache.incr_version('answer2'), 3)
        self.assertEqual(self.v2_cache.get('answer2'), None)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), None)
        self.assertEqual(self.v2_cache.get('answer2', version=3), 42)
        self.assertRaises(ValueError, self.cache.incr_version, 'does_not_exist')
    def test_decr_version(self):
        self.cache.set('answer', 42, version=2)
        self.assertEqual(self.cache.get('answer'), None)
        self.assertEqual(self.cache.get('answer', version=1), None)
        self.assertEqual(self.cache.get('answer', version=2), 42)
        self.assertEqual(self.cache.decr_version('answer', version=2), 1)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.get('answer', version=1), 42)
        self.assertEqual(self.cache.get('answer', version=2), None)
        self.v2_cache.set('answer2', 42)
        self.assertEqual(self.v2_cache.get('answer2'), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
        self.assertEqual(self.v2_cache.decr_version('answer2'), 1)
        self.assertEqual(self.v2_cache.get('answer2'), None)
        self.assertEqual(self.v2_cache.get('answer2', version=1), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=2), None)
        self.assertRaises(ValueError, self.cache.decr_version, 'does_not_exist', version=2)
    def test_custom_key_func(self):
        # Two caches with different key functions aren't visible to each other
        self.cache.set('answer1', 42)
        self.assertEqual(self.cache.get('answer1'), 42)
        self.assertEqual(self.custom_key_cache.get('answer1'), None)
        self.assertEqual(self.custom_key_cache2.get('answer1'), None)
        self.custom_key_cache.set('answer2', 42)
        self.assertEqual(self.cache.get('answer2'), None)
        self.assertEqual(self.custom_key_cache.get('answer2'), 42)
        self.assertEqual(self.custom_key_cache2.get('answer2'), 42)
    def test_cache_write_unpickable_object(self):
        update_middleware = UpdateCacheMiddleware()
        update_middleware.cache = self.cache
        fetch_middleware = FetchFromCacheMiddleware()
        fetch_middleware.cache = self.cache
        request = self._get_request_cache('/cache/test')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data, None)
        response = HttpResponse()
        content = 'Testing cookie serialization.'
        response.content = content
        response.set_cookie('foo', 'bar')
        update_middleware.process_response(request, response)
        get_cache_data = fetch_middleware.process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content.encode('utf-8'))
        self.assertEqual(get_cache_data.cookies, response.cookies)
        update_middleware.process_response(request, get_cache_data)
        get_cache_data = fetch_middleware.process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content.encode('utf-8'))
        self.assertEqual(get_cache_data.cookies, response.cookies)
def custom_key_func(key, key_prefix, version):
    "A customized cache key function"
    return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
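# Illustrative note (not part of the original test suite): with this key
# function installed via KEY_FUNCTION, a call such as cache.set('answer', 42)
# on a cache configured with KEY_PREFIX='p' and VERSION=1 would be stored
# under the key 'CUSTOM-p-1-answer', whereas Django's default key function
# would produce 'p:1:answer'. This is why the custom-key caches in
# test_custom_key_func above cannot see entries written by self.cache.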
class DBCacheTests(BaseCacheTests, TransactionTestCase):
    available_apps = ['cache']
    backend_name = 'django.core.cache.backends.db.DatabaseCache'
    def setUp(self):
        # Spaces are used in the table name to ensure quoting/escaping is working
        self._table_name = 'test cache table'
        management.call_command('createcachetable', self._table_name, verbosity=0, interactive=False)
        self.cache = get_cache(self.backend_name, LOCATION=self._table_name, OPTIONS={'MAX_ENTRIES': 30})
        self.prefix_cache = get_cache(self.backend_name, LOCATION=self._table_name, KEY_PREFIX='cacheprefix')
        self.v2_cache = get_cache(self.backend_name, LOCATION=self._table_name, VERSION=2)
        self.custom_key_cache = get_cache(self.backend_name, LOCATION=self._table_name, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(self.backend_name, LOCATION=self._table_name, KEY_FUNCTION='cache.tests.custom_key_func')
    def tearDown(self):
        from django.db import connection
        cursor = connection.cursor()
        cursor.execute('DROP TABLE %s' % connection.ops.quote_name(self._table_name))
        connection.commit()
    def test_cull(self):
        self.perform_cull_test(50, 29)
    def test_zero_cull(self):
        self.cache = get_cache(self.backend_name, LOCATION=self._table_name, OPTIONS={'MAX_ENTRIES': 30, 'CULL_FREQUENCY': 0})
        self.perform_cull_test(50, 18)
    def test_old_initialization(self):
        self.cache = get_cache('db://%s?max_entries=30&cull_frequency=0' % self._table_name)
        self.perform_cull_test(50, 18)
    def test_second_call_doesnt_crash(self):
        with six.assertRaisesRegex(self, management.CommandError,
                "Cache table 'test cache table' could not be created"):
            management.call_command(
                'createcachetable',
                self._table_name,
                verbosity=0,
                interactive=False
            )
    def test_clear_commits_transaction(self):
        # Ensure the database transaction is committed (#19896)
        self.cache.set("key1", "spam")
        self.cache.clear()
        transaction.rollback()
        self.assertEqual(self.cache.get("key1"), None)
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
    pass
class DBCacheRouter(object):
    """A router that puts the cache table on the 'other' database."""
    def db_for_read(self, model, **hints):
        if model._meta.app_label == 'django_cache':
            return 'other'
    def db_for_write(self, model, **hints):
        if model._meta.app_label == 'django_cache':
            return 'other'
    def allow_syncdb(self, db, model):
        if model._meta.app_label == 'django_cache':
            return db == 'other'
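# Usage sketch (illustrative, not part of the tests): outside of a test, a
# router like DBCacheRouter would normally be activated through settings,
# e.g. DATABASE_ROUTERS = ['path.to.DBCacheRouter'] (the dotted path here is
# hypothetical), so that reads, writes and syncdb checks for the django_cache
# app are routed to the 'other' database. The test below installs it by
# assigning to router.routers directly instead.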
class CreateCacheTableForDBCacheTests(TestCase):
    multi_db = True
    def test_createcachetable_observes_database_router(self):
        old_routers = router.routers
        try:
            router.routers = [DBCacheRouter()]
            # cache table should not be created on 'default'
            with self.assertNumQueries(0, using='default'):
                management.call_command('createcachetable', 'cache_table',
                                        database='default',
                                        verbosity=0, interactive=False)
            # cache table should be created on 'other'
            # one query is used to create the table and another one the index
            with self.assertNumQueries(2, using='other'):
                management.call_command('createcachetable', 'cache_table',
                                        database='other',
                                        verbosity=0, interactive=False)
        finally:
            router.routers = old_routers
class LocMemCacheTests(unittest.TestCase, BaseCacheTests):
    backend_name = 'django.core.cache.backends.locmem.LocMemCache'
    def setUp(self):
        self.cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30})
        self.prefix_cache = get_cache(self.backend_name, KEY_PREFIX='cacheprefix')
        self.v2_cache = get_cache(self.backend_name, VERSION=2)
        self.custom_key_cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30}, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30}, KEY_FUNCTION='cache.tests.custom_key_func')
        # LocMem requires a hack to make the other caches
        # share a data store with the 'normal' cache.
        self.prefix_cache._cache = self.cache._cache
        self.prefix_cache._expire_info = self.cache._expire_info
        self.v2_cache._cache = self.cache._cache
        self.v2_cache._expire_info = self.cache._expire_info
        self.custom_key_cache._cache = self.cache._cache
        self.custom_key_cache._expire_info = self.cache._expire_info
        self.custom_key_cache2._cache = self.cache._cache
        self.custom_key_cache2._expire_info = self.cache._expire_info
    def tearDown(self):
        self.cache.clear()
    def test_cull(self):
        self.perform_cull_test(50, 29)
    def test_zero_cull(self):
        self.cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30, 'CULL_FREQUENCY': 0})
        self.perform_cull_test(50, 19)
    def test_old_initialization(self):
        self.cache = get_cache('locmem://?max_entries=30&cull_frequency=0')
        self.perform_cull_test(50, 19)
    def test_multiple_caches(self):
        "Check that multiple locmem caches are isolated"
        mirror_cache = get_cache(self.backend_name)
        other_cache = get_cache(self.backend_name, LOCATION='other')
        self.cache.set('value1', 42)
        self.assertEqual(mirror_cache.get('value1'), 42)
        self.assertEqual(other_cache.get('value1'), None)
    def test_incr_decr_timeout(self):
        """incr/decr does not modify expiry time (matches memcached behavior)"""
        key = 'value'
        _key = self.cache.make_key(key)
        self.cache.set(key, 1, timeout=self.cache.default_timeout*10)
        expire = self.cache._expire_info[_key]
        self.cache.incr(key)
        self.assertEqual(expire, self.cache._expire_info[_key])
        self.cache.decr(key)
        self.assertEqual(expire, self.cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
@unittest.skipUnless(
    any(cache['BACKEND'].startswith('django.core.cache.backends.memcached.')
        for cache in settings.CACHES.values()),
    "memcached not available")
class MemcachedCacheTests(unittest.TestCase, BaseCacheTests):
    def setUp(self):
        for cache_key, cache in settings.CACHES.items():
            if cache['BACKEND'].startswith('django.core.cache.backends.memcached.'):
                break
        random_prefix = ''.join(random.choice(string.ascii_letters) for x in range(10))
        self.cache = get_cache(cache_key)
        self.prefix_cache = get_cache(cache_key, KEY_PREFIX=random_prefix)
        self.v2_cache = get_cache(cache_key, VERSION=2)
        self.custom_key_cache = get_cache(cache_key, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(cache_key, KEY_FUNCTION='cache.tests.custom_key_func')
    def tearDown(self):
        self.cache.clear()
    def test_invalid_keys(self):
        """
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons), we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.
        In order to be memcached-API-library agnostic, we only assert
        that a generic exception of some kind is raised.
        """
        # memcached does not allow whitespace or control characters in keys
        self.assertRaises(Exception, self.cache.set, 'key with spaces', 'value')
        # memcached limits key length to 250
        self.assertRaises(Exception, self.cache.set, 'a' * 251, 'value')
    # Explicitly display a skipped test if no configured cache uses MemcachedCache
    @unittest.skipUnless(
        any(cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache'
            for cache in settings.CACHES.values()),
        "cache with python-memcached library not available")
    def test_memcached_uses_highest_pickle_version(self):
        # Regression test for #19810
        for cache_key, cache in settings.CACHES.items():
            if cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
                self.assertEqual(get_cache(cache_key)._cache.pickleProtocol,
                                 pickle.HIGHEST_PROTOCOL)
class FileBasedCacheTests(unittest.TestCase, BaseCacheTests):
    """
    Specific test cases for the file-based cache.
    """
    backend_name = 'django.core.cache.backends.filebased.FileBasedCache'
    def setUp(self):
        self.dirname = tempfile.mkdtemp()
        self.cache = get_cache(self.backend_name, LOCATION=self.dirname, OPTIONS={'MAX_ENTRIES': 30})
        self.prefix_cache = get_cache(self.backend_name, LOCATION=self.dirname, KEY_PREFIX='cacheprefix')
        self.v2_cache = get_cache(self.backend_name, LOCATION=self.dirname, VERSION=2)
        self.custom_key_cache = get_cache(self.backend_name, LOCATION=self.dirname, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(self.backend_name, LOCATION=self.dirname, KEY_FUNCTION='cache.tests.custom_key_func')
    def tearDown(self):
        self.cache.clear()
    def test_hashing(self):
        """Test that keys are hashed into subdirectories correctly"""
        self.cache.set("foo", "bar")
        key = self.cache.make_key("foo")
        keyhash = hashlib.md5(key.encode()).hexdigest()
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assertTrue(os.path.exists(keypath))
    def test_subdirectory_removal(self):
        """
        Make sure that the created subdirectories are correctly removed when empty.
        """
        self.cache.set("foo", "bar")
        key = self.cache.make_key("foo")
        keyhash = hashlib.md5(key.encode()).hexdigest()
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assertTrue(os.path.exists(keypath))
        self.cache.delete("foo")
        self.assertTrue(not os.path.exists(keypath))
        self.assertTrue(not os.path.exists(os.path.dirname(keypath)))
        self.assertTrue(not os.path.exists(os.path.dirname(os.path.dirname(keypath))))
    def test_cull(self):
        self.perform_cull_test(50, 29)
    def test_old_initialization(self):
        self.cache = get_cache('file://%s?max_entries=30' % self.dirname)
        self.perform_cull_test(50, 29)
class CustomCacheKeyValidationTests(unittest.TestCase):
    """
    Tests for the ability to mix in a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        cache = get_cache('cache.liberal_backend://')
        # this key is both longer than 250 characters, and has spaces
        key = 'some key with spaces' * 15
        val = 'a value'
        cache.set(key, val)
        self.assertEqual(cache.get(key), val)
class GetCacheTests(unittest.TestCase):
    def test_simple(self):
        cache = get_cache('locmem://')
        from django.core.cache.backends.locmem import LocMemCache
        self.assertIsInstance(cache, LocMemCache)
        from django.core.cache import cache
        self.assertIsInstance(cache, get_cache('default').__class__)
        cache = get_cache(
            'django.core.cache.backends.dummy.DummyCache', **{'TIMEOUT': 120})
        self.assertEqual(cache.default_timeout, 120)
        self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
    def test_close(self):
        from django.core import signals
        cache = get_cache('cache.closeable_cache.CacheClass')
        self.assertFalse(cache.closed)
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)
@override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
        CACHE_MIDDLEWARE_SECONDS=1,
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            },
        },
        USE_I18N=False,
)
class CacheUtils(TestCase):
    """TestCase for django.utils.cache functions."""
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, path, method='GET'):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = "/cache/%s" % path
        return request
    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie    ,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = HttpResponse()
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)
    def test_get_cache_key(self):
        request = self._get_request(self.path)
        response = HttpResponse()
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
    def test_get_cache_key_with_query(self):
        request = self._get_request(self.path + '?test=1')
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.bd889c5a59603af44333ed21504db3cd.d41d8cd98f00b204e9800998ecf8427e')
    def test_learn_cache_key(self):
        request = self._get_request(self.path, 'HEAD')
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
    def test_patch_cache_control(self):
        tests = (
            # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
            (None, {'private' : True}, set(['private'])),
            # Test whether private/public attributes are mutually exclusive
            ('private', {'private' : True}, set(['private'])),
            ('private', {'public' : True}, set(['public'])),
            ('public', {'public' : True}, set(['public'])),
            ('public', {'private' : True}, set(['private'])),
            ('must-revalidate,max-age=60,private', {'public' : True}, set(['must-revalidate', 'max-age=60', 'public'])),
            ('must-revalidate,max-age=60,public', {'private' : True}, set(['must-revalidate', 'max-age=60', 'private'])),
            ('must-revalidate,max-age=60', {'public' : True}, set(['must-revalidate', 'max-age=60', 'public'])),
        )
        cc_delim_re = re.compile(r'\s*,\s*')
        for initial_cc, newheaders, expected_cc in tests:
            response = HttpResponse()
            if initial_cc is not None:
                response['Cache-Control'] = initial_cc
            patch_cache_control(response, **newheaders)
            parts = set(cc_delim_re.split(response['Cache-Control']))
            self.assertEqual(parts, expected_cc)
@override_settings(
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
                'KEY_PREFIX': 'cacheprefix',
            },
        },
)
class PrefixedCacheUtils(CacheUtils):
    pass
@override_settings(
        CACHE_MIDDLEWARE_SECONDS=60,
        CACHE_MIDDLEWARE_KEY_PREFIX='test',
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            },
        },
)
class CacheHEADTest(TestCase):
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, method):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = self.path
        return request
    def _get_request_cache(self, method):
        request = self._get_request(method)
        request._cache_update_cache = True
        return request
    def _set_cache(self, request, msg):
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)
    def test_head_caches_correctly(self):
        test_content = 'test content'
        request = self._get_request_cache('HEAD')
        self._set_cache(request, test_content)
        request = self._get_request('HEAD')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(test_content.encode(), get_cache_data.content)
    def test_head_with_cached_get(self):
        test_content = 'test content'
        request = self._get_request_cache('GET')
        self._set_cache(request, test_content)
        request = self._get_request('HEAD')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            },
        },
        LANGUAGES=(
            ('en', 'English'),
            ('es', 'Spanish'),
        ),
)
class CacheI18nTest(TestCase):
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, method='GET'):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = self.path
        return request
    def _get_request_cache(self, query_string=None):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        if query_string:
            request.META['QUERY_STRING'] = query_string
            request.GET = QueryDict(query_string)
        request.path = request.path_info = self.path
        request._cache_update_cache = True
        request.method = 'GET'
        request.session = {}
        return request
    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation(self):
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    def check_accept_language_vary(self, accept_language, vary, reference_key):
        request = self._get_request()
        request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = vary
        key = learn_cache_key(request, response)
        key2 = get_cache_key(request)
        self.assertEqual(key, reference_key)
        self.assertEqual(key2, reference_key)
    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation_accept_language(self):
        lang = translation.get_language()
        self.assertEqual(lang, 'en')
        request = self._get_request()
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = 'accept-encoding'
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        self.check_accept_language_vary(
            'en-us',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'en-US',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8',
            'accept-encoding, accept-language, cookie',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8,ko;q=0.6',
            'accept-language, cookie, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
            'accept-encoding, cookie, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
            'accept-language, accept-encoding, cookie',
            key
        )
        self.check_accept_language_vary(
            'ko;q=1.0,en;q=0.5',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko, en',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR, en-US',
            'accept-encoding, accept-language, cookie',
            key
        )
    @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
    def test_cache_key_i18n_formatting(self):
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_i18n_timezone(self):
        request = self._get_request()
        # This is tightly coupled to the implementation,
        # but it's the most straightforward way to test the key.
        tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    @override_settings(USE_I18N=False, USE_L10N=False)
    def test_cache_key_no_i18n(self):
        request = self._get_request()
        lang = translation.get_language()
        tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
        self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_with_non_ascii_tzname(self):
        # Regression test for #17476
        class CustomTzName(timezone.UTC):
            name = ''
            def tzname(self, dt):
                return self.name
        request = self._get_request()
        response = HttpResponse()
        with timezone.override(CustomTzName()):
            CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(sanitized_name, learn_cache_key(request, response),
                    "Cache keys should include the time zone name when time zones are active")
            CustomTzName.name = 'Hora estándar de Argentina'    # unicode
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(sanitized_name, learn_cache_key(request, response),
                    "Cache keys should include the time zone name when time zones are active")
    @override_settings(
            CACHE_MIDDLEWARE_KEY_PREFIX="test",
            CACHE_MIDDLEWARE_SECONDS=60,
            USE_ETAGS=True,
            USE_I18N=True,
    )
    def test_middleware(self):
        def set_cache(request, lang, msg):
            translation.activate(lang)
            response = HttpResponse()
            response.content = msg
            return UpdateCacheMiddleware().process_response(request, response)
        # cache with non-empty request.GET
        request = self._get_request_cache(query_string='foo=bar&other=true')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # first access, cache must return None
        self.assertEqual(get_cache_data, None)
        response = HttpResponse()
        content = 'Check for cache with QUERY_STRING'
        response.content = content
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # cache must return content
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content.encode())
        # different QUERY_STRING, cache must be empty
        request = self._get_request_cache(query_string='foo=bar&somethingelse=true')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data, None)
        # i18n tests
        en_message ="Hello world!"
        es_message ="Hola mundo!"
        request = self._get_request_cache()
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # Check that we can recover the cache
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # Check that we use etags
        self.assertTrue(get_cache_data.has_header('ETag'))
        # Check that we can disable etags
        with self.settings(USE_ETAGS=False):
            request._cache_update_cache = True
            set_cache(request, 'en', en_message)
            get_cache_data = FetchFromCacheMiddleware().process_request(request)
            self.assertFalse(get_cache_data.has_header('ETag'))
        # change the session language and set content
        request = self._get_request_cache()
        set_cache(request, 'es', es_message)
        # change the language again
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message.encode())
        # reset the language
        translation.deactivate()
    @override_settings(
            CACHE_MIDDLEWARE_KEY_PREFIX="test",
            CACHE_MIDDLEWARE_SECONDS=60,
            USE_ETAGS=True,
    )
    def test_middleware_doesnt_cache_streaming_response(self):
        request = self._get_request()
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
        # This test passes on Python < 3.3 even without the corresponding code
        # in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
        # fails (http://bugs.python.org/issue14288). LocMemCache silently
        # swallows the exception and doesn't store the response in cache.
        content = ['Check for cache with streaming content.']
        response = StreamingHttpResponse(content)
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
@override_settings(
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
                'KEY_PREFIX': 'cacheprefix'
            },
        },
)
class PrefixedCacheI18nTest(CacheI18nTest):
    pass
def hello_world_view(request, value):
    return HttpResponse('Hello World %s' % value)
def csrf_view(request):
    return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
        CACHE_MIDDLEWARE_ALIAS='other',
        CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
        CACHE_MIDDLEWARE_SECONDS=30,
        CACHE_MIDDLEWARE_ANONYMOUS_ONLY=False,
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            },
            'other': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
                'LOCATION': 'other',
                'TIMEOUT': '1',
            },
        },
)
class CacheMiddlewareTest(IgnorePendingDeprecationWarningsMixin, TestCase):
    def setUp(self):
        super(CacheMiddlewareTest, self).setUp()
        self.factory = RequestFactory()
        self.default_cache = get_cache('default')
        self.other_cache = get_cache('other')
    def tearDown(self):
        self.default_cache.clear()
        self.other_cache.clear()
        super(CacheMiddlewareTest, self).tearDown()
    def test_constructor(self):
        """
        Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
        Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
        appropriately.
        """
        # If no arguments are passed in construction, it's being used as middleware.
        middleware = CacheMiddleware()
        # Now test object attributes against values defined in setUp above
        self.assertEqual(middleware.cache_timeout, 30)
        self.assertEqual(middleware.key_prefix, 'middlewareprefix')
        self.assertEqual(middleware.cache_alias, 'other')
        self.assertEqual(middleware.cache_anonymous_only, False)
        # If arguments are being passed in construction, it's being used as a decorator.
        # First, test with "defaults":
        as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
        self.assertEqual(as_view_decorator.cache_timeout, 300) # Timeout value for 'default' cache, i.e. 300
        self.assertEqual(as_view_decorator.key_prefix, '')
        self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
        self.assertEqual(as_view_decorator.cache_anonymous_only, False)
        # Next, test with custom values:
        as_view_decorator_with_custom = CacheMiddleware(cache_anonymous_only=True, cache_timeout=60, cache_alias='other', key_prefix='foo')
        self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
        self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
        self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
        self.assertEqual(as_view_decorator_with_custom.cache_anonymous_only, True)
    def test_middleware(self):
        middleware = CacheMiddleware()
        prefix_middleware = CacheMiddleware(key_prefix='prefix1')
        timeout_middleware = CacheMiddleware(cache_timeout=1)
        request = self.factory.get('/view/')
        # Put the request through the request middleware
        result = middleware.process_request(request)
        self.assertEqual(result, None)
        response = hello_world_view(request, '1')
        # Now put the response through the response middleware
        response = middleware.process_response(request, response)
        # Repeating the request should result in a cache hit
        result = middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, b'Hello World 1')
        # The same request through a different middleware won't hit
        result = prefix_middleware.process_request(request)
        self.assertEqual(result, None)
        # The same request with a timeout _will_ hit
        result = timeout_middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, b'Hello World 1')
    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_wont_cause_session_access(self):
        """ The cache middleware shouldn't cause a session access due to
        CACHE_MIDDLEWARE_ANONYMOUS_ONLY if nothing else has accessed the
        session. Refs 13283 """
        from django.contrib.sessions.middleware import SessionMiddleware
        from django.contrib.auth.middleware import AuthenticationMiddleware
        middleware = CacheMiddleware()
        session_middleware = SessionMiddleware()
        auth_middleware = AuthenticationMiddleware()
        request = self.factory.get('/view_anon/')
        # Put the request through the request middleware
        session_middleware.process_request(request)
        auth_middleware.process_request(request)
        result = middleware.process_request(request)
        self.assertEqual(result, None)
        response = hello_world_view(request, '1')
        # Now put the response through the response middleware
        session_middleware.process_response(request, response)
        response = middleware.process_response(request, response)
        self.assertEqual(request.session.accessed, False)
    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_with_cache_page(self):
        """CACHE_MIDDLEWARE_ANONYMOUS_ONLY should still be effective when used
        with the cache_page decorator: the response to a request from an
        authenticated user should not be cached."""
        request = self.factory.get('/view_anon/')
        class MockAuthenticatedUser(object):
            def is_authenticated(self):
                return True
        class MockAccessedSession(object):
            accessed = True
        request.user = MockAuthenticatedUser()
        request.session = MockAccessedSession()
        response = cache_page(60)(hello_world_view)(request, '1')
        self.assertFalse("Cache-Control" in response)
    def test_view_decorator(self):
        # decorate the same view with different cache decorators
        default_view = cache_page(3)(hello_world_view)
        default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
        explicit_default_view = cache_page(3, cache='default')(hello_world_view)
        explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
        other_view = cache_page(1, cache='other')(hello_world_view)
        other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
        request = self.factory.get('/view/')
        # Request the view once
        response = default_view(request, '1')
        self.assertEqual(response.content, b'Hello World 1')
        # Request again -- hit the cache
        response = default_view(request, '2')
        self.assertEqual(response.content, b'Hello World 1')
        # Requesting the same view with the explicit cache should yield the same result
        response = explicit_default_view(request, '3')
        self.assertEqual(response.content, b'Hello World 1')
        # Requesting with a prefix will hit a different cache key
        response = explicit_default_with_prefix_view(request, '4')
        self.assertEqual(response.content, b'Hello World 4')
        # Hitting the same view again gives a cache hit
        response = explicit_default_with_prefix_view(request, '5')
        self.assertEqual(response.content, b'Hello World 4')
        # And going back to the implicit cache will hit the same cache
        response = default_with_prefix_view(request, '6')
        self.assertEqual(response.content, b'Hello World 4')
        # Requesting from an alternate cache won't hit cache
        response = other_view(request, '7')
        self.assertEqual(response.content, b'Hello World 7')
        # But a repeated hit will hit cache
        response = other_view(request, '8')
        self.assertEqual(response.content, b'Hello World 7')
        # And prefixing the alternate cache yields yet another cache entry
        response = other_with_prefix_view(request, '9')
        self.assertEqual(response.content, b'Hello World 9')
        # But if we wait a couple of seconds...
        time.sleep(2)
        # ... the default cache will still hit
        cache = get_cache('default')
        response = default_view(request, '11')
        self.assertEqual(response.content, b'Hello World 1')
        # ... the default cache with a prefix will still hit
        response = default_with_prefix_view(request, '12')
        self.assertEqual(response.content, b'Hello World 4')
        # ... the explicit default cache will still hit
        response = explicit_default_view(request, '13')
        self.assertEqual(response.content, b'Hello World 1')
        # ... the explicit default cache with a prefix will still hit
        response = explicit_default_with_prefix_view(request, '14')
        self.assertEqual(response.content, b'Hello World 4')
        # .. but a rapidly expiring cache won't hit
        response = other_view(request, '15')
        self.assertEqual(response.content, b'Hello World 15')
        # .. even if it has a prefix
        response = other_with_prefix_view(request, '16')
        self.assertEqual(response.content, b'Hello World 16')
    def test_sensitive_cookie_not_cached(self):
        """
        Django must prevent caching of responses that set a user-specific (and
        maybe security sensitive) cookie in response to a cookie-less request.
        """
        csrf_middleware = CsrfViewMiddleware()
        cache_middleware = CacheMiddleware()
        request = self.factory.get('/view/')
        self.assertIsNone(cache_middleware.process_request(request))
        csrf_middleware.process_view(request, csrf_view, (), {})
        response = csrf_view(request)
        response = csrf_middleware.process_response(request, response)
        response = cache_middleware.process_response(request, response)
        # Inserting a CSRF cookie in a cookie-less request prevented caching.
        self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
        CACHE_MIDDLEWARE_SECONDS=1,
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            },
        },
        USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
    """
    Tests various headers w/ TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway but the Etag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, path, method='GET'):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = "/cache/%s" % path
        return request
    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie    ,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = TemplateResponse(HttpResponse(), Template("This is a test"))
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)
    def test_get_cache_key(self):
        request = self._get_request(self.path)
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
    def test_get_cache_key_with_query(self):
        request = self._get_request(self.path + '?test=1')
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.bd889c5a59603af44333ed21504db3cd.d41d8cd98f00b204e9800998ecf8427e')
    @override_settings(USE_ETAGS=False)
    def test_without_etag(self):
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertFalse(response.has_header('ETag'))
    @override_settings(USE_ETAGS=True)
    def test_with_etag(self):
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertTrue(response.has_header('ETag'))
class TestEtagWithAdmin(TestCase):
    # See https://code.djangoproject.com/ticket/16003
    urls = "admin_views.urls"
    def test_admin(self):
        with self.settings(USE_ETAGS=False):
            response = self.client.get('/test_admin/admin/')
            self.assertEqual(response.status_code, 200)
            self.assertFalse(response.has_header('ETag'))
        with self.settings(USE_ETAGS=True):
            response = self.client.get('/test_admin/admin/')
            self.assertEqual(response.status_code, 200)
            self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
    def test_without_vary_on(self):
        key = make_template_fragment_key('a.fragment')
        self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
    def test_with_one_vary_on(self):
        key = make_template_fragment_key('foo', ['abc'])
        self.assertEqual(key,
            'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
    def test_with_many_vary_on(self):
        key = make_template_fragment_key('bar', ['abc', 'def'])
        self.assertEqual(key,
            'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
    def test_proper_escaping(self):
        key = make_template_fragment_key('spam', ['abc:def%'])
        self.assertEqual(key,
            'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
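# Note (illustrative, not part of the tests): the trailing hex digest in these
# fragment keys is an MD5 over the joined vary_on values -- for instance,
# d41d8cd98f00b204e9800998ecf8427e is the MD5 of the empty string and
# 900150983cd24fb0d6963f7d28e17f72 is the MD5 of 'abc'.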
 | 
| 
	the-stack_106_32283 | 
	
# COM3110/4155/6155: Text Processing
# Regular Expressions Lab Class
import sys, re
#------------------------------
testRE = re.compile('(logic|sicstus)', re.I)
#------------------------------
with open('RGX_DATA.html') as infs: 
    linenum = 0
    for line in infs:
        linenum += 1
        if line.strip() == '':
            continue
        print('  ', '-' * 100, '[%d]' % linenum, '\n   TEXT:', line, end='')
    
        m = testRE.search(line)
        if m:
            print('** TEST-RE:', m.group(1))
#        mm = testRE.finditer(line)
#        for m in mm:
#            print('** TEST-RE:', m.group(1))
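# Note (added for clarity, not part of the original lab handout): search()
# reports only the first match on each line, while the commented-out
# finditer() loop above would report every match; because the pattern is
# compiled with re.I, strings such as 'Logic' or 'SICSTUS' also match.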
 | 
| 
	the-stack_106_32284 | 
	import os
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *
indigo = Indigo()
bingo = Bingo.createDatabaseFile(indigo, "get_indigo_object_bug", 'molecule')
for item in ("C1=CC=CC=C1", "C1=CN=CC=C1"):
    bingo.insert(indigo.loadMolecule(item))
result = bingo.searchSim(indigo.loadMolecule("C1=CC=CC=C1"), 0.3, 1.0, "tanimoto")
while result.next():
    print(result.getCurrentId(), result.getCurrentSimilarityValue(), result.getIndigoObject().smiles())
bingo.close()
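# Descriptive note (added for clarity): searchSim is given a similarity window
# of 0.3 to 1.0 with the 'tanimoto' metric, so the loop above prints the id,
# similarity value and SMILES of every indexed molecule whose Tanimoto
# similarity to the benzene query falls within that range.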
 | 
| 
	the-stack_106_32285 | 
	"""
Test the memory module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import shutil
import os
import os.path
from tempfile import mkdtemp
import pickle
import warnings
import io
import sys
import time
import nose
from joblib.memory import Memory, MemorizedFunc, NotMemorizedFunc, MemorizedResult
from joblib.memory import NotMemorizedResult, _FUNCTION_HASHES
from joblib.test.common import with_numpy, np
from joblib.testing import assert_raises_regex
from joblib._compat import PY3_OR_LATER
###############################################################################
# Module-level variables for the tests
def f(x, y=1):
    """ A module-level function for testing purposes.
    """
    return x ** 2 + y
###############################################################################
# Test fixtures
env = dict()
def setup_module():
    """ Test setup.
    """
    cachedir = mkdtemp()
    env['dir'] = cachedir
    if os.path.exists(cachedir):
        shutil.rmtree(cachedir)
    # Don't make the cachedir, Memory should be able to do that on the fly
    print(80 * '_')
    print('test_memory setup (%s)' % env['dir'])
    print(80 * '_')
def _rmtree_onerror(func, path, excinfo):
    print('!' * 79)
    print('os function failed: %r' % func)
    print('file to be removed: %s' % path)
    print('exception was: %r' % excinfo[1])
    print('!' * 79)
def teardown_module():
    """ Test teardown.
    """
    shutil.rmtree(env['dir'], False, _rmtree_onerror)
    print(80 * '_')
    print('test_memory teardown (%s)' % env['dir'])
    print(80 * '_')
###############################################################################
# Helper function for the tests
def check_identity_lazy(func, accumulator):
    """ Given a function and an accumulator (a list that grows every
        time the function is called), check that the function can be
        decorated by memory to be a lazy identity.
    """
    # Call each function with several arguments, and check that it is
    # evaluated only once per argument.
    memory = Memory(cachedir=env['dir'], verbose=0)
    memory.clear(warn=False)
    func = memory.cache(func)
    for i in range(3):
        for _ in range(2):
            yield nose.tools.assert_equal, func(i), i
            yield nose.tools.assert_equal, len(accumulator), i + 1
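# Note (illustrative, not part of the original module): check_identity_lazy is
# a nose-style test generator -- it yields (assert_function, arg1, arg2)
# tuples, and the test functions below simply re-yield them, e.g.
#     for test in check_identity_lazy(f, accumulator):
#         yield test
# so that nose runs each yielded tuple as an individual test.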
###############################################################################
# Tests
def test_memory_integration():
    """ Simple test of memory lazy evaluation.
    """
    accumulator = list()
    # Rmk: this function has the same name as a module-level function,
    # thus it serves as a test to see that both are identified
    # as different.
    def f(l):
        accumulator.append(1)
        return l
    for test in check_identity_lazy(f, accumulator):
        yield test
    # Now test clearing
    for compress in (False, True):
     for mmap_mode in ('r', None):
        # We turn verbosity on to smoke test the verbosity code, however,
        # we capture it, as it is ugly
        try:
            # To smoke-test verbosity, we capture stdout
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            if PY3_OR_LATER:
                sys.stdout = io.StringIO()
                sys.stderr = io.StringIO()
            else:
                sys.stdout = io.BytesIO()
                sys.stderr = io.BytesIO()
            memory = Memory(cachedir=env['dir'], verbose=10,
                            mmap_mode=mmap_mode, compress=compress)
            # First clear the cache directory, to check that our code can
            # handle that
            # NOTE: this line would raise an exception, as the database file is
            # still open; we ignore the error since we want to test what
            # happens if the directory disappears
            shutil.rmtree(env['dir'], ignore_errors=True)
            g = memory.cache(f)
            g(1)
            g.clear(warn=False)
            current_accumulator = len(accumulator)
            out = g(1)
        finally:
            sys.stdout = orig_stdout
            sys.stderr = orig_stderr
        yield nose.tools.assert_equal, len(accumulator), \
                    current_accumulator + 1
        # Also, check that Memory.eval works similarly
        yield nose.tools.assert_equal, memory.eval(f, 1), out
        yield nose.tools.assert_equal, len(accumulator), \
                    current_accumulator + 1
    # Now do a smoke test with a function defined in __main__, as the name
    # mangling rules are more complex
    f.__module__ = '__main__'
    memory = Memory(cachedir=env['dir'], verbose=0)
    memory.cache(f)(1)
def test_no_memory():
    """ Test memory with cachedir=None: no memoize """
    accumulator = list()
    def ff(l):
        accumulator.append(1)
        return l
    mem = Memory(cachedir=None, verbose=0)
    gg = mem.cache(ff)
    for _ in range(4):
        current_accumulator = len(accumulator)
        gg(1)
        yield nose.tools.assert_equal, len(accumulator), \
                    current_accumulator + 1
def test_memory_kwarg():
    " Test memory with a function with keyword arguments."
    accumulator = list()
    def g(l=None, m=1):
        accumulator.append(1)
        return l
    for test in check_identity_lazy(g, accumulator):
        yield test
    memory = Memory(cachedir=env['dir'], verbose=0)
    g = memory.cache(g)
    # Smoke test with an explicit keyword argument:
    nose.tools.assert_equal(g(l=30, m=2), 30)
def test_memory_lambda():
    " Test memory with a function with a lambda."
    accumulator = list()
    def helper(x):
        """ A helper function to define l as a lambda.
        """
        accumulator.append(1)
        return x
    l = lambda x: helper(x)
    for test in check_identity_lazy(l, accumulator):
        yield test
def test_memory_name_collision():
    " Check that name collisions with functions will raise warnings"
    memory = Memory(cachedir=env['dir'], verbose=0)
    @memory.cache
    def name_collision(x):
        """ A first function called name_collision
        """
        return x
    a = name_collision
    @memory.cache
    def name_collision(x):
        """ A second function called name_collision
        """
        return x
    b = name_collision
    if not hasattr(warnings, 'catch_warnings'):
        # catch_warnings is new in Python 2.6
        return
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # This is a temporary workaround until we get rid of
        # inspect.getargspec, see
        # https://github.com/joblib/joblib/issues/247
        warnings.simplefilter("ignore", DeprecationWarning)
        a(1)
        b(1)
        yield nose.tools.assert_equal, len(w), 1
        yield nose.tools.assert_true, "collision" in str(w[-1].message)
def test_memory_warning_lambda_collisions():
    # Check that multiple use of lambda will raise collisions
    memory = Memory(cachedir=env['dir'], verbose=0)
    # For isolation with other tests
    memory.clear()
    a = lambda x: x
    a = memory.cache(a)
    b = lambda x: x + 1
    b = memory.cache(b)
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # This is a temporary workaround until we get rid of
        # inspect.getargspec, see
        # https://github.com/joblib/joblib/issues/247
        warnings.simplefilter("ignore", DeprecationWarning)
        nose.tools.assert_equal(0, a(0))
        nose.tools.assert_equal(2, b(1))
        nose.tools.assert_equal(1, a(1))
    # In recent Python versions, we can retrieve the code of lambdas,
    # thus nothing is raised
    nose.tools.assert_equal(len(w), 4)
def test_memory_warning_collision_detection():
    # Check that collisions impossible to detect will raise appropriate
    # warnings.
    memory = Memory(cachedir=env['dir'], verbose=0)
    # For isolation with other tests
    memory.clear()
    a1 = eval('lambda x: x')
    a1 = memory.cache(a1)
    b1 = eval('lambda x: x+1')
    b1 = memory.cache(b1)
    if not hasattr(warnings, 'catch_warnings'):
        # catch_warnings is new in Python 2.6
        return
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # This is a temporary workaround until we get rid of
        # inspect.getargspec, see
        # https://github.com/joblib/joblib/issues/247
        warnings.simplefilter("ignore", DeprecationWarning)
        a1(1)
        b1(1)
        a1(0)
        yield nose.tools.assert_equal, len(w), 2
        yield nose.tools.assert_true, \
                "cannot detect" in str(w[-1].message).lower()
def test_memory_partial():
    " Test memory with functools.partial."
    accumulator = list()
    def func(x, y):
        """ A helper function to define l as a lambda.
        """
        accumulator.append(1)
        return y
    import functools
    function = functools.partial(func, 1)
    for test in check_identity_lazy(function, accumulator):
        yield test
def test_memory_eval():
    " Smoke test memory with a function with a function defined in an eval."
    memory = Memory(cachedir=env['dir'], verbose=0)
    m = eval('lambda x: x')
    mm = memory.cache(m)
    yield nose.tools.assert_equal, 1, mm(1)
def count_and_append(x=[]):
    """ A function with a side effect in its arguments.
        Return the length of its argument and append one element.
    """
    len_x = len(x)
    x.append(None)
    return len_x
def test_argument_change():
    """ Check that if a function has a side effect in its arguments, it
        should use the hash of changing arguments.
    """
    mem = Memory(cachedir=env['dir'], verbose=0)
    func = mem.cache(count_and_append)
    # Call the function for the first time; it should cache it with
    # argument x=[]
    assert func() == 0
    # the second time the argument is x=[None], which is not cached
    # yet, so the function should be called a second time
    assert func() == 1
@with_numpy
def test_memory_numpy():
    " Test memory with a function with numpy arrays."
    # Check with memmapping and without.
    for mmap_mode in (None, 'r'):
        accumulator = list()
        def n(l=None):
            accumulator.append(1)
            return l
        memory = Memory(cachedir=env['dir'], mmap_mode=mmap_mode,
                            verbose=0)
        memory.clear(warn=False)
        cached_n = memory.cache(n)
        rnd = np.random.RandomState(0)
        for i in range(3):
            a = rnd.random_sample((10, 10))
            for _ in range(3):
                yield nose.tools.assert_true, np.all(cached_n(a) == a)
                yield nose.tools.assert_equal, len(accumulator), i + 1
@with_numpy
def test_memory_numpy_check_mmap_mode():
    """Check that mmap_mode is respected even at the first call"""
    memory = Memory(cachedir=env['dir'], mmap_mode='r', verbose=0)
    memory.clear(warn=False)
    @memory.cache()
    def twice(a):
        return a * 2
    a = np.ones(3)
    b = twice(a)
    c = twice(a)
    nose.tools.assert_true(isinstance(c, np.memmap))
    nose.tools.assert_equal(c.mode, 'r')
    nose.tools.assert_true(isinstance(b, np.memmap))
    nose.tools.assert_equal(b.mode, 'r')
def test_memory_exception():
    """ Smoketest the exception handling of Memory.
    """
    memory = Memory(cachedir=env['dir'], verbose=0)
    class MyException(Exception):
        pass
    @memory.cache
    def h(exc=0):
        if exc:
            raise MyException
    # Call once, to initialise the cache
    h()
    for _ in range(3):
        # Call 3 times, to be sure that the Exception is always raised
        yield nose.tools.assert_raises, MyException, h, 1
def test_memory_ignore():
    " Test the ignore feature of memory "
    memory = Memory(cachedir=env['dir'], verbose=0)
    accumulator = list()
    @memory.cache(ignore=['y'])
    def z(x, y=1):
        accumulator.append(1)
    yield nose.tools.assert_equal, z.ignore, ['y']
    z(0, y=1)
    yield nose.tools.assert_equal, len(accumulator), 1
    z(0, y=1)
    yield nose.tools.assert_equal, len(accumulator), 1
    z(0, y=2)
    yield nose.tools.assert_equal, len(accumulator), 1
def test_partial_decoration():
    "Check cache may be called with kwargs before decorating"
    memory = Memory(cachedir=env['dir'], verbose=0)
    test_values = [
        (['x'], 100, 'r'),
        ([], 10, None),
    ]
    for ignore, verbose, mmap_mode in test_values:
        @memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode)
        def z(x):
            pass
        yield nose.tools.assert_equal, z.ignore, ignore
        yield nose.tools.assert_equal, z._verbose, verbose
        yield nose.tools.assert_equal, z.mmap_mode, mmap_mode
def test_func_dir():
    # Test the creation of the memory cache directory for the function.
    memory = Memory(cachedir=env['dir'], verbose=0)
    memory.clear()
    path = __name__.split('.')
    path.append('f')
    path = os.path.join(env['dir'], 'joblib', *path)
    g = memory.cache(f)
    # Test that the function directory is created on demand
    yield nose.tools.assert_equal, g._get_func_dir(), path
    yield nose.tools.assert_true, os.path.exists(path)
    # Test that the code is stored.
    # For the following test to be robust to previous execution, we clear
    # the in-memory store
    _FUNCTION_HASHES.clear()
    yield nose.tools.assert_false, \
        g._check_previous_func_code()
    yield nose.tools.assert_true, \
            os.path.exists(os.path.join(path, 'func_code.py'))
    yield nose.tools.assert_true, \
        g._check_previous_func_code()
    # Test the robustness to failure of loading previous results.
    dir, _ = g.get_output_dir(1)
    a = g(1)
    yield nose.tools.assert_true, os.path.exists(dir)
    os.remove(os.path.join(dir, 'output.pkl'))
    yield nose.tools.assert_equal, a, g(1)
def test_persistence():
    # Test the memorized functions can be pickled and restored.
    memory = Memory(cachedir=env['dir'], verbose=0)
    g = memory.cache(f)
    output = g(1)
    h = pickle.loads(pickle.dumps(g))
    output_dir, _ = g.get_output_dir(1)
    yield nose.tools.assert_equal, output, h.load_output(output_dir)
    memory2 = pickle.loads(pickle.dumps(memory))
    yield nose.tools.assert_equal, memory.cachedir, memory2.cachedir
    # Smoke test that pickling a memory with cachedir=None works
    memory = Memory(cachedir=None, verbose=0)
    pickle.loads(pickle.dumps(memory))
    g = memory.cache(f)
    gp = pickle.loads(pickle.dumps(g))
    gp(1)
def test_call_and_shelve():
    """Test MemorizedFunc outputting a reference to cache.
    """
    for func, Result in zip((MemorizedFunc(f, env['dir']),
                             NotMemorizedFunc(f),
                             Memory(cachedir=env['dir']).cache(f),
                             Memory(cachedir=None).cache(f),
                             ),
                            (MemorizedResult, NotMemorizedResult,
                             MemorizedResult, NotMemorizedResult)):
        nose.tools.assert_equal(func(2), 5)
        result = func.call_and_shelve(2)
        nose.tools.assert_true(isinstance(result, Result))
        nose.tools.assert_equal(result.get(), 5)
        result.clear()
        nose.tools.assert_raises(KeyError, result.get)
        result.clear()  # Do nothing if there is no cache.
def test_memorized_pickling():
    for func in (MemorizedFunc(f, env['dir']), NotMemorizedFunc(f)):
        filename = os.path.join(env['dir'], 'pickling_test.dat')
        result = func.call_and_shelve(2)
        with open(filename, 'wb') as fp:
            pickle.dump(result, fp)
        with open(filename, 'rb') as fp:
            result2 = pickle.load(fp)
        nose.tools.assert_equal(result2.get(), result.get())
        os.remove(filename)
def test_memorized_repr():
    func = MemorizedFunc(f, env['dir'])
    result = func.call_and_shelve(2)
    func2 = MemorizedFunc(f, env['dir'])
    result2 = func2.call_and_shelve(2)
    nose.tools.assert_equal(result.get(), result2.get())
    nose.tools.assert_equal(repr(func), repr(func2))
    # Smoke test on deprecated methods
    func.format_signature(2)
    func.format_call(2)
    # Smoke test with NotMemorizedFunc
    func = NotMemorizedFunc(f)
    repr(func)
    repr(func.call_and_shelve(2))
    # Smoke test for message output (increase code coverage)
    func = MemorizedFunc(f, env['dir'], verbose=11, timestamp=time.time())
    result = func.call_and_shelve(11)
    result.get()
    func = MemorizedFunc(f, env['dir'], verbose=11)
    result = func.call_and_shelve(11)
    result.get()
    func = MemorizedFunc(f, env['dir'], verbose=5, timestamp=time.time())
    result = func.call_and_shelve(11)
    result.get()
    func = MemorizedFunc(f, env['dir'], verbose=5)
    result = func.call_and_shelve(11)
    result.get()
def test_memory_file_modification():
    # Test that modifying a Python file after loading it does not lead to
    # recomputation
    dir_name = os.path.join(env['dir'], 'tmp_import')
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    filename = os.path.join(dir_name, 'tmp_joblib_.py')
    content = 'def f(x):\n    print(x)\n    return x\n'
    with open(filename, 'w') as module_file:
        module_file.write(content)
    # Load the module:
    sys.path.append(dir_name)
    import tmp_joblib_ as tmp
    mem = Memory(cachedir=env['dir'], verbose=0)
    f = mem.cache(tmp.f)
    # Capture sys.stdout to count how many time f is called
    orig_stdout = sys.stdout
    if PY3_OR_LATER:
        my_stdout = io.StringIO()
    else:
        my_stdout = io.BytesIO()
    try:
        sys.stdout = my_stdout
        # First call f a few times
        f(1)
        f(2)
        f(1)
        # Now modify the module where f is stored without modifying f
        with open(filename, 'w') as module_file:
            module_file.write('\n\n' + content)
        # And call f a couple more times
        f(1)
        f(1)
        # Flush the .pyc files
        shutil.rmtree(dir_name)
        os.mkdir(dir_name)
        # Now modify the module where f is stored, modifying f
        content = 'def f(x):\n    print("x=%s" % x)\n    return x\n'
        with open(filename, 'w') as module_file:
            module_file.write(content)
        # And call f more times prior to reloading: the cache should not be
        # invalidated at this point as the active function definition has not
        # changed in memory yet.
        f(1)
        f(1)
        # Now reload
        my_stdout.write('Reloading\n')
        sys.modules.pop('tmp_joblib_')
        import tmp_joblib_ as tmp
        f = mem.cache(tmp.f)
        # And call f more times
        f(1)
        f(1)
    finally:
        sys.stdout = orig_stdout
    nose.tools.assert_equal(my_stdout.getvalue(), '1\n2\nReloading\nx=1\n')
def _function_to_cache(a, b):
    # Just a place holder function to be mutated by tests
    pass
def _sum(a, b):
    return a + b
def _product(a, b):
    return a * b
def test_memory_in_memory_function_code_change():
    _function_to_cache.__code__ = _sum.__code__
    mem = Memory(cachedir=env['dir'], verbose=0)
    f = mem.cache(_function_to_cache)
    nose.tools.assert_equal(f(1, 2), 3)
    nose.tools.assert_equal(f(1, 2), 3)
    with warnings.catch_warnings(record=True):
        # ignore name collision warnings
        warnings.simplefilter("always")
        # Check that inline function modification triggers a cache invalidation
        _function_to_cache.__code__ = _product.__code__
        nose.tools.assert_equal(f(1, 2), 2)
        nose.tools.assert_equal(f(1, 2), 2)
def test_clear_memory_with_none_cachedir():
    mem = Memory(cachedir=None)
    mem.clear()
if PY3_OR_LATER:
    exec("""
def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
    return a, b, kw1, kw2
def func_with_signature(a: int, b: float) -> float:
    return a + b
""")
    def test_memory_func_with_kwonly_args():
        mem = Memory(cachedir=env['dir'], verbose=0)
        func_cached = mem.cache(func_with_kwonly_args)
        nose.tools.assert_equal(func_cached(1, 2, kw1=3), (1, 2, 3, 'kw2'))
        # Making sure that providing a keyword-only argument by
        # position raises an exception
        assert_raises_regex(
            ValueError,
            "Keyword-only parameter 'kw1' was passed as positional parameter",
            func_cached,
            1, 2, 3, {'kw2': 4})
        # Keyword-only parameter passed by position with cached call
        # should still raise ValueError
        func_cached(1, 2, kw1=3, kw2=4)
        assert_raises_regex(
            ValueError,
            "Keyword-only parameter 'kw1' was passed as positional parameter",
            func_cached,
            1, 2, 3, {'kw2': 4})
        # Test 'ignore' parameter
        func_cached = mem.cache(func_with_kwonly_args, ignore=['kw2'])
        nose.tools.assert_equal(func_cached(1, 2, kw1=3, kw2=4), (1, 2, 3, 4))
        nose.tools.assert_equal(func_cached(1, 2, kw1=3, kw2='ignored'), (1, 2, 3, 4))
    def test_memory_func_with_signature():
        mem = Memory(cachedir=env['dir'], verbose=0)
        func_cached = mem.cache(func_with_signature)
        nose.tools.assert_equal(func_cached(1, 2.), 3.)
 | 
| 
	the-stack_106_32289 | 
	from pymaze.maze import Maze
from pymaze.solver import DepthFirstBacktracker
from pymaze.solver import BiDirectional
from pymaze.solver import BreadthFirst
class MazeManager(object):
    """A manager that abstracts the interaction with the library's components. The graphs, animations, maze creation,
    and solutions are all handled through the manager.
    Attributes:
        mazes (list): It is possible to have more than one maze. They are stored inside this variable.
        media_name (string): The filename for animations and images
        quiet_mode (bool): When true, information is not shown on the console
    """
    def __init__(self):
        self.mazes = []
        self.media_name = ""
        self.quiet_mode = False
    def add_maze(self, row, col, id=0):
        """Add a maze to the manager. We give the maze an index of
        the total number of mazes in the manager. As long as we don't
        add functionality to delete mazes from the manager, the ids will
        always be unique. Note that the first automatically assigned id is 0;
        subsequent mazes get an id of len(self.mazes) + 1, computed before the
        new maze is appended
        Args:
            row (int): The height of the maze
            col (int): The width of the maze
            id (int):  The optional unique id of the maze.
        Returns
            Maze: The newly created maze
        """
        if id != 0:
            self.mazes.append(Maze(row, col, id))
        else:
            if len(self.mazes) < 1:
                self.mazes.append(Maze(row, col, 0))
            else:
                self.mazes.append(Maze(row, col, len(self.mazes) + 1))
        return self.mazes[-1]
    def add_existing_maze(self, maze, override=True):
        """Add an already existing maze to the manager.
        Note that it is assumed that the maze already has an id. If the id
        already exists, the function will fail. To assign a new, unique id to
        the maze, set the override flag to true.
        Args:
            maze: The maze that will be added to the manager
            override (bool): A flag that you can set to bypass checking the id
        Returns:
            True: If the maze was added to the manager
            False: If the maze could not be added to the manager
        """
        # Check if there is a maze with the same id. If there is a conflict, return False
        if self.check_matching_id(maze.id) is None:
            if override:
                if len(self.mazes) < 1:
                    maze.id = 0
                else:
                    maze.id = len(self.mazes) + 1
        else:
            return False
        self.mazes.append(maze)
        return maze
    def get_maze(self, id):
        """Get a maze by its id.
            Args:
                id (int): The id of the desired maze
            Return:
                    Maze: Returns the maze if it was found.
                    None: If no maze was found
        """
        for maze in self.mazes:
            if maze.id == id:
                return maze
        print("Unable to locate maze")
        return None
    def get_mazes(self):
        """Get all of the mazes that the manager is holding"""
        return self.mazes
    def get_maze_count(self):
        """Gets the number of mazes that the manager is holding"""
        return len(self.mazes)
    def solve_maze(self, maze_id, method, neighbor_method="fancy"):
        """ Called to solve a maze by a particular method. The method
        is specified by a string. The options are
            1. DepthFirstBacktracker
            2. BiDirectional
            3. BreadthFirst
        Args:
            maze_id (int): The id of the maze that will be solved
            method (string): The name of the method (see above)
            neighbor_method (string): The neighbor-selection strategy passed to the solver (defaults to "fancy")
        """
        maze = self.get_maze(maze_id)
        if maze is None:
            print("Unable to locate maze. Exiting solver.")
            return None
        """DEVNOTE: When adding a new solution method, call it from here.
            Also update the list of names in the documentation above"""
        if method == "DepthFirstBacktracker":
            solver = DepthFirstBacktracker(maze, neighbor_method, self.quiet_mode)
            maze.solution_path = solver.solve()
        elif method == "BiDirectional":
            solver = BiDirectional(maze, neighbor_method, self.quiet_mode)
            maze.solution_path = solver.solve()
        elif method == "BreadthFirst":
            solver = BreadthFirst(maze, neighbor_method, self.quiet_mode)
            maze.solution_path = solver.solve()
    def check_matching_id(self, id):
        """Check if the id already belongs to an existing maze
        Args:
            id (int): The id to be checked
        Returns:
            The maze with the matching id if one exists, otherwise None
        """
        return next((maze for maze in self.mazes if maze.id == id), None)
    def set_filename(self, filename):
        """
        Sets the filename for saving animations and images
        Args:
            filename (string): The name of the file without an extension
        """
        self.media_name = filename
    def set_quiet_mode(self, enabled):
        """
        Enables/Disables the quiet mode
        Args:
            enabled (bool): True when quiet mode is on, False when it is off
        """
        self.quiet_mode = enabled
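# --- Added usage sketch (not part of the original module) ---
# A minimal sketch of how the manager is meant to be driven; it assumes
# pymaze's Maze generates itself on construction, as add_maze implies.
if __name__ == "__main__":
    manager = MazeManager()
    maze = manager.add_maze(10, 10)
    manager.set_quiet_mode(True)
    manager.solve_maze(maze.id, "BreadthFirst")
    print("Mazes managed:", manager.get_maze_count())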
 | 
| 
	the-stack_106_32290 | 
	# test dataset with opinion scores (os) in dictionary style with repetitions
dataset_name = 'test_dataset_os_as_dict_with_repetitions'
yuv_fmt = 'yuv420p'
width = 1920
height = 1080
ref_score = 5.0
ref_videos = [
       {'content_id': 0, 'content_name': 'foo', 'path': 'foo.png'},
       {'content_id': 1, 'content_name': 'bar', 'path': 'bar.png'}
]
dis_videos = [
              {'asset_id': 0,
               'content_id': 0,
               'os': {'Tom': [3, 2], 'Jerry': 4, 'Pinokio': 1},
               'path': 'baz1.png'},
              {'asset_id': 1,
               'content_id': 1,
               'os': {'Tom': [2, 2], 'Jerry': 1, 'Pinokio': [3, 3, 1]},
               'path': 'baz2.png'},
              {'asset_id': 2,
               'content_id': 0,
               'os': {'Tom': 4, 'Jerry': 1, 'Pinokio': 3},
               'path': 'baz3.png'}
]
 | 
| 
	the-stack_106_32291 | 
	# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
bl_info = {
    "name" : "XBOX",
    "author" : "Kevin Masson",
    "description" : "",
    "blender" : (2, 80, 0),
    "version" : (0, 0, 1),
    "location" : "",
    "warning" : "",
    "category" : "Generic"
}
from . import auto_load
auto_load.init()
def register():
    auto_load.register()
def unregister():
    auto_load.unregister()
 | 
| 
	the-stack_106_32292 | 
	#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
import re
from textwrap import dedent as dd
import glfw
from glfw import gl
class Shader(object):
    '''Wrapper for opengl boilerplate code'''
    def __init__(self, source):
        assert glfw.core.init(), 'Error: GLFW could not be initialized'
        self.inputs = OrderedDict()
        self.outputs = OrderedDict()
        self.uniforms = OrderedDict()
        self.source = dd('\n'.join([l for l in source.split('\n') if l.strip()]))
        self.parse(source)
        self.compiled = False
    @property
    def shader(self):
        if not hasattr(self, '_id'):
            self._id = gl.create_shader(self.opengl_type)
        return self._id
    def compile(self):
        '''Compiles and checks output'''
        if not self.compiled:
            shader_id = self.shader
            gl.shader_source(shader_id, self.source)
            gl.compile_shader(shader_id)
            result = gl.get_shaderiv(shader_id, gl.COMPILE_STATUS)
            log_length = gl.get_shaderiv(shader_id, gl.INFO_LOG_LENGTH)
            log = ''
            if log_length != 0:
                log = gl.get_shader_info_log(shader_id)
            if log.strip():
                assert result == gl.TRUE and log_length == 0, log
            self.compiled = True
            return shader_id
    def attach(self, program):
        if not self.compiled:
            self.compile()
        gl.attach_shader(program.program, self.shader)
    def detach(self, program):
        if self.shader is not None:
            gl.detach_shader(program.program, self.shader)
    def delete(self):
        if self.shader is not None:
            gl.delete_shader(self.shader)
    def cleanup(self, program):
        self.detach(program)
        self.delete()
    def __del__(self):
        self.delete()
    def __contains__(self, key):
        return key in self.inputs or key in self.uniforms
    def __getitem__(self, key):
        if key not in self.inputs and key not in self.uniforms:
            raise KeyError('Could not set "{}"'.format(key))
        else:
            if key in self.inputs:
                return self.inputs[key]
            elif key in self.uniforms:
                return self.uniforms[key]
    def __setitem__(self, key, val):
        if key in self.inputs or key in self.uniforms:
            setattr(self, key, val)
        else:
            raise KeyError('Could not set "{}"'.format(key))
    def __iter__(self):
        for key in self.inputs:
            yield key
        for key in self.uniforms:
            yield key
    def set_context(self, version):
        major, minor = version
        glfw.core.window_hint(glfw.FOCUSED, False)
        glfw.core.window_hint(glfw.CONTEXT_VERSION_MAJOR, major)
        glfw.core.window_hint(glfw.CONTEXT_VERSION_MINOR, minor)
        profile = glfw.OPENGL_ANY_PROFILE if version < (3, 2) else glfw.OPENGL_CORE_PROFILE
        glfw.core.window_hint(glfw.OPENGL_PROFILE, profile)
        # Setup forward compatibility if able
        forward_compat = False if version < (3, 0) else True
        glfw.core.window_hint(glfw.OPENGL_FORWARD_COMPAT, forward_compat)
        #  Keep the window invisible
        glfw.core.window_hint(glfw.VISIBLE, False)
        win = glfw.create_window(title='test', width=1, height=1)
        if win is not None:
            glfw.core.destroy_window(win)
        return major, minor
    def parse(self, source):
        '''Parses source looking for context required as well as
        inputs and uniforms'''
        opengl_mapping = {
            (1, 1): (2, 0),
            (1, 2): (2, 1),
            (1, 3): (3, 0),
            (1, 4): (3, 1),
            (1, 5): (3, 2),
        }
        version_pattern = r'^\#version\s+(?P<version>[0-9]+)\s*$'
        inputs2_pattern = ("\s*GLSL_TYPE\s+"
                           "((highp|mediump|lowp)\s+)?"
                           "(?P<vartype>\w+)\s+"
                           "(?P<varname>\w+)\s*"
                           "(\[(?P<varsize>\d+)\])?"
                           "(\s*\=\s*(?P<vardefault>[0-9.]+))?"
                           "\s*;"
                           )
        inputs_pattern = (
            r'(?P<direction>(in|out|uniform))\s+'
            r'((highp|mediump|lowp)\s+)?'
            r'(?P<vartype>\w+)\s+'
            r'(?P<varname>\w+)\s*'
            r'(\s*\=\s*(?P=vartype)?(?P<vardefault>(.+)))?'
            r'\;'
        )
        version_eng = re.compile(version_pattern)
        self.version = major, minor = (3, 2)
        engines = (
            [re.compile(inputs_pattern)] +
            [
                re.compile(inputs2_pattern.replace('GLSL_TYPE', kind), flags=re.MULTILINE)
                for kind in ('uniform', 'attribute', 'varying', 'const')
            ]
        )
        for line in source.split('\n'):
            line = line.strip()
            if version_eng.search(line):
                data = [m.groupdict() for m in version_eng.finditer(line)][0]
                version = tuple([int(c) for c in data['version']][:2])
                self.version = opengl_mapping.get(version, version)
            for eng in engines:
                if eng.search(line):
                    data = [m.groupdict() for m in eng.finditer(line)][0]
                    varname = data['varname']
                    vartype = data['vartype']
                    direction = data['direction']
                    default = data['vardefault']
                    if direction == 'in':
                        setattr(self, varname, vartype)
                        self.inputs[varname] = vartype
                    elif direction == 'out':
                        setattr(self, varname, vartype)
                        self.outputs[varname] = vartype
                    elif direction == 'uniform':
                        setattr(self, varname, vartype)
                        if default:
                            self.uniforms[varname] = ('uniform', vartype, default)
                        else:
                            self.uniforms[varname] = ('uniform', vartype, )
                    break
        self.set_context(self.version)
    def __repr__(self):
        cname = self.__class__.__name__
        version = self.version
        inputs = ''
        uniforms = ''
        if self.inputs.keys():
            inputs = 'inputs=[{}]'.format(', '.join(a for a in self.inputs.keys()))
        if self.uniforms.keys():
            uniforms = 'uniforms=[{}]'.format(', '.join(a for a in self.uniforms.keys()))
            if inputs:
                uniforms = ' {}'.format(uniforms)
        string = '<{cname}{version} {inputs}{uniforms}>'.format(**locals())
        return string
class VertexShader(Shader):
    opengl_type = gl.VERTEX_SHADER
class FragmentShader(Shader):
    opengl_type = gl.FRAGMENT_SHADER
class GeometryShader(Shader):
    opengl_type = gl.GEOMETRY_SHADER
class TessellationControlShader(Shader):
    opengl_type = gl.TESS_CONTROL_SHADER
class TessellationEvaluationShader(Shader):
    opengl_type = gl.TESS_EVALUATION_SHADER
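# --- Added usage sketch (not part of the original module) ---
# Creating a shader only needs GLSL source; the wrapper parses the declared
# inputs/uniforms and picks an OpenGL context version. The source string
# below is an arbitrary example (instantiation requires a working GLFW
# install, hence it is only sketched in comments here).
#
#     vertex_src = '''
#     #version 150
#     in vec3 position;
#     uniform mat4 mvp;
#     void main() { gl_Position = mvp * vec4(position, 1.0); }
#     '''
#     shader = VertexShader(vertex_src)
#     print(shader.inputs)    # e.g. OrderedDict([('position', 'vec3')])
#     print(shader.uniforms)  # e.g. {'mvp': ('uniform', 'mat4')}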
 | 
| 
	the-stack_106_32293 | 
	import _plotly_utils.basevalidators
class GeoValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(self, plotly_name="geo", parent_name="layout", **kwargs):
        super(GeoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Geo"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            bgcolor
                Set the background color of the map
            center
                :class:`plotly.graph_objects.layout.geo.Center`
                instance or dict with compatible properties
            coastlinecolor
                Sets the coastline color.
            coastlinewidth
                Sets the coastline stroke width (in px).
            countrycolor
                Sets line color of the country boundaries.
            countrywidth
                Sets line width (in px) of the country
                boundaries.
            domain
                :class:`plotly.graph_objects.layout.geo.Domain`
                instance or dict with compatible properties
            fitbounds
                Determines if this subplot's view settings are
                auto-computed to fit trace data. On scoped
                maps, setting `fitbounds` leads to `center.lon`
                and `center.lat` getting auto-filled. On maps
                with a non-clipped projection, setting
                `fitbounds` leads to `center.lon`,
                `center.lat`, and `projection.rotation.lon`
                getting auto-filled. On maps with a clipped
                projection, setting `fitbounds` leads to
                `center.lon`, `center.lat`,
                `projection.rotation.lon`,
                `projection.rotation.lat`, `lonaxis.range` and
                `lataxis.range` getting auto-filled. If
                "locations", only the trace's visible locations
                are considered in the `fitbounds` computations.
                If "geojson", the entire trace input `geojson`
                (if provided) is considered in the `fitbounds`
                computations, Defaults to False.
            framecolor
                Sets the color of the frame.
            framewidth
                Sets the stroke width (in px) of the frame.
            lakecolor
                Sets the color of the lakes.
            landcolor
                Sets the land mass color.
            lataxis
                :class:`plotly.graph_objects.layout.geo.Lataxis
                ` instance or dict with compatible properties
            lonaxis
                :class:`plotly.graph_objects.layout.geo.Lonaxis
                ` instance or dict with compatible properties
            oceancolor
                Sets the ocean color
            projection
                :class:`plotly.graph_objects.layout.geo.Project
                ion` instance or dict with compatible
                properties
            resolution
                Sets the resolution of the base layers. The
                values have units of km/mm e.g. 110 corresponds
                to a scale ratio of 1:110,000,000.
            rivercolor
                Sets color of the rivers.
            riverwidth
                Sets the stroke width (in px) of the rivers.
            scope
                Set the scope of the map.
            showcoastlines
                Sets whether or not the coastlines are drawn.
            showcountries
                Sets whether or not country boundaries are
                drawn.
            showframe
                Sets whether or not a frame is drawn around the
                map.
            showlakes
                Sets whether or not lakes are drawn.
            showland
                Sets whether or not land masses are filled in
                color.
            showocean
                Sets whether or not oceans are filled in color.
            showrivers
                Sets whether or not rivers are drawn.
            showsubunits
                Sets whether or not boundaries of subunits
                within countries (e.g. states, provinces) are
                drawn.
            subunitcolor
                Sets the color of the subunits boundaries.
            subunitwidth
                Sets the stroke width (in px) of the subunits
                boundaries.
            uirevision
                Controls persistence of user-driven changes in
                the view (projection and center). Defaults to
                `layout.uirevision`.
            visible
                Sets the default visibility of the base layers.
""",
            ),
            **kwargs
        )
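# --- Added usage sketch (not part of the original module) ---
# This validator backs the `layout.geo` property; in user code the same
# settings are reached through plotly's public API, for example:
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Scattergeo(lon=[0.0], lat=[51.5]))
#     fig.update_layout(geo=dict(scope="europe", showland=True,
#                                landcolor="rgb(230, 230, 230)"))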
 | 
| 
	the-stack_106_32299 | 
	#!/usr/bin/env python
"""Trivial example scheduler
:Authors: Eric H. Neilsen, Jr.
:Organization: Fermi National Accelerator Laboratory
"""
__docformat__ = "restructuredtext en"
import json
import os
import posix
import datetime
import logging
import time
from collections import OrderedDict
from argparse import ArgumentParser
from ConfigParser import ConfigParser
from obstac import Scheduler
class ExampleScheduler(Scheduler):
    def make_script(self):
        # This method get called when autoobs is first enabled, and when
        # time the SISPI OCS queue is changed while the autoobs is still
        # enabled
        with open(self.queue_fname, 'r') as fp:
            sispi_queue = json.load(fp)
        logging.debug("Found %d exposure(s) on the SISPI/OCS queue." % len(sispi_queue))
            
        with open(self.in_progress_fname, 'r') as fp:
            in_progress = json.load(fp)
        # If we don't want to add anything, return an empty list
        if len(sispi_queue) >= self.min_queue_len:
            logging.info("Queue is already %d exposures long, not adding anything"
                         % len(sispi_queue))
            # Add an empty script so autoobs knows the scheduler "passed"
            with open(self.output_fname, 'w') as fp:
                json.dump([], fp, indent=4)
            os.chmod(self.output_fname, 0o666)
            return
        try:
            self.expid += 1
        except AttributeError:
            # First exposure generated by this scheduler instance
            self.expid = 1
        
        # Follow Meeus chapter 12 to calculate LST
        mjd = 40587+time.time()/86400
        century = (mjd - 51544.5)/36525
        gmst = 280.46061837 + 360.98564736629*(mjd-51544.5) + 0.000387933*century*century - century*century*century/38710000
        lst = (gmst + self.longitude) % 360
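        # Added note (not in the original): 40587 is the Modified Julian Date
        # of the Unix epoch (1970-01-01), so `mjd` is the current MJD; `gmst`
        # follows Meeus' polynomial for Greenwich mean sidereal time in
        # degrees, and `lst` adds the site longitude (assumed to be degrees
        # east), wrapped into [0, 360).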
        exposure = OrderedDict([
            ("expType", "object"), 
            ("object", "test_%d" % self.expid), 
            ("seqid", "Sequence of 1 test exposure"), 
            ("exptime", 90), 
            ("wait", "False"), 
            ("count", 1), 
            ("filter", "z"), 
            ("program", "test"), 
            ("RA", lst), 
            ("dec", self.latitude)])
        # List of exposures to add to the queue
        # In this case, it is a list of just one exposure, but
        # it can be any number.
        exposures = [exposure]
        
        with open(self.output_fname, 'w') as fp:
            logging.info("Sending %d exposure(s) to the SISPI/OCS queue." % len(exposures))
            json.dump(exposures, fp, indent=4)
        os.chmod(self.output_fname, 0o666)
            
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s %(message)s',
                        level=logging.DEBUG)
    parser = ArgumentParser('Simple sample scheduler')
    parser.add_argument("config", help="the configuration file")
    args = parser.parse_args()
    scheduler = ExampleScheduler(args.config)
    scheduler.min_queue_len = 5
    scheduler()
 | 
| 
	the-stack_106_32301 | 
	# Author: Rishabh Sharma <[email protected]>
# This module was developed under funding provided by
# Google Summer of Code 2014
from ..client import GenericClient
import os
import tarfile
from functools import partial
from collections import OrderedDict
from astropy.time import Time
from astropy.time import TimeDelta
import astropy.units as u
import sunpy
from sunpy.util import replacement_filename
from sunpy.net.dataretriever.client import simple_path
from sunpy.net.download import Downloader, Results
__all__ = ['NOAAIndicesClient', 'NOAAPredictClient', 'SRSClient']
class NOAAIndicesClient(GenericClient):
    @staticmethod
    def _get_default_uri():
        """Return the url to download indices"""
        return ["ftp://ftp.swpc.noaa.gov/pub/weekly/RecentIndices.txt"]
    def _get_url_for_timerange(self, timerange, **kwargs):
        """
        Helper function:
        """
        return NOAAIndicesClient._get_default_uri()
    def _makeimap(self):
        """
        Helper function: used to hold information about the source.
        """
        self.map_['source'] = 'sdic'
        self.map_['instrument'] = 'noaa-indices'
        self.map_['physobs'] = 'sunspot number'
        self.map_['provider'] = 'swpc'
    @classmethod
    def _can_handle_query(cls, *query):
        """
        Answers whether client can service the query.
        Parameters
        ----------
        query : list of query objects
        Returns
        -------
        boolean
            answer as to whether client can service the query
        """
        chkattr = ['Time', 'Instrument']
        chklist = [x.__class__.__name__ in chkattr for x in query]
        for x in query:
            if x.__class__.__name__ == 'Instrument' and x.value == 'noaa-indices':
                return all(chklist)
        return False
class NOAAPredictClient(GenericClient):
    @staticmethod
    def _get_default_uri():
        """Return the url to download indices"""
        return ["http://services.swpc.noaa.gov/text/predicted-sunspot-radio-flux.txt"]
    def _get_url_for_timerange(self, timerange, **kwargs):
        """
        Helper function:
        """
        return NOAAPredictClient._get_default_uri()
    def _makeimap(self):
        """
        Helper function: used to hold information about the source.
        """
        self.map_['source'] = 'ises'
        self.map_['instrument'] = 'noaa-predict'
        self.map_['physobs'] = 'sunspot number'
        self.map_['provider'] = 'swpc'
    @classmethod
    def _can_handle_query(cls, *query):
        """
        Answers whether client can service the query.
        Parameters
        ----------
        query : list of query objects
        Returns
        -------
        boolean
            answer as to whether client can service the query
        """
        chkattr = ['Time', 'Instrument']
        chklist = [x.__class__.__name__ in chkattr for x in query]
        for x in query:
            if x.__class__.__name__ == 'Instrument' and x.value.lower() == 'noaa-predict':
                return all(chklist)
        return False
class SRSClient(GenericClient):
    @staticmethod
    def _get_default_uri():
        today = Time.now()
        year = today.strftime('%Y')
        date = today.strftime('%Y%m%d')
        return [('ftp://ftp.swpc.noaa.gov/pub/warehouse/',
                 '{0}/SRS/{1}SRS.txt').format(year, date)]
    def _get_url_for_timerange(self, timerange, **kwargs):
        if not timerange:
            return SRSClient._get_default_uri()
        result = list()
        base_url = 'ftp://ftp.swpc.noaa.gov/pub/warehouse/'
        total_days = int(timerange.days) + 1
        all_dates = timerange.split(total_days)
        today_year = Time.now().strftime('%Y')
        for day in all_dates:
            if today_year == day.end.strftime('%Y'):
                suffix = '{0}/SRS/{1}SRS.txt'.format(
                    day.end.strftime('%Y'), day.end.strftime('%Y%m%d'))
            else:
                suffix = '{0}/{1}_SRS.tar.gz'.format(
                    day.end.strftime('%Y'), day.end.strftime('%Y'))
            url = base_url + suffix
            result.append(url)
        return result
    def fetch(self, qres, path=None, error_callback=None, **kwargs):
        """
        Download a set of results.
        Parameters
        ----------
        qres : `~sunpy.net.dataretriever.QueryResponse`
            Results to download.
        Returns
        -------
        Results Object
        """
        urls = [qrblock.url for qrblock in qres]
        filenames = []
        local_filenames = []
        for i, [url, qre] in enumerate(zip(urls, qres)):
            name = url.split('/')[-1]
            # temporary fix !!! coz All QRBs have same start_time values
            day = Time(qre.time.start.strftime('%Y-%m-%d')) + TimeDelta(i*u.day)
            if name not in filenames:
                filenames.append(name)
            if name.endswith('.gz'):
                local_filenames.append('{}SRS.txt'.format(day.strftime('%Y%m%d')))
            else:
                local_filenames.append(name)
        # Files to be actually downloaded
        paths = self._get_full_filenames(qres, filenames, path)
        # Those files that will be present after get returns
        local_paths = self._get_full_filenames(qres, local_filenames, path)
        res = Results(lambda x: None, 0, lambda map_: self._link(map_))
        # remove duplicate urls. This will make paths and urls to have same number of elements.
        # OrderedDict is required to maintain ordering because it will be zipped with paths later
        urls = list(OrderedDict.fromkeys(urls))
        dobj = Downloader(max_conn=len(urls), max_total=len(urls))
        # We cast to list here in list(zip... to force execution of
        # res.require([x]) at the start of the loop.
        for aurl, ncall, fname in list(zip(urls, map(lambda x: res.require([x]),
                                                     urls), paths)):
            dobj.download(aurl, fname, ncall, error_callback)
        res.wait()
        res2 = Results(lambda x: None, 0)
        for fname, srs_filename in zip(local_paths, local_filenames):
            fname = fname.args[0]
            name = fname.split('/')[-1]
            past_year = False
            for i, fname2 in enumerate(paths):
                fname2 = fname2.args[0]
                if fname2.endswith('.txt'):
                    continue
                year = fname2.split('/')[-1]
                year = year.split('_SRS')[0]
                if year in name:
                    TarFile = tarfile.open(fname2)
                    filepath = fname.rpartition('/')[0]
                    member = TarFile.getmember('SRS/' + srs_filename)
                    member.name = name
                    TarFile.extract(member, path=filepath)
                    TarFile.close()
                    callback = res2.require([fname])
                    callback({'path': fname})
                    past_year = True
                    break
            if past_year is False:
                callback = res2.require([fname])
                callback({'path': fname})
        return res2
    def _makeimap(self):
        self.map_['source'] = 'swpc'
        self.map_['instrument'] = 'SOON'
        self.map_['physobs'] = 'SRS'
        self.map_['source'] = 'NOAA/USAF'
    @classmethod
    def _can_handle_query(cls, *query):
        """
        Answers whether client can service the query.
        Parameters
        ----------
        query : list of query objects
        Returns
        -------
        boolean
            answer as to whether client can service the query
        """
        chkattr = ["Time", "Instrument"]
        chklist = [x.__class__.__name__ in chkattr for x in query]
        for x in query:
            if x.__class__.__name__ == "Instrument" and\
               str(x.value).lower() in ["soon", "srs_table"]:
                return True
        return False
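# --- Added usage sketch (not part of the original module) ---
# These clients are normally reached through sunpy's Fido interface rather
# than instantiated directly, along the lines of:
#
#     from sunpy.net import Fido, attrs as a
#     result = Fido.search(a.Time("2016/1/1", "2016/1/2"),
#                          a.Instrument("noaa-indices"))
#     files = Fido.fetch(result)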
 | 
| 
	the-stack_106_32303 | 
	from devito.tools import memoized_meth
from devito import VectorTimeFunction, TensorTimeFunction
from examples.seismic import Receiver
from examples.seismic.elastic.operators import ForwardOperator
class ElasticWaveSolver(object):
    """
    Solver object that provides operators for seismic inversion problems
    and encapsulates the time and space discretization for a given problem
    setup.
    Parameters
    ----------
    model : Model
        Physical model with domain parameters.
    geometry : AcquisitionGeometry
        Geometry object that contains the source (SparseTimeFunction) and
        receivers (SparseTimeFunction) and their position.
    space_order : int, optional
        Order of the spatial stencil discretisation. Defaults to 4.
    """
    def __init__(self, model, geometry, space_order=4, **kwargs):
        self.model = model
        self.geometry = geometry
        self.space_order = space_order
        # Time step can be \sqrt{3}=1.73 bigger with 4th order
        self.dt = self.model.critical_dt
        # Cache compiler options
        self._kwargs = kwargs
    @memoized_meth
    def op_fwd(self, save=None):
        """Cached operator for forward runs with buffered wavefield"""
        return ForwardOperator(self.model, save=save, geometry=self.geometry,
                               space_order=self.space_order, **self._kwargs)
    def forward(self, src=None, rec1=None, rec2=None, lam=None, mu=None, b=None,
                v=None, tau=None, save=None, **kwargs):
        """
        Forward modelling function that creates the necessary
        data objects for running a forward modelling operator.
        Parameters
        ----------
        src : SparseTimeFunction or array_like, optional
            Time series data for the injected source term.
        rec1 : SparseTimeFunction or array_like, optional
            The interpolated receiver data of the pressure (tzz).
        rec2 : SparseTimeFunction or array_like, optional
            The interpolated receiver data of the particle velocities.
        v : VectorTimeFunction, optional
            The computed particle velocity.
        tau : TensorTimeFunction, optional
            The computed symmetric stress tensor.
        lam : Function, optional
            The time-constant first Lame parameter `rho * (vp**2 - 2 * vs **2)`.
        mu : Function, optional
            The Shear modulus `(rho * vs*2)`.
        b : Function, optional
            The time-constant inverse density (b=1 for water).
        save : int or Buffer, optional
            Option to store the entire (unrolled) wavefield.
        Returns
        -------
        Rec1(tzz), Rec2(div(v)), particle velocities v, stress tensor tau and
        performance summary.
        """
        # Source term is read-only, so re-use the default
        src = src or self.geometry.src
        # Create a new receiver object to store the result
        rec1 = rec1 or Receiver(name='rec1', grid=self.model.grid,
                                time_range=self.geometry.time_axis,
                                coordinates=self.geometry.rec_positions)
        rec2 = rec2 or Receiver(name='rec2', grid=self.model.grid,
                                time_range=self.geometry.time_axis,
                                coordinates=self.geometry.rec_positions)
        # Create all the fields vx, vz, tau_xx, tau_zz, tau_xz
        save_t = src.nt if save else None
        v = VectorTimeFunction(name='v', grid=self.model.grid, save=save_t,
                               space_order=self.space_order, time_order=1)
        tau = TensorTimeFunction(name='tau', grid=self.model.grid, save=save_t,
                                 space_order=self.space_order, time_order=1)
        kwargs.update({k.name: k for k in v})
        kwargs.update({k.name: k for k in tau})
        # Pick Lame parameters from model unless explicitly provided
        kwargs.update(self.model.physical_params(lam=lam, mu=mu, b=b))
        # Execute operator and return wavefield and receiver data
        summary = self.op_fwd(save).apply(src=src, rec1=rec1, rec2=rec2,
                                          dt=kwargs.pop('dt', self.dt), **kwargs)
        return rec1, rec2, v, tau, summary
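# --- Added usage sketch (not part of the original module) ---
# Assuming a `model` (examples.seismic.Model) and matching `geometry`
# (examples.seismic.AcquisitionGeometry) built elsewhere, a forward run
# looks roughly like:
#
#     solver = ElasticWaveSolver(model, geometry, space_order=4)
#     rec1, rec2, v, tau, summary = solver.forward(save=False)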
 | 
| 
	the-stack_106_32304 | 
	# Copyright 2017 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A base class definition for trainable optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
OPTIMIZER_SCOPE = "LOL"
_LOCAL_VARIABLE_PREFIX = "local_state_"
_LOCAL_STATE_VARIABLE_COLLECTION = "local_state_collection"
EPSILON = 1e-6
class TrainableOptimizer(tf.compat.v1.train.Optimizer):
    """Base class for trainable optimizers.
    A trainable optimizer is an optimizer that has parameters that can themselves
    be learned (meta-optimized).
    Subclasses must implement:
            _compute_update(self, param, grad, state)
    """
    def __init__(self, name, state_keys, use_attention=False,
                             use_log_objective=False, obj_train_max_multiplier=-1,
                             use_second_derivatives=True, use_numerator_epsilon=False,
                             **kwargs):
        """Initializes the optimizer with the given name and settings.
        Args:
            name: The name string for this optimizer.
            state_keys: The names of any required state variables (list)
            use_attention: Whether this optimizer uses attention (Default: False)
            use_log_objective: Whether this optimizer uses the logarithm of the
                    objective when computing the loss (Default: False)
            obj_train_max_multiplier: The maximum multiplier for the increase in the
                    objective before meta-training is stopped. If <= 0, meta-training is
                    not stopped early. (Default: -1)
            use_second_derivatives: Whether this optimizer uses second derivatives in
                    meta-training. This should be set to False if some second derivatives
                    in the meta-training problem set are not defined in Tensorflow.
                    (Default: True)
            use_numerator_epsilon: Whether to use epsilon in the numerator when
                    scaling the problem objective during meta-training. (Default: False)
            **kwargs: Any additional keyword arguments.
        """
        self.use_second_derivatives = use_second_derivatives
        self.state_keys = sorted(state_keys)
        self.use_attention = use_attention
        self.use_log_objective = use_log_objective
        self.obj_train_max_multiplier = obj_train_max_multiplier
        self.use_numerator_epsilon = use_numerator_epsilon
        use_locking = False
        super(TrainableOptimizer, self).__init__(use_locking, name)
    def _create_slots(self, var_list):
        """Creates all slots needed by the variables.
        Args:
            var_list: A list of `Variable` objects.
        """
        for var in var_list:
            init_states = self._initialize_state(var)
            for slot_name in sorted(init_states):
                slot_var_name = "{}_{}".format(self.get_name(), slot_name)
                value = init_states[slot_name]
                self._get_or_make_slot(var, value, slot_name, slot_var_name)
    def _initialize_state(self, var):
        """Initializes any state required for this variable.
        Args:
            var: a tensor containing parameters to be optimized
        Returns:
            state: a dictionary mapping state keys to initial state values (tensors)
        """
        return {}
    def _initialize_global_state(self):
        """Initializes any global state values."""
        return []
    
    def _apply_common(self, grad, var):
        """Applies the optimizer updates to the variables.
        Note: this should only get called via _apply_dense or _apply_sparse when
        using the optimizer via optimizer.minimize or optimizer.apply_gradients.
        During meta-training, the optimizer.train function should be used to
        construct an optimization path that is differentiable.
        Args:
            grad: A tensor representing the gradient.
            var: A tf.Variable with the same shape as grad.
        Returns:
            update_op: A tensorflow op that assigns new values to the variable, and
                    also defines dependencies that update the state variables for the
                    optimizer.
        """
        state = {key: self.get_slot(var, key) for key in self.get_slot_names()}
        new_var, new_state = self._compute_update(var, grad, state)
        state_assign_ops = [tf.compat.v1.assign(state_var, new_state[key])
                                                for key, state_var in state.items()]
        with tf.control_dependencies(state_assign_ops):
            update_op = var.assign(new_var)
        return update_op
    def _apply_dense(self, grad, var):
        """Adds ops to apply dense gradients to 'var'."""
        return self._apply_common(grad, var)
    def _apply_sparse(self, grad, var):
        """Adds ops to apply sparse gradients to 'var'."""
        return self._apply_common(grad, var)
    def _compute_update(self, param, grad, state):
        """Computes the update step for optimization.
        Args:
            param: A tensor of parameters to optimize.
            grad: The gradient tensor of the objective with respect to the parameters.
                    (It has the same shape as param.)
            state: A dictionary containing any extra state required by the optimizer.
        Returns:
            updated_params: The updated parameters.
            updated_state: The dictionary of updated state variable(s).
        """
        raise NotImplementedError
    def _compute_updates(self, params, grads, states, global_state):
        """Maps the compute update functions for each parameter.
        This function can be overridden by a subclass if the subclass wants to
        combine information across the different parameters in the list.
        Args:
            params: A list of parameter tensors.
            grads: A list of gradients corresponding to each parameter.
            states: A list of state variables corresponding to each parameter.
            global_state: A list of global state variables for the problem.
        Returns:
            new_params: The updated parameters.
            new_states: The updated states.
            new_global_state: The updated global state.
            attention_params: A list of attention parameters. This is the same as
                    new_params if the optimizer does not use attention.
        """
        # Zip up the arguments to _compute_update.
        args = zip(params, grads, states)
        # Call compute_update on each set of parameter/gradient/state args.
        new_params, new_states = zip(*list(
                itertools.starmap(self._compute_update, args)))
        # Global state is unused in the basic case, just pass it through.
        return list(new_params), list(new_states), global_state, list(new_params)
    def train(self, problem, dataset, dataset_val=None, sample_meta_loss=None, meta_method="average"):
        """Creates graph operations to train the optimizer.
        Args:
            problem: A problem_generator.Problem instance to train on.
            dataset: A datasets.Dataset tuple to use when training.
            dataset_val: A datasets.Dataset tuple used to compute the meta objective
            sample_meta_loss: If not None, the number of validation examples to
                sample each step when computing the meta objective
            meta_method: "average" - meta objective is averaged over all steps
                         "last" - meta objective is just of the last step
        Returns:
            meta_objective: A tensorflow operation for computing the meta-objective
            obj_weights: A tensor placeholder for feeding in the objective weights
            obj_values: The subproblem objective values during optimization
            batches: The batch indexes tensor for overriding with feed_dict
            first_unroll: A placeholder signifying if this is a first unroll
                (this will propagate the gradients slightly differently).
            reset_state: A placeholder signifying that the rnn state should be reset.
            output_state: The final state of the optimizer
            init_loop_vars_to_override: Local variables that can be assigned to
                propagate the optimizer and problem state for unrolling
            final_loop_vals: Final values of the loop variables that can be
                assigned to init_loop_vars_to_override.
        """
        # Placeholder for the objective weights
        obj_weights = tf.compat.v1.placeholder(tf.float32)
        num_iter = tf.shape(obj_weights)[0]
        # Unpack the dataset and generate the minibatches for training
        # For train by train, the train set and val set will be the same
        if dataset_val is None:
            dataset_val = dataset
        
        data_train, labels_train = dataset
        data_val, labels_val = dataset_val
        if sample_meta_loss is not None and sample_meta_loss == len(labels_val):
            sample_meta_loss = None  # Disable sampling: this makes the code run faster because there is no sampling step; otherwise the code would explicitly sample out the entire validation set.
            
        # Convert the ndarrays to tensors so we can pass them back in via feed_dict
        data_train = tf.constant(data_train)
        labels_train = tf.constant(labels_train)
        data_val = tf.constant(data_val)
        labels_val = tf.constant(labels_val)
        batches = tf.compat.v1.placeholder(tf.int32)
        batches_val = tf.compat.v1.placeholder(tf.int32)
        first_unroll = tf.compat.v1.placeholder_with_default(False, [])
        reset_state = tf.compat.v1.placeholder_with_default(False, [])
        training_output = collections.namedtuple("TrainingOutput",
                                                 ["metaobj",
                                                  "obj_weights",
                                                  "problem_objectives",
                                                  "initial_obj",
                                                  "batches",
                                                  "first_unroll",
                                                  "reset_state",
                                                  "output_state",
                                                  "init_loop_vars",
                                                  "output_loop_vars",
                                                  "batches_val"])
        def loop_body(itr, obj_accum, params, attend_params, flattened_states,
                                    global_state, all_obj, unused_init_obj, batches, batches_val):
            """Body of the meta-training while loop for optimizing a sub-problem.
            Args:
                itr: The current meta-training iteration.
                obj_accum: The accumulated objective over all training steps so far.
                params: The parameters of the sub-problem.
                attend_params: The parameters of the sub-problems at the attended
                        location.
                flattened_states: The states of the trainable optimizer, sorted and
                        flattened into a list (since a while loop can't handle nested lists
                        or dictionaries).
                global_state: The global state of the optimizer.
                all_obj: The list of all objective values in the training process.
                unused_init_obj: The initial objective (unused here, but needed in the
                        variable list because it's used in a stopping condition in the
                        loop_cond.)
                batches: The batch indexes needed for shuffled minibatch creation.
                batches_val: If sample_meta_loss is not None, a subset of the
                        validation data is sampled when computing the meta objective;
                        these are the batch indices for that sample. Note that the
                        batches for different iterations are sampled independently
                        rather than by splitting the whole dataset evenly.
            Returns:
                itr: The updated meta-training iteration.
                obj_accum: The updated accumulated objective.
                params: The new parameters of the sub-problem.
                attend_params: The new parameters of the sub-problems at the attended
                        location.
                flattened_states: The new states of the trainable optimizer.
                global_state: The updated global state.
                all_obj: The updated list of all objective values.
                unused_init_obj: The initial objective.
                batches: The batch indexes needed for shuffled minibatch creation.
                batches_val: The batch indices for meta objective computation.
            """
            batch_indices = tf.gather(batches, itr)
            batch_data = tf.gather(data_train, batch_indices)
            batch_labels = tf.gather(labels_train, batch_indices)
            # obj: used to compute meta obj; current_obj: inner obj
            # Compute the objective over the entire val dataset (full batch).
            if sample_meta_loss is None:
                obj = problem.objective(params, data_val, labels_val)
            else:
                print("{} samples will be randomly sampled when evaluating the meta-objective.".format(sample_meta_loss))
                batch_indices_val = tf.gather(batches_val, itr)
                batch_data_val = tf.gather(data_val, batch_indices_val)
                batch_labels_val = tf.gather(labels_val, batch_indices_val)
                obj = problem.objective(params, batch_data_val, batch_labels_val)
            # Compute the gradients on just the current batch
            if self.use_attention:
                current_obj = problem.objective(attend_params, batch_data, batch_labels)
                grads = problem.gradients(current_obj, attend_params)
            else:
                current_obj = problem.objective(params, batch_data, batch_labels)
                grads = problem.gradients(current_obj, params)
            if not self.use_second_derivatives:
                new_grads = []
                for grad in grads:
                    if isinstance(grad, tf.IndexedSlices):
                        new_grads.append(tf.IndexedSlices(tf.stop_gradient(grad.values), grad.indices))
                    else:
                        new_grads.append(tf.stop_gradient(grad))
                grads = new_grads
            # store the objective value for the entire problem at each iteration
            all_obj = tf.concat([all_obj, tf.reshape(obj, (1,))], 0)
            # accumulate the weighted objective for the entire dataset
            acc = tf.gather(obj_weights, itr) * obj
            obj_accum = tf.add(obj_accum, acc)
            # Set the shape to keep the shape invariant for obj_accum. Without this,
            # the graph builder thinks the tensor shape is unknown on the 2nd iter.
            obj_accum.set_shape([])
            # convert flattened_states to dictionaries
            dict_states = [dict(zip(self.state_keys, flat_state))
                                         for flat_state in flattened_states]
            # compute the new parameters and states
            args = (params, grads, dict_states, global_state)
            updates = self._compute_updates(*args)
            new_params, new_states, new_global_state, new_attend_params = updates
            # flatten the states
            new_flattened_states = [flatten_and_sort(item_dict) for item_dict in new_states]
            return [itr + 1, obj_accum, new_params, new_attend_params,
                            new_flattened_states, new_global_state, all_obj, unused_init_obj,
                            batches, batches_val]
        def loop_cond(itr, obj_accum, unused_params, unused_attend_params,
                                    unused_flattened_states, unused_global_state, all_obj,
                                    init_obj, *args):
            """Termination conditions of the sub-problem optimization loop."""
            del args    # unused
            cond1 = tf.less(itr, num_iter)    # We've run < num_iter times
            cond2 = tf.math.is_finite(obj_accum)    # The objective is still finite
            if self.obj_train_max_multiplier > 0:
                current_obj = tf.gather(all_obj, itr)
                # Account for negative init_obj too
                max_diff = (self.obj_train_max_multiplier - 1) * tf.abs(init_obj)
                max_obj = init_obj + max_diff
                # The objective is a reasonable multiplier of the original objective
                cond3 = tf.less(current_obj, max_obj)
                return tf.logical_and(tf.logical_and(cond1, cond2), cond3,
                                                            name="training_loop_cond")
            else:
                return tf.logical_and(cond1, cond2, name="training_loop_cond")
        # Limit the data passed to the problem.objective() call made inside init:
        # otherwise the full-batch objective call used for init_obj may cause
        # memory issues when the validation set is large.
        if sample_meta_loss is None:
            # The data here are used to compute init_obj
            init = self._initialize_training_loop_parameters(
                problem, data_val, labels_val, batches, first_unroll, reset_state, batches_val)
        else:
            print("{} samples will be randomly sampled when evaluating the meta-objective.".format(sample_meta_loss))
            init = self._initialize_training_loop_parameters(
                problem, data_val[:sample_meta_loss, :], labels_val[:sample_meta_loss], batches, first_unroll, reset_state, batches_val)  # The data here is just used to initialize the obj_init
        loop_vars, invariants, initial_obj, init_loop_vars_to_override = init
        loop_output = tf.while_loop(loop_cond, loop_body, loop_vars, swap_memory=True, shape_invariants=invariants)
        if meta_method == "average":
            meta_obj, problem_objectives = loop_output[1], loop_output[6]
        elif meta_method == "last":
            meta_obj, problem_objectives = loop_output[6][-1], loop_output[6]
        else:
            raise ValueError("meta_method {} not recognized.".format(meta_method))
        # The meta objective is normalized by the initial objective at the start of
        # the series of partial unrolls.
        
        # We don't rescale now
        #scaled_meta_objective = self.scale_objective(meta_obj, problem_objectives, initial_obj)
        if self.use_log_objective:
            scaled_meta_objective = tf.math.log(meta_obj)
        else:
            scaled_meta_objective = meta_obj
        
        final_loop_vals = ([initial_obj] + loop_output[2] + loop_output[3] + loop_output[5])
        final_loop_vals.extend(itertools.chain(*loop_output[4]))
        return training_output(scaled_meta_objective,  # = metaobj
                                                     obj_weights,
                                                     problem_objectives,  # all the inner objectives
                                                     initial_obj,
                                                     batches,
                                                     first_unroll,
                                                     reset_state,
                                                     loop_output[4],  # = output_state
                                                     init_loop_vars_to_override,  # = init_loop_vars
                                                     final_loop_vals,  # = output_loop_vars
                                                     batches_val)                             
    def _initialize_training_loop_parameters(
            self, problem, data, labels, batches, first_unroll, reset_state, batches_val):
        """Initializes the vars and params needed for the training process.
        Args:
            problem: The problem being optimized.
            data: The data for the problem used to compute init_obj.
            labels: The corresponding labels for the data used to compute init_obj.
            batches: The indexes needed to create shuffled batches of the data.
            first_unroll: Whether this is the first unroll in a partial unrolling.
            reset_state: Whether RNN state variables should be reset.
            batches_val: The batch indices used when sampling the meta objective.
        Returns:
            loop_vars: The while loop variables for training.
            invariants: The corresponding variable shapes (required by while loop).
            initial_obj: The initial objective (used later for scaling).
            init_loop_vars_to_override: The loop vars that can be overridden when
                    performing training via partial unrolls.
        """
        # Extract these separately so we don't have to make inter-variable
        # dependencies.
        initial_tensors = problem.init_tensors()
        return_initial_tensor_values = first_unroll
        # This is like a switch, if it is the first unroll, the initial_params will just be the randomly sampled initial_tensors; otherwise, it will get the values locally stored.
        initial_params_vars, initial_params = local_state_variables(
                initial_tensors, return_initial_tensor_values)
        initial_attend_params_vars, initial_attend_params = local_state_variables(
                initial_tensors, return_initial_tensor_values)
        # Recalculate the initial objective for the list on each partial unroll with
        # the new initial_params. initial_obj holds the value from the very first
        # unroll.
        initial_obj_init = problem.objective(initial_params, data, labels)
        return_initial_obj_init = first_unroll
        [initial_obj_var], [initial_obj] = local_state_variables(
                [initial_obj_init], return_initial_obj_init)
        # Initialize the loop variables.
        initial_itr = tf.constant(0, dtype=tf.int32)
        initial_meta_obj = tf.constant(0, dtype=tf.float32)
        # N.B. the use of initial_obj_init here rather than initial_obj
        initial_problem_objectives = tf.reshape(initial_obj_init, (1,))
        # Initialize the extra state.
        initial_state_vars = []
        initial_state = []
        state_shapes = []
        return_initial_state_values = reset_state
        for param in initial_tensors:
            param_state_vars, param_state = local_state_variables(
                    flatten_and_sort(self._initialize_state(param)),
                    return_initial_state_values)
            initial_state_vars.append(param_state_vars)
            initial_state.append(param_state)
            state_shapes.append([f.get_shape() for f in param_state])
        # Initialize any global (problem-level) state.
        initial_global_state_vars, initial_global_state = local_state_variables(
                self._initialize_global_state(), return_initial_state_values)
        global_shapes = []
        for item in initial_global_state:
            global_shapes.append(item.get_shape())
        # build the list of loop variables:
        loop_vars = [
                initial_itr,
                initial_meta_obj,
                initial_params,                 # Local variables.
                initial_attend_params,    # Local variables.
                initial_state,                    # Local variables.
                initial_global_state,     # Local variables.
                initial_problem_objectives,
                initial_obj,                        # Local variable.
                batches,
                batches_val
        ]
        invariants = [
                initial_itr.get_shape(),
                initial_meta_obj.get_shape(),
                [t.get_shape() for t in initial_params],
                [t.get_shape() for t in initial_attend_params],
                state_shapes,
                global_shapes,
                tensor_shape.TensorShape([None]),     # The problem objectives list grows
                initial_obj.get_shape(),
                tensor_shape.unknown_shape(),    # Placeholder shapes are unknown
                tensor_shape.unknown_shape()
        ]
        # Initialize local variables that we will override with final tensors at the
        # next iter.
        init_loop_vars_to_override = (
                [initial_obj_var] + initial_params_vars + initial_attend_params_vars +
                initial_global_state_vars)
        init_loop_vars_to_override.extend(itertools.chain(*initial_state_vars))
        return loop_vars, invariants, initial_obj, init_loop_vars_to_override
    def scale_objective(self, total_obj, all_objs, initial_obj,
                                            obj_scale_eps=1e-6):
        """Normalizes the objective based on the initial objective value.
        Args:
            total_obj: The total accumulated objective over the training run.
            all_objs: A list of all the individual objectives over the training run.
            initial_obj: The initial objective value.
            obj_scale_eps: The epsilon value to use in computations for stability.
        Returns:
            The scaled objective as a single value.
        """
        if self.use_log_objective:
            if self.use_numerator_epsilon:
                scaled_problem_obj = ((all_objs + obj_scale_eps) /
                                                            (initial_obj + obj_scale_eps))
                log_scaled_problem_obj = tf.math.log(scaled_problem_obj)
            else:
                scaled_problem_obj = all_objs / (initial_obj + obj_scale_eps)
                log_scaled_problem_obj = tf.math.log(scaled_problem_obj + obj_scale_eps)
            return tf.reduce_mean(log_scaled_problem_obj)
        else:
            return total_obj / (initial_obj + obj_scale_eps)
def local_state_variables(init_values, return_init_values): 
    """Create local variables initialized from init_values.
    This will create local variables from a list of init_values. Each variable
    will be named based on the value's shape and dtype.
    As a convenience, a boolean tensor allows you to return the value from
    either the created local variable or the original init value.
    Args:
        init_values: iterable of tensors
        return_init_values: boolean tensor
    Returns:
        local_vars: list of the created local variables.
        vals: if return_init_values is true, then this returns the values of
            init_values. Otherwise it returns the values of the local_vars.
    """
    if not init_values:
        return [], []
    # This generates a harmless warning when saving the metagraph.
    variable_use_count = tf.compat.v1.get_collection_ref(_LOCAL_STATE_VARIABLE_COLLECTION)
    if not variable_use_count:
        variable_use_count.append(collections.defaultdict(int))
    variable_use_count = variable_use_count[0]    # count the number of times that the name is used, to avoid name collision
    local_vars = []
    with tf.compat.v1.variable_scope(OPTIMIZER_SCOPE):
        # We can't use the init_value as an initializer as init_value may
        # itself depend on some problem variables. This would produce
        # inter-variable initialization order dependence which TensorFlow
        # sucks at making easy.
        for init_value in init_values:
            name = create_local_state_variable_name(init_value)
            unique_name = name + "_" + str(variable_use_count[name])
            variable_use_count[name] += 1
            # The overarching idea here is to be able to reuse variables between
            # different sessions on the same TensorFlow master without errors. By
            # uniquifying based on the type and name we mirror the checks made inside
            # TensorFlow, while still allowing some memory reuse. Ultimately this is a
            # hack due to the broken Session.reset().
            local_vars.append(
                    tf.compat.v1.get_local_variable(
                            unique_name,
                            initializer=tf.zeros(
                                    init_value.get_shape(), dtype=init_value.dtype)))
    # It makes things a lot simpler if we use the init_value the first
    # iteration, instead of the variable itself. It allows us to propagate
    # gradients through it as well as simplifying initialization. The variable
    # ends up assigned to after the first iteration.
    vals = tf.cond(return_init_values, lambda: init_values, lambda: local_vars)
    if len(init_values) == 1:
        # tf.cond extracts elements from singleton lists.
        vals = [vals]
    return local_vars, vals
def create_local_state_variable_name(tensor):
    """Create a name of the variable based on its type and shape."""
    if not tensor.get_shape().is_fully_defined():
        raise ValueError("Need a fully specified shape to create a local variable.")
    return (_LOCAL_VARIABLE_PREFIX + "_".join(
            map(str, tensor.get_shape().as_list())) + "_" + tensor.dtype.name)
def is_local_state_variable(op):
    """Returns if this op is a local state variable created for training."""
    return op.node_def.op in ["Variable", "VariableV2"] and op.name.startswith(
            OPTIMIZER_SCOPE + "/" + _LOCAL_VARIABLE_PREFIX)
def flatten_and_sort(dictionary):
    """Flattens a dictionary into a list of values sorted by the keys."""
    return [dictionary[k] for k in sorted(dictionary.keys())]
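# --- Illustrative sketch (not part of the original module) ------------------
# A minimal example of the subclass contract described in the class docstring
# above: subclasses implement _compute_update(param, grad, state) and return
# the new parameters plus the new state dictionary. The class below is an
# assumption-level sketch of plain SGD with momentum expressed in this
# framework; the name "SGDTrainableOptimizer" and its hyperparameters are
# hypothetical and do not come from the original code.
class SGDTrainableOptimizer(TrainableOptimizer):
    """Toy optimizer: SGD with momentum, used to illustrate the API."""

    def __init__(self, learning_rate=0.1, momentum=0.9, **kwargs):
        self.learning_rate = learning_rate
        self.momentum = momentum
        # One state variable per parameter tensor: the momentum buffer.
        super(SGDTrainableOptimizer, self).__init__(
            "SGDToy", ["momentum_buffer"], **kwargs)

    def _initialize_state(self, var):
        # The momentum buffer starts at zero and has the parameter's shape.
        return {"momentum_buffer": tf.zeros_like(var)}

    def _compute_update(self, param, grad, state):
        new_buffer = self.momentum * state["momentum_buffer"] + grad
        new_param = param - self.learning_rate * new_buffer
        return new_param, {"momentum_buffer": new_buffer}
# -----------------------------------------------------------------------------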
 | 
| 
	the-stack_106_32308 | 
	# coding: utf-8
import tushare as ts
import pandas as pd
import datetime
# Global parameter container (the backtest platform normally provides `g`;
# a plain dict has no attributes, so use a simple namespace object instead)
class _Globals(object):
    pass
g = _Globals()
# Define the sector category to use ('index' or 'industry')
g.index = 'industry'
# Define the global parameter values
g.indexThre = 0.2  # proportion of sectors trading above their pastDay-day moving average
g.pastDay = 30  # look-back window, in days
g.topK = 6  # number of stocks selected per sector
if g.index == 'index':
    # Define the list of sector indexes used to pick stocks from
    g.indexList = ['000928.XSHG', '000929.XSHG', '000930.XSHG', '000931.XSHG',
                   '000932.XSHG', '000933.XSHG', '000934.XSHG', '000935.XSHG',
                   '000936.XSHG', '000937.XSHG', '000938.XSHG']
elif g.index == 'industry':
    # Define the list of industry codes used to pick stocks from
    g.indexList = ['A01','A02','A03','A04','A05','B06', \
                   'B07','B08','B09','B11','C13','C14','C15','C17','C18', \
                   'C19','C20','C21','C22','C23','C24','C25','C26','C27', \
                   'C28','C29','C30','C31','C32','C33','C34','C35','C36', \
                   'C37','C38','C39','C40','C41','C42','D44','D45','D46', \
                   'E47','E48','E50','F51','F52','G53','G54','G55','G56', \
                   'G58','G59','H61','H62','I63','I64','I65','J66','J67', \
                   'J68','J69','K70','L71','L72','M73','M74','N77','N78', \
                   'P82','Q83','R85','R86','R87','S90']
# Compute the relative strength (RPS) value
def calRPS(stocks, curDate, preDate):
    # Initialize parameters
    numStocks = len(stocks)
    rankValue = []
    # Compute the price change of each stock over the window
    for code in stocks:
        # Get the daily quotes over the past pastDay days
        lastDf = ts.get_hist_data(
            code=code, start=preDate, end=curDate, ktype='D')
        # ts.get_hist_data returns rows in reverse chronological order, so
        # iloc[0] holds the latest close and iloc[-1] the earliest close.
        lastClosePrice = float(lastDf['close'].iloc[0])
        firstClosePrice = float(lastDf['close'].iloc[-1])
        # Price change over the window
        errCloseOpen = [lastClosePrice - firstClosePrice]
        rankValue += errCloseOpen
    # Rank by the price change over the window
    rpsStocks = {'code': stocks, 'rankValue': rankValue}
    rpsStocks = pd.DataFrame(rpsStocks)
    rpsStocks = rpsStocks.sort_values('rankValue', ascending=False)
    stocks = list(rpsStocks['code'])
    # Compute the RPS value
    rpsValue = [99 - (100 * i / numStocks) for i in range(numStocks)]
    rpsStocks = {'code': stocks, 'rpsValue': rpsValue}
    rpsStocks = pd.DataFrame(rpsStocks)
    return rpsStocks
# Stock pool: keep the strong, drop the weak
def findStockPool(indexList, curDate, preDate, index='index'):
    topK = g.topK
    candidates = []
    rpsValue = []
    # Select the topK stocks with the highest RPS value from each sector
    for eachIndex in indexList:
        # Get the stocks belonging to this sector
        if index == 'index':
            stocks = get_index_stocks(eachIndex)
        elif index == 'industry':
            stocks = get_industry_stocks(eachIndex)
        else:
            return 'Error index order'
        # Compute the relative strength (RPS) value of the stocks
        rpsStocks = calRPS(stocks, curDate, preDate)
        # Accumulate the top-K codes instead of overwriting the stock list
        candidates += list(rpsStocks[:topK]['code'])
        # rpsValue += list(rpsStocks[:topK]['rpsValue'])
    return candidates
# Stock selection: single moving-average momentum strategy
def selectStocks(stocks, curDate, data):
    # Initialize
    returnStocks = []
    # Keep only stocks whose latest close is above the 5-day moving average
    for code in stocks:
        closePrice = ts.get_hist_data(
            code=code, start=curDate, end=curDate, ktype='D')
        closePrice = float(closePrice['close'].iloc[-1])
        ma5 = data[code].mavg(5, 'close')
        ma15 = data[code].mavg(15, 'close')
        # if closePrice > ma5:
        if closePrice > ma5 and ma5 > ma15:
            returnStocks.append(code)
        else:
            continue
    return returnStocks
# Stop loss: bull/bear market dividing line
def calBuySign(indexList, pastDay, data, index='index'):
    # Initialize
    indexThre = g.indexThre
    # Compute the pastDay-day average of each sector and check whether the
    # bull/bear threshold is satisfied
    count = 0
    if index == 'index':
        for eachIndex in indexList:
            avgPrice = data[eachIndex].mavg(pastDay, 'close')
            if data[eachIndex].mavg(1, 'close') > avgPrice:
                count += 1
            else:
                continue
    elif index == 'industry':
        for eachIndustry in indexList:
            stocks = get_industry_stocks(eachIndustry)
            pastValue = 0
            curValue = 0
            for eachStocks in stocks:
                # pastValue += data[eachStocks].mavg(pastDay,'close')
                # curValue += data[eachStocks].mavg(1,'close')
                stocksPastPrice = data[eachStocks].mavg(pastDay, 'close')
                stocksCurrPrice = data[eachStocks].price
                if stocksPastPrice is None or stocksCurrPrice is None:
                    continue
                else:
                    pastValue += stocksPastPrice
                    curValue += stocksCurrPrice
            if curValue > pastValue:
                count += 1
            else:
                continue
    else:
        return 'Error index order.'
    # Emit the bull/bear market signal based on the share of qualifying sectors
    if float(count) / len(indexList) > indexThre:
        return True
    else:
        return False
# Called once per unit of time (once per day for daily backtests, once per
# minute for minute-level backtests)
def handle_data(context, data):
    # Initialize parameters
    index = g.index
    indexList = g.indexList
    indexThre = g.indexThre
    pastDay = g.pastDay
    curDate = datetime.date.today()
    preDate = curDate + datetime.timedelta(days=-pastDay)
    curDate = str(curDate)
    preDate = str(preDate)
    # Get the available cash balance
    cash = context.portfolio.cash
    topK = g.topK
    numSell = 0
    numBuy = 0
    # The bull/bear dividing line issues the stop-loss signal
    buySign = calBuySign(indexList, pastDay, data, index)
    # buySign = True
    if buySign == True:
        # Keep the strong, drop the weak: use the RPS indicator to pick the
        # strongest stocks in each sector to form the stock pool
        candidateStocks = findStockPool(indexList, curDate, preDate, index)
        # Pick stocks to trade from the pool using the moving-average strategy
        stocks = selectStocks(candidateStocks, curDate, data)
        countStocks = len(stocks)
        if countStocks > topK:
            rpsStocks = calRPS(stocks, curDate, preDate)
            stocks = list(rpsStocks[:topK]['code'])
        else:
            pass
        countStocks = len(stocks)
        # Keep positions that are still in the new candidate pool; sell the rest
        for security in context.portfolio.positions.keys():
            if security in stocks:
                continue
            else:
                order_target(security, 0)
                numSell += 1
                # print("Selling %s" %(security))
        # Buy stocks from the stock pool
        for security in stocks:
            # Get basic stock info: suspended or not, ST or not, position size, price, etc.
            currentData = get_current_data()
            pauseSign = currentData[security].paused
            STInfo = get_extras(
                'is_st', security, start_date=preDate, end_date=curDate)
            STSign = STInfo.iloc[-1]
            stocksAmount = context.portfolio.positions[security].amount
            stocksPrice = data[security].price
            if not pauseSign and not STSign.bool():
                # Buy the stock: compute how many shares the cash allows
                buyAmount = int((cash / countStocks) / stocksPrice)
                order(security, buyAmount)
                numBuy += 1
                # print("Buying %s" % (security))
            else:
                continue
    else:
        # Sell all currently held stocks
        for security in context.portfolio.positions:
            # Sell the entire position
            order_target(security, 0)
            numSell += 1
            # Record this sell
            # print("Selling %s" % (security))
 | 
| 
	the-stack_106_32309 | 
	#
# CanvasRenderAgg.py -- for rendering into a ImageViewAgg widget
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke.  All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import math
import aggdraw as agg
from . import AggHelp
from itertools import chain
# force registration of all canvas types
import ginga.canvas.types.all
class RenderContext(object):
    def __init__(self, viewer):
        self.viewer = viewer
        # TODO: encapsulate this drawable
        self.cr = AggHelp.AggContext(self.viewer.get_surface())
        self.pen = None
        self.brush = None
        self.font = None
    def set_line_from_shape(self, shape):
        # TODO: support line width and style
        alpha = getattr(shape, 'alpha', 1.0)
        self.pen = self.cr.get_pen(shape.color, alpha=alpha)
    def set_fill_from_shape(self, shape):
        fill = getattr(shape, 'fill', False)
        if fill:
            if hasattr(shape, 'fillcolor') and shape.fillcolor:
                color = shape.fillcolor
            else:
                color = shape.color
            alpha = getattr(shape, 'alpha', 1.0)
            alpha = getattr(shape, 'fillalpha', alpha)
            self.brush = self.cr.get_brush(color, alpha=alpha)
        else:
            self.brush = None
    def set_font_from_shape(self, shape):
        if hasattr(shape, 'font'):
            if hasattr(shape, 'fontsize') and shape.fontsize is not None:
                fontsize = shape.fontsize
            else:
                fontsize = shape.scale_font(self.viewer)
            alpha = getattr(shape, 'alpha', 1.0)
            self.font = self.cr.get_font(shape.font, fontsize, shape.color,
                                         alpha=alpha)
        else:
            self.font = None
    def initialize_from_shape(self, shape, line=True, fill=True, font=True):
        if line:
            self.set_line_from_shape(shape)
        if fill:
            self.set_fill_from_shape(shape)
        if font:
            self.set_font_from_shape(shape)
    def set_line(self, color, alpha=1.0, linewidth=1, style='solid'):
        # TODO: support line width and style
        self.pen = self.cr.get_pen(color, alpha=alpha)
    def set_fill(self, color, alpha=1.0):
        if color is None:
            self.brush = None
        else:
            self.brush = self.cr.get_brush(color, alpha=alpha)
    def set_font(self, fontname, fontsize, color='black', alpha=1.0):
        self.font = self.cr.get_font(fontname, fontsize, color,
                                     alpha=alpha)
    def text_extents(self, text):
        return self.cr.text_extents(text, self.font)
    def get_affine_transform(self, cx, cy, rot_deg):
        x, y = 0, 0          # old center
        nx, ny = cx, cy      # new center
        sx = sy = 1.0        # new scale
        cosine = math.cos(math.radians(rot_deg))
        sine = math.sin(math.radians(rot_deg))
        a = cosine / sx
        b = sine / sx
        c = x - nx*a - ny*b
        d = -sine / sy
        e = cosine / sy
        f = y - nx*d - ny*e
        return (a, b, c, d, e, f)
    ##### DRAWING OPERATIONS #####
    def draw_text(self, cx, cy, text, rot_deg=0.0):
        wd, ht = self.cr.text_extents(text, self.font)
        self.cr.canvas.text((cx, cy-ht), text, self.font)
        ## affine = self.get_affine_transform(cx, cy-ht, rot_deg)
        ## self.cr.canvas.settransform(affine)
        ## self.cr.canvas.text((0, 0), text, self.font)
        ## # reset default transform
        ## self.cr.canvas.settransform()
    def draw_polygon(self, cpoints):
        self.cr.canvas.polygon(list(chain.from_iterable(cpoints)),
                               self.pen, self.brush)
    def draw_circle(self, cx, cy, cradius):
        self.cr.canvas.ellipse((cx-cradius, cy-cradius, cx+cradius, cy+cradius),
                               self.pen, self.brush)
    def draw_bezier_curve(self, cp):
        # there is a bug in path handling of some versions of aggdraw--
        # aggdraw here is ok:
        path = agg.Path()
        path.moveto(cp[0][0], cp[0][1])
        path.curveto(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])
        self.cr.canvas.path(path, self.pen, self.brush)
    def draw_ellipse_bezier(self, cp):
        # draw 4 bezier curves to make the ellipse because there seems
        # to be a bug in aggdraw ellipse drawing function
        path = agg.Path()
        path.moveto(cp[0][0], cp[0][1])
        path.curveto(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])
        path.curveto(cp[4][0], cp[4][1], cp[5][0], cp[5][1], cp[6][0], cp[6][1])
        path.curveto(cp[7][0], cp[7][1], cp[8][0], cp[8][1], cp[9][0], cp[9][1])
        path.curveto(cp[10][0], cp[10][1], cp[11][0], cp[11][1], cp[12][0], cp[12][1])
        self.cr.canvas.path(path, self.pen, self.brush)
    def draw_line(self, cx1, cy1, cx2, cy2):
        self.cr.canvas.line((cx1, cy1, cx2, cy2), self.pen)
    def draw_path(self, cp):
        # TODO: is there a more efficient way in aggdraw to do this?
        path = agg.Path()
        path.moveto(cp[0][0], cp[0][1])
        for pt in cp[1:]:
            path.lineto(pt[0], pt[1])
        self.cr.canvas.path(path, self.pen, self.brush)
class CanvasRenderer(object):
    def __init__(self, viewer):
        self.viewer = viewer
    def setup_cr(self, shape):
        cr = RenderContext(self.viewer)
        cr.initialize_from_shape(shape, font=False)
        return cr
    def get_dimensions(self, shape):
        cr = self.setup_cr(shape)
        cr.set_font_from_shape(shape)
        return cr.text_extents(shape.text)
#END
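# --- Illustrative sketch (not part of the original module) ------------------
# The tuple returned by RenderContext.get_affine_transform() is built for
# aggdraw's settransform(), whose (a, b, c, d, e, f) convention maps a point
# (x, y) to (a*x + b*y + c, d*x + e*y + f) (stated here as an assumption based
# on the PIL-style affine layout). The helper below applies that mapping so
# the rotation math above can be checked numerically; the renderer itself does
# not use it.
def _apply_affine(affine, x, y):
    a, b, c, d, e, f = affine
    return (a * x + b * y + c, d * x + e * y + f)
# Example: with cx = cy = 0 and rot_deg = 90, the transform sends (1, 0) to
# approximately (0, -1), matching the sine/cosine terms in
# get_affine_transform().
# -----------------------------------------------------------------------------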
 | 
| 
	the-stack_106_32311 | 
	print("テーブル名を入力してください")
table = input()
#print("カラム数を入力してください")
#colum = int(input())
print("csvデータを張り付けてね")
mail=[]
while(True):
    i = input()
    if(i=="end"):
        break
    test=i.split(",")
    test2='"'
    for n in range(len(test[0])):
        test2.append(test[0][n])
    mail.append()
for i in mail:
    print("INSERT INTO " + table + " VALUES (" + i +");")
 | 
| 
	the-stack_106_32313 | 
	"""
Newsreader - Site
Copyright (c) 2018 Trevor Bramwell <[email protected]>
SPDX-License-Identifier: Apache-2.0
"""
import requests
from bs4 import BeautifulSoup
from .article import Article
class Site():
    """A site represents a news source and contains a list of articles
       after being parsed"""
    name = "NPR"
    def __init__(self, url):
        self.url = url
        self.articles = []
    def parse(self):
        """Parse a site and extract the articles"""
        # logging.info("Parsing: %s", self.url)
        page = requests.get(self.url)
        soup = BeautifulSoup(page.content, 'html5lib')
        links = soup.body.ul.find_all('li')
        for link in links:
            # logging.info("Adding article: %s", link)
            href = "%s%s" % (self.url, link.a['href'])
            article = Article(self.name, link.string, href)
            self.articles.append(article)
    def get_articles(self):
        """Return articles from the site, requires the site have been
           parsed first"""
        return self.articles
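# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of how Site is meant to be driven: construct it with the
# base URL of a text-only news index, parse it, then iterate the articles.
# The URL below is an assumption for illustration; any page whose <body>
# contains a <ul> of <li><a href="...">title</a></li> entries would work.
if __name__ == "__main__":
    site = Site("https://text.npr.org")
    site.parse()
    for article in site.get_articles():
        print(article)
# -----------------------------------------------------------------------------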
 | 
| 
	the-stack_106_32314 | 
	#!/usr/bin/env python
from distutils.core import setup
LONG_DESCRIPTION = \
'''The program reads one or more input FASTA files.
For each file it computes a variety of statistics, and then
prints a summary of the statistics as output.
The goal is to provide a solid foundation for new bioinformatics command line tools,
and is an ideal starting place for new projects.'''
setup(
    name='biodemo',
    version='0.1.0.0',
    author='Anna Syme',
    author_email='[email protected]',
    packages=['biodemo'],
    package_dir={'biodemo': 'biodemo'},
    entry_points={
        'console_scripts': ['biodemo = biodemo.biodemo:main']
    },
    url='https://github.com/GITHUB_USERNAME/biodemo',
    license='LICENSE',
    description=('A prototypical bioinformatics command line tool'),
    long_description=(LONG_DESCRIPTION),
    install_requires=["biopython"],
)
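# --- Illustrative usage (not part of the original file) ---------------------
# With this setup.py in place, the package is typically installed with
#   pip install .
# which, via the entry_points above, puts a `biodemo` console script on PATH.
# The exact command-line flags depend on biodemo/biodemo.py and are not shown
# here.
# -----------------------------------------------------------------------------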
 | 
| 
	the-stack_106_32315 | 
	import tvm
import numpy as np
from tvm import relay
from tvm.relay.ir_pass import alpha_equal
from tvm.relay.ir_builder import convert
def test_tensor_type_alpha_equal():
    t1 = relay.TensorType((3, 4), "float32")
    t2 = relay.TensorType((3, 4), "float32")
    t3 = relay.TensorType((3, 4, 5), "float32")
    assert t1 == t2
    assert t1 != t3
    t1 = relay.TensorType((), "float32")
    t2 = relay.TensorType((), "float32")
    assert t1 == t2
def test_incomplete_type_alpha_equal():
    t1 = relay.IncompleteType(relay.Kind.Shape)
    t2 = relay.IncompleteType(relay.Kind.Type)
    t3 = relay.IncompleteType(relay.Kind.Type)
    # only equal when there is pointer equality
    assert t2 == t2
    assert t1 == t1
    assert t1 != t2
    assert t2 != t3
def test_type_param_alpha_equal():
    t1 = relay.TypeParam("v1", relay.Kind.Type)
    t2 = relay.TypeParam("v2", relay.Kind.Shape)
    t3 = relay.TypeParam("v3", relay.Kind.Type)
    # only pointer equality and eq_map allow equal params
    assert t1 == t1
    assert t2 == t2
    assert t1 != t2 # different kind
    assert t1 != t3 # not in eq_map
    # function types are the only way to put type params
    # in eq map
    ft1 = relay.FuncType(tvm.convert([]), t1, tvm.convert([t1]), tvm.convert([]))
    ft2 = relay.FuncType(tvm.convert([]), t3, tvm.convert([t3]), tvm.convert([]))
    # actually an invalid type because t2 is wrong kind
    ft3 = relay.FuncType(tvm.convert([]), t2, tvm.convert([t2]), tvm.convert([]))
    assert ft1 == ft2
    assert ft1 != ft3 # kinds still do not match
def test_func_type_alpha_equal():
    t1 = relay.TensorType((1, 2), "float32")
    t2 = relay.TensorType((1, 2, 3), "float32")
    tp1 = relay.TypeParam("v1", relay.Kind.Type)
    tp2 = relay.TypeParam("v2", relay.Kind.Type)
    tp3 = relay.TypeParam("v3", relay.Kind.Shape)
    tp4 = relay.TypeParam("v3", relay.Kind.Shape)
    broadcast = tvm.get_env_func("tvm.relay.type_relation.Broadcast")
    identity = tvm.get_env_func("tvm.relay.type_relation.Identity")
    tr1 = relay.TypeRelation(broadcast, tvm.convert([tp1, tp3]), 1, None)
    tr2 = relay.TypeRelation(broadcast, tvm.convert([tp2, tp4]), 1, None)
    tr3 = relay.TypeRelation(identity, tvm.convert([tp1, tp3]), 1, None)
    ft = relay.FuncType(tvm.convert([t1, t2]), tp1,
                         tvm.convert([tp1, tp3]),
                         tvm.convert([tr1]))
    translate_vars = relay.FuncType(tvm.convert([t1, t2]), tp1,
                         tvm.convert([tp2, tp4]),
                         tvm.convert([tr2]))
    assert ft == translate_vars
    different_args = relay.FuncType(tvm.convert([t1]), tp1,
                         tvm.convert([tp1, tp3]),
                         tvm.convert([tr1]))
    assert ft != different_args
    different_order = relay.FuncType(tvm.convert([t2, t1]), tp1,
                         tvm.convert([tp1, tp3]),
                         tvm.convert([tr1]))
    assert ft != different_order
    no_rel = relay.FuncType(tvm.convert([t1, t2]), tp1,
                         tvm.convert([tp1, tp3]),
                         tvm.convert([]))
    assert ft != no_rel
    more_vars = relay.FuncType(tvm.convert([t1, t2]), tp2,
                         tvm.convert([tp1, tp2, tp3]),
                         tvm.convert([tr1]))
    assert ft != more_vars
    all_the_vars = relay.FuncType(tvm.convert([t1, t2]), tp1,
                         tvm.convert([tp1, tp2, tp3, tp4]),
                         tvm.convert([tr1, tr2]))
    assert ft != all_the_vars
    different_rel = relay.FuncType(tvm.convert([t1, t2]), tp1,
                                   tvm.convert([tp1, tp3]),
                                   tvm.convert([tr3]))
    assert ft != different_rel
    more_rels = relay.FuncType(tvm.convert([t1, t2]), tp1,
                                   tvm.convert([tp1, tp3]),
                                   tvm.convert([tr1, tr3]))
    assert ft != more_rels
def test_tuple_type_alpha_equal():
    t1 = relay.TensorType((1, 2, 3), "float32")
    t2 = relay.TensorType((1, 2, 3, 4), "float32")
    tp1 = relay.TypeParam("v1", relay.Kind.Type)
    tp2 = relay.TypeParam("v2", relay.Kind.Type)
    tup1 = relay.TupleType(tvm.convert([t1, t2, tp1]))
    tup2 = relay.TupleType(tvm.convert([t1, t2, tp1]))
    tup3 = relay.TupleType(tvm.convert([t2, t1, tp1]))
    tup4 = relay.TupleType(tvm.convert([t1, t2, tp2]))
    # as long as types are alpha-equal and in same order,
    # tuples should be alpha-equal
    assert tup1 == tup2
    assert tup1 != tup3
    assert tup1 != tup4
def test_type_relation_alpha_equal():
    t1 = relay.TensorType((1, 2), "float32")
    t2 = relay.TensorType((1, 2, 3), "float32")
    t3 = relay.TensorType((1, 2, 3, 4), "float32")
    # functions are compared only by pointer equality so
    # we need to be sure to use the same pointers
    broadcast = tvm.get_env_func("tvm.relay.type_relation.Broadcast")
    identity = tvm.get_env_func("tvm.relay.type_relation.Identity")
    # attrs are also compared only by pointer equality
    attr1 = tvm.make.node("attrs.TestAttrs", name="attr", padding=(3,4))
    attr2 = tvm.make.node("attrs.TestAttrs", name="attr", padding=(3,4))
    tr = relay.TypeRelation(broadcast, tvm.convert([t1, t2]), 1, attr1)
    same = relay.TypeRelation(broadcast, tvm.convert([t1, t2]), 1, attr1)
    diff_func = relay.TypeRelation(identity, tvm.convert([t1, t2]), 1, attr1)
    diff_order = relay.TypeRelation(broadcast, tvm.convert([t2, t1]), 1, attr1)
    diff_args = relay.TypeRelation(broadcast, tvm.convert([t2, t3]), 1, attr1)
    diff_attr = relay.TypeRelation(broadcast, tvm.convert([t1, t2]), 1, attr2)
    bigger = relay.TypeRelation(identity, tvm.convert([t1, t3, t2]), 2, attr1)
    diff_num_inputs = relay.TypeRelation(identity, tvm.convert([t1, t3, t2]), 1, attr2)
    # func, number of args, input count, and order should be the same
    assert tr == same
    assert tr != diff_func
    assert tr != diff_order
    assert tr != diff_args
    assert tr != diff_attr
    assert tr != bigger
    assert bigger != diff_num_inputs
def test_constant_alpha_equal():
    x = convert(1)
    y = convert(2)
    assert alpha_equal(x, x)
    assert not alpha_equal(x, y)
    assert alpha_equal(x, convert(1))
def test_var_alpha_equal():
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")
    # normally only pointer equality
    assert alpha_equal(v1, v1)
    assert not alpha_equal(v1, v2)
    # let node allows for setting the eq_map
    l1 = relay.Let(v1, convert(1), v1)
    l2 = relay.Let(v2, convert(1), v2)
    l3 = relay.Let(v1, convert(1), v2)
    assert alpha_equal(l1, l2)
    assert not alpha_equal(l1, l3)
def test_global_var_alpha_equal():
    v1 = relay.GlobalVar("v1")
    v2 = relay.GlobalVar("v2")
    # only pointer equality suffices (smoke test)
    assert alpha_equal(v1, v1)
    assert not alpha_equal(v1, v2)
def test_tuple_alpha_equal():
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")
    # unit value is a valid tuple
    assert alpha_equal(relay.Tuple([]), relay.Tuple([]))
    tup = relay.Tuple([v1, convert(2), convert(3), relay.Tuple([convert(4)])])
    same = relay.Tuple([v1, convert(2), convert(3), relay.Tuple([convert(4)])])
    assert alpha_equal(tup, same)
    # use the eq_map
    let_tup = relay.Let(v1, tup, v1)
    let_mapped = relay.Let(v2, relay.Tuple([v2, convert(2), convert(3),
                                            relay.Tuple([convert(4)])]),
                           v2)
    assert alpha_equal(let_tup, let_mapped)
    more_fields = relay.Tuple([v1, convert(2), convert(3), relay.Tuple([convert(4)]), v2])
    assert not alpha_equal(tup, more_fields)
    fewer_fields = relay.Tuple([v1, convert(2), convert(3)])
    assert not alpha_equal(tup, fewer_fields)
    different_end = relay.Tuple([v1, convert(2), convert(3),
                           relay.Tuple([convert(5)])])
    assert not alpha_equal(tup, different_end)
    different_start = relay.Tuple([v2, convert(2), convert(3),
                                 relay.Tuple([convert(4)])])
    assert not alpha_equal(tup, different_start)
    longer_at_end = relay.Tuple([v1, convert(2), convert(3),
                                 relay.Tuple([convert(4), convert(5)])])
    assert not alpha_equal(tup, longer_at_end)
def test_tuple_get_item_alpha_equal():
    x = relay.Var('x')
    y = relay.Var('y')
    assert not alpha_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(y, 1))
    assert not alpha_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 2))
    assert alpha_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 1))
def test_function_alpha_equal():
    tt1 = relay.TensorType((1, 2, 3), "float32")
    tt2 = relay.TensorType((4, 5, 6), "int8")
    tt3 = relay.TupleType([tt1, tt2])
    v1 = relay.Var("v1", tt1)
    v2 = relay.Var("v2", tt2)
    v3 = relay.Var("v3", tt3)
    v4 = relay.Var("v4", tt2)
    vret = relay.Constant(tvm.nd.array(np.ones(1)))
    tp1 = relay.TypeParam("tp1", relay.Kind.Type)
    tp2 = relay.TypeParam("tp2", relay.Kind.Type)
    tp3 = relay.TypeParam("tp3", relay.Kind.Shape)
    tp4 = relay.TypeParam("tp4", relay.Kind.Shape)
    basic_args = [relay.Var("v3", tt1), relay.Var("v4", tt2)]
    basic_tps = [tp1, tp2]
    func = relay.Function([v1, v2],
                          tt2, v1, basic_tps)
    mapped = relay.Function(basic_args, tt2, basic_args[0], basic_tps)
    assert alpha_equal(func, mapped)
    fewer_params = relay.Function([relay.Var("v4", tt2)], tt2, v4, basic_tps)
    assert not alpha_equal(func, fewer_params)
    more_params = relay.Function([relay.Var("v3", tt1),
                                  relay.Var("v4", tt2),
                                  relay.Var("v2", tt2)], tt2, v4, basic_tps)
    assert not alpha_equal(func, more_params)
    params_unordered = relay.Function([v2, v1],
                                      tt2, v1, basic_tps)
    assert not alpha_equal(func, params_unordered)
    params_mismatch = relay.Function([v1, v3],
                                     tt2, v1, basic_tps)
    assert not alpha_equal(func, params_mismatch)
    # also would not typecheck
    ret_type_mismatch = relay.Function(basic_args, tt1, v4, basic_tps)
    assert not alpha_equal(func, ret_type_mismatch)
    # also mis-typed
    different_body = relay.Function(basic_args, tt2, v3, basic_tps)
    assert not alpha_equal(func, different_body)
    fewer_type_params = relay.Function(basic_args, tt2, v4, [tp1])
    assert not alpha_equal(func, fewer_type_params)
    more_type_params = relay.Function(basic_args, tt2, v4, [tp1, tp2, tp3])
    assert not alpha_equal(func, more_type_params)
    type_params_unordered = relay.Function(basic_args, tt2, v4, [tp2, tp1])
    assert not alpha_equal(func, type_params_unordered)
    different_type_params = relay.Function(basic_args, tt2, v4, [tp3, tp4])
    assert not alpha_equal(func, different_type_params)
    # a well-typed example that also differs in body, ret type, and type params
    tupled_example = relay.Function(basic_args, tt3, relay.Tuple([v3, v4]))
    assert not alpha_equal(func, tupled_example)
def test_call_alpha_equal():
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")
    # attrs are compared only by pointer equality
    attr1 = tvm.make.node("attrs.TestAttrs", name="attr", padding=(3,4))
    attr2 = tvm.make.node("attrs.TestAttrs", name="attr", padding=(3,4))
    tt1 = relay.TensorType((1, 2, 3), "float32")
    tt2 = relay.TensorType((), "int8")
    basic_args = [convert(1), convert(2), v2, relay.Tuple([])]
    # manually writing out args to ensure that args does not rely on
    # pointer equality
    call = relay.Call(v1, [convert(1), convert(2), v2, relay.Tuple([])],
                      attr1, [tt1])
    same = relay.Call(v1, basic_args, attr1, [tt1])
    assert alpha_equal(call, same)
    different_fn = relay.Call(v2, basic_args, attr1, [tt1])
    assert not alpha_equal(call, different_fn)
    fewer_args = relay.Call(v1, [convert(1), convert(2), v2], attr1, [tt1])
    assert not alpha_equal(call, fewer_args)
    reordered_args = relay.Call(v1, [convert(2), convert(1),
                                     relay.Tuple([]), v2], attr1, [tt1])
    assert not alpha_equal(call, reordered_args)
    different_args = relay.Call(v1, [convert(1), convert(2), convert(3)],
                                attr1, [tt1])
    assert not alpha_equal(call, different_args)
    more_args = relay.Call(v1, [convert(1), convert(2), v2, relay.Tuple([]),
                                convert(3), convert(4)], attr1, [tt1])
    assert not alpha_equal(call, more_args)
    different_attrs = relay.Call(v1, basic_args, attr2, [tt1])
    assert not alpha_equal(call, different_attrs)
    no_type_args = relay.Call(v1, basic_args, attr1)
    assert not alpha_equal(call, no_type_args)
    more_type_args = relay.Call(v1, basic_args, attr1, [tt1, tt2])
    assert not alpha_equal(call, more_type_args)
    different_type_arg = relay.Call(v1, basic_args, attr1, [tt2])
    assert not alpha_equal(call, different_type_arg)
def test_let_alpha_equal():
    tt1 = relay.TensorType((), "float32")
    tt2 = relay.TensorType((), "int8")
    v1 = relay.Var("v1")
    v1_wtype = relay.Var("v1", tt1)
    v2 = relay.Var("v2")
    v3 = relay.Var("v3")
    let = relay.Let(v1, convert(2), v1)
    mapped = relay.Let(v2, convert(2), v2)
    assert alpha_equal(let, mapped)
    mismatched_var = relay.Let(v2, convert(2), v3)
    assert not alpha_equal(let, mismatched_var)
    different_value = relay.Let(v2, convert(3), v2)
    assert not alpha_equal(let, different_value)
    different_body = relay.Let(v2, convert(3), convert(12))
    assert not alpha_equal(let, different_body)
    # specified types must match
    let_with_type = relay.Let(v1_wtype, convert(2), v1_wtype)
    same_type = relay.Let(v1_wtype, convert(2), v1_wtype)
    assert alpha_equal(let_with_type, same_type)
    assert not alpha_equal(let, let_with_type)
    v2 = relay.Var("v1", tt2)
    different_type = relay.Let(v2, convert(2), v2)
    assert not alpha_equal(let_with_type, different_type)
def test_if_alpha_equal():
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")
    if_sample = relay.If(v1, convert(1), relay.Tuple([convert(2), convert(3)]))
    same = relay.If(v1, convert(1), relay.Tuple([convert(2), convert(3)]))
    assert alpha_equal(if_sample, same)
    different_cond = relay.If(v2, convert(1), relay.Tuple([convert(2), convert(3)]))
    assert not alpha_equal(if_sample, different_cond)
    different_true = relay.If(v1, convert(2), relay.Tuple([convert(2), convert(3)]))
    assert not alpha_equal(if_sample, different_true)
    different_false = relay.If(v1, convert(1), relay.Tuple([]))
    assert not alpha_equal(if_sample, different_false)
def test_op_alpha_equal():
    # only checks names
    op1 = relay.op.get("add")
    op2 = relay.op.get("add")
    assert alpha_equal(op1, op2)
    op3 = relay.op.get("take")
    assert not alpha_equal(op1, op3)
if __name__ == "__main__":
    test_tensor_type_alpha_equal()
    test_incomplete_type_alpha_equal()
    test_constant_alpha_equal()
    test_func_type_alpha_equal()
    test_tuple_type_alpha_equal()
    test_type_relation_alpha_equal()
    test_global_var_alpha_equal()
    test_tuple_alpha_equal()
    test_tuple_get_item_alpha_equal()
    test_function_alpha_equal()
    test_call_alpha_equal()
    test_let_alpha_equal()
    test_if_alpha_equal()
    test_op_alpha_equal()
 | 
| 
	the-stack_106_32316 | 
	import os
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, save, output_file
from bokeh.models import ColumnDataSource, HoverTool, LinearColorMapper, BasicTicker, PrintfTickFormatter, ColorBar, Range1d
from bokeh.transform import transform
from bokeh.palettes import RdBu, Spectral, RdYlBu, RdGy, YlGnBu, Inferno, Plasma, PuBu, Greys, Magma, Viridis
def plot_voltage(project_name, voltage_df, ss_code=None, ss_config=None):
    # Check for data to plot
    if voltage_df is None:
        return
    # If there is no data, then we cannot create a plot
    if voltage_df.empty:
        return
    # Create a default subsystem query string
    ss_str = "_{}_{}".format(ss_config, ss_code)
    TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
    # Create plot
    plot = figure(tools=[TOOLS], title="{} - Voltage".format(project_name), x_axis_type="datetime")
    # Wrap the DataFrame in a ColumnDataSource and draw the voltage line
    plot.line(x='datetime', y='voltage', source=ColumnDataSource(voltage_df), line_width=5, line_color="Goldenrod")
    # Set the labels
    plot.xaxis.axis_label = "Date/Time"
    plot.yaxis.axis_label = 'Voltage'
    # Set the tool tip
    plot.select_one(HoverTool).tooltips = [
        ('ENS Num', '@ensnum'),
        ('Date/Time', '@datetime{%F}'),
        ('Voltage', '@voltage'),
    ]
    plot.select_one(HoverTool).formatters = {
        'datetime': 'datetime'                          # use 'datetime' formatter for 'datetime' field
    }
    plot.select_one(HoverTool).mode = 'vline'           # display a tooltip whenever the cursor is vertically in line with a glyph
    # Save plot to HTML
    file_name = project_name + '{}_voltage.html'.format(ss_str)
    file_name = os.path.join('html', file_name)
    output_file(file_name, title="{} - Voltage".format(project_name))
    save(gridplot([[plot]], sizing_mode='stretch_both'))  # Just save to file | 
| 
	the-stack_106_32317 | 
	import os
import pickle
import gendist
import torchvision
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from tqdm import tqdm
from loguru import logger
from augly import image
from jax.flatten_util import ravel_pytree
def processor(X, angle):
    X_shift = image.aug_np_wrapper(X, image.rotate, degrees=angle)
    size_im = X_shift.shape[0]
    size_pad = (28 - size_im) // 2
    size_pad_mod = (28 - size_im) % 2
    X_shift = np.pad(X_shift, (size_pad, size_pad + size_pad_mod))
    
    return X_shift
def predict_shifted_dataset(ix_seed, X_batch, processor, config, wmodel, wparams, dmodel, proj, fn_reconstruct):
    """
    Parameters
    ----------
    ix_seed: array
    X_batch: array
    ...
    wmodel: model for the latent space
    wparams: trained weights for the latent space
    dmodel: model for the observed space
    dparams: trained model for the observed weights
    """
    x_seed = X_batch[ix_seed]
    x_shift = processor.process_single(x_seed, **config).ravel()
    predicted_weights = wmodel.apply(wparams, x_shift)
    predicted_weights = proj.inverse_transform(predicted_weights)
    predicted_weights = fn_reconstruct(predicted_weights)
    
    X_batch_shift = processor(X_batch, config)
    y_batch_hat = dmodel.apply(predicted_weights, X_batch_shift)
    
    return y_batch_hat
path_experiment = "./outputs/2203221129/"
path_data_model = os.path.join(path_experiment, "output", "data-model-result.pkl")
path_meta_model = os.path.join(path_experiment, "output", "meta-model.pkl")
path_results = os.path.join(path_experiment, "output", "accuracy.pkl")
with open(path_data_model, "rb") as f:
    data_model_results = pickle.load(f)
with open(path_meta_model, "rb") as f:
    meta_model_results = pickle.load(f)
now_str = datetime.now().strftime("%Y%m%d%H%M")
file_log = f"trench_test_{now_str}.log"
path_logger = os.path.join(path_experiment, "logs", file_log)
logger.remove()
logger.add(path_logger, rotation="5mb")
mnist_test = torchvision.datasets.MNIST(root=".", train=False, download=True)
X_test = np.array(mnist_test.data) / 255
y_test = np.array(mnist_test.targets)
proc_class = gendist.processing.Factory(processor)
pca = meta_model_results["projection_model"]
meta_model = gendist.models.MLPWeightsV1(pca.n_components)
data_model = gendist.models.MLPDataV1(10)
_, fn_reconstruct_params = ravel_pytree(data_model_results["params"][0])
accuracy_configs_learned = []
ixs = np.arange(5)
for config in tqdm(data_model_results["configs"]):
    acc_dict = {}
    for ix in ixs:
        y_test_hat = predict_shifted_dataset(ix, X_test, proc_class, config,
                                             meta_model, meta_model_results["params"],
                                             data_model, pca, fn_reconstruct_params)
        y_test_hat = y_test_hat.argmax(axis=1)
        accuracy_learned = (y_test_hat == y_test).mean().item()
        acc_dict[ix] = accuracy_learned
        
    accuracy_configs_learned.append(acc_dict)
    
    angle = config["angle"]
    logger_row = "|".join([format(v, "0.2%") for v in acc_dict.values()])
    logger_row = f"{angle=:0.4f} | " + logger_row 
    
    logger.info(logger_row)
pd.DataFrame(accuracy_configs_learned).to_pickle(path_results)
 | 
| 
	the-stack_106_32318 | 
	import pytest
@pytest.fixture
def document_0(publication):
    return {
        'references': [publication['identifiers'][0]],
    }
@pytest.fixture
def document_base(lab, award):
    return {
        'award': award['uuid'],
        'lab': lab['uuid'],
        'document_type': 'growth protocol',
    }
@pytest.fixture
def document_1(document_base):
    item = document_base.copy()
    item.update({
        'schema_version': '2',
        'status': 'CURRENT',
        'award': '4d462953-2da5-4fcf-a695-7206f2d5cf45'
    })
    return item
@pytest.fixture
def document_3(root, document, publication):
    item = root.get_by_uuid(document['uuid'])
    properties = item.properties.copy()
    properties.update({
        'schema_version': '3',
        'references': [publication['identifiers'][0]],
    })
    return properties
def test_document_0_upgrade(upgrader, document_0, publication):
    value = upgrader.upgrade('document', document_0, target_version='2')
    assert value['schema_version'] == '2'
    assert value['references'] == [publication['identifiers'][0]]
def test_document_upgrade_status(upgrader, document_1):
    value = upgrader.upgrade('document', document_1, target_version='3')
    assert value['schema_version'] == '3'
    assert value['status'] == 'in progress'
def test_document_upgrade_status_encode2(upgrader, document_1):
    document_1['award'] = '366388ac-685d-415c-b0bb-834ffafdf094'
    value = upgrader.upgrade('document', document_1, target_version='3')
    assert value['schema_version'] == '3'
    assert value['status'] == 'released'
def test_document_upgrade_status_deleted(upgrader, document_1):
    document_1['status'] = 'DELETED'
    value = upgrader.upgrade('document', document_1, target_version='3')
    assert value['schema_version'] == '3'
    assert value['status'] == 'deleted'
def test_document_upgrade_references(root, upgrader,
                                     document, document_3,
                                     publication,
                                     threadlocals, dummy_request):
    context = root.get_by_uuid(document['uuid'])
    dummy_request.context = context
    value = upgrader.upgrade('document', document_3, target_version='4', context=context)
    assert value['schema_version'] == '4'
    assert value['references'] == [publication['uuid']]
def test_document_upgrade_status_replaced(upgrader, document_base):
    document_base['status'] = 'replaced'
    document_base['schema_version'] = '7'
    value = upgrader.upgrade('document', document_base, current_version='7', target_version='8')
    assert value['schema_version'] == '8'
    assert value['status'] == 'deleted'
 | 
| 
	the-stack_106_32319 | 
	"""modify isotopes table
Revision ID: 8e68245fe95a
Revises: a5189c25d85e
Create Date: 2017-01-07 15:11:16.650856
"""
# revision identifiers, used by Alembic.
revision = "8e68245fe95a"
down_revision = "a5189c25d85e"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column("isotopes", sa.Column("mass_uncertainty", sa.Float))
    op.add_column("isotopes", sa.Column("is_radioactive", sa.Boolean))
    op.add_column("isotopes", sa.Column("half_life", sa.Float))
    op.add_column("isotopes", sa.Column("half_life_unit", sa.String))
def downgrade():
    with op.batch_alter_table("isotopes") as batch_op:
        batch_op.drop_column("mass_uncertainty")
        batch_op.drop_column("is_radioactive")
        batch_op.drop_column("half_life")
        batch_op.drop_column("half_life_unit")
 | 
| 
	the-stack_106_32320 | 
	from __future__ import annotations
import os
import signal
import subprocess as sp
from enum import Enum, auto
from threading import Thread
from typing import Callable, List
from pyutils import exc
from .util import find_executable, kill
# Public classes
class OutputAction(Enum):
    """Output actions."""
    PRINT = auto()
    DISCARD = auto()
    STORE = auto()
class Task:
    """Spawn processes and easily capture their output."""
    @property
    def name(self) -> str:
        return os.path.basename(self.path)
    @property
    def pid(self) -> int | None:
        return self._process.pid if self._process else None
    @property
    def stdout(self) -> str | None:
        return self._completed.stdout if self._completed else None
    @property
    def stderr(self) -> str | None:
        return self._completed.stderr if self._completed else None
    @property
    def exit_code(self) -> int | None:
        return self._completed.returncode if self._completed else None
    @classmethod
    def spawn(cls, executable: str,
              args: List[str] | None = None,
              output_action: OutputAction = OutputAction.STORE,
              input_path: str | None = None) -> Task:
        """Convenience factory method: builds, runs and returns a task."""
        task = cls(executable, args=args, output_action=output_action, input_path=input_path)
        task.run()
        return task
    @classmethod
    def copying(cls, task: Task) -> Task:
        return cls(task.path, args=task.args, output_action=task.output_action,
                   input_path=task.input_path)
    @classmethod
    def jar(cls, jar: str, jar_args: List[str] | None = None,
            jvm_opts: List[str] | None = None, output_action: OutputAction = OutputAction.STORE,
            input_path: str | None = None) -> Task:
        return cls('java', java_args(jar, jar_args=jar_args, jvm_opts=jvm_opts),
                   output_action=output_action, input_path=input_path)
    def __init__(self,
                 executable: str,
                 args: List[str] | None = None,
                 output_action: OutputAction = OutputAction.STORE,
                 input_path: str | None = None) -> None:
        exc.raise_if_falsy(executable=executable, output_action=output_action)
        if not os.path.isabs(executable):
            executable = find_executable(executable)
        self.path = executable
        self.args = args
        self.output_action = output_action
        self.input_path = input_path
        self._completed: sp.CompletedProcess | None = None
        self._process: sp.Popen | None = None
    def run(self, wait: bool = True, timeout: float | None = None) -> Task:
        """Run the task."""
        stdin = None
        try:
            handle = None
            if self.output_action == OutputAction.DISCARD:
                handle = sp.DEVNULL
            elif self.output_action == OutputAction.STORE:
                handle = sp.PIPE
            if self.input_path:
                stdin = open(self.input_path)
            self._process = sp.Popen(self._popen_args, stdout=handle, stderr=handle, stdin=stdin,
                                     universal_newlines=True)
            if wait:
                self.wait(timeout=timeout)
        except Exception as e:
            try:
                if stdin:
                    stdin.close()
                exc.re_raise_new_message(e, f'Failed to call process: {self.path}')
            except Exception:
                raise e
        return self
    def wait(self, timeout: float | None = None) -> Task:
        """Wait for the task to exit."""
        try:
            stdout, stderr = self._process.communicate(timeout=timeout)
        except sp.TimeoutExpired:
            self.send_signal(sig=signal.SIGKILL, children=True)
            stdout, stderr = self._process.communicate()
            raise sp.TimeoutExpired(self._process.args, timeout, output=stdout, stderr=stderr)
        except Exception:
            self.send_signal(sig=signal.SIGKILL, children=True)
            raise
        retcode = self._process.poll()
        if stdout:
            stdout = stdout.strip()
        if stderr:
            stderr = stderr.strip()
        self._completed = sp.CompletedProcess(self._popen_args, retcode, stdout, stderr)
        return self
    def run_async(self, timeout: float | None = None,
                  exit_handler: Callable[[Task, Exception], None] | None = None) -> Task:
        """Run the task asynchronously."""
        bg_proc = Thread(target=self._run_async_thread, args=[timeout, exit_handler])
        bg_proc.daemon = True
        bg_proc.start()
        return self
    def send_signal(self, sig: int = signal.SIGKILL, children: bool = False) -> Task:
        """Send a signal to the task."""
        if self._process and self._process.pid is not None:
            kill(self._process.pid, sig=sig, children=children)
        return self
    def raise_if_failed(self, ensure_output: bool = False, message: str | None = None) -> Task:
        """Raise an IOError if the task returned with a non-zero exit code."""
        auto_msg = None
        should_raise = False
        if self.exit_code:
            auto_msg = f'Process "{self.name}" returned exit code: {self.exit_code:d}'
            should_raise = True
        elif ensure_output and not self.stdout:
            auto_msg = f'Process "{self.name}" returned no output.'
            should_raise = True
        if should_raise:
            err_lines = []
            proc_out = self.stderr.strip() if self.stderr else None
            if not proc_out:
                proc_out = self.stdout.strip() if self.stdout else None
            for msg in [message, auto_msg, proc_out]:
                if msg:
                    err_lines.append(msg)
            raise IOError('\n'.join(err_lines))
        return self
    # Protected methods
    @property
    def _popen_args(self) -> List[str]:
        args = [self.path]
        if self.args:
            args.extend(self.args)
        return args
    def _run_async_thread(self, timeout: float | None,
                          exit_handler: Callable[[Task, Exception], None] | None) -> None:
        err = None
        try:
            self.run(timeout=timeout)
        except Exception as e:
            err = e
        finally:
            if exit_handler:
                exit_handler(self, err)
def java_args(jar: str, jar_args: List[str] | None = None,
              jvm_opts: List[str] | None = None) -> List[str]:
    """
    Returns the argument list to pass to the JVM in order to launch the given Jar.
    :param jar: Path to the jar file.
    :param jar_args: Args to pass to the jar.
    :param jvm_opts: Args to pass to the JVM.
    :return: Argument list.
    """
    args = []
    if jvm_opts:
        args.extend(jvm_opts)
    args.extend(('-jar', jar))
    if jar_args:
        args.extend(jar_args)
    return args
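# Minimal usage sketch (illustrative, not part of the original module): spawn a
# short-lived process and inspect its captured output, then build JVM arguments
# for a hypothetical jar. "echo" and the jar path are assumptions for the demo.
if __name__ == '__main__':
    task = Task.spawn('echo', args=['hello world'])
    task.raise_if_failed(ensure_output=True)
    print(task.exit_code, task.stdout)
    print(java_args('/tmp/app.jar', jar_args=['--verbose'], jvm_opts=['-Xmx256m']))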
 | 
| 
	the-stack_106_32322 | 
	import dbm
import logging
import os
from pathlib import Path
import shutil
import tempfile
from django.apps import apps
from django.utils.module_loading import import_string
from .classes import DefinedStorage, PassthroughStorage
from .settings import setting_temporary_directory
logger = logging.getLogger(name=__name__)
def NamedTemporaryFile(*args, **kwargs):
    kwargs.update({'dir': setting_temporary_directory.value})
    return tempfile.NamedTemporaryFile(*args, **kwargs)
class PassthroughStorageProcessor(object):
    def __init__(
        self, app_label, defined_storage_name, log_file, model_name,
        file_attribute='file'
    ):
        self.app_label = app_label
        self.defined_storage_name = defined_storage_name
        self.file_attribute = file_attribute
        self.log_file = log_file
        self.model_name = model_name
    def _update_entry(self, key):
        if not self.reverse:
            self.database[key] = '1'
        else:
            try:
                del self.database[key]
            except KeyError:
                pass
    def _inclusion_condition(self, key):
        if self.reverse:
            return key in self.database
        else:
            return key not in self.database
    def execute(self, reverse=False):
        self.reverse = reverse
        model = apps.get_model(
            app_label=self.app_label, model_name=self.model_name
        )
        storage_instance = DefinedStorage.get(
            name=self.defined_storage_name
        ).get_storage_instance()
        if isinstance(storage_instance, PassthroughStorage):
            ContentType = apps.get_model(
                app_label='contenttypes', model_name='ContentType'
            )
            content_type = ContentType.objects.get_for_model(model=model)
            self.database = dbm.open(self.log_file, flag='c')
            for instance in model.objects.all():
                key = '{}.{}'.format(content_type.name, instance.pk)
                if self._inclusion_condition(key=key):
                    file_name = getattr(instance, self.file_attribute).name
                    content = storage_instance.open(
                        name=file_name, mode='rb',
                        _direct=not self.reverse
                    )
                    storage_instance.delete(name=file_name)
                    storage_instance.save(
                        name=file_name, content=content,
                        _direct=self.reverse
                    )
                    self._update_entry(key=key)
            self.database.close()
def TemporaryFile(*args, **kwargs):
    kwargs.update({'dir': setting_temporary_directory.value})
    return tempfile.TemporaryFile(*args, **kwargs)
def fs_cleanup(filename, suppress_exceptions=True):
    """
    Tries to remove the given filename. Ignores non-existent files.
    """
    try:
        os.remove(filename)
    except OSError:
        try:
            shutil.rmtree(filename)
        except OSError:
            if suppress_exceptions:
                pass
            else:
                raise
def get_storage_subclass(dotted_path):
    """
    Import a storage class and return a subclass whose __eq__ always returns
    True, to avoid creating a new migration when the runtime storage class
    changes. Now used only by historic migrations.
    """
    imported_storage_class = import_string(dotted_path=dotted_path)
    class StorageSubclass(imported_storage_class):
        def __init__(self, *args, **kwargs):
            return super(StorageSubclass, self).__init__(*args, **kwargs)
        def __eq__(self, other):
            return True
        def deconstruct(self):
            return ('mayan.apps.storage.classes.FakeStorageSubclass', (), {})
    return StorageSubclass
def mkdtemp(*args, **kwargs):
    """
    Creates a temporary directory in the most secure manner possible.
    There are no race conditions in the directory's creation.
    The directory is readable, writable, and searchable only by the creating
    user ID.
    """
    kwargs.update({'dir': setting_temporary_directory.value})
    return tempfile.mkdtemp(*args, **kwargs)
def mkstemp(*args, **kwargs):
    kwargs.update({'dir': setting_temporary_directory.value})
    return tempfile.mkstemp(*args, **kwargs)
def patch_files(path=None, replace_list=None):
    """
    Search and replace content in files matching a filename pattern, driven by
    a replace_list of the form:
    replace_list = [
        {
            'filename_pattern': '*.css',
            'content_patterns': [
                {
                    'search': '',
                    'replace': '',
                }
            ]
        }
    ]
    """
    file_open_mode = 'r+'
    path_object = Path(path)
    for replace_entry in replace_list or []:
        for path_entry in path_object.glob('**/{}'.format(replace_entry['filename_pattern'])):
            if path_entry.is_file():
                for pattern in replace_entry['content_patterns']:
                    with path_entry.open(mode=file_open_mode) as source_file_object:
                        with tempfile.TemporaryFile(mode=file_open_mode) as temporary_file_object:
                            source_position = 0
                            destination_position = 0
                            while(True):
                                source_file_object.seek(source_position)
                                letter = source_file_object.read(1)
                                if len(letter) == 0:
                                    break
                                else:
                                    if letter == pattern['search'][0]:
                                        text = '{}{}'.format(letter, source_file_object.read(len(pattern['search']) - 1))
                                        temporary_file_object.seek(destination_position)
                                        if text == pattern['search']:
                                            text = pattern['replace']
                                            source_position = source_position + len(pattern['search'])
                                            destination_position = destination_position + len(pattern['replace'])
                                            temporary_file_object.write(text)
                                        else:
                                            source_position = source_position + 1
                                            destination_position = destination_position + 1
                                            temporary_file_object.write(letter)
                                    else:
                                        source_position = source_position + 1
                                        destination_position = destination_position + 1
                                        temporary_file_object.write(letter)
                            source_file_object.seek(0)
                            source_file_object.truncate()
                            temporary_file_object.seek(0)
                            shutil.copyfileobj(fsrc=temporary_file_object, fdst=source_file_object)
def validate_path(path):
    if not os.path.exists(path):
        # If doesn't exist try to create it
        try:
            os.mkdir(path)
        except Exception as exception:
            logger.debug('unhandled exception: %s', exception)
            return False
    # Check if it is writable
    try:
        fd, test_filepath = tempfile.mkstemp(dir=path)
        os.close(fd)
        os.unlink(test_filepath)
    except Exception as exception:
        logger.debug('unhandled exception: %s', exception)
        return False
    return True
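# Usage sketch (illustrative only; the paths and patterns below are assumptions):
# rewrite a URL inside CSS bundles under a local "static" directory and check
# that a scratch directory is writable.
if __name__ == '__main__':
    patch_files(
        path='static',
        replace_list=[
            {
                'filename_pattern': '*.css',
                'content_patterns': [
                    {'search': 'http://old.example.com', 'replace': '/assets'},
                ],
            }
        ],
    )
    print(validate_path('/tmp/scratch'))  # True if the directory is (or can be made) writable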
 | 
| 
	the-stack_106_32325 | 
	import importlib
import torch.utils.data
from data.base_data_loader import BaseDataLoader
from data.base_dataset import BaseDataset
import numpy
def find_dataset_using_name(dataset_name):
    # Given the option --dataset [datasetname],
    # the file "data/datasetname_dataset.py"
    # will be imported.
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)
    # In the file, the class called DatasetNameDataset() will
    # be instantiated. It has to be a subclass of BaseDataset,
    # and it is case-insensitive.
    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() \
           and issubclass(cls, BaseDataset):
            dataset = cls
    if dataset is None:
        print("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
        exit(0)
    return dataset
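# For reference, a minimal dataset module that this loader would discover might
# look like the hypothetical sketch below (file "data/dummy_dataset.py"; the
# names are assumptions, not part of this repository):
#
#     from data.base_dataset import BaseDataset
#
#     class DummyDataset(BaseDataset):
#         def name(self):
#             return 'DummyDataset'
#         def initialize(self, opt):
#             self.opt = opt
#             self.paths = []       # gather sample paths here
#         def __getitem__(self, index):
#             return {}             # return one sample as a dict
#         def __len__(self):
#             return 0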
def get_option_setter(dataset_name):
    dataset_class = find_dataset_using_name(dataset_name)
    return dataset_class.modify_commandline_options
def create_dataset(opt):
    dataset = find_dataset_using_name(opt.dataset)
    instance = dataset()
    instance.initialize(opt)
    print("dataset [%s] was created" % (instance.name()))
    return instance
def CreateDataLoader(opt):
    data_loader = CustomDatasetDataLoader()
    data_loader.initialize(opt)
    return data_loader
# Wrapper class of Dataset class that performs
# multi-threaded data loading
class CustomDatasetDataLoader(BaseDataLoader):
    def name(self):
        return 'CustomDatasetDataLoader'
    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        self.dataset = create_dataset(opt)
        '''
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.num_threads),
            worker_init_fn=lambda worker_id: numpy.random.seed(opt.seed + worker_id))
        '''
        
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,
            num_workers= 0,
            worker_init_fn=lambda worker_id: numpy.random.seed(opt.seed + worker_id))
    def load_data(self):
        return self
    def __len__(self):
        return len(self.dataset)
    def __iter__(self):
        for i, data in enumerate(self.dataloader):
            yield data
 | 
| 
	the-stack_106_32326 | 
	from google.protobuf.json_format import MessageToJson
class EmailComparetor:
    def compareEmailPb(self, oldPb, newPb):
        # If a field is set on the old message, it must also be set on the new one.
        if oldPb.localPart != '' and newPb.localPart == '':
            assert False, self.errorString(errorString="Local Part", newPb=newPb)
        if oldPb.domain != '' and newPb.domain == '':
            assert False, self.errorString(errorString="Domain", newPb=newPb)
    def errorString(self, errorString, newPb):
        return errorString + " cannot be empty: " + MessageToJson(newPb)
 | 
| 
	the-stack_106_32327 | 
	
from scipy import signal
import numpy as np
import dtw
from grbpy.burst import Burst
import  matplotlib.pyplot as plt 
import csv
import os
import pickle
data_path = os.path.join('..','batse_data')
# select the matrix type
matrix_type = 'euclid'
# adds a 25% buffer to the t90 time when False
no_buffer = True
# placeholder threshold: lower it to truncate the matrix build
# to only a few bursts (useful for testing)
begin_with_burst = 999999
def get_burst_data(burst_num):
    '''this function is for parsing in burst data'''
    burst_info = burst_dict[burst_num]
    file_path = os.path.join(data_path,burst_info['burst_file'])
    grb = Burst(file_path)
    grb.parse_batse_file()
    burst_data = grb.sum_chan_data
    header_names = grb.header_names.split()
    header_data = grb.header_data.split()
    meta_dict = {}
    for i in range(len(header_data)):
        meta_dict[header_names[i]] = int(header_data[i])
    time = (np.arange(meta_dict['npts'])-meta_dict['nlasc'])*0.064
    t90_start = float(dur_dict[burst_num]['t90_start'])
    t90_end = float(dur_dict[burst_num]['t90_start']) + float(dur_dict[burst_num]['t90'])
    return time, burst_data, t90_start, t90_end
def remove_background(background_dict,burst_data,time):
    '''this function removes the background from a burst'''
    return burst_data-float(background_dict['intercept'])-(time*float(background_dict['slope']))
def norm_time(time):
    '''this function normalizes time of a burst'''
    return (time-min(time))/(max(time)-min(time))
def norm_data(data):
    '''this function normalizes data of a burst'''
    return data/max(data)
# parse in burst_info.csv
burst_dict = {}
with open(os.path.join('data','burst_info.csv'), newline='') as f:
    for row in csv.DictReader(f, delimiter=','):
        burst_dict[str(row['burst_num'])] = row
# parse in background_table.csv
background_dict = {}
with open(os.path.join('data','background_table.csv'), newline='') as f:
    for row in csv.DictReader(f, delimiter=','):
        background_dict[str(row['burst_num'])] = row
# parse in duration_table.csv
dur_dict = {}
with open(os.path.join('data','duration_table.csv'), newline='') as f:
    for row in csv.DictReader(f, delimiter=','):
        dur_dict[str(row['trig'])] = row
# inits 
distance_matrix = []
burst_list = []
# looping through bursts
for burst_num_1 in background_dict:
    if int(burst_num_1) > begin_with_burst:
        continue
    else:
        
        # build burst list
        burst_list.append(background_dict[burst_num_1]['burst_num'])
        # inits
        calc_matrix = []
        # looping through bursts
        for burst_num_2 in background_dict:
            # skip bursts beyond the truncation placeholder
            if int(burst_num_2) > begin_with_burst:
                continue
            else:
                # convenient way to ensure an upper triangular matrix
                if int(burst_num_2) > int(burst_num_1):
                    # build buffers
                    t90_buffer_1 = float(dur_dict[burst_num_1]['t90']) * 0.25
                    t90_buffer_2 = float(dur_dict[burst_num_2]['t90']) * 0.25
                    # parse in burst data from file
                    time_1, burst_data_1, t90_start_1, t90_end_1 = get_burst_data(burst_num_1)
                    time_2, burst_data_2, t90_start_2, t90_end_2 = get_burst_data(burst_num_2)
                    # remove background
                    burst_data_2 = remove_background(background_dict[burst_num_2],burst_data_2,time_2)
                    burst_data_1 = remove_background(background_dict[burst_num_1],burst_data_1,time_1)
                    
                    # find the length of each burst vector for late use
                    len_t90_time_1 = len(time_1[(time_1 > float(t90_start_1)) & (time_1 < float(t90_end_1))])
                    len_t90_time_2 = len(time_2[(time_2 > float(t90_start_2)) & (time_2 < float(t90_end_2))])
                    
                    if no_buffer:
                        # build the t90 window for each bursts time and data
                        t90_data_1 = burst_data_1[(time_1 > float(t90_start_1)) & (time_1 < float(t90_end_1))]
                        t90_time_1 = time_1[(time_1 > float(t90_start_1)) & (time_1 < float(t90_end_1))]
                        t90_data_2= burst_data_2[(time_2 > float(t90_start_2)) & (time_2 < float(t90_end_2))]
                        t90_time_2 = time_2[(time_2 > float(t90_start_2)) & (time_2 < float(t90_end_2))]
                    else:
                        # build the t90+buffer window for each bursts time and data
                        t90_data_buffer_1 = burst_data_1[(time_1 > (float(t90_start_1)-t90_buffer_1)) & (time_1 < (float(t90_end_1)+t90_buffer_1))]
                        t90_time_buffer_1 = time_1[(time_1 > (float(t90_start_1)-t90_buffer_1)) & (time_1 < (float(t90_end_1)+t90_buffer_1))]
                        t90_data_buffer_2= burst_data_2[(time_2 > (float(t90_start_2)-t90_buffer_2)) & (time_2 < (float(t90_end_2)+t90_buffer_2))]
                        t90_time_buffer_2 = time_2[(time_2 > (float(t90_start_2)-t90_buffer_2)) & (time_2 < (float(t90_end_2)+t90_buffer_2))]
                    # This is a check to make sure the longer vector is the one that is resampled
                    if len_t90_time_1 < len_t90_time_2:
                        if no_buffer:
                            # resample the longer vector
                            resampled_burst, resampled_time = signal.resample(t90_data_2, len(t90_time_1), t=time_2)
                            # rename the other burst
                            other_burst, other_time = t90_data_1, t90_time_1
                        else:
                            # resample the longer vector
                            resampled_burst, resampled_time = signal.resample(t90_data_buffer_2, len(t90_time_buffer_1), t=time_2)
                            # rename the other burst
                            other_burst, other_time = t90_data_buffer_1, t90_time_buffer_1
                    elif len_t90_time_1 > len_t90_time_2:
                        if no_buffer:
                            # resample the longer vector
                            resampled_burst, resampled_time = signal.resample(t90_data_1, len(t90_time_2), t=time_1)
                            # rename the other burst
                            other_burst, other_time = t90_data_2, t90_time_2
                        else:
                            # resample the longer vector
                            resampled_burst, resampled_time = signal.resample(t90_data_buffer_1, len(t90_time_buffer_2), t=time_1)
                            # rename the other burst
                            other_burst, other_time = t90_data_buffer_2, t90_time_buffer_2
                    else:
                        # equal lengths: no resampling defined, skip this pair
                        continue
                    # normalize the scale of each vector from 0 to 1
                    norm_resampled = norm_data(resampled_burst)
                    norm_other     = norm_data(other_burst)
                    #################################
                    # Zero Normalized Cross Correlation
                    if matrix_type == 'corr':
                        norm_resampled = (norm_resampled - np.mean(norm_resampled)) / (np.std(norm_resampled))
                        norm_other = (norm_other - np.mean(norm_other)) / (np.std(norm_other))
                        corr = signal.correlate(norm_resampled,norm_other) / max(len(norm_resampled), len(norm_other))
                        calc = max(corr)
                    #################################
                    # Euclidean Norm
                    elif matrix_type == 'euclid':
                        calc = np.linalg.norm(norm_resampled-norm_other)
                    #################################
                    # Normalized Manhattan Distance
                    elif matrix_type == 'norm':
                        calc = np.linalg.norm(norm_resampled-norm_other, ord=1)/len(resampled_burst)
                    #################################
                    # Dynamic Time Warping
                    elif matrix_type == 'dtw':
                        DTW = dtw.dtw(norm_resampled,norm_other)
                        calc = DTW.normalizedDistance
                    else:
                        print('unsupported matrix_type')
                        continue
                    print('burst 1:',burst_num_1,'- burst 2',burst_num_2,'-',matrix_type,'dist:',calc)
                    # append to the distance matrix
                    distance_matrix.append(calc)
# write out the burst list to a python pickle file
with open(os.path.join('data',matrix_type+'_burst_list'+('_no_buffer' if no_buffer else '')+'.pkl'), 'wb') as f:
    pickle.dump(burst_list, f)
# write out the distance matrix to a python pickle file
with open(os.path.join('data',matrix_type+'_matrix'+('_no_buffer' if no_buffer else '')+'.pkl'), 'wb') as f:
    pickle.dump(distance_matrix, f) | 
| 
	the-stack_106_32328 | 
	# Copyright 2012 by Wibowo Arindrarto.  All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Biopython interface for sequence search program outputs.
The SearchIO submodule provides parsers, indexers, and writers for outputs from
various sequence search programs. It provides an API similar to SeqIO and
AlignIO, with the following main functions: ``parse``, ``read``, ``to_dict``,
``index``, ``index_db``, ``write``, and ``convert``.
SearchIO parses a search output file's contents into a hierarchy of four nested
objects: QueryResult, Hit, HSP, and HSPFragment. Each of them models a part of
the search output file:
    - QueryResult represents a search query. This is the main object returned
      by the input functions and it contains all other objects.
    - Hit represents a database hit,
    - HSP represents high-scoring alignment region(s) in the hit,
    - HSPFragment represents a contiguous alignment within the HSP
In addition to the four objects above, SearchIO is also tightly integrated with
the SeqRecord objects (see SeqIO) and MultipleSeqAlignment objects (see
AlignIO). SeqRecord objects are used to store the actual matching hit and query
sequences, while MultipleSeqAlignment objects store the alignment between them.
A detailed description of these objects' features and their example usages are
available in their respective documentation.
Input
=====
The main function for parsing search output files is Bio.SearchIO.parse(...).
This function parses a given search output file and returns a generator object
that yields one QueryResult object per iteration.
``parse`` takes two arguments: 1) a file handle or a filename of the input file
(the search output file) and 2) the format name.
    >>> from Bio import SearchIO
    >>> for qresult in SearchIO.parse('Blast/mirna.xml', 'blast-xml'):
    ...     print("%s %s" % (qresult.id, qresult.description))
    ...
    33211 mir_1
    33212 mir_2
    33213 mir_3
SearchIO also provides the Bio.SearchIO.read(...) function, which is intended
for use on search output files containing only one query. ``read`` returns one
QueryResult object and will raise an exception if the source file contains more
than one queries:
    >>> qresult = SearchIO.read('Blast/xml_2226_blastp_004.xml', 'blast-xml')
    >>> print("%s %s" % (qresult.id, qresult.description))
    ...
    gi|11464971:4-101 pleckstrin [Mus musculus]
    >>> SearchIO.read('Blast/mirna.xml', 'blast-xml')
    Traceback (most recent call last):
    ...
    ValueError: ...
For accessing search results of large output files, you may use the indexing
functions Bio.SearchIO.index(...) or Bio.SearchIO.index_db(...). They have a
similar interface to their counterparts in SeqIO and AlignIO, with the addition
of optional, format-specific keyword arguments.
Output
======
SearchIO has writing support for several formats, accessible from the
Bio.SearchIO.write(...) function. This function returns a tuple of four
numbers: the number of QueryResult, Hit, HSP, and HSPFragment written::
    qresults = SearchIO.parse('Blast/mirna.xml', 'blast-xml')
    SearchIO.write(qresults, 'results.tab', 'blast-tab')
    <stdout> (3, 239, 277, 277)
Note that different writers may require different attribute values of the
SearchIO objects. This limits the scope of writable search results to search
results possessing the required attributes.
For example, the writer for HMMER domain table output requires
the conditional e-value attribute from each HSP object, among others. If you
try to write to the HMMER domain table format and your HSPs do not have this
attribute, an exception will be raised.
Conversion
==========
SearchIO provides a shortcut function Bio.SearchIO.convert(...) to convert a
given file into another format. Under the hood, ``convert`` simply parses a given
output file and writes it to another using the ``parse`` and ``write`` functions.
Note that the same restrictions found in Bio.SearchIO.write(...) applies to the
convert function as well.
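As a small illustrative sketch (the file names simply mirror the ``write``
example above), converting those BLAST XML results into the tabular format
could look like::
    SearchIO.convert('Blast/mirna.xml', 'blast-xml', 'results.tab', 'blast-tab')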
Conventions
===========
The main goal of creating SearchIO is to have a common, easy to use interface
across different search output files. As such, we have also created some
conventions / standards for SearchIO that extend beyond the common object model.
These conventions apply to all files parsed by SearchIO, regardless of their
individual formats.
Python-style sequence coordinates
---------------------------------
When storing sequence coordinates (start and end values), SearchIO uses
the Python-style slice convention: zero-based and half-open intervals. For
example, if in a BLAST XML output file the start and end coordinates of an
HSP are 10 and 28, they would become 9 and 28 in SearchIO. The start
coordinate becomes 9 because Python indices start from zero, while the end
coordinate remains 28 as Python slices omit the last item in an interval.
Beside giving you the benefits of standardization, this convention also
makes the coordinates usable for slicing sequences. For example, given a
full query sequence and the start and end coordinates of an HSP, one can
use the coordinates to extract part of the query sequence that results in
the database hit.
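As a short sketch of that idea (``full_query`` is assumed to hold the complete
query sequence as a string and ``hsp`` to be an HSP object from the same
search)::
    matched_region = full_query[hsp.query_start:hsp.query_end]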
When these objects are written to an output file using
SearchIO.write(...), the coordinate values are restored to their
respective format's convention. Using the example above, if the HSP would
be written to an XML file, the start and end coordinates would become 10
and 28 again.
Sequence coordinate order
-------------------------
Some search output format reverses the start and end coordinate sequences
according to the sequence's strand. For example, in BLAST plain text
format if the matching strand lies in the minus orientation, then the
start coordinate will always be bigger than the end coordinate.
In SearchIO, start coordinates are always smaller than the end
coordinates, regardless of their originating strand. This ensures
consistency when using the coordinates to slice full sequences.
Note that this coordinate order convention is only enforced in the
HSPFragment level. If an HSP object has several HSPFragment objects, each
individual fragment will conform to this convention. But the order of the
fragments within the HSP object follows what the search output file uses.
Similar to the coordinate style convention, the start and end coordinates'
order are restored to their respective formats when the objects are
written using Bio.SearchIO.write(...).
Frames and strand values
------------------------
SearchIO only allows -1, 0, 1 and None as strand values. For frames, the
only allowed values are integers from -3 to 3 (inclusive) and None. Both
of these are standard Biopython conventions.
Supported Formats
=================
Below is a list of search program output formats supported by SearchIO.
Support for parsing, indexing, and writing:
 - blast-tab        - BLAST+ tabular output. Both variants without comments
                      (-m 6 flag) and with comments (-m 7 flag) are supported.
 - blast-xml        - BLAST+ XML output.
 - blat-psl         - The default output of BLAT (PSL format). Variants with or
                      without header are both supported. PSLX (PSL + sequences)
                      is also supported.
 - hmmer3-tab       - HMMER3 table output.
 - hmmer3-domtab    - HMMER3 domain table output. When using this format, the
                      program name has to be specified. For example, for parsing
                      hmmscan output, the name would be 'hmmscan-domtab'.
Support for parsing and indexing:
 - exonerate-text   - Exonerate plain text output.
 - exonerate-vulgar - Exonerate vulgar line.
 - exonerate-cigar  - Exonerate cigar line.
 - fasta-m10        - Bill Pearson's FASTA -m 10 output.
 - hmmer3-text      - HMMER3 regular text output format. Supported HMMER3
                      subprograms are hmmscan, hmmsearch, and phmmer.
 - hmmer2-text      - HMMER2 regular text output format. Supported HMMER2
                      subprograms are hmmpfam, hmmsearch.
Support for parsing:
 - blast-text       - BLAST+ plain text output.
 - hhsuite2-text    - HHSUITE plain text output.
Each of these formats have different keyword arguments available for use with
the main SearchIO functions. More details and examples are available in each
of the format's documentation.
"""
import sys
from Bio.File import as_handle
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
from Bio.SearchIO._utils import get_processor
__all__ = ("read", "parse", "to_dict", "index", "index_db", "write", "convert")
# dictionary of supported formats for parse() and read()
_ITERATOR_MAP = {
    "blast-tab": ("BlastIO", "BlastTabParser"),
    "blast-text": ("BlastIO", "BlastTextParser"),
    "blast-xml": ("BlastIO", "BlastXmlParser"),
    "blat-psl": ("BlatIO", "BlatPslParser"),
    "exonerate-cigar": ("ExonerateIO", "ExonerateCigarParser"),
    "exonerate-text": ("ExonerateIO", "ExonerateTextParser"),
    "exonerate-vulgar": ("ExonerateIO", "ExonerateVulgarParser"),
    "fasta-m10": ("FastaIO", "FastaM10Parser"),
    "hhsuite2-text": ("HHsuiteIO", "Hhsuite2TextParser"),
    "hhsuite3-text": ("HHsuiteIO", "Hhsuite2TextParser"),
    "hmmer2-text": ("HmmerIO", "Hmmer2TextParser"),
    "hmmer3-text": ("HmmerIO", "Hmmer3TextParser"),
    "hmmer3-tab": ("HmmerIO", "Hmmer3TabParser"),
    # for hmmer3-domtab, the specific program is part of the format name
    # as we need it distinguish hit / target coordinates
    "hmmscan3-domtab": ("HmmerIO", "Hmmer3DomtabHmmhitParser"),
    "hmmsearch3-domtab": ("HmmerIO", "Hmmer3DomtabHmmqueryParser"),
    "interproscan-xml": ("InterproscanIO", "InterproscanXmlParser"),
    "phmmer3-domtab": ("HmmerIO", "Hmmer3DomtabHmmqueryParser"),
}
# dictionary of supported formats for index()
_INDEXER_MAP = {
    "blast-tab": ("BlastIO", "BlastTabIndexer"),
    "blast-xml": ("BlastIO", "BlastXmlIndexer"),
    "blat-psl": ("BlatIO", "BlatPslIndexer"),
    "exonerate-cigar": ("ExonerateIO", "ExonerateCigarIndexer"),
    "exonerate-text": ("ExonerateIO", "ExonerateTextIndexer"),
    "exonerate-vulgar": ("ExonerateIO", "ExonerateVulgarIndexer"),
    "fasta-m10": ("FastaIO", "FastaM10Indexer"),
    "hmmer2-text": ("HmmerIO", "Hmmer2TextIndexer"),
    "hmmer3-text": ("HmmerIO", "Hmmer3TextIndexer"),
    "hmmer3-tab": ("HmmerIO", "Hmmer3TabIndexer"),
    "hmmscan3-domtab": ("HmmerIO", "Hmmer3DomtabHmmhitIndexer"),
    "hmmsearch3-domtab": ("HmmerIO", "Hmmer3DomtabHmmqueryIndexer"),
    "phmmer3-domtab": ("HmmerIO", "Hmmer3DomtabHmmqueryIndexer"),
}
# dictionary of supported formats for write()
_WRITER_MAP = {
    "blast-tab": ("BlastIO", "BlastTabWriter"),
    "blast-xml": ("BlastIO", "BlastXmlWriter"),
    "blat-psl": ("BlatIO", "BlatPslWriter"),
    "hmmer3-tab": ("HmmerIO", "Hmmer3TabWriter"),
    "hmmscan3-domtab": ("HmmerIO", "Hmmer3DomtabHmmhitWriter"),
    "hmmsearch3-domtab": ("HmmerIO", "Hmmer3DomtabHmmqueryWriter"),
    "phmmer3-domtab": ("HmmerIO", "Hmmer3DomtabHmmqueryWriter"),
}
def parse(handle, format=None, **kwargs):
    """Iterate over search tool output file as QueryResult objects.
    Arguments:
     - handle - Handle to the file, or the filename as a string.
     - format - Lower case string denoting one of the supported formats.
     - kwargs - Format-specific keyword arguments.
    This function is used to iterate over each query in a given search output
    file:
    >>> from Bio import SearchIO
    >>> qresults = SearchIO.parse('Blast/mirna.xml', 'blast-xml')
    >>> qresults
    <generator object ...>
    >>> for qresult in qresults:
    ...     print("Search %s has %i hits" % (qresult.id, len(qresult)))
    ...
    Search 33211 has 100 hits
    Search 33212 has 44 hits
    Search 33213 has 95 hits
    Depending on the file format, ``parse`` may also accept additional keyword
    argument(s) that modifies the behavior of the format parser. Here is a
    simple example, where the keyword argument enables parsing of a commented
    BLAST tabular output file:
    >>> from Bio import SearchIO
    >>> for qresult in SearchIO.parse('Blast/mirna.tab', 'blast-tab', comments=True):
    ...     print("Search %s has %i hits" % (qresult.id, len(qresult)))
    ...
    Search 33211 has 100 hits
    Search 33212 has 44 hits
    Search 33213 has 95 hits
    """
    # get the iterator object and do error checking
    iterator = get_processor(format, _ITERATOR_MAP)
    # HACK: force BLAST XML decoding to use utf-8
    handle_kwargs = {}
    if format == "blast-xml":
        handle_kwargs["encoding"] = "utf-8"
    # and start iterating
    with as_handle(handle, "rU", **handle_kwargs) as source_file:
        generator = iterator(source_file, **kwargs)
        yield from generator
def read(handle, format=None, **kwargs):
    """Turn a search output file containing one query into a single QueryResult.
     - handle - Handle to the file, or the filename as a string.
     - format - Lower case string denoting one of the supported formats.
     - kwargs - Format-specific keyword arguments.
    ``read`` is used for parsing search output files containing exactly one query:
    >>> from Bio import SearchIO
    >>> qresult = SearchIO.read('Blast/xml_2226_blastp_004.xml', 'blast-xml')
    >>> print("%s %s" % (qresult.id, qresult.description))
    ...
    gi|11464971:4-101 pleckstrin [Mus musculus]
    If the given handle has no results, an exception will be raised:
    >>> from Bio import SearchIO
    >>> qresult = SearchIO.read('Blast/tab_2226_tblastn_002.txt', 'blast-tab')
    Traceback (most recent call last):
    ...
    ValueError: No query results found in handle
    Similarly, if the given handle has more than one results, an exception will
    also be raised:
    >>> from Bio import SearchIO
    >>> qresult = SearchIO.read('Blast/tab_2226_tblastn_001.txt', 'blast-tab')
    Traceback (most recent call last):
    ...
    ValueError: More than one query results found in handle
    Like ``parse``, ``read`` may also accept keyword argument(s) depending on the
    search output file format.
    """
    generator = parse(handle, format, **kwargs)
    try:
        first = next(generator)
    except StopIteration:
        raise ValueError("No query results found in handle")
    else:
        try:
            second = next(generator)
        except StopIteration:
            second = None
    if second is not None:
        raise ValueError("More than one query results found in handle")
    return first
def to_dict(qresults, key_function=None):
    """Turn a QueryResult iterator or list into a dictionary.
     - qresults     - Iterable returning QueryResult objects.
     - key_function - Optional callback function which when given a
                      QueryResult object should return a unique key for the
                      dictionary. Defaults to using .id of the result.
    This function enables access of QueryResult objects from a single search
    output file using its identifier.
    >>> from Bio import SearchIO
    >>> qresults = SearchIO.parse('Blast/wnts.xml', 'blast-xml')
    >>> search_dict = SearchIO.to_dict(qresults)
    >>> list(search_dict)
    ['gi|195230749:301-1383', 'gi|325053704:108-1166', ..., 'gi|53729353:216-1313']
    >>> search_dict['gi|156630997:105-1160']
    QueryResult(id='gi|156630997:105-1160', 5 hits)
    By default, the dictionary key is the QueryResult's string ID. This may be
    changed by supplying a callback function that returns the desired identifier.
    Here is an example using a function that removes the 'gi|' part in the
    beginning of the QueryResult ID.
    >>> from Bio import SearchIO
    >>> qresults = SearchIO.parse('Blast/wnts.xml', 'blast-xml')
    >>> key_func = lambda qresult: qresult.id.split('|')[1]
    >>> search_dict = SearchIO.to_dict(qresults, key_func)
    >>> list(search_dict)
    ['195230749:301-1383', '325053704:108-1166', ..., '53729353:216-1313']
    >>> search_dict['156630997:105-1160']
    QueryResult(id='gi|156630997:105-1160', 5 hits)
    Note that the callback function does not change the QueryResult's ID value.
    It only changes the key value used to retrieve the associated QueryResult.
    As this function loads all QueryResult objects into memory, it may be
    unsuitable for dealing with files containing many queries. In that case, it
    is recommended that you use either ``index`` or ``index_db``.
    Since Python 3.7, the default dict class maintains key order, meaning
    this dictionary will reflect the order of records given to it. For
    CPython and PyPy, this was already implemented for Python 3.6, so
    effectively you can always assume the record order is preserved.
    """
    # This comment stops black style adding a blank line here, which causes flake8 D202.
    def _default_key_function(rec):
        return rec.id
    if key_function is None:
        key_function = _default_key_function
    qdict = {}
    for qresult in qresults:
        key = key_function(qresult)
        if key in qdict:
            raise ValueError("Duplicate key %r" % key)
        qdict[key] = qresult
    return qdict
def index(filename, format=None, key_function=None, **kwargs):
    """Indexes a search output file and returns a dictionary-like object.
     - filename     - string giving name of file to be indexed
     - format       - Lower case string denoting one of the supported formats.
     - key_function - Optional callback function which when given a
                      QueryResult should return a unique key for the dictionary.
     - kwargs       - Format-specific keyword arguments.
    Index returns a pseudo-dictionary object with QueryResult objects as its
    values and a string identifier as its keys. The function is mainly useful
    for dealing with large search output files, as it enables access to any
    given QueryResult object much faster than using parse or read.
    Index works by storing in-memory the start locations of all queries in a
    file. When a user requested access to the query, this function will jump
    to its start position, parse the whole query, and return it as a
    QueryResult object:
    >>> from Bio import SearchIO
    >>> search_idx = SearchIO.index('Blast/wnts.xml', 'blast-xml')
    >>> search_idx
    SearchIO.index('Blast/wnts.xml', 'blast-xml', key_function=None)
    >>> sorted(search_idx)
    ['gi|156630997:105-1160', 'gi|195230749:301-1383', ..., 'gi|53729353:216-1313']
    >>> search_idx['gi|195230749:301-1383']
    QueryResult(id='gi|195230749:301-1383', 5 hits)
    >>> search_idx.close()
    If the file is BGZF compressed, this is detected automatically. Ordinary
    GZIP files are not supported:
    >>> from Bio import SearchIO
    >>> search_idx = SearchIO.index('Blast/wnts.xml.bgz', 'blast-xml')
    >>> search_idx
    SearchIO.index('Blast/wnts.xml.bgz', 'blast-xml', key_function=None)
    >>> search_idx['gi|195230749:301-1383']
    QueryResult(id='gi|195230749:301-1383', 5 hits)
    >>> search_idx.close()
    You can supply a custom callback function to alter the default identifier
    string. This function should accept as its input the QueryResult ID string
    and return a modified version of it.
    >>> from Bio import SearchIO
    >>> key_func = lambda id: id.split('|')[1]
    >>> search_idx = SearchIO.index('Blast/wnts.xml', 'blast-xml', key_func)
    >>> search_idx
    SearchIO.index('Blast/wnts.xml', 'blast-xml', key_function=<function <lambda> at ...>)
    >>> sorted(search_idx)
    ['156630997:105-1160', ..., '371502086:108-1205', '53729353:216-1313']
    >>> search_idx['156630997:105-1160']
    QueryResult(id='gi|156630997:105-1160', 5 hits)
    >>> search_idx.close()
    Note that the callback function does not change the QueryResult's ID value.
    It only changes the key value used to retrieve the associated QueryResult.
    """
    if not isinstance(filename, str):
        raise TypeError("Need a filename (not a handle)")
    from Bio.File import _IndexedSeqFileDict
    proxy_class = get_processor(format, _INDEXER_MAP)
    repr = "SearchIO.index(%r, %r, key_function=%r)" % (filename, format, key_function)
    return _IndexedSeqFileDict(
        proxy_class(filename, **kwargs), key_function, repr, "QueryResult"
    )
def index_db(index_filename, filenames=None, format=None, key_function=None, **kwargs):
    """Indexes several search output files into an SQLite database.
     - index_filename - The SQLite filename.
     - filenames    - List of strings specifying file(s) to be indexed, or when
                      indexing a single file this can be given as a string.
                      (optional if reloading an existing index, but must match)
     - format       - Lower case string denoting one of the supported formats.
                      (optional if reloading an existing index, but must match)
     - key_function - Optional callback function which when given a
                      QueryResult identifier string should return a unique
                      key for the dictionary.
     - kwargs       - Format-specific keyword arguments.
    The ``index_db`` function is similar to ``index`` in that it indexes the start
    positions of all queries in the given search output files. The main difference
    is that instead of storing these indices in memory, they are written to disk
    as an SQLite database file. This allows the indices to persist between Python
    sessions, so subsequent runs can access any query in the file without any
    indexing overhead, provided the file has been indexed at least once.
    >>> from Bio import SearchIO
    >>> idx_filename = ":memory:" # Use a real filename, this is in RAM only!
    >>> db_idx = SearchIO.index_db(idx_filename, 'Blast/mirna.xml', 'blast-xml')
    >>> sorted(db_idx)
    ['33211', '33212', '33213']
    >>> db_idx['33212']
    QueryResult(id='33212', 44 hits)
    >>> db_idx.close()
    ``index_db`` can also index multiple files and store them in the same
    database, making it easier to group multiple search files and access them
    from a single interface.
    >>> from Bio import SearchIO
    >>> idx_filename = ":memory:" # Use a real filename, this is in RAM only!
    >>> files = ['Blast/mirna.xml', 'Blast/wnts.xml']
    >>> db_idx = SearchIO.index_db(idx_filename, files, 'blast-xml')
    >>> sorted(db_idx)
    ['33211', '33212', '33213', 'gi|156630997:105-1160', ..., 'gi|53729353:216-1313']
    >>> db_idx['33212']
    QueryResult(id='33212', 44 hits)
    >>> db_idx.close()
    One common example where this is helpful is if you had a large set of
    query sequences (say ten thousand) which you split into ten query files
    of one thousand sequences each in order to run as ten separate BLAST jobs
    on a cluster. You could use ``index_db`` to index the ten BLAST output
    files together for seamless access to all the results as one dictionary.
    Note that ':memory:' rather than an index filename tells SQLite to hold
    the index database in memory. This is useful for quick tests, but using
    the Bio.SearchIO.index(...) function instead would use less memory.
    BGZF compressed files are supported, and detected automatically. Ordinary
    GZIP compressed files are not supported.
    See also Bio.SearchIO.index(), Bio.SearchIO.to_dict(), and the Python module
    glob which is useful for building lists of files.
    """
    # cast filenames to list if it's a string
    # (can we check if it's a string or a generator?)
    if isinstance(filenames, str):
        filenames = [filenames]
    from Bio.File import _SQLiteManySeqFilesDict
    repr = "SearchIO.index_db(%r, filenames=%r, format=%r, key_function=%r, ...)" % (
        index_filename,
        filenames,
        format,
        key_function,
    )
    def proxy_factory(format, filename=None):
        """Given a filename returns proxy object, else boolean if format OK."""
        if filename:
            return get_processor(format, _INDEXER_MAP)(filename, **kwargs)
        else:
            return format in _INDEXER_MAP
    return _SQLiteManySeqFilesDict(
        index_filename, filenames, proxy_factory, format, key_function, repr
    )
def write(qresults, handle, format=None, **kwargs):
    """Write QueryResult objects to a file in the given format.
     - qresults - An iterator returning QueryResult objects or a single
                  QueryResult object.
     - handle   - Handle to the file, or the filename as a string.
     - format   - Lower case string denoting one of the supported formats.
     - kwargs   - Format-specific keyword arguments.
    The ``write`` function writes QueryResult object(s) into the given output
    handle / filename. You can supply it with a single QueryResult object or an
    iterable returning one or more QueryResult objects. In both cases, the
    function will return a tuple of four values: the number of QueryResult, Hit,
    HSP, and HSPFragment objects it writes to the output file::
        from Bio import SearchIO
        qresults = SearchIO.parse('Blast/mirna.xml', 'blast-xml')
        SearchIO.write(qresults, 'results.tab', 'blast-tab')
        <stdout> (3, 239, 277, 277)
    The output of different formats may be adjusted using the format-specific
    keyword arguments. Here is an example that writes BLAT PSL output file with
    a header::
        from Bio import SearchIO
        qresults = SearchIO.parse('Blat/psl_34_001.psl', 'blat-psl')
        SearchIO.write(qresults, 'results.tab', 'blat-psl', header=True)
        <stdout> (2, 13, 22, 26)
    """
    # turn qresults into an iterator if it's a single QueryResult object
    if isinstance(qresults, QueryResult):
        qresults = iter([qresults])
    else:
        qresults = iter(qresults)
    # get the writer object and do error checking
    writer_class = get_processor(format, _WRITER_MAP)
    # write to the handle
    with as_handle(handle, "w") as target_file:
        writer = writer_class(target_file, **kwargs)
        # count how many qresults, hits, and hsps
        qresult_count, hit_count, hsp_count, frag_count = writer.write_file(qresults)
    return qresult_count, hit_count, hsp_count, frag_count
def convert(in_file, in_format, out_file, out_format, in_kwargs=None, out_kwargs=None):
    """Convert between two search output formats, return number of records.
     - in_file    - Handle to the input file, or the filename as string.
     - in_format  - Lower case string denoting the format of the input file.
     - out_file   - Handle to the output file, or the filename as string.
     - out_format - Lower case string denoting the format of the output file.
     - in_kwargs  - Dictionary of keyword arguments for the input function.
     - out_kwargs - Dictionary of keyword arguments for the output function.
    The convert function is a shortcut function for ``parse`` and ``write``. It has
    the same return type as ``write``. Format-specific arguments may be passed to
    the convert function, but only as dictionaries.
    Here is an example of using ``convert`` to convert from a BLAST+ XML file
    into a tabular file with comments::
        from Bio import SearchIO
        in_file = 'Blast/mirna.xml'
        in_fmt = 'blast-xml'
        out_file = 'results.tab'
        out_fmt = 'blast-tab'
        out_kwarg = {'comments': True}
        SearchIO.convert(in_file, in_fmt, out_file, out_fmt, out_kwargs=out_kwarg)
        <stdout> (3, 239, 277, 277)
    Given that different search output files provide different statistics and
    different levels of detail, the convert function is limited to converting
    between formats that share the same statistics, and only towards formats
    with the same level of detail or less.
    For example, converting from a BLAST+ XML output to a HMMER table file
    is not possible, as these are two search programs with different kinds of
    statistics. In theory, you may provide the necessary values required by the
    HMMER table file (e.g. conditional e-values, envelope coordinates, etc.).
    However, these values are likely to hold little meaning as they are not true
    HMMER-computed values.
    Another example is converting from a BLAST+ XML file to a BLAST+ tabular
    file. This is possible, as BLAST+ XML provides all the values necessary to
    create a BLAST+ tabular file. However, the reverse conversion may not be
    possible, as the XML file covers details that are not found in a tabular
    file (e.g. the lambda and kappa values).
    """
    if in_kwargs is None:
        in_kwargs = {}
    if out_kwargs is None:
        out_kwargs = {}
    qresults = parse(in_file, in_format, **in_kwargs)
    return write(qresults, out_file, out_format, **out_kwargs)
# if not used as a module, run the doctest
if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest()
 | 
| 
	the-stack_106_32329 | 
	import pygame as pg
from settings import *
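# Note (illustrative only, not part of the original repo): the settings module
# imported above is assumed to define at least the constants used in this file.
# A minimal sketch could look like:
#
#   TILESIZE = 32              # pixel size of one grid tile (value is a guess)
#   YELLOW = (255, 255, 0)     # player colour
#   GREEN = (0, 255, 0)        # wall colour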
class Player(pg.sprite.Sprite):
    def __init__(self, game, x, y):
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = pg.Surface((TILESIZE, TILESIZE))
        self.image.fill(YELLOW)
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
    def move(self, dx=0, dy=0):
        if not self.collide_with_walls(dx, dy):
            self.x += dx
            self.y += dy
    def collide_with_walls(self, dx=0, dy=0):
        for wall in self.game.walls:
            if wall.x == self.x + dx and wall.y == self.y + dy:
                return True
        return False
    def update(self):
        self.rect.x = self.x * TILESIZE
        self.rect.y = self.y * TILESIZE
class Wall(pg.sprite.Sprite):
    def __init__(self, game, x, y):
        self.groups = game.all_sprites, game.walls
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = pg.Surface((TILESIZE, TILESIZE))
        self.image.fill(GREEN)
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.rect.x = x * TILESIZE
        self.rect.y = y * TILESIZE | 
| 
	the-stack_106_32331 | 
	from typing import Tuple, FrozenSet
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type,     msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    geq = msat_make_geq(menv, arg0, arg1)
    return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    leq = msat_make_leq(menv, arg0, arg1)
    return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    n_arg0 = msat_make_not(menv, arg0)
    return msat_make_or(menv, n_arg0, arg1)
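# The four helpers above only rearrange the primitives MathSAT exposes here
# (leq, equal, not, and, or): for example, a < b is encoded as not(b <= a)
# and a -> b as (not a) or b.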
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
                                                        msat_term, msat_term]:
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    bool_type = msat_get_bool_type(menv)
    real_type = msat_get_rational_type(menv)
    i = msat_declare_function(menv, "i", real_type)
    i = msat_make_constant(menv, i)
    r = msat_declare_function(menv, "r", real_type)
    r = msat_make_constant(menv, r)
    l = msat_declare_function(menv, "l", real_type)
    l = msat_make_constant(menv, l)
    inc_i = msat_declare_function(menv, "inc_i", bool_type)
    inc_i = msat_make_constant(menv, inc_i)
    x_i = msat_declare_function(menv, name_next("i"), real_type)
    x_i = msat_make_constant(menv, x_i)
    x_r = msat_declare_function(menv, name_next("r"), real_type)
    x_r = msat_make_constant(menv, x_r)
    x_l = msat_declare_function(menv, name_next("l"), real_type)
    x_l = msat_make_constant(menv, x_l)
    x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
    x_inc_i = msat_make_constant(menv, x_inc_i)
    curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
    zero = msat_make_number(menv, "0")
    one = msat_make_number(menv, "1")
    r_gt_0 = msat_make_gt(menv, r, zero)
    r_lt_l = msat_make_lt(menv, r, l)
    i_geq_0 = msat_make_geq(menv, i, zero)
    init = msat_make_and(menv, r_gt_0, r_lt_l)
    init = msat_make_and(menv, init,
                         msat_make_and(menv, i_geq_0,
                                       msat_make_not(menv, inc_i)))
    init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
    # r' = r
    trans = msat_make_equal(menv, x_r, r)
    # i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
    i_lt_l = msat_make_lt(menv, i, l)
    x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
                                 msat_make_equal(menv, x_i,
                                                 msat_make_plus(menv, i, one)))
    x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
                             msat_make_equal(menv, x_i, i))
    x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
    x_l_eq_l = msat_make_equal(menv, x_l, l)
    x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
                                                   x_l_eq_l)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_lt_l,
                                         x_i_eq_i_p_1_or_i_and_x_l_eq_l))
    # i >= l -> i' = 0 & l' = l + 1 & !inc_i'
    i_geq_l = msat_make_geq(menv, i, l)
    x_i_eq_0 = msat_make_equal(menv, x_i, zero)
    x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
    x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
                                              msat_make_and(menv, x_i_eq_0,
                                                            x_l_eq_l_p_1),
                                              msat_make_not(menv, x_inc_i))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_geq_l,
                                         x_i_eq_0_and_x_l_eq_l_p_1))
    # (G F inc_i) -> ! G F r > i
    G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
    return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    i = mgr.Symbol("i", types.REAL)
    r = mgr.Symbol("r", types.REAL)
    l = mgr.Symbol("l", types.REAL)
    inc_i = mgr.Symbol("inc_i", types.BOOL)
    symbs = frozenset([i, r, l, inc_i])
    x_i = symb_to_next(mgr, i)
    x_r = symb_to_next(mgr, r)
    x_l = symb_to_next(mgr, l)
    x_inc_i = symb_to_next(mgr, inc_i)
    res = []
    n0 = mgr.Real(0)
    n1 = mgr.Real(1)
    stutter = mgr.Equals(x_i, i)
    loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
    h_i = Hint("h_i0", env, frozenset([i]), symbs)
    h_i.set_locs([loc])
    res.append(h_i)
    loc = Location(env, mgr.GE(r, n0))
    loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
    h_r = Hint("h_r0", env, frozenset([r]), symbs)
    h_r.set_locs([loc])
    res.append(h_r)
    loc = Location(env, mgr.GE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
    h_l = Hint("h_l0", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    loc = Location(env, inc_i)
    loc.set_progress(0, x_inc_i)
    h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc])
    res.append(h_inc)
    stutter = mgr.Equals(x_i, i)
    loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
    h_i = Hint("h_i1", env, frozenset([i]), symbs)
    h_i.set_locs([loc])
    res.append(h_i)
    loc = Location(env, mgr.LE(r, n0))
    loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
    h_r = Hint("h_r1", env, frozenset([r]), symbs)
    h_r.set_locs([loc])
    res.append(h_r)
    loc = Location(env, mgr.LE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
    h_l = Hint("h_l1", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    loc0 = Location(env, mgr.GE(i, n0))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i2", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1])
    res.append(h_i)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i)
    loc1.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1])
    res.append(h_inc)
    loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
                    stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i3", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1])
    res.append(h_i)
    loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
                    stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
    loc0.set_progress(1, mgr.Equals(x_r, r))
    loc1 = Location(env, mgr.GE(r, n0))
    loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
    h_r = Hint("h_r3", env, frozenset([r]), symbs)
    h_r.set_locs([loc0, loc1])
    res.append(h_r)
    loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
                    stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l3", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1])
    res.append(h_l)
    loc0 = Location(env, mgr.GE(i, n0))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(2, mgr.Equals(x_i, i))
    loc2 = Location(env, mgr.GE(i, n0))
    loc2.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i4", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1, loc2])
    res.append(h_i)
    loc0 = Location(env, mgr.GE(r, n0))
    loc0.set_progress(1, mgr.Equals(x_r, r))
    loc1 = Location(env, mgr.GE(r, n0))
    loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
    loc2 = Location(env, mgr.GE(r, n0))
    loc2.set_progress(0, mgr.Equals(x_r, r))
    h_r = Hint("h_r4", env, frozenset([r]), symbs)
    h_r.set_locs([loc0, loc1, loc2])
    res.append(h_r)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i, stutterT=x_inc_i)
    loc1.set_progress(2, mgr.Not(x_inc_i))
    loc2 = Location(env, mgr.Not(inc_i))
    loc2.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1, loc2])
    res.append(h_inc)
    return frozenset(res)
 | 
| 
	the-stack_106_32334 | 
	"""
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ("module_intent", "0002_auto_20201009_1142"),
    ]
    operations = [
        migrations.AlterField(
            model_name="intent",
            name="status",
            field=models.BooleanField(default=True, verbose_name="意图状态"),
        ),
    ]
 | 
| 
	the-stack_106_32335 | 
	#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020 by Inria
Authoried by Xiaoyu BIE ([email protected])
License agreement in LICENSE.txt
"""
import datetime
import scipy.io as sio
import os
import sys
import argparse
from matplotlib import ticker
from tqdm import tqdm
import torch
import numpy as np
import librosa
import soundfile as sf
import matplotlib.pyplot as plt
from dvae.learning_algo import LearningAlgorithm
from dvae.learning_algo_ss import LearningAlgorithm_ss
from dvae.utils.eval_metric import compute_median, EvalMetrics
from dvae.utils.random_seeders import set_random_seeds
set_random_seeds(666)
class Options:
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.opt = None
    def _initial(self):
        # Basic config file
        self.parser.add_argument('--ss', action='store_true', help='schedule sampling')
        self.parser.add_argument('--cfg', type=str, default=None, help='config path')
        self.parser.add_argument('--saved_dict', type=str, default=None, help='trained model dict')
        self.parser.add_argument('--date', type=str, default=None, help='date and time when save training')
        # Dataset
        self.parser.add_argument('--test_dir', type=str, default='./data/clean_speech/wsj0_si_et_05', help='test dataset')
        # Restuls directory
        self.parser.add_argument('--ret_dir', type=str, default='./data/tmp', help='tmp dir for audio reconstruction')
    def get_params(self):
        self._initial()
        self.opt = self.parser.parse_args()
        params = vars(self.opt)
        return params
params = Options().get_params()
if params['ss']:
    learning_algo = LearningAlgorithm_ss(params=params)
else:
    learning_algo = LearningAlgorithm(params=params)
learning_algo.build_model()
dvae = learning_algo.model
dvae.load_state_dict(torch.load(params['saved_dict'], map_location='cpu'))
eval_metrics = EvalMetrics(metric='all')
dvae.eval()
cfg = learning_algo.cfg
print('Total params: %.2fM' % (sum(p.numel() for p in dvae.parameters()) / 1000000.0))
# Load configs
data_path = cfg.get('User', 'data_path')
sequence_len = cfg.getint('DataFrame', 'sequence_len')
dataset_name = cfg.get('DataFrame', 'dataset_name')
saved_root = cfg.get('User', 'saved_root')
if saved_root == "./saved_model":
    saved_root = "./eval_model"
z_dim = cfg.getint('Network','z_dim')
tag = cfg.get('Network', 'tag')
date = '2022-03-' + params["date"]
filename = "{}_{}_{}_z_dim={}".format(dataset_name, date, tag, z_dim)
save_dir = os.path.join(saved_root, filename) + '/'
rat_data = sio.loadmat(data_path)
## load trial information
idx_split = rat_data["trial"][0]
## load spike data
spike_by_neuron_use = rat_data["spikes"]
## load locations
locations_vec = rat_data["loc"][0]
u = np.array(
    np.array_split(
        np.hstack((locations_vec.reshape(-1, 1), np.zeros((locations_vec.shape[0], 2)))), idx_split[1:-1],
        axis=0
    )
)
x_all = np.array(np.array_split(spike_by_neuron_use, idx_split[1:-1], axis=0))
trial_ls = [len(x) for x in x_all]
num_trial = len(x_all)
for ii in range(len(u)):
    u[ii][:, int(ii % 2) + 1] = 1
# add zero samples by sequence length, will be remove when plotting
max_seq_len = np.max([len(trial) for trial in x_all])  # 351
min_seq_len = np.min([len(trial) for trial in x_all])  # 70
temp = torch.zeros((len(x_all), sequence_len, x_all[0].shape[1]))
for i, x in enumerate(x_all):
    sample = torch.FloatTensor(x)
    if sequence_len <= len(sample):
        sample = sample[:sequence_len]
    elif sequence_len > len(sample):
        s_len, x_dim = sample.shape
        zeros = torch.zeros(sequence_len - s_len, x_dim)
        sample = torch.cat([sample, zeros], 0)
    # if sequence_len <= min_seq_len:
    #     sample = sample[:sequence_len]
    # elif sequence_len >= max_seq_len:
    #     s_len, x_dim = sample.shape
    #     zeros = torch.zeros(sequence_len - s_len, x_dim)
    #     sample = torch.cat([sample, zeros], 0)
    #     assert sample.shape[0] == max_seq_len
    temp[i] = sample
x_all = temp
x_all = x_all.permute(1, 0, 2)
with torch.no_grad():
    outputs = dvae.inference(x_all)
    _, z_mean, _ = outputs
    z_mean = z_mean.permute(1, 0, 2).reshape(-1, 2).numpy()
def get_tc_rd(y, hd, hd_bins):  # compute the empirical tuning curve of the data
    tuning_curve = np.zeros((len(hd_bins) - 1, y.shape[1]))
    for ii in range(len(hd_bins) - 1):
        data_pos = (hd >= hd_bins[ii]) * (hd <= hd_bins[ii + 1])
        tuning_curve[ii, :] = y[data_pos, :].mean(axis=0)
    return tuning_curve
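# Minimal illustration of get_tc_rd (hypothetical numbers): with
# hd_bins = [0.0, 0.8, 1.6], samples whose location hd falls in [0.0, 0.8]
# are averaged into row 0 of the returned tuning curve and samples in
# [0.8, 1.6] into row 1, with one column per latent dimension of y.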
## posterior mean
# We need the direction information for hue
# and the location information for shade,
# so we restore u_all, which should only be used
# for these two purposes from now on.
temp = []
ind = 0
for ii in range(num_trial):
    length = min(trial_ls[ii], sequence_len)
    z_m = z_mean[ind:ind+length]
    temp.append(z_m)
    ind = ind + sequence_len
z_mean = np.concatenate(temp)
locations_vec = rat_data['loc'][0]
u_all = np.array(
    np.array_split(np.hstack((locations_vec.reshape(-1, 1), np.zeros((locations_vec.shape[0], 2)))), idx_split[1:-1],
                   axis=0))
temp = []
for u in u_all:
    temp.append(u[:sequence_len])
u_all = temp
for ii in range(len(u_all)):
    u_all[ii][:, int(ii % 2) + 1] = 1
ll = 11
hd_bins = np.linspace(0, 1.6, ll)
select = np.concatenate(u_all)[:, 1] == 1
print(z_mean.shape)
# print(u_all.shape)
tc1 = get_tc_rd(z_mean[select], np.concatenate(u_all)[select, 0], hd_bins)
# plt.plot(np.concatenate(u_all)[select, 0], color='r')
select = np.concatenate(u_all)[:, 2] == 1
tc2 = get_tc_rd(z_mean[select], np.concatenate(u_all)[select, 0], hd_bins)
# plt.plot(np.concatenate(u_all)[select, 0], color='b')
dis_mat = np.zeros((len(tc1), len(tc2)))
for jj in range(len(tc1)):
    dis_mat[jj] = np.sqrt(np.square(tc1[jj] - tc2).sum(axis=-1))
ll = 5000
fig = plt.figure(figsize=(5.5, 4))
ax = plt.subplot(111)
# fig.add_subplot(111, projection='3d')
fsz = 14
## learn locations
select = np.concatenate(u_all)[:ll, 1] == 1
im = ax.scatter(
    z_mean[:ll][select][:, 0],
    z_mean[:ll][select][:, 1],
    s=1,
    c=np.concatenate(u_all)[:ll][select, 0],
    cmap="Reds",
    vmin=0,
    vmax=1.6,
)
ax.plot(tc1[:, 0], tc1[:, 1], c="black")
cbar = plt.colorbar(im)
cbar.ax.tick_params(labelsize=14)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
## learn locations
select = np.concatenate(u_all)[:ll][:, 1] == 0
im = ax.scatter(
    z_mean[:ll][select][:, 0],
    z_mean[:ll][select][:, 1],
    s=1,
    c=np.concatenate(u_all)[:ll][select, 0],
    cmap="Blues",
    vmin=0,
    vmax=1.6,
)
ax.plot(tc2[:, 0], tc2[:, 1], c="black")
cbar = plt.colorbar(im)
cbar.ax.tick_params(labelsize=14)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
ax.set_xlabel("Latent 1", fontsize=fsz)
ax.set_ylabel("Latent 2", fontsize=fsz)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.setp(ax.get_xticklabels(), fontsize=fsz)
plt.setp(ax.get_yticklabels(), fontsize=fsz)
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=4, min_n_ticks=4, prune=None))
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=4, min_n_ticks=4, prune=None))
plt.savefig(save_dir + "z")
plt.show()
 | 
| 
	the-stack_106_32336 | 
	"""
Given two words A and B, find the minimum number of steps required to convert A to B. (each operation is counted as 1 step.)
You have the following 3 operations permitted on a word:
Insert a character
Delete a character
Replace a character
Example :
edit distance between
"Anshuman" and "Antihuman" is 2.
Operation 1: Replace s with t.
Operation 2: Insert i.
"""
class Solution:
    # @param A : string
    # @param B : string
    # @return an integer
    def minDistance(self, A, B):
        n = len(A)
        m = len(B)
        dp = [[0 for x in xrange(m + 1)] for x in xrange(n + 1)]
        for i in xrange(n + 1):
            for j in xrange(m + 1):
                if i == 0:
                    dp[i][j] = j
                elif j == 0:
                    dp[i][j] = i
                elif A[i - 1] == B[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1]
                else:
                    dp[i][j] = min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1]) + 1
        return dp[n][m]
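# A minimal usage sketch (illustrative, not part of the original solution; the
# solution above uses xrange, so it targets Python 2). It reproduces the example
# from the problem statement: "Anshuman" -> "Antihuman" takes 2 operations.
if __name__ == "__main__":
    print(Solution().minDistance("Anshuman", "Antihuman"))  # expected: 2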
 | 
| 
	the-stack_106_32337 | 
import io
import asyncio
import random
import datetime
import logging
import traceback
import discord
from discord import errors
from discord.ext import commands
import config
from utils.embed import Embed
class general(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.logger = logging.getLogger(__name__)
    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        if guild.owner.id == 898755879766204416:
            return await guild.leave()
        await self.bot.change_presence(status=discord.Status.online,
                                       activity=discord.Activity(name="짱구야 도움 | 서버: {}".format(len(self.bot.guilds)),
                                                                 type=discord.ActivityType.playing))
        if guild.id in [653083797763522580, 786470326732587008, 608711879858192479]:
            return
        em = discord.Embed(
            title="초대해줘서 고마워요!",
            description="""
짱구봇을 초대주셔서 감사드립니다.
짱구봇은 편한시스템을 가지고 있는 짱구입니다.
도움말은 `짱구야 도움`,
프리픽스는 `짱구야 `,`짱구야`,`ㄱ `,`ㄱ` 입니다.            
"""
        )
        em.set_thumbnail(url=self.bot.user.avatar_url)
        em.set_image(
            url="https://cdn.discordapp.com/attachments/915556934977998879/917754253701951499/c265877614d80026.png")
        try:
            await guild.owner.send(embed=em)
        except errors.HTTPException:  # errors.Forbidden is raised when the bot lacks permission to DM
            # note: catching several exception types requires a tuple, e.g. except (errors.Forbidden, errors.HTTPException)
            ch = self.bot.get_channel((random.choice(guild.channels)).id)
            await ch.send(embed=em)
        em = discord.Embed(
            description=f"{guild.name}({guild.id})에 접속함\n서버수 : {len(self.bot.guilds)}"
        )
        em.timestamp = datetime.datetime.utcnow()
        await self.bot.get_channel(915551578730164234).send(embed=em)
    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        await self.bot.change_presence(status=discord.Status.online,
                                       activity=discord.Activity(name="짱구야 도움 | 서버: {}".format(len(self.bot.guilds)),
                                                                 type=discord.ActivityType.playing))
        em = discord.Embed(
            description=f"{guild.name}({guild.id})에서 나감\n서버수 : {len(self.bot.guilds)}"
        )
        em.timestamp = datetime.datetime.utcnow()
        await self.bot.get_channel(915551578730164234).send(embed=em)
    @commands.Cog.listener()
    async def on_message_edit(self, before, after):
        embed=discord.Embed(title="메시지수정로그", color=0x00FFFF)
        embed.set_footer(text=f"멤버 이름 :{before.author.name} • Message ID: {before.id}")
        embed.timestamp = datetime.datetime.utcnow()
        embed.add_field(name='수정전:', value=before.content , inline=False)
        embed.add_field(name="수정후:", value=after.content , inline=False)
        embed.set_thumbnail(url="https://cdn.discordapp.com/icons/915551354800451616/f27061c35e3f1dc203b3564cd864e99a.webp?size=96")        
        channel = self.bot.get_channel(915555627332435988)
        await channel.send(embed=embed)
    @commands.Cog.listener()
    async def on_message_delete(self, message):
        embed = discord.Embed(title="메시지 삭제로그", color= 0x0000ff)
        embed.add_field(name="**메시지삭제**", value=f"메시지 : {message.content} \n \n 삭제됨")
        embed.set_thumbnail(url="https://cdn.discordapp.com/icons/915551354800451616/f27061c35e3f1dc203b3564cd864e99a.webp?size=96")
        embed.timestamp = datetime.datetime.utcnow()
        embed.colour = (0x000ff)
        dele = self.bot.get_channel(915555627332435988)
        await dele.send(embed=embed)
    
    # Error/staff command log.
    # Note: two listeners cannot share the method name on_command in one cog,
    # so each gets its own method name and is bound to the event explicitly.
    @commands.Cog.listener("on_command")
    async def log_command_staff(self, ctx):
        self.logger.info(f"{ctx.author}({ctx.author.id}) - {ctx.message.content}")
        await self.bot.get_channel(int(config.BotSettings.logging_channel)).send(f"{ctx.author}({ctx.author.id}) - `{ctx.message.content}`")
        await self.bot.get_channel(int(config.BotSettings.stafflog)).send(f"{ctx.author}({ctx.author.id}) - `{ctx.message.content}`")
    # General command log.
    @commands.Cog.listener("on_command")
    async def log_command_general(self, ctx):
        channel = self.bot.get_channel(915555649990053918)
        embed = discord.Embed(
            title ="일반로그",
            description= f"닉네임 : {ctx.author} \n \n 아이디 : {ctx.author.id} \n \n 명령어로그 : {ctx.message.content}",
            color= 0x0000ff
        ).set_thumbnail(url="https://cdn.discordapp.com/icons/915551354800451616/f27061c35e3f1dc203b3564cd864e99a.webp?size=96")        
        embed.timestamp = datetime.datetime.utcnow()
        embed.add_field(name="서버", value=f"{ctx.guild.name} 에서 사용됨")
        await channel.send(embed=embed)
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        ignoredError = (
            commands.CommandNotFound,
            commands.errors.CheckFailure,
            commands.CheckFailure,
        )
        if isinstance(error, ignoredError):
            return
        elif isinstance(error, commands.CommandOnCooldown):
            cooldown = int(error.retry_after)
            hours = cooldown // 3600
            minutes = (cooldown % 3600) // 60
            seconds = cooldown % 60
            time = []
            if not hours == 0:
                time.append(f"{hours}시간")
            if not minutes == 0:
                time.append(f"{minutes}분")
            if not seconds == 0:
                time.append(f"{seconds}초")
            embed = Embed.warn(
                timestamp=ctx.message.created_at,
                description=f"사용하신 명령어는 ``{' '.join(time)}`` 뒤에 사용하실 수 있습니다.",
            )
            Embed.user_footer(embed, ctx)
            return await ctx.send(embed=embed, hidden=True)
        elif isinstance(error, commands.MissingPermissions):
            a = ""
            for p in error.missing_perms:
                if str(p) == "manage_messages":
                    p = "메시지 관리"
                elif str(p) == "kick_members":
                    p = "멤버 추방"
                elif str(p) == "ban_members":
                    p = "멤버 차단"
                elif str(p) == "administrator":
                    p = "관리자"
                elif str(p) == "create_instant_invite":
                    p = "초대링크 생성"
                elif str(p) == "manage_channels":
                    p = "채널 관리"
                elif str(p) == "manage_guild":
                    p = "서버 관리"
                elif str(p) == "add_reactions":
                    p = "메시지 반응 추가"
                elif str(p) == "view_audit_log":
                    p = "감사 로그 보기"
                elif str(p) == "read_messages":
                    p = "메시지 읽기"
                elif str(p) == "send_messages":
                    p = "메시지 보내기"
                elif str(p) == "read_message_history":
                    p = "이전 메시지 읽기"
                elif str(p) == "mute_members":
                    p = "멤버 음소거 시키기"
                elif str(p) == "move_members":
                    p = "멤버 채널 이동시키기"
                elif str(p) == "change_nickname":
                    p = "자기자신의 닉네임 변경하기"
                elif str(p) == "manage_nicknames":
                    p = "다른유저의 닉네임 변경하기"
                elif str(p) == "manage_roles":
                    p = "역활 관리하기"
                elif str(p) == "manage_webhooks":
                    p = "웹훅크 관리하기"
                elif str(p) == "manage_emojis":
                    p = "이모지 관리하기"
                elif str(p) == "use_slash_commands":
                    p = "/ 명령어 사용"
                if p != error.missing_perms[len(error.missing_perms) - 1]:
                    a += f"{p}, "
                else:
                    a += f"{p}"
            embed = Embed.warn(
                timestamp=ctx.message.created_at,
                description=f"당신의 권한이 부족합니다.\n\n> 필요 권한 : {str(a)}",
            )
            Embed.user_footer(embed, ctx)
            return await ctx.send(
                embed=embed,
                hidden=True,
            )
        elif isinstance(error, commands.BotMissingPermissions):
            a = ""
            for p in error.missing_perms:
                if str(p) == "manage_messages":
                    p = "메시지 관리"
                elif str(p) == "kick_members":
                    p = "멤버 추방"
                elif str(p) == "ban_members":
                    p = "멤버 차단"
                elif str(p) == "administrator":
                    p = "관리자"
                elif str(p) == "create_instant_invite":
                    p = "초대링크 생성"
                elif str(p) == "manage_channels":
                    p = "채널 관리"
                elif str(p) == "manage_guild":
                    p = "서버 관리"
                elif str(p) == "add_reactions":
                    p = "메시지 반응 추가"
                elif str(p) == "view_audit_log":
                    p = "감사 로그 보기"
                elif str(p) == "read_messages":
                    p = "메시지 읽기"
                elif str(p) == "send_messages":
                    p = "메시지 보내기"
                elif str(p) == "read_message_history":
                    p = "이전 메시지 읽기"
                elif str(p) == "mute_members":
                    p = "멤버 음소거 시키기"
                elif str(p) == "move_members":
                    p = "멤버 채널 이동시키기"
                elif str(p) == "change_nickname":
                    p = "자기자신의 닉네임 변경하기"
                elif str(p) == "manage_nicknames":
                    p = "다른유저의 닉네임 변경하기"
                elif str(p) == "manage_roles":
                    p = "역활 관리하기"
                elif str(p) == "manage_webhooks":
                    p = "웹훅크 관리하기"
                elif str(p) == "manage_emojis":
                    p = "이모지 관리하기"
                elif str(p) == "use_slash_commands":
                    p = "/ 명령어 사용"
                if p != error.missing_perms[len(error.missing_perms) - 1]:
                    a += f"{p}, "
                else:
                    a += f"{p}"
            embed = Embed.warn(
                timestamp=ctx.message.created_at,
                description=f"봇의 권한이 부족합니다.\n\n> 필요 권한 : {str(a)}",
            )
            Embed.user_footer(embed, ctx)
            return await ctx.send(
                embed=embed,
            )
        elif isinstance(error, commands.MissingRequiredArgument):
            embed = Embed.warn(
                timestamp=ctx.message.created_at, description="필요한 값이 존재하지 않습니다."
            )
            Embed.user_footer(embed, ctx)
            return await ctx.send(
                embed=embed,
                hidden=True,
            )
        elif isinstance(error, commands.MemberNotFound):
            embed = Embed.warn(timestamp=ctx.message.created_at, description="존재하지 않는 멤버입니다.")
            Embed.user_footer(embed, ctx)
            return await ctx.send(
                embed=embed,
                hidden=True,
            )
        else:
            tb = traceback.format_exception(type(error), error, error.__traceback__)
            err = [line.rstrip() for line in tb]
            errstr = "\n".join(err)
            # f = open(f"logs/{code}.log", "a", encoding="utf-8")
            # f.write(f"{ctx.author}({ctx.author.id}) -{ctx.message.content}\n에러 발생 일시: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            # f.write("\n\n")
            # f.write(errstr)
            # f.close()
            embed = Embed.error(
                timestamp=ctx.message.created_at, description=f"```py\n{errstr}\n```"
            )
            Embed.user_footer(embed, ctx)
            print(errstr)
            return await ctx.send(
                embed=embed,
            )
def setup(bot):
    bot.add_cog(general(bot))
 | 
| 
	the-stack_106_32338 | 
	# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    fix_xml_ampersands,
    float_or_none,
    xpath_with_ns,
    xpath_text,
)
class KarriereVideosIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?karrierevideos\.at(?:/[^/]+)+/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.karrierevideos.at/berufsvideos/mittlere-hoehere-schulen/altenpflegerin',
        'info_dict': {
            'id': '32c91',
            'ext': 'flv',
            'title': 'AltenpflegerIn',
            'description': 'md5:dbadd1259fde2159a9b28667cb664ae2',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # broken ampersands
        'url': 'http://www.karrierevideos.at/orientierung/vaeterkarenz-und-neue-chancen-fuer-muetter-baby-was-nun',
        'info_dict': {
            'id': '5sniu',
            'ext': 'flv',
            'title': 'Väterkarenz und neue Chancen für Mütter - "Baby - was nun?"',
            'description': 'md5:97092c6ad1fd7d38e9d6a5fdeb2bcc33',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = (self._html_search_meta('title', webpage, default=None) or
                 self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
        video_id = self._search_regex(
            r'/config/video/(.+?)\.xml', webpage, 'video id')
        # Server returns malformed headers
        # Force Accept-Encoding: * to prevent gzipped results
        playlist = self._download_xml(
            'http://www.karrierevideos.at/player-playlist.xml.php?p=%s' % video_id,
            video_id, transform_source=fix_xml_ampersands,
            headers={'Accept-Encoding': '*'})
        NS_MAP = {
            'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'
        }
        def ns(path):
            return xpath_with_ns(path, NS_MAP)
        item = playlist.find('./tracklist/item')
        video_file = xpath_text(
            item, ns('./jwplayer:file'), 'video url', fatal=True)
        streamer = xpath_text(
            item, ns('./jwplayer:streamer'), 'streamer', fatal=True)
        uploader = xpath_text(
            item, ns('./jwplayer:author'), 'uploader')
        duration = float_or_none(
            xpath_text(item, ns('./jwplayer:duration'), 'duration'))
        description = self._html_search_regex(
            r'(?s)<div class="leadtext">(.+?)</div>',
            webpage, 'description')
        thumbnail = self._html_search_meta(
            'thumbnail', webpage, 'thumbnail')
        if thumbnail:
            thumbnail = compat_urlparse.urljoin(url, thumbnail)
        return {
            'id': video_id,
            'url': streamer.replace('rtmpt', 'rtmp'),
            'play_path': 'mp4:%s' % video_file,
            'ext': 'flv',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
        }
 | 
| 
	the-stack_106_32339 | 
	#  coding: utf-8
#  ----------------------------------------------------------------
#  <copyright company="Aspose" file="CreateFolderRequest.py">
#    Copyright (c) 2018-2019 Aspose Pty Ltd. All rights reserved.
#  </copyright>
#  <summary>
#    Permission is hereby granted, free of charge, to any person obtaining a
#   copy  of this software and associated documentation files (the "Software"),
#   to deal  in the Software without restriction, including without limitation
#   the rights  to use, copy, modify, merge, publish, distribute, sublicense,
#   and/or sell  copies of the Software, and to permit persons to whom the
#   Software is  furnished to do so, subject to the following conditions:
#
#   The above copyright notice and this permission notice shall be included in
#   all  copies or substantial portions of the Software.
#
#   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#   FROM,  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#   DEALINGS IN THE SOFTWARE.
#  </summary>
#  -----------------------------------------------------------------
from asposecadcloud.models.requests.cad_request import CadRequest
from asposecadcloud.models.requests.http_request import HttpRequest
class CreateFolderRequest(CadRequest):
    """
    Request model for create_folder operation.
    Initializes a new instance.
    :param path Folder path to create e.g. 'folder_1/folder_2/'
    :param storage_name Storage name
    """
    def __init__(self, path, storage_name=None):
        CadRequest.__init__(self)
        self.path = path
        self.storage_name = storage_name
    def to_http_info(self, config):
        """
        Prepares initial info for HTTP request
        :param config: CAD API configuration
        :type: asposecadcloud.Configuration
        :return: http_request configured http request
        :rtype: Configuration.models.requests.HttpRequest
        """
        # verify the required parameter 'path' is set
        if self.path is None:
            raise ValueError("Missing the required parameter `path` when calling `create_folder`")
        collection_formats = {}
        path = '/cad/storage/folder/{path}'
        path_params = {}
        if self.path is not None:
            path_params[self._lowercase_first_letter('path')] = self.path
        query_params = []
        if self._lowercase_first_letter('storageName') in path:
            path = path.replace('{' + self._lowercase_first_letter('storageName') + '}', self.storage_name if self.storage_name is not None else '')
        else:
            if self.storage_name is not None:
                query_params.append((self._lowercase_first_letter('storageName'), self.storage_name))
        header_params = {}
        form_params = []
        local_var_files = []
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self._select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = 'multipart/form-data' if form_params or local_var_files else self._select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = ['JWT']
        return HttpRequest(path, path_params, query_params, header_params, form_params, body_params, local_var_files,
                           collection_formats, auth_settings)
 | 
| 
	the-stack_106_32341 | 
	from .base_atari_env import BaseAtariEnv, base_env_wrapper_fn, parallel_wrapper_fn
import os
def raw_env(**kwargs):
    mode = 33
    num_players = 4
    return BaseAtariEnv(game="pong", num_players=num_players, mode_num=mode, env_name=os.path.basename(__file__)[:-3], **kwargs)
env = base_env_wrapper_fn(raw_env)
parallel_env = parallel_wrapper_fn(env)
 | 
| 
	the-stack_106_32342 | 
	"""
Utility functions to sync and work with Open Humans data in a local filesystem.
"""
import csv
import hashlib
import logging
import os
import re
import arrow
from humanfriendly import format_size, parse_size
import requests
MAX_FILE_DEFAULT = parse_size('128m')
def strip_zip_suffix(filename):
    if filename.endswith('.gz'):
        return filename[:-3]
    elif filename.endswith('.bz2'):
        return filename[:-4]
    else:
        return filename
def guess_tags(filename):
    tags = []
    stripped_filename = strip_zip_suffix(filename)
    if stripped_filename.endswith('.vcf'):
        tags.append('vcf')
    if stripped_filename.endswith('.json'):
        tags.append('json')
    if stripped_filename.endswith('.csv'):
        tags.append('csv')
    return tags
def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT):
    """
    Collate local file info as preparation for Open Humans upload.
    Note: Files with filesize > max_bytes are not included in returned info.
    """
    file_data = {}
    logging.info('Characterizing files in {}'.format(filedir))
    for filename in os.listdir(filedir):
        filepath = os.path.join(filedir, filename)
        file_stats = os.stat(filepath)
        creation_date = arrow.get(file_stats.st_ctime).isoformat()
        file_size = file_stats.st_size
        if file_size <= max_bytes:
            file_md5 = hashlib.md5()
            with open(filepath, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    file_md5.update(chunk)
            md5 = file_md5.hexdigest()
            file_data[filename] = {
                'tags': guess_tags(filename),
                'description': '',
                'md5': md5,
                'creation_date': creation_date,
            }
    return file_data
def validate_metadata(target_dir, metadata):
    """
    Check that the files listed in metadata exactly match files in target dir.
    """
    file_list = os.listdir(target_dir)
    for filename in file_list:
        if filename not in metadata:
            return False
    for filename in metadata:
        if filename not in file_list:
            return False
    return True
def load_metadata_csv_single_user(csv_in, header, tags_idx):
    """
    Return the metadata as requested for a single user.
    """
    metadata = {}
    for row in csv_in:
        if row[0] == 'None' and all(x == 'NA' for x in row[1:]):
            break
        metadata[row[0]] = {
            header[i]: row[i] for i in range(1, len(header)) if
            i != tags_idx
        }
        metadata[row[0]]['tags'] = [t.strip() for t in
                                    row[tags_idx].split(',') if
                                    t.strip()]
    return metadata
def load_metadata_csv_multi_user(csv_in, header, tags_idx):
    """
    Return the metadata as requested for multiple users (keyed by project member ID).
    """
    metadata = {}
    for row in csv_in:
        if row[0] not in metadata:
            metadata[row[0]] = {}
        if row[1] == 'None' and all([x == 'NA' for x in row[2:]]):
            continue
        metadata[row[0]][row[1]] = {
            header[i]: row[i] for i in range(2, len(header)) if
            i != tags_idx
        }
        metadata[row[0]][row[1]]['tags'] = [t.strip() for t in
                                            row[tags_idx].split(',') if
                                            t.strip()]
    return metadata
def load_metadata_csv(input_filepath):
    """
    Return dict of metadata.
    Format is either dict (filenames are keys) or dict-of-dicts (project member
    IDs as top level keys, then filenames as keys).
    """
    with open(input_filepath) as f:
        csv_in = csv.reader(f)
        header = next(csv_in)
        try:
            tags_idx = header.index('tags')
        except ValueError:
            tags_idx = None
        if header[0] == 'project_member_id':
            metadata = load_metadata_csv_multi_user(csv_in, header, tags_idx)
        elif header[0] == 'filename':
            metadata = load_metadata_csv_single_user(csv_in, header, tags_idx)
    return metadata
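# Illustrative example (values are made up): for a single-user metadata CSV
# such as the one mk_metadata_csv() below produces, e.g.
#
#   filename,tags,description,md5,creation_date
#   genome.vcf,vcf,my genome,d41d8cd98f00b204e9800998ecf8427e,2020-01-01T00:00:00+00:00
#
# load_metadata_csv() returns
#
#   {'genome.vcf': {'tags': ['vcf'], 'description': 'my genome',
#                   'md5': 'd41d8cd98f00b204e9800998ecf8427e',
#                   'creation_date': '2020-01-01T00:00:00+00:00'}}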
def mk_metadata_csv(filedir, outputfilepath, max_bytes=MAX_FILE_DEFAULT):
    with open(outputfilepath, 'w') as outputfile:
        csv_out = csv.writer(outputfile)
        subdirs = [os.path.join(filedir, i) for i in os.listdir(filedir) if
                   os.path.isdir(os.path.join(filedir, i))]
        if subdirs:
            logging.info('Making metadata for subdirs of {}'.format(filedir))
            if not all([re.match('^[0-9]{8}$', os.path.basename(d))
                        for d in subdirs]):
                raise ValueError("Subdirs not all project member ID format!")
            csv_out.writerow(['project_member_id', 'filename', 'tags',
                              'description', 'md5', 'creation_date'])
            for subdir in subdirs:
                file_info = characterize_local_files(
                    filedir=subdir, max_bytes=max_bytes)
                proj_member_id = os.path.basename(subdir)
                if not file_info:
                    csv_out.writerow([proj_member_id, 'None',
                                      'NA', 'NA', 'NA', 'NA'])
                    continue
                for filename in file_info:
                    csv_out.writerow([proj_member_id,
                                      filename,
                                      ', '.join(file_info[filename]['tags']),
                                      file_info[filename]['description'],
                                      file_info[filename]['md5'],
                                      file_info[filename]['creation_date'],
                                      ])
        else:
            csv_out.writerow(['filename', 'tags',
                              'description', 'md5', 'creation_date'])
            file_info = characterize_local_files(
                filedir=filedir, max_bytes=max_bytes)
            for filename in file_info:
                csv_out.writerow([filename,
                                  ', '.join(file_info[filename]['tags']),
                                  file_info[filename]['description'],
                                  file_info[filename]['md5'],
                                  file_info[filename]['creation_date'],
                                  ])
def download_file(download_url, target_filepath, max_bytes=MAX_FILE_DEFAULT):
    """
    Download a file.
    """
    response = requests.get(download_url, stream=True)
    size = int(response.headers['Content-Length'])
    if size > max_bytes:
        logging.info('Skipping {}, {} > {}'.format(
            target_filepath, format_size(size), format_size(max_bytes)))
        return
    logging.info('Downloading {} ({})'.format(
        target_filepath, format_size(size)))
    if os.path.exists(target_filepath):
        stat = os.stat(target_filepath)
        if stat.st_size == size:
            logging.info('Skipping, file exists and is the right '
                         'size: {}'.format(target_filepath))
            return
        else:
            logging.info('Replacing, file exists and is the wrong '
                         'size: {}'.format(target_filepath))
            os.remove(target_filepath)
    with open(target_filepath, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                f.write(chunk)
    logging.info('Download complete: {}'.format(target_filepath))
def read_id_list(filepath):
    if not filepath:
        return None
    id_list = []
    with open(filepath) as f:
        for line in f:
            line = line.rstrip()
            if not re.match('^[0-9]{8}$', line):
                raise ValueError('Each line in whitelist or blacklist is expected '
                                 'to contain an eight digit ID, and nothing else.')
            else:
                id_list.append(line)
    return id_list
 | 
| 
	the-stack_106_32344 | 
	# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The code is based on HigherHRNet-Human-Pose-Estimation.
# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation)
# Modified by Zigang Geng ([email protected]).
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yacs.config import CfgNode as CN
_C = CN()
_C.OUTPUT_DIR = ''
_C.NAME = 'regression'
_C.LOG_DIR = ''
_C.DATA_DIR = ''
_C.GPUS = (0,)
_C.WORKERS = 4
_C.PRINT_FREQ = 20
_C.AUTO_RESUME = False
_C.PIN_MEMORY = True
_C.RANK = 0
_C.VERBOSE = True
_C.DIST_BACKEND = 'nccl'
_C.MULTIPROCESSING_DISTRIBUTED = True
# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True #This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
# common params for NETWORK
_C.MODEL = CN()
_C.MODEL.NAME = 'hrnet_dekr'
_C.MODEL.INIT_WEIGHTS = True
_C.MODEL.PRETRAINED = ''
_C.MODEL.NUM_JOINTS = 17
_C.MODEL.SPEC = CN(new_allowed=True)
_C.LOSS = CN()
_C.LOSS.WITH_HEATMAPS_LOSS = True
_C.LOSS.HEATMAPS_LOSS_FACTOR = 1.0
_C.LOSS.WITH_OFFSETS_LOSS = True
_C.LOSS.OFFSETS_LOSS_FACTOR = 1.0
# DATASET related params
_C.DATASET = CN()
_C.DATASET.ROOT = ''
_C.DATASET.DATASET = 'coco_kpt'
_C.DATASET.DATASET_TEST = ''
_C.DATASET.NUM_JOINTS = 17
_C.DATASET.MAX_NUM_PEOPLE = 30
_C.DATASET.TRAIN = 'train2017'
_C.DATASET.TEST = 'val2017'
_C.DATASET.DATA_FORMAT = 'jpg'
# training data augmentation
_C.DATASET.MAX_ROTATION = 30
_C.DATASET.MIN_SCALE = 0.75
_C.DATASET.MAX_SCALE = 1.25
_C.DATASET.SCALE_TYPE = 'short'
_C.DATASET.MAX_TRANSLATE = 40
_C.DATASET.INPUT_SIZE = 512
_C.DATASET.OUTPUT_SIZE = 128
_C.DATASET.FLIP = 0.5
# heatmap generator
_C.DATASET.SIGMA = 2.0
_C.DATASET.CENTER_SIGMA = 4.0
_C.DATASET.BG_WEIGHT = 0.1
# offset generator
_C.DATASET.OFFSET_RADIUS = 4
# train
_C.TRAIN = CN()
_C.TRAIN.LR_FACTOR = 0.1
_C.TRAIN.LR_STEP = [90, 110]
_C.TRAIN.LR = 0.001
_C.TRAIN.OPTIMIZER = 'adam'
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.WD = 0.0001
_C.TRAIN.NESTEROV = False
_C.TRAIN.GAMMA1 = 0.99
_C.TRAIN.GAMMA2 = 0.0
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 140
_C.TRAIN.RESUME = False
_C.TRAIN.CHECKPOINT = ''
_C.TRAIN.IMAGES_PER_GPU = 32
_C.TRAIN.SHUFFLE = True
# testing
_C.TEST = CN()
# size of images for each device
_C.TEST.IMAGES_PER_GPU = 32
_C.TEST.FLIP_TEST = True
_C.TEST.SCALE_FACTOR = [1]
_C.TEST.MODEL_FILE = ''
_C.TEST.POOL_THRESHOLD1 = 300
_C.TEST.POOL_THRESHOLD2 = 200
_C.TEST.NMS_THRE = 0.15
_C.TEST.NMS_NUM_THRE = 10
_C.TEST.KEYPOINT_THRESHOLD = 0.01
_C.TEST.DECREASE = 1.0
_C.TEST.MATCH_HMP = False
_C.TEST.ADJUST_THRESHOLD = 0.05
_C.TEST.MAX_ABSORB_DISTANCE = 75
_C.TEST.GUASSIAN_KERNEL = 6
_C.TEST.LOG_PROGRESS = True
_C.RESCORE = CN()
_C.RESCORE.VALID = True
_C.RESCORE.GET_DATA = False
_C.RESCORE.END_EPOCH = 20
_C.RESCORE.LR = 0.001
_C.RESCORE.HIDDEN_LAYER = 256
_C.RESCORE.BATCHSIZE = 1024
_C.RESCORE.MODEL_FILE = 'model/rescore/final_rescore_coco_kpt.pth'
_C.RESCORE.DATA_FILE = 'data/rescore_data/rescore_dataset_train_coco_kpt'
def update_config(cfg, args):
    #Make this CfgNode and all of its children mutable.
    cfg.defrost()
    
    ## load values from a file and list
    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)
    if not os.path.exists(cfg.DATASET.ROOT):
        cfg.DATASET.ROOT = os.path.join(
            cfg.DATA_DIR, cfg.DATASET.ROOT
        )
    cfg.MODEL.PRETRAINED = os.path.join(
        cfg.DATA_DIR, cfg.MODEL.PRETRAINED
    )
    if cfg.TEST.MODEL_FILE:
        cfg.TEST.MODEL_FILE = os.path.join(
            cfg.DATA_DIR, cfg.TEST.MODEL_FILE
        )
    cfg.freeze()
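
# Illustrative sketch (an assumption, not part of the original config module):
# update_config is normally driven by an argparse namespace carrying a --cfg
# path plus KEY VALUE overrides in `opts`, roughly like this:
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--cfg', required=True, type=str)
#   parser.add_argument('opts', nargs=argparse.REMAINDER, default=None)
#   args = parser.parse_args(['--cfg', 'experiments/coco.yaml',
#                             'TRAIN.LR', '0.0005'])
#   update_config(_C, args)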
if __name__ == '__main__':
    import sys
    with open(sys.argv[1], 'w') as f:
        print(_C, file=f)
 | 
| 
	the-stack_106_32345 | 
	import enum
import subprocess
import os
import tempfile
try:
    from importlib.resources import path
except ImportError:
    # use backport for python < 3.7
    from importlib_resources import path
__all__ = ["subroutinize", "OutputFormat", "Error"]
class OutputFormat(enum.Enum):
    CFF = "cff"
    CFF2 = "cff2"
try:
    from ._version import version as __version__
except ImportError:
    __version__ = "0.0.0+unknown"
class Error(Exception):
    pass
def _run_embedded_tx(*args, **kwargs):
    """Run the embedded tx executable with the list of positional arguments.
    Return a subprocess.CompletedProcess object with the following attributes:
    args, returncode, stdout, stderr.
    All keyword arguments are forwarded to subprocess.run function.
    """
    with path(__name__, "tx") as tx_cli:
        return subprocess.run([tx_cli] + list(args), **kwargs)
def subroutinize(fontdata: bytes, output_format=OutputFormat.CFF2) -> bytes:
    """Run subroutinizer on the input font data and return processed output."""
    if not isinstance(fontdata, bytes):
        raise TypeError(f"expected bytes, found {type(fontdata).__name__}")
    output_format = OutputFormat(output_format)
    # We can't read from stdin because of this issue:
    # https://github.com/adobe-type-tools/afdko/issues/937
    with tempfile.NamedTemporaryFile(prefix="tx-", delete=False) as tmp:
        tmp.write(fontdata)
    try:
        # write to stdout and capture output
        result = _run_embedded_tx(
            f"-{output_format.value}",
            "+S",
            "+b",
            tmp.name,
            capture_output=True,
            check=True,
        )
    except subprocess.CalledProcessError as e:
        raise Error(e.stderr.decode())
    finally:
        os.remove(tmp.name)
    return result.stdout
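

# Minimal usage sketch (an assumption, not part of the original module): read a
# font file containing a CFF table, subroutinize it, and write the CFF2 output
# back out. The file names are placeholders.
if __name__ == "__main__":
    with open("input.otf", "rb") as infile:
        processed = subroutinize(infile.read(), output_format=OutputFormat.CFF2)
    with open("output.cff2", "wb") as outfile:
        outfile.write(processed)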
 | 
| 
	the-stack_106_32347 | 
	
"""The data layer used during training to train a Fast R-CNN network.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import torch
from model.utils.config import cfg
from roi_data_layer.minibatch import get_minibatch
from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
import numpy as np
import random
import time
import pdb
class roibatchLoader(data.Dataset):
  def __init__(self, roidb, ratio_list, ratio_index, batch_size, num_classes, training=True, normalize=None):
    self._roidb = roidb
    self._num_classes = num_classes
    # we make the height/width of the image consistent with trim_height, trim_width
    self.trim_height = cfg.TRAIN.TRIM_HEIGHT
    self.trim_width = cfg.TRAIN.TRIM_WIDTH
    self.max_num_box = cfg.MAX_NUM_GT_BOXES
    self.training = training
    self.normalize = normalize
    self.ratio_list = ratio_list
    self.ratio_index = ratio_index
    self.batch_size = batch_size
    self.data_size = len(self.ratio_list)
    # given the ratio_list, we want to make the ratio same for each batch.
    self.ratio_list_batch = torch.Tensor(self.data_size).zero_()
    num_batch = int(np.ceil(len(ratio_index) / batch_size))
    for i in range(num_batch):
        left_idx = i*batch_size
        right_idx = min((i+1)*batch_size-1, self.data_size-1)
        if ratio_list[right_idx] < 1:
            # for ratio < 1, we preserve the leftmost in each batch.
            target_ratio = ratio_list[left_idx]
        elif ratio_list[left_idx] > 1:
            # for ratio > 1, we preserve the rightmost in each batch.
            target_ratio = ratio_list[right_idx]
        else:
            # for ratio cross 1, we make it to be 1.
            target_ratio = 1
        # self.ratio_list_batch[left_idx:(right_idx+1)] = torch.tensor(target_ratio.astype(np.float64)) # trainset ratio list ,each batch is same number
        target_ratio = np.float64(target_ratio)
        self.ratio_list_batch[left_idx:(right_idx+1)] = torch.tensor(target_ratio.astype(np.float64))
  def __getitem__(self, index):
    if self.training:
        index_ratio = int(self.ratio_index[index])
    else:
        index_ratio = index
    # get the anchor index for current sample index
    # here we set the anchor index to the last one
    # sample in this group
    minibatch_db = [self._roidb[index_ratio]]
    blobs = get_minibatch(minibatch_db, self._num_classes)
    data = torch.from_numpy(blobs['data'])
    im_info = torch.from_numpy(blobs['im_info'])
    # we need to random shuffle the bounding box.
    data_height, data_width = data.size(1), data.size(2)
    if self.training:
        np.random.shuffle(blobs['gt_boxes'])
        gt_boxes = torch.from_numpy(blobs['gt_boxes'])
        ########################################################
        # padding the input image to fixed size for each group #
        ########################################################
        # NOTE1: need to cope with the case where a group cover both conditions. (done)
        # NOTE2: need to consider the situation for the tail samples. (no worry)
        # NOTE3: need to implement a parallel data loader. (no worry)
        # get the index range
        # if the image need to crop, crop to the target size.
        ratio = self.ratio_list_batch[index]
        if self._roidb[index_ratio]['need_crop']:
            if ratio < 1:
                # this means that data_width << data_height, we need to crop the
                # data_height
                min_y = int(torch.min(gt_boxes[:,1]))
                max_y = int(torch.max(gt_boxes[:,3]))
                trim_size = int(np.floor(data_width / ratio))
                if trim_size > data_height:
                    trim_size = data_height                
                box_region = max_y - min_y + 1
                if min_y == 0:
                    y_s = 0
                else:
                    if (box_region-trim_size) < 0:
                        y_s_min = max(max_y-trim_size, 0)
                        y_s_max = min(min_y, data_height-trim_size)
                        if y_s_min == y_s_max:
                            y_s = y_s_min
                        else:
                            y_s = np.random.choice(range(y_s_min, y_s_max))
                    else:
                        y_s_add = int((box_region-trim_size)/2)
                        if y_s_add == 0:
                            y_s = min_y
                        else:
                            y_s = np.random.choice(range(min_y, min_y+y_s_add))
                # crop the image
                data = data[:, y_s:(y_s + trim_size), :, :]
                # shift y coordinate of gt_boxes
                gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)
                # update gt bounding box according to the crop
                gt_boxes[:, 1].clamp_(0, trim_size - 1)
                gt_boxes[:, 3].clamp_(0, trim_size - 1)
            else:
                # this means that data_width >> data_height, we need to crop the
                # data_width
                min_x = int(torch.min(gt_boxes[:,0]))
                max_x = int(torch.max(gt_boxes[:,2]))
                trim_size = int(np.ceil(data_height * ratio))
                if trim_size > data_width:
                    trim_size = data_width                
                box_region = max_x - min_x + 1
                if min_x == 0:
                    x_s = 0
                else:
                    if (box_region-trim_size) < 0:
                        x_s_min = max(max_x-trim_size, 0)
                        x_s_max = min(min_x, data_width-trim_size)
                        if x_s_min == x_s_max:
                            x_s = x_s_min
                        else:
                            x_s = np.random.choice(range(x_s_min, x_s_max))
                    else:
                        x_s_add = int((box_region-trim_size)/2)
                        if x_s_add == 0:
                            x_s = min_x
                        else:
                            x_s = np.random.choice(range(min_x, min_x+x_s_add))
                # crop the image
                data = data[:, :, x_s:(x_s + trim_size), :]
                # shift x coordinate of gt_boxes
                gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                # update gt bounding box according to the crop
                gt_boxes[:, 0].clamp_(0, trim_size - 1)
                gt_boxes[:, 2].clamp_(0, trim_size - 1)
        # based on the ratio, padding the image.
        if ratio < 1:
            # this means that data_width < data_height
            trim_size = int(np.floor(data_width / ratio))
            padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                             data_width, 3).zero_()
            padding_data[:data_height, :, :] = data[0]
            # update im_info
            im_info[0, 0] = padding_data.size(0)
            # print("height %d %d \n" %(index, anchor_idx))
        elif ratio > 1:
            # this means that data_width > data_height
            # if the image need to crop.
            padding_data = torch.FloatTensor(data_height, \
                                             int(np.ceil(data_height * ratio)), 3).zero_()
            padding_data[:, :data_width, :] = data[0]
            im_info[0, 1] = padding_data.size(1)
        else:
            trim_size = min(data_height, data_width)
            padding_data = torch.FloatTensor(trim_size, trim_size, 3).zero_()
            padding_data = data[0][:trim_size, :trim_size, :]
            # gt_boxes.clamp_(0, trim_size)
            gt_boxes[:, :4].clamp_(0, trim_size)
            im_info[0, 0] = trim_size
            im_info[0, 1] = trim_size
        # check the bounding box:
        not_keep = (gt_boxes[:,0] == gt_boxes[:,2]) | (gt_boxes[:,1] == gt_boxes[:,3])
        keep = torch.nonzero(not_keep == 0).view(-1)
        gt_boxes_padding = torch.FloatTensor(self.max_num_box, gt_boxes.size(1)).zero_()
        if keep.numel() != 0:
            gt_boxes = gt_boxes[keep]
            num_boxes = min(gt_boxes.size(0), self.max_num_box)
            gt_boxes_padding[:num_boxes,:] = gt_boxes[:num_boxes]
        else:
            num_boxes = 0
            # permute trim_data to adapt to downstream processing
        padding_data = padding_data.permute(2, 0, 1).contiguous()
        im_info = im_info.view(3)
        return padding_data, im_info, gt_boxes_padding, num_boxes
    else:
        data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)
        im_info = im_info.view(3)
        gt_boxes = torch.FloatTensor([1,1,1,1,1])
        num_boxes = 0
        return data, im_info, gt_boxes, num_boxes
  def __len__(self):
    return len(self._roidb)
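

# Self-contained sketch (not part of the original loader) of the per-batch
# aspect-ratio rule applied in __init__ above. Assuming ratio_list is sorted
# ascending, a batch whose largest ratio is < 1 keeps its leftmost ratio, a
# batch whose smallest ratio is > 1 keeps its rightmost ratio, and a batch
# that straddles 1 is forced to ratio 1.
def _batch_target_ratios(ratio_list, batch_size):
    targets = []
    for left in range(0, len(ratio_list), batch_size):
        right = min(left + batch_size - 1, len(ratio_list) - 1)
        if ratio_list[right] < 1:
            targets.append(ratio_list[left])
        elif ratio_list[left] > 1:
            targets.append(ratio_list[right])
        else:
            targets.append(1.0)
    return targets

# e.g. _batch_target_ratios([0.5, 0.8, 1.2, 1.6], batch_size=2) -> [0.5, 1.6]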
 | 
| 
	the-stack_106_32348 | 
	""" This strdiff computes differences in two strings
	from etutils.strdiff import strdiff
	A= strdiff(string1, string2, <optional>)
 INPUT:
   string1:        str, first string to compare
   string2:        str, second string to compare
 OPTIONAL
   methodtype:     '' (default: count characters of string1 not present in string2),
                   'ratio' (difflib similarity ratio) or 'exact' (positional comparison)
 OUTPUT
   (out, outstring): difference score and, for methodtype='exact',
                     an aligned difference string
 DESCRIPTION
   Compute differences in strings
 EXAMPLE
   from etutils.strdiff import strdiff
   string1 = 'MeGaaaaaa-image'
   string2 = 'megaimage'
   string1 = 'IGADKYFHARGNYDAA AWOOH'
   string2 = 'KGADKYFHARGNYEAA  W'
   [out, outstring] = strdiff(string1, string2, methodtype='exact')
   print("%d differences\n\n%s" %(out, outstring))
   string1 = 'MeGaaaaaa-image'
   string2 = 'megaimage'
   [out, _] = strdiff(string1, string2)
   string1 = 'MeGa-image'
   string2 = 'megaimage hyphypehoera'
   [out, _] = strdiff(string1, string2)
   [out, _] = strdiff(string2, string1)
 SEE ALSO
   stringmagnet
"""
#print(__doc__)
#--------------------------------------------------------------------------
# Name        : strdiff.py
# Author      : E.Taskesen
# Contact     : [email protected]
# Date        : Nov. 2017
#--------------------------------------------------------------------------
#%% Libraries
import numpy as np
import difflib
#%%
def strdiff(string1, string2, methodtype='', no_match_c=' ', match_c='|'):
	#%% DECLARATIONS
    # Make dictionary to store Parameters
    outdiff=''
    Param = {}
    Param['methodtype'] = methodtype
    Param['no_match_c'] = no_match_c
    Param['match_c'] = match_c
    #%% Lower
    string1=string1.lower()
    string2=string2.lower()
    #%% Similar characters overall
    if Param['methodtype']=='':
        out=np.sum(np.in1d(list(string1), list(string2))==False)
    #%% Compute number of changes
    if Param['methodtype']=='ratio':
        out=difflib.SequenceMatcher(None, string1, string2).ratio()
    
    #%% Exact matching
    if Param['methodtype']=='exact':
        if len(string2) < len(string1):
            string1, string2 = string2, string1
        result = ''
        n_diff = 0
        
        #% char search
        for c1, c2 in zip(string1, string2):
            if c1 == c2:
                result += Param['match_c']
            else:
                result += Param['no_match_c']
                n_diff += 1
    
        delta = len(string2) - len(string1)
        result += delta * Param['no_match_c']
        n_diff += delta
        out=n_diff
    
        #% Make nice word-difference
        outdiff = string1+'\n'+result+'\n'+string2
    
    return(out,outdiff)
 | 
| 
	the-stack_106_32349 | 
	from typing import Any, Dict, Optional, Union
import httpx
from ...client import Client
from ...models.conn_record import ConnRecord
from ...types import UNSET, Response, Unset
def _get_kwargs(
    conn_id: str,
    *,
    client: Client,
    mediation_id: Union[Unset, None, str] = UNSET,
    my_endpoint: Union[Unset, None, str] = UNSET,
    my_label: Union[Unset, None, str] = UNSET,
) -> Dict[str, Any]:
    url = "{}/connections/{conn_id}/accept-invitation".format(client.base_url, conn_id=conn_id)
    headers: Dict[str, Any] = client.get_headers()
    cookies: Dict[str, Any] = client.get_cookies()
    params: Dict[str, Any] = {
        "mediation_id": mediation_id,
        "my_endpoint": my_endpoint,
        "my_label": my_label,
    }
    params = {k: v for k, v in params.items() if v is not UNSET and v is not None}
    return {
        "url": url,
        "headers": headers,
        "cookies": cookies,
        "timeout": client.get_timeout(),
        "params": params,
    }
def _parse_response(*, response: httpx.Response) -> Optional[ConnRecord]:
    if response.status_code == 200:
        response_200 = ConnRecord.from_dict(response.json())
        return response_200
    return None
def _build_response(*, response: httpx.Response) -> Response[ConnRecord]:
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=_parse_response(response=response),
    )
def sync_detailed(
    conn_id: str,
    *,
    client: Client,
    mediation_id: Union[Unset, None, str] = UNSET,
    my_endpoint: Union[Unset, None, str] = UNSET,
    my_label: Union[Unset, None, str] = UNSET,
) -> Response[ConnRecord]:
    kwargs = _get_kwargs(
        conn_id=conn_id,
        client=client,
        mediation_id=mediation_id,
        my_endpoint=my_endpoint,
        my_label=my_label,
    )
    response = httpx.post(
        verify=client.verify_ssl,
        **kwargs,
    )
    return _build_response(response=response)
def sync(
    conn_id: str,
    *,
    client: Client,
    mediation_id: Union[Unset, None, str] = UNSET,
    my_endpoint: Union[Unset, None, str] = UNSET,
    my_label: Union[Unset, None, str] = UNSET,
) -> Optional[ConnRecord]:
    """ """
    return sync_detailed(
        conn_id=conn_id,
        client=client,
        mediation_id=mediation_id,
        my_endpoint=my_endpoint,
        my_label=my_label,
    ).parsed
async def asyncio_detailed(
    conn_id: str,
    *,
    client: Client,
    mediation_id: Union[Unset, None, str] = UNSET,
    my_endpoint: Union[Unset, None, str] = UNSET,
    my_label: Union[Unset, None, str] = UNSET,
) -> Response[ConnRecord]:
    kwargs = _get_kwargs(
        conn_id=conn_id,
        client=client,
        mediation_id=mediation_id,
        my_endpoint=my_endpoint,
        my_label=my_label,
    )
    async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
        response = await _client.post(**kwargs)
    return _build_response(response=response)
async def asyncio(
    conn_id: str,
    *,
    client: Client,
    mediation_id: Union[Unset, None, str] = UNSET,
    my_endpoint: Union[Unset, None, str] = UNSET,
    my_label: Union[Unset, None, str] = UNSET,
) -> Optional[ConnRecord]:
    """ """
    return (
        await asyncio_detailed(
            conn_id=conn_id,
            client=client,
            mediation_id=mediation_id,
            my_endpoint=my_endpoint,
            my_label=my_label,
        )
    ).parsed
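

# Illustrative sketch (an assumption, not part of the generated client): the
# functions above are meant to be called with the package's Client object, e.g.
#
#   from ...client import Client
#   client = Client(base_url="http://localhost:8031")
#   record = sync("some-conn-id", client=client, my_label="Alice")
#
# asyncio(...) offers the same call shape for use inside an event loop.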
 | 
| 
	the-stack_106_32350 | 
	#-*-coding: utf-8 -*-
# regular expressions module
import re as reg
def find_int_in_str(string=None):
    """
    trouver les nombres entiers dans une chaine de caractere
    en ignorant les signes
    
    :param string: str
    
    :reutrn: ['float', 'float', ...]
    """
    response = []
    if string: # si la chaine n'est pas vide
        response = reg.findall("([0.0-9.9]+)", string) # on cherche les floatants
    return  [float(integer) for integer in response] | 
| 
	the-stack_106_32351 | 
	#!/usr/bin/env python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import mock
import pytest
import yaml
from ansible_collections.hpe.oneview.tests.unit.utils.hpe_test_utils import OneViewBaseTest
from ansible_collections.hpe.oneview.tests.unit.utils.oneview_module_loader import StoragePoolModule
FAKE_MSG_ERROR = 'Fake message error'
YAML_STORAGE_POOL = """
        config: "{{ config }}"
        state: present
        data:
           storageSystemUri: "/rest/storage-systems/TXQ1010307"
           poolName: "FST_CPG2"
          """
YAML_STORAGE_POOL_500 = """
        config: "{{ config }}"
        state: present
        data:
           storageSystemUri: "/rest/storage-systems/TXQ1010307"
           name: "FST_CPG2"
           isManaged: True
          """
YAML_STORAGE_POOL_ABSENT_500 = """
        config: "{{ config }}"
        state: absent
        data:
           storageSystemUri: "/rest/storage-systems/TXQ1010307"
           name: "FST_CPG2"
          """
YAML_STORAGE_POOL_MISSING_KEY = """
    config: "{{ config }}"
    state: present
    data:
       storageSystemUri: "/rest/storage-systems/TXQ1010307"
      """
YAML_STORAGE_POOL_ABSENT = """
        config: "{{ config }}"
        state: absent
        data:
           poolName: "FST_CPG2"
        """
DICT_DEFAULT_STORAGE_POOL = yaml.load(YAML_STORAGE_POOL)["data"]
DICT_DEFAULT_STORAGE_POOL_500 = yaml.load(YAML_STORAGE_POOL_500)["data"]
@pytest.mark.resource(TestStoragePoolModule='storage_pools')
class TestStoragePoolModule(OneViewBaseTest):
    @pytest.fixture(autouse=True)
    def specific_set_up(self, setUp):
        self.mock_ov_client.api_version = 300
    def test_should_create_new_storage_pool(self):
        self.resource.get_by_name.return_value = []
        self.resource.data = {"poolName": "name"}
        self.resource.add.return_value = self.resource
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL)
        StoragePoolModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=StoragePoolModule.MSG_CREATED,
            ansible_facts=dict(storage_pool={"poolName": "name"})
        )
    def test_should_do_nothing_when_storage_pool_already_exist(self):
        self.resource.data = DICT_DEFAULT_STORAGE_POOL
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL)
        StoragePoolModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=StoragePoolModule.MSG_ALREADY_PRESENT,
            ansible_facts=dict(storage_pool=DICT_DEFAULT_STORAGE_POOL)
        )
    def test_should_remove_storage_pool(self):
        self.resource.data = DICT_DEFAULT_STORAGE_POOL
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_ABSENT)
        StoragePoolModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=StoragePoolModule.MSG_DELETED
        )
    def test_should_do_nothing_when_storage_pool_not_exist(self):
        self.mock_ov_client.api_version = 500
        self.resource.get_by_name.return_value = None
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_ABSENT)
        StoragePoolModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=StoragePoolModule.MSG_ALREADY_ABSENT,
            ansible_facts=dict(storage_pool=None)
        )
    def test_should_fail_when_key_is_missing_api300(self):
        self.mock_ov_client.api_version = 300
        self.resource.data = DICT_DEFAULT_STORAGE_POOL
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_MISSING_KEY)
        StoragePoolModule().run()
        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=StoragePoolModule.MSG_MANDATORY_FIELD_MISSING)
    def test_should_fail_when_key_is_missing_api500(self):
        self.mock_ov_client.api_version = 500
        self.resource.data = DICT_DEFAULT_STORAGE_POOL
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_MISSING_KEY)
        StoragePoolModule().run()
        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=StoragePoolModule.MSG_MANDATORY_FIELD_MISSING)
    def test_update_when_storage_pool_already_exists_and_is_different_api500(self):
        self.mock_ov_client.api_version = 500
        update_params = yaml.load(YAML_STORAGE_POOL_500)
        update_params['data']['isManaged'] = False
        self.mock_ansible_module.params = update_params
        self.resource.data = DICT_DEFAULT_STORAGE_POOL_500
        self.mock_ov_client.storage_pools.update.return_value = update_params
        StoragePoolModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=StoragePoolModule.MSG_UPDATED,
            ansible_facts=dict(storage_pool=DICT_DEFAULT_STORAGE_POOL_500)
        )
    def test_update_should_do_nothing_when_storage_pool_already_exists_and_is_equal_api500(self):
        self.mock_ov_client.api_version = 500
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_500)
        self.resource.data = DICT_DEFAULT_STORAGE_POOL_500
        StoragePoolModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=StoragePoolModule.MSG_ALREADY_PRESENT,
            ansible_facts=dict(storage_pool=DICT_DEFAULT_STORAGE_POOL_500)
        )
    def test_update_should_do_nothing_when_storage_pool_is_absent_and_do_not_exists_api500(self):
        self.mock_ov_client.api_version = 500
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_ABSENT_500)
        self.resource.get_by_name.return_value = None
        StoragePoolModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=StoragePoolModule.MSG_ALREADY_ABSENT,
            ansible_facts=dict(storage_pool=None)
        )
    def test_should_fail_when_present_but_storage_pool_is_absent_api500(self):
        self.mock_ov_client.api_version = 500
        self.resource.get_by_name.return_value = None
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_500)
        StoragePoolModule().run()
        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=StoragePoolModule.MSG_RESOURCE_NOT_FOUND)
    def test_should_fail_when_absent_but_storage_pool_exists_api500(self):
        self.mock_ov_client.api_version = 500
        self.resource.data = DICT_DEFAULT_STORAGE_POOL_500
        self.mock_ansible_module.params = yaml.load(YAML_STORAGE_POOL_ABSENT_500)
        StoragePoolModule().run()
        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=StoragePoolModule.MSG_RESOURCE_FOUND)
if __name__ == '__main__':
    pytest.main([__file__])
 | 
| 
	the-stack_106_32354 | 
	from django import forms
from django.forms.models import ModelForm
from django.utils.translation import ugettext_lazy as _
from itdagene.app.workschedule.models import Worker, WorkerInSchedule, WorkSchedule
from itdagene.core.models import Preference
class WorkScheduleForm(ModelForm):
    invites = forms.MultipleChoiceField(label=_("Add worker"), required=False)
    class Meta:
        model = WorkSchedule
        fields = ("title", "date", "start_time", "end_time", "description", "invites")
    def __init__(self, *args, **kwargs):
        super(WorkScheduleForm, self).__init__(*args, **kwargs)
        workers = Worker.objects.filter(
            preference=Preference.current_preference().year
        ).order_by("name")
        self.fields["invites"].choices = [
            (worker.pk, worker.name) for worker in workers
        ]
        self.fields["invites"].widget.attrs["class"] = "chosen"
    def save(self, commit=True):
        workschedule = super(WorkScheduleForm, self).save(commit=commit)
        for i in self.cleaned_data["invites"]:
            WorkerInSchedule.objects.get_or_create(schedule=workschedule, worker_id=i)
        return workschedule
class WorkerForm(ModelForm):
    class Meta:
        model = Worker
        fields = ("name", "phone", "email", "t_shirt_size")
    def __init__(self, *args, **kwargs):
        super(WorkerForm, self).__init__(*args, **kwargs)
    def save(self):
        pref = Preference.current_preference()
        worker = super(WorkerForm, self).save(commit=False)
        worker.preference = pref.year
        worker.save()
        return worker
 | 
| 
	the-stack_106_32356 | 
	# stolen from https://djangostars.com/blog/how-to-create-and-deploy-a-telegram-bot/
import requests  
from bottle import Bottle, response, request as bottle_request
class BotHandlerMixin:  
    BOT_URL = None
    def get_chat_id(self, data):
        """
        Method to extract chat id from telegram request.
        """
        chat_id = data['message']['chat']['id']
        return chat_id
    def get_message(self, data):
        """
        Method to extract message id from telegram request.
        """
        message_text = data['message']['text']
        return message_text
    def send_message(self, prepared_data):
        """
        Prepared data should be json which includes at least `chat_id` and `text`
        """       
        message_url = self.BOT_URL + 'sendMessage'
        requests.post(message_url, json=prepared_data)
class TelegramBot(BotHandlerMixin, Bottle):  
    BOT_URL = 'https://api.telegram.org/****************/'
    def __init__(self, *args, **kwargs):
        super(TelegramBot, self).__init__()
        self.route('/', callback=self.post_handler, method="POST")
    def change_text_message(self, text):
        return text[::-1]
    def prepare_data_for_answer(self, data):
        message = self.get_message(data)
        answer = self.change_text_message(message)
        chat_id = self.get_chat_id(data)
        json_data = {
            "chat_id": chat_id,
            "text": answer,
        }
        return json_data
    def post_handler(self):
        data = bottle_request.json
        answer_data = self.prepare_data_for_answer(data)
        self.send_message(answer_data)
        return response
        
        
if __name__ == '__main__':  
    app = TelegramBot()
    app.run(host='localhost', port=8080)
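
    # Note (not in the original snippet): Telegram only delivers updates to this
    # handler after a webhook has been registered once, e.g. with
    #   requests.post(TelegramBot.BOT_URL + 'setWebhook',
    #                 json={'url': 'https://<public-host>/'})
    # where the public URL must be reachable over HTTPS.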
 | 
| 
	the-stack_106_32359 | 
	import pandas as pd
import datetime
from sklearn.model_selection import train_test_split
import os, sys
sys.path.insert(0, os.path.abspath(".."))
from src.utils.utils import save_df
def ingest_file(path):
    """Reads data from a csv located at path and returns a dataframe
    Parameters:
        path (string): Path where the csv file resides
    Returns:
        dataframe
    """
    df = pd.read_csv(path)
    return df
def generate_label(df):
    """Generates in the passed data frame a variable called `label`
    which is equal to 1 when `codigo_cierre` is (F) or (N) and 0
    in any other case
    Parameters:
        df (dataframe)
    """
    df['label'] = df['codigo_cierre'].str.split(' ', n=1, expand=False)
    df['label'] = df['label'].apply(lambda x: x[0][1])
    df['label'] = df['label'].apply(lambda x: 1 if x == 'F' or x == 'N' else 0)
    return df
def drop_cols(df):
    """Drops unused columns from passed dataframe. The variables that are eliminated are:
        - codigo_cierre
        - fecha_cierre
        - año_cierre
        - mes_cierre
        - hora_cierre
        - latitud
        - longitud
        - clas_con_f_alarma
        - delegacion_cierre
        - geopoint
    Parameters:
        df (dataframe)
    """
    dropped_columns = ['codigo_cierre', 'fecha_cierre', 'año_cierre', 'mes_cierre', 'hora_cierre',
                       'latitud', 'longitud', 'clas_con_f_alarma', 'delegacion_cierre', 'geopoint']
    df.drop(dropped_columns, axis='columns', inplace=True)
    return df
def save_ingestion(df, path):
    save_df(df, path)
def date_transformation(col, df):
    """Expands two-digit years in the date strings of column `col`
    (e.g. 31/02/19 --> 31/02/2019) and converts the column to datetime."""
    # fix the date strings: dd/mm/yy --> dd/mm/yyyy
    for ano in range(2013,2021):
        string_correccion_ano = str(ano)
        string_correccion_ano=string_correccion_ano[-2:]
        print(string_correccion_ano)
        for mes in range (1,13):
            if(mes<10):
                string_correccion_mes = '0'+str(mes)
            else:
                string_correccion_mes = str(mes)
            for dia in range(1, 32):
                elemento = dia
                if(elemento<10):
                    string_correccion = '0'+str(dia)+'/'+string_correccion_mes+'/'+string_correccion_ano
                    string_correcto = '0'+str(dia)+'/'+string_correccion_mes+'/20'+string_correccion_ano
                else:
                    string_correccion = str(dia)+'/'+string_correccion_mes+'/'+string_correccion_ano
                    string_correcto = str(dia)+'/'+string_correccion_mes+'/20'+string_correccion_ano
                #print(string_correccion)
                df.loc[df[col] == string_correccion, col] = string_correcto
    # Convert the column to datetime
    df[col]=pd.to_datetime(df[col], format='%d/%m/%Y')
    return df
def ingest(input_path, output_path):
    df = ingest_file(input_path)
    df = generate_label(df)
    df = drop_cols(df)
    save_ingestion(df, output_path)
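

# Minimal usage sketch (the paths are placeholders, not part of the pipeline):
#   ingest("data/raw/incidentes-viales.csv", "data/clean/incidentes.csv")
# reads the raw CSV, builds the binary `label`, drops the unused columns and
# saves the result through save_df.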
 | 
| 
	the-stack_106_32361 | 
	import mmcv
import numpy as np
import torch
from mmdet.core import bbox2roi, build_assigner, build_sampler
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.models.dense_heads import (AnchorHead, CornerHead, FCOSHead,
                                      FSAFHead, GuidedAnchorHead, PAAHead,
                                      SABLRetinaHead, TransformerHead,
                                      VFNetHead, YOLACTHead, YOLACTProtonet,
                                      YOLACTSegmHead, paa_head)
from mmdet.models.dense_heads.paa_head import levels_to_images
from mmdet.models.roi_heads.bbox_heads import BBoxHead, SABLHead
from mmdet.models.roi_heads.mask_heads import FCNMaskHead, MaskIoUHead
def test_paa_head_loss():
    """Tests paa head loss when truth is empty and non-empty."""
    class mock_skm(object):
        def GaussianMixture(self, *args, **kwargs):
            return self
        def fit(self, loss):
            pass
        def predict(self, loss):
            components = np.zeros_like(loss, dtype=np.long)
            return components.reshape(-1)
        def score_samples(self, loss):
            scores = np.random.random(len(loss))
            return scores
    paa_head.skm = mock_skm()
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.1,
                neg_iou_thr=0.1,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = PAAHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    self.init_weights()
    cls_scores, bbox_preds, iou_preds = self(feat)
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
                                gt_labels, img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    empty_iou_loss = empty_gt_losses['loss_iou']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    onegt_iou_loss = one_gt_losses['loss_iou']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'box loss should be non-zero'
    n, c, h, w = 10, 4, 20, 20
    mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
    results = levels_to_images(mlvl_tensor)
    assert len(results) == n
    assert results[0].size() == (h * w * 5, c)
    assert self.with_score_voting
    cls_scores = [torch.ones(4, 5, 5)]
    bbox_preds = [torch.ones(4, 5, 5)]
    iou_preds = [torch.ones(1, 5, 5)]
    mlvl_anchors = [torch.ones(5 * 5, 4)]
    img_shape = None
    scale_factor = [0.5, 0.5]
    cfg = mmcv.Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    rescale = False
    self._get_bboxes_single(
        cls_scores,
        bbox_preds,
        iou_preds,
        mlvl_anchors,
        img_shape,
        scale_factor,
        cfg,
        rescale=rescale)
def test_fcos_head_loss():
    """Tests fcos head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = FCOSHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds, centerness = self.forward(feat)
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
                                gt_labels, img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_vfnet_head_loss():
    """Tests vfnet head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(type='ATSSAssigner', topk=9),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = VFNetHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
    if torch.cuda.is_available():
        self.cuda()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds, bbox_preds_refine = self.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                    gt_bboxes, gt_labels, img_metas,
                                    gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                  gt_bboxes, gt_labels, img_metas,
                                  gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_anchor_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False))
    self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
    # Anchor head expects a multiple levels of features per image
    feat = [
        torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
        for i in range(len(self.anchor_generator.strides))
    ]
    cls_scores, bbox_preds = self.forward(feat)
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_fsaf_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    cfg = dict(
        reg_decoded_bbox=True,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=1,
            scales_per_octave=1,
            ratios=[1.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0,
            reduction='none'),
        loss_bbox=dict(
            type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'))
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='CenterRegionAssigner',
                pos_scale=0.2,
                neg_scale=0.2,
                min_pos_iof=0.01),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    head = FSAFHead(num_classes=4, in_channels=1, train_cfg=train_cfg, **cfg)
    if torch.cuda.is_available():
        head.cuda()
        # FSAF head expects a multiple levels of features per image
        feat = [
            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
            for i in range(len(head.anchor_generator.strides))
        ]
        cls_scores, bbox_preds = head.forward(feat)
        gt_bboxes_ignore = None
        # When truth is non-empty then both cls and box loss should be nonzero
        #  for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                  img_metas, gt_bboxes_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
        # Test that empty ground truth encourages the network to predict bkg
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
                                    gt_labels, img_metas, gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')
def test_ga_anchor_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            pos_weight=-1,
            debug=False))
    head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)
    # Anchor head expects a multiple levels of features per image
    if torch.cuda.is_available():
        head.cuda()
        feat = [
            torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
            for i in range(len(head.approx_anchor_generator.base_anchors))
        ]
        cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
                                    loc_preds, gt_bboxes, gt_labels, img_metas,
                                    gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
                                  loc_preds, gt_bboxes, gt_labels, img_metas,
                                  gt_bboxes_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_bbox_head_loss():
    """Tests bbox head loss when truth is empty and non-empty."""
    self = BBoxHead(in_channels=8, roi_feat_size=3)
    # Dummy proposals
    proposal_list = [
        torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
    ]
    target_cfg = mmcv.Config(dict(pos_weight=1))
    # Test bbox loss when truth is empty
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
                                            gt_labels)
    bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
                                    target_cfg)
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets
    # Create dummy features "extracted" for each sampled bbox
    num_sampled = sum(len(res.bboxes) for res in sampling_results)
    rois = bbox2roi([res.bboxes for res in sampling_results])
    dummy_feats = torch.rand(num_sampled, 8 * 3 * 3)
    cls_scores, bbox_preds = self.forward(dummy_feats)
    losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
                       bbox_targets, bbox_weights)
    assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
    assert losses.get('loss_bbox', 0) == 0, 'empty gt loss should be zero'
    # Test bbox loss when truth is non-empty
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
                                            gt_labels)
    rois = bbox2roi([res.bboxes for res in sampling_results])
    bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
                                    target_cfg)
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets
    # Create dummy features "extracted" for each sampled bbox
    num_sampled = sum(len(res.bboxes) for res in sampling_results)
    dummy_feats = torch.rand(num_sampled, 8 * 3 * 3)
    cls_scores, bbox_preds = self.forward(dummy_feats)
    losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
                       bbox_targets, bbox_weights)
    assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
    assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero'
def test_sabl_bbox_head_loss():
    """Tests bbox head loss when truth is empty and non-empty."""
    self = SABLHead(
        num_classes=4,
        cls_in_channels=3,
        reg_in_channels=3,
        cls_out_channels=3,
        reg_offset_out_channels=3,
        reg_cls_out_channels=3,
        roi_feat_size=7)
    # Dummy proposals
    proposal_list = [
        torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
    ]
    target_cfg = mmcv.Config(dict(pos_weight=1))
    # Test bbox loss when truth is empty
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
                                            gt_labels)
    bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
                                    target_cfg)
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets
    # Create dummy features "extracted" for each sampled bbox
    num_sampled = sum(len(res.bboxes) for res in sampling_results)
    rois = bbox2roi([res.bboxes for res in sampling_results])
    dummy_feats = torch.rand(num_sampled, 3, 7, 7)
    cls_scores, bbox_preds = self.forward(dummy_feats)
    losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
                       bbox_targets, bbox_weights)
    assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
    assert losses.get('loss_bbox_cls',
                      0) == 0, 'empty gt bbox-cls-loss should be zero'
    assert losses.get('loss_bbox_reg',
                      0) == 0, 'empty gt bbox-reg-loss should be zero'
    # Test bbox loss when truth is non-empty
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
                                            gt_labels)
    rois = bbox2roi([res.bboxes for res in sampling_results])
    bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
                                    target_cfg)
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets
    # Create dummy features "extracted" for each sampled bbox
    num_sampled = sum(len(res.bboxes) for res in sampling_results)
    dummy_feats = torch.rand(num_sampled, 3, 7, 7)
    cls_scores, bbox_preds = self.forward(dummy_feats)
    losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
                       bbox_targets, bbox_weights)
    assert losses.get('loss_bbox_cls',
                      0) > 0, 'box-cls-loss should be non-zero'
    assert losses.get('loss_bbox_reg',
                      0) > 0, 'box-reg-loss should be non-zero'
def test_sabl_retina_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    head = SABLRetinaHead(
        num_classes=4,
        in_channels=3,
        feat_channels=10,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        train_cfg=cfg)
    if torch.cuda.is_available():
        head.cuda()
        # Anchor head expects multiple levels of features per image
        feat = [
            torch.rand(1, 3, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
            for i in range(len(head.approx_anchor_generator.base_anchors))
        ]
        cls_scores, bbox_preds = head.forward(feat)
        # Test that empty ground truth encourages the network
        # to predict background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
                                    gt_labels, img_metas, gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls'])
        empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg'])
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_cls_loss.item() == 0, (
            'there should be no box cls loss when there are no true boxes')
        assert empty_box_reg_loss.item() == 0, (
            'there should be no box reg loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should
        # be nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                  img_metas, gt_bboxes_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls'])
        onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg'])
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_cls_loss.item() > 0, 'box cls loss should be non-zero'
        assert onegt_box_reg_loss.item() > 0, 'box reg loss should be non-zero'
def test_refine_boxes():
    """Mirrors the doctest in
    ``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_bboxes`` but checks for
    multiple values of n_roi / n_img."""
    self = BBoxHead(reg_class_agnostic=True)
    test_settings = [
        # Corner case: less rois than images
        {
            'n_roi': 2,
            'n_img': 4,
            'rng': 34285940
        },
        # Corner case: no images
        {
            'n_roi': 0,
            'n_img': 0,
            'rng': 52925222
        },
        # Corner cases: few images / rois
        {
            'n_roi': 1,
            'n_img': 1,
            'rng': 1200281
        },
        {
            'n_roi': 2,
            'n_img': 1,
            'rng': 1200282
        },
        {
            'n_roi': 2,
            'n_img': 2,
            'rng': 1200283
        },
        {
            'n_roi': 1,
            'n_img': 2,
            'rng': 1200284
        },
        # Corner case: no rois few images
        {
            'n_roi': 0,
            'n_img': 1,
            'rng': 23955860
        },
        {
            'n_roi': 0,
            'n_img': 2,
            'rng': 25830516
        },
        # Corner case: no rois many images
        {
            'n_roi': 0,
            'n_img': 10,
            'rng': 671346
        },
        {
            'n_roi': 0,
            'n_img': 20,
            'rng': 699807
        },
        # Corner case: similar num rois and images
        {
            'n_roi': 20,
            'n_img': 20,
            'rng': 1200238
        },
        {
            'n_roi': 10,
            'n_img': 20,
            'rng': 1200238
        },
        {
            'n_roi': 5,
            'n_img': 5,
            'rng': 1200238
        },
        # ----------------------------------
        # Common case: more rois than images
        {
            'n_roi': 100,
            'n_img': 1,
            'rng': 337156
        },
        {
            'n_roi': 150,
            'n_img': 2,
            'rng': 275898
        },
        {
            'n_roi': 500,
            'n_img': 5,
            'rng': 4903221
        },
    ]
    for demokw in test_settings:
        try:
            n_roi = demokw['n_roi']
            n_img = demokw['n_img']
            rng = demokw['rng']
            print(f'Test refine_boxes case: {demokw!r}')
            tup = _demodata_refine_boxes(n_roi, n_img, rng=rng)
            rois, labels, bbox_preds, pos_is_gts, img_metas = tup
            bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
                                             pos_is_gts, img_metas)
            assert len(bboxes_list) == n_img
            assert sum(map(len, bboxes_list)) <= n_roi
            assert all(b.shape[1] == 4 for b in bboxes_list)
        except Exception:
            print(f'Test failed with demokw={demokw!r}')
            raise
def _demodata_refine_boxes(n_roi, n_img, rng=0):
    """Create random test data for the
    ``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_bboxes`` method."""
    import numpy as np
    from mmdet.core.bbox.demodata import random_boxes
    from mmdet.core.bbox.demodata import ensure_rng
    try:
        import kwarray
    except ImportError:
        import pytest
        pytest.skip('kwarray is required for this test')
    scale = 512
    rng = ensure_rng(rng)
    img_metas = [{'img_shape': (scale, scale)} for _ in range(n_img)]
    # Create rois in the expected format
    roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
    if n_img == 0:
        assert n_roi == 0, 'cannot have any rois if there are no images'
        img_ids = torch.empty((0, ), dtype=torch.long)
        roi_boxes = torch.empty((0, 4), dtype=torch.float32)
    else:
        img_ids = rng.randint(0, n_img, (n_roi, ))
        img_ids = torch.from_numpy(img_ids)
    rois = torch.cat([img_ids[:, None].float(), roi_boxes], dim=1)
    # Create other args
    labels = rng.randint(0, 2, (n_roi, ))
    labels = torch.from_numpy(labels).long()
    bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
    # For each image, pretend random positive boxes are gts
    is_label_pos = (labels.numpy() > 0).astype(int)
    lbl_per_img = kwarray.group_items(is_label_pos, img_ids.numpy())
    pos_per_img = [sum(lbl_per_img.get(gid, [])) for gid in range(n_img)]
    # randomly generate with numpy then sort with torch
    _pos_is_gts = [
        rng.randint(0, 2, (npos, )).astype(np.uint8) for npos in pos_per_img
    ]
    pos_is_gts = [
        torch.from_numpy(p).sort(descending=True)[0] for p in _pos_is_gts
    ]
    return rois, labels, bbox_preds, pos_is_gts, img_metas
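# A minimal usage sketch (not part of the original suite) showing how the
# helper above feeds BBoxHead.refine_bboxes, mirroring test_refine_boxes; the
# function name and default arguments here are illustrative assumptions.
def _demo_refine_boxes_usage(n_roi=5, n_img=2, rng=0):
    head = BBoxHead(reg_class_agnostic=True)
    rois, labels, bbox_preds, pos_is_gts, img_metas = _demodata_refine_boxes(
        n_roi, n_img, rng=rng)
    # refine_bboxes returns a list with one (n, 4) bbox tensor per image.
    return head.refine_bboxes(rois, labels, bbox_preds, pos_is_gts, img_metas)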
def test_mask_head_loss():
    """Test mask head loss when mask target is empty."""
    self = FCNMaskHead(
        num_convs=1,
        roi_feat_size=6,
        in_channels=8,
        conv_out_channels=8,
        num_classes=8)
    # Dummy proposals
    proposal_list = [
        torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
    ]
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
                                            gt_labels)
    # create dummy mask
    import numpy as np
    from mmdet.core import BitmapMasks
    dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
    gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
    # create dummy train_cfg
    train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
    # Create dummy features "extracted" for each sampled bbox
    num_sampled = sum(len(res.bboxes) for res in sampling_results)
    dummy_feats = torch.rand(num_sampled, 8, 6, 6)
    mask_pred = self.forward(dummy_feats)
    mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
    pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
    loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
    onegt_mask_loss = sum(loss_mask['loss_mask'])
    assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
    # test mask_iou_head
    mask_iou_head = MaskIoUHead(
        num_convs=1,
        num_fcs=1,
        roi_feat_size=6,
        in_channels=8,
        conv_out_channels=8,
        fc_out_channels=8,
        num_classes=8)
    pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
    mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
    pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
    mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
                                                 pos_mask_pred, mask_targets,
                                                 train_cfg)
    loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
    onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
    assert onegt_mask_iou_loss.item() >= 0
def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):
    """Create sample results that can be passed to BBoxHead.get_targets."""
    num_imgs = 1
    feat = torch.rand(1, 1, 3, 3)
    assign_config = dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.5,
        ignore_iof_thr=-1)
    sampler_config = dict(
        type='RandomSampler',
        num=512,
        pos_fraction=0.25,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    bbox_assigner = build_assigner(assign_config)
    bbox_sampler = build_sampler(sampler_config)
    gt_bboxes_ignore = [None for _ in range(num_imgs)]
    sampling_results = []
    for i in range(num_imgs):
        assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
                                             gt_bboxes_ignore[i], gt_labels[i])
        sampling_result = bbox_sampler.sample(
            assign_result,
            proposal_list[i],
            gt_bboxes[i],
            gt_labels[i],
            feats=feat)
        sampling_results.append(sampling_result)
    return sampling_results
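# Note: each SamplingResult produced by _dummy_bbox_sampling exposes, among
# other fields, `bboxes`, `pos_bboxes`, `pos_gt_bboxes` and `pos_gt_labels`;
# the bbox and mask head tests above rely on `res.bboxes` and
# `res.pos_gt_labels` directly.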
def test_corner_head_loss():
    """Tests corner head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    self = CornerHead(num_classes=4, in_channels=1)
    # Corner head expects multiple levels of features per image
    feat = [
        torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
    ]
    tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs = self.forward(feat)
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
                                br_offs, gt_bboxes, gt_labels, img_metas,
                                gt_bboxes_ignore)
    empty_det_loss = sum(empty_gt_losses['det_loss'])
    empty_push_loss = sum(empty_gt_losses['push_loss'])
    empty_pull_loss = sum(empty_gt_losses['pull_loss'])
    empty_off_loss = sum(empty_gt_losses['off_loss'])
    assert empty_det_loss.item() > 0, 'det loss should be non-zero'
    assert empty_push_loss.item() == 0, (
        'there should be no push loss when there are no true boxes')
    assert empty_pull_loss.item() == 0, (
        'there should be no pull loss when there are no true boxes')
    assert empty_off_loss.item() == 0, (
        'there should be no offset loss when there are no true boxes')
    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
                              br_offs, gt_bboxes, gt_labels, img_metas,
                              gt_bboxes_ignore)
    onegt_det_loss = sum(one_gt_losses['det_loss'])
    onegt_push_loss = sum(one_gt_losses['push_loss'])
    onegt_pull_loss = sum(one_gt_losses['pull_loss'])
    onegt_off_loss = sum(one_gt_losses['off_loss'])
    assert onegt_det_loss.item() > 0, 'det loss should be non-zero'
    assert onegt_push_loss.item() == 0, (
        'there should be no push loss when there is only one true box')
    assert onegt_pull_loss.item() > 0, 'pull loss should be non-zero'
    assert onegt_off_loss.item() > 0, 'off loss should be non-zero'
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874],
                      [123.6667, 123.8757, 138.6326, 251.8874]]),
    ]
    gt_labels = [torch.LongTensor([2, 3])]
    # Equalize the corner embedding values of different objects to make the
    # push_loss larger than 0
    gt_bboxes_ind = (gt_bboxes[0] // 4).int().tolist()
    for tl_emb_feat, br_emb_feat in zip(tl_embs, br_embs):
        tl_emb_feat[:, :, gt_bboxes_ind[0][1],
                    gt_bboxes_ind[0][0]] = tl_emb_feat[:, :,
                                                       gt_bboxes_ind[1][1],
                                                       gt_bboxes_ind[1][0]]
        br_emb_feat[:, :, gt_bboxes_ind[0][3],
                    gt_bboxes_ind[0][2]] = br_emb_feat[:, :,
                                                       gt_bboxes_ind[1][3],
                                                       gt_bboxes_ind[1][2]]
    two_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
                              br_offs, gt_bboxes, gt_labels, img_metas,
                              gt_bboxes_ignore)
    twogt_det_loss = sum(two_gt_losses['det_loss'])
    twogt_push_loss = sum(two_gt_losses['push_loss'])
    twogt_pull_loss = sum(two_gt_losses['pull_loss'])
    twogt_off_loss = sum(two_gt_losses['off_loss'])
    assert twogt_det_loss.item() > 0, 'det loss should be non-zero'
    assert twogt_push_loss.item() > 0, 'push loss should be non-zero'
    assert twogt_pull_loss.item() > 0, 'pull loss should be non-zero'
    assert twogt_off_loss.item() > 0, 'off loss should be non-zero'
def test_corner_head_encode_and_decode_heatmap():
    """Tests corner head generating and decoding the heatmap."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3),
        'border': (0, 0, 0, 0)
    }]
    gt_bboxes = [
        torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                      [10, 20, 200, 240]])
    ]
    gt_labels = [torch.LongTensor([1, 1, 2])]
    self = CornerHead(num_classes=4, in_channels=1, corner_emb_channels=1)
    feat = [
        torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
    ]
    targets = self.get_targets(
        gt_bboxes,
        gt_labels,
        feat[0].shape,
        img_metas[0]['pad_shape'],
        with_corner_emb=self.with_corner_emb)
    gt_tl_heatmap = targets['topleft_heatmap']
    gt_br_heatmap = targets['bottomright_heatmap']
    gt_tl_offset = targets['topleft_offset']
    gt_br_offset = targets['bottomright_offset']
    embedding = targets['corner_embedding']
    [top, left], [bottom, right] = embedding[0][0]
    gt_tl_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
    gt_br_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
    gt_tl_embedding_heatmap[0, 0, top, left] = 1
    gt_br_embedding_heatmap[0, 0, bottom, right] = 1
    batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
        tl_heat=gt_tl_heatmap,
        br_heat=gt_br_heatmap,
        tl_off=gt_tl_offset,
        br_off=gt_br_offset,
        tl_emb=gt_tl_embedding_heatmap,
        br_emb=gt_br_embedding_heatmap,
        img_meta=img_metas[0],
        k=100,
        kernel=3,
        distance_threshold=0.5)
    bboxes = batch_bboxes.view(-1, 4)
    scores = batch_scores.view(-1, 1)
    clses = batch_clses.view(-1, 1)
    idx = scores.argsort(dim=0, descending=True)
    bboxes = bboxes[idx].view(-1, 4)
    scores = scores[idx].view(-1)
    clses = clses[idx].view(-1)
    valid_bboxes = bboxes[torch.where(scores > 0.05)]
    valid_labels = clses[torch.where(scores > 0.05)]
    max_coordinate = valid_bboxes.max()
    offsets = valid_labels.to(valid_bboxes) * (max_coordinate + 1)
    gt_offsets = gt_labels[0].to(gt_bboxes[0]) * (max_coordinate + 1)
    offset_bboxes = valid_bboxes + offsets[:, None]
    offset_gtbboxes = gt_bboxes[0] + gt_offsets[:, None]
    iou_matrix = bbox_overlaps(offset_bboxes.numpy(), offset_gtbboxes.numpy())
    assert (iou_matrix == 1).sum() == 3
def test_yolact_head_loss():
    """Tests yolact head losses when truth is empty and non-empty."""
    s = 550
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.,
                ignore_iof_thr=-1,
                gt_max_assign_all=False),
            smoothl1_beta=1.,
            allowed_border=-1,
            pos_weight=-1,
            neg_pos_ratio=3,
            debug=False,
            min_gt_box_wh=[4.0, 4.0]))
    bbox_head = YOLACTHead(
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,
        use_ohem=True,
        train_cfg=train_cfg)
    segm_head = YOLACTSegmHead(
        in_channels=256,
        num_classes=80,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    mask_head = YOLACTProtonet(
        num_classes=80,
        in_channels=256,
        num_protos=32,
        max_masks_to_train=100,
        loss_mask_weight=6.125)
    feat = [
        torch.rand(1, 256, feat_size, feat_size)
        for feat_size in [69, 35, 18, 9, 5]
    ]
    cls_score, bbox_pred, coeff_pred = bbox_head.forward(feat)
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    empty_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    # Test segm head and mask head
    segm_head_outs = segm_head(feat[0])
    empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas,
                                     sampling_results)
    # When there is no truth, the segm and mask loss should be zero.
    empty_segm_loss = sum(empty_segm_loss['loss_segm'])
    empty_mask_loss = sum(empty_mask_loss['loss_mask'])
    assert empty_segm_loss.item() == 0, (
        'there should be no segm loss when there are no true boxes')
    assert empty_mask_loss == 0, (
        'there should be no mask loss when there are no true boxes')
    # When truth is non-empty then cls, box, mask, segm loss should be
    # nonzero for random inputs.
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
    one_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
    one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert one_gt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert one_gt_box_loss.item() > 0, 'box loss should be non-zero'
    one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
                                      img_metas, sampling_results)
    one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
    one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
    assert one_gt_segm_loss.item() > 0, 'segm loss should be non-zero'
    assert one_gt_mask_loss.item() > 0, 'mask loss should be non-zero'
def test_transformer_head_loss():
    """Tests transformer head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3),
        'batch_input_shape': (s, s)
    }]
    train_cfg = dict(
        assigner=dict(
            type='HungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=1.0),
            reg_cost=dict(type='BBoxL1Cost', weight=5.0),
            iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)))
    transformer_cfg = dict(
        type='Transformer',
        embed_dims=4,
        num_heads=1,
        num_encoder_layers=1,
        num_decoder_layers=1,
        feedforward_channels=1,
        dropout=0.1,
        act_cfg=dict(type='ReLU', inplace=True),
        norm_cfg=dict(type='LN'),
        num_fcs=2,
        pre_norm=False,
        return_intermediate_dec=True)
    positional_encoding_cfg = dict(
        type='SinePositionalEncoding', num_feats=2, normalize=True)
    self = TransformerHead(
        num_classes=4,
        in_channels=1,
        num_fcs=2,
        train_cfg=train_cfg,
        transformer=transformer_cfg,
        positional_encoding=positional_encoding_cfg)
    self.init_weights()
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds = self.forward(feat, img_metas)
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    for key, loss in empty_gt_losses.items():
        if 'cls' in key:
            assert loss.item() > 0, 'cls loss should be non-zero'
        elif 'bbox' in key:
            assert loss.item(
            ) == 0, 'there should be no box loss when there are no true boxes'
        elif 'iou' in key:
            assert loss.item(
            ) == 0, 'there should be no iou loss when there are no true boxes'
    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    for loss in one_gt_losses.values():
        assert loss.item(
        ) > 0, 'cls loss, or box loss, or iou loss should be non-zero'
    # test forward_train
    self.forward_train(feat, img_metas, gt_bboxes, gt_labels)
    # test inference mode
    self.get_bboxes(cls_scores, bbox_preds, img_metas, rescale=True)
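# A minimal sketch of how this test module is typically run (assuming a
# working pytest + mmdet/mmcv environment); the file path below is an
# illustrative assumption, not taken from this file:
#
#     pytest tests/test_models/test_heads.py -k "corner_head or yolact"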
 | 