the-stack_106_22883
#!/usr/bin/env python3.4
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
OnLost onFound Stress Test.
"""
import threading
import time
from queue import Empty
from acts.test_utils.bt.BluetoothBaseTest import BluetoothBaseTest
from acts.test_utils.bt.BleEnum import AdvertiseSettingsAdvertiseMode
from acts.test_utils.bt.BleEnum import ScanSettingsCallbackType
from acts.test_utils.bt.BleEnum import ScanSettingsScanMode
from acts.test_utils.bt.BleEnum import ScanSettingsMatchMode
from acts.test_utils.bt.BleEnum import ScanSettingsMatchNum
from acts.test_utils.bt.bt_test_utils import cleanup_scanners_and_advertisers
from acts.test_utils.bt.bt_test_utils import get_advanced_droid_list
from acts.test_utils.bt.bt_gatt_utils import orchestrate_gatt_connection
from acts.test_utils.bt.bt_test_utils import reset_bluetooth
from acts.test_utils.bt.bt_gatt_utils import run_continuous_write_descriptor
from acts.test_utils.bt.bt_gatt_utils import setup_multiple_services
class BleOnLostOnFoundStressTest(BluetoothBaseTest):
default_timeout = 10
max_scan_instances = 28
report_delay = 2000
active_scan_callback_list = []
active_adv_callback_list = []
scan_result = "BleScan{}onScanResults"
batch_scan_result = "BleScan{}onBatchScanResult"
def __init__(self, controllers):
BluetoothBaseTest.__init__(self, controllers)
self.droid_list = get_advanced_droid_list(self.android_devices)
self.scn_ad = self.android_devices[0]
self.adv_ad = self.android_devices[1]
if self.droid_list[1]['max_advertisements'] == 0:
self.tests = ()
return
def teardown_test(self):
cleanup_scanners_and_advertisers(
            self.scn_ad, self.active_scan_callback_list, self.adv_ad,
            self.active_adv_callback_list)
self.active_adv_callback_list = []
self.active_scan_callback_list = []
def on_exception(self, test_name, begin_time):
reset_bluetooth(self.android_devices)
def _start_generic_advertisement_include_device_name(self):
self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(True)
self.adv_ad.droid.bleSetAdvertiseSettingsAdvertiseMode(
AdvertiseSettingsAdvertiseMode.ADVERTISE_MODE_LOW_LATENCY.value)
advertise_data = self.adv_ad.droid.bleBuildAdvertiseData()
advertise_settings = self.adv_ad.droid.bleBuildAdvertiseSettings()
advertise_callback = self.adv_ad.droid.bleGenBleAdvertiseCallback()
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
self.adv_ad.ed.pop_event(
"BleAdvertise{}onSuccess".format(advertise_callback),
self.default_timeout)
self.active_adv_callback_list.append(advertise_callback)
return advertise_callback
def _verify_no_events_found(self, event_name):
try:
self.scn_ad.ed.pop_event(event_name, self.default_timeout)
self.log.error("Found an event when none was expected.")
return False
except Empty:
self.log.info("No scan result found as expected.")
return True
def _poll_energy(self):
import random
while True:
self.log.debug(
self.scn_ad.droid.bluetoothGetControllerActivityEnergyInfo(1))
time.sleep(2)
@BluetoothBaseTest.bt_test_wrap
def test_on_star_while_polling_energy_stats(self):
"""
Tests ...
Steps
1: ...
:return: boolean
"""
thread = threading.Thread(target=self._poll_energy)
thread.start()
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterDeviceName(
self.adv_ad.droid.bluetoothGetLocalName())
self.scn_ad.droid.bleSetScanSettingsScanMode(
ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)
self.scn_ad.droid.bleSetScanSettingsCallbackType(
ScanSettingsCallbackType.CALLBACK_TYPE_FOUND_AND_LOST.value)
self.scn_ad.droid.bleSetScanSettingsMatchMode(
ScanSettingsMatchMode.AGGRESIVE.value)
self.scn_ad.droid.bleSetScanSettingsNumOfMatches(
ScanSettingsMatchNum.MATCH_NUM_ONE_ADVERTISEMENT.value)
scan_settings = self.scn_ad.droid.bleBuildScanSetting()
scan_callback = self.scn_ad.droid.bleGenScanCallback()
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
scan_callback)
self.active_scan_callback_list.append(scan_callback)
on_found_count = 0
on_lost_count = 0
from contextlib import suppress
for x in range(1000):
adv_callback = (
self._start_generic_advertisement_include_device_name())
with suppress(Exception):
event = self.scn_ad.ed.pop_event(
self.scan_result.format(scan_callback),
self.default_timeout * 3)
if event['data']['CallbackType'] == 2:
on_found_count += 1
elif event['data']['CallbackType'] == 4:
on_lost_count += 1
self.adv_ad.droid.bleStopBleAdvertising(adv_callback)
with suppress(Exception):
event2 = self.scn_ad.ed.pop_event(
self.scan_result.format(scan_callback),
self.default_timeout * 4)
if event2['data']['CallbackType'] == 2:
on_found_count += 1
elif event2['data']['CallbackType'] == 4:
on_lost_count += 1
thread.join()
return True
@BluetoothBaseTest.bt_test_wrap
def test_more_stress_test(self):
gatt_server_callback, gatt_server = setup_multiple_services(
self.adv_ad)
bluetooth_gatt, gatt_callback, adv_callback = (
orchestrate_gatt_connection(self.scn_ad, self.adv_ad))
self.active_scan_callback_list.append(adv_callback)
if self.scn_ad.droid.gattClientDiscoverServices(bluetooth_gatt):
event = self.scn_ad.ed.pop_event(
"GattConnect{}onServicesDiscovered".format(bluetooth_gatt),
self.default_timeout)
discovered_services_index = event['data']['ServicesIndex']
else:
self.log.info("Failed to discover services.")
return False
services_count = self.scn_ad.droid.gattClientGetDiscoveredServicesCount(
discovered_services_index)
thread = threading.Thread(
target=run_continuous_write_descriptor,
args=(self.scn_ad.droid, self.scn_ad.ed, self.adv_ad.droid,
self.adv_ad.ed, gatt_server, gatt_server_callback,
bluetooth_gatt, services_count, discovered_services_index))
thread.start()
thread2 = threading.Thread(target=self._poll_energy)
thread2.start()
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterDeviceName(
self.adv_ad.droid.bluetoothGetLocalName())
self.scn_ad.droid.bleSetScanSettingsScanMode(
ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)
self.scn_ad.droid.bleSetScanSettingsCallbackType(
ScanSettingsCallbackType.CALLBACK_TYPE_FOUND_AND_LOST.value)
self.scn_ad.droid.bleSetScanSettingsMatchMode(
ScanSettingsMatchMode.AGGRESIVE.value)
self.scn_ad.droid.bleSetScanSettingsNumOfMatches(
ScanSettingsMatchNum.MATCH_NUM_ONE_ADVERTISEMENT.value)
scan_settings = self.scn_ad.droid.bleBuildScanSetting()
scan_callback = self.scn_ad.droid.bleGenScanCallback()
self.scn_ad.droid.bleBuildScanFilter(filter_list)
self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
scan_callback)
self.active_scan_callback_list.append(scan_callback)
on_found_count = 0
on_lost_count = 0
time.sleep(60)
from contextlib import suppress
for x in range(1000):
adv_callback = self._start_generic_advertisement_include_device_name(
)
with suppress(Exception):
event = self.scn_ad.ed.pop_event(
self.scan_result.format(scan_callback),
self.default_timeout * 3)
if event['data']['CallbackType'] == 2:
on_found_count += 1
elif event['data']['CallbackType'] == 4:
on_lost_count += 1
self.adv_ad.droid.bleStopBleAdvertising(adv_callback)
with suppress(Exception):
event2 = self.scn_ad.ed.pop_event(
self.scan_result.format(scan_callback),
self.default_timeout * 4)
if event2['data']['CallbackType'] == 2:
on_found_count += 1
elif event2['data']['CallbackType'] == 4:
on_lost_count += 1
thread.join()
thread2.join()
return True

the-stack_106_22884
import sys
import argparse
import logging
import getpass
import os
from . import samTobed
from . import pyWriter
from . import henipipe
POLL_TIME = 5
LOG_PREFIX = '[HENIPIPE]: '
# Set up a basic logger
LOGGER = logging.getLogger('something')
myFormatter = logging.Formatter('%(asctime)s: %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(myFormatter)
LOGGER.addHandler(handler)
LOGGER.setLevel(logging.DEBUG)
myFormatter._fmt = "[HENIPIPE]: " + myFormatter._fmt
def run_henipipe(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser('A wrapper for running henipipe')
    parser.add_argument('job', type=str, choices=['SC', 'MAKERUNSHEET', 'ALIGN', 'SCALE', 'MERGE', 'SEACR', 'MACS2', 'AUC', 'GENOMESFILE', 'FASTQC', 'TRIM', 'BIGWIG'], help='a required string denoting segment of pipeline to run. 1) "MAKERUNSHEET" - to parse a folder of fastqs; 2) "ALIGN" - to perform alignment using bowtie and output bed files; 3) "SCALE" - to normalize data to reference (spike in); 4) "MERGE" - to merge bedgraphs; 5) "SEACR" - to perform SEACR; 6) "MACS2" - to perform MACS2; 7) "AUC" - to calculate AUC between normalized bedgraph using a peak file; 8) "GENOMESFILE" - print location of genomes.json file; 9) "FASTQC" - run fastqc on cluster; 10) "TRIM" - run trimmomatic on cluster; 11) "BIGWIG" - make Bigwigs from bedgraphs;')
parser.add_argument('--sample_flag', '-sf', type=str, default="", help='FOR MAKERUNSHEET only string to identify samples of interest in a fastq folder')
parser.add_argument('--fastq_folder', '-fq', type=str, help='For SC and MAKERUNSHEET only: Pathname of fastq folder (files must be organized in folders named by sample)')
    parser.add_argument('--trim_folder', '-tf', type=str, default = ".", help='REQUIRED, For TRIM only: Pathname of output folder; Note that all trimmed fastqs will be placed in the same folder')
parser.add_argument('--trim_args', '-ta', type=str, default = "ILLUMINACLIP:TruSeq3-PE.fa:2:30:10:2:keepBothReads LEADING:3 TRAILING:3 MINLEN:36", help='OPTIONAL, For TRIM only: Args to pass to trimmomatic')
parser.add_argument('--organized_by', '-by', type=str, choices=['folder', 'file'], default='folder', help='Option to specify how fastq or unbam folder is organized')
parser.add_argument('--genome_key', '-gk', type=str, default="default", help='For MAKERUNSHEET only: abbreviation to use "installed" genomes in the runsheet (See README.md for more details')
parser.add_argument('--split_char', '-sc', type=str, default="_R1_", help='Character by which to split the fastqfile name into samples, OPTIONAL and for MAKERUNSHEET only')
parser.add_argument('--R1_char', '-r1c', type=str, default="_R1_", help='Character by which to split the fastqfile name into read1, OPTIONAL and for MAKERUNSHEET only; default = "_R1_"')
parser.add_argument('--R2_char', '-r2c', type=str, default="_R2_", help='Character by which to split the fastqfile name into read2, OPTIONAL and for MAKERUNSHEET only; default = "_R2_"')
parser.add_argument('--ext', '-e', type=str, default=".fastq.gz", help='suffix of fastq files, OPTIONAL and for MAKERUNSHEET only')
parser.add_argument('--filter_high', '-fh', type=int, default=None, help='For ALIGN only: upper limit of fragment size to exclude, defaults is no upper limit. OPTIONAL')
parser.add_argument('--filter_low', '-fl', type=int, default=None, help='For ALIGN only: lower limit of fragment size to exclude, defaults is no lower limit. OPTIONAL')
parser.add_argument('--output', '-o', type=str, default=".", help='Pathname to output folder (note this folder must exist already!!), Defaults to current directory')
parser.add_argument('--runsheet', '-r', type=str, default="runsheet.csv", help='tab-delim file with sample fields as defined in the script. - REQUIRED for all jobs except MAKERUNSHEET')
parser.add_argument('--log_prefix', '-l', type=str, default='henipipe.log', help='Prefix specifying log files for henipipe output from henipipe calls. OPTIONAL')
parser.add_argument('--select', '-s', type=str, default=None, help='To only run the selected row in the runsheet, OPTIONAL')
parser.add_argument('--debug', '-d', action='store_true', help='To print commands (For testing flow). OPTIONAL')
parser.add_argument('--bowtie_flags', '-b', type=str, default='--end-to-end --very-sensitive --no-mixed --no-discordant -q --phred33 -I 10 -X 700', help='For ALIGN: bowtie flags, OPTIONAL')
parser.add_argument('--cluster', '-c', type=str, default='SLURM', choices=['PBS', 'SLURM', 'local'], help='Cluster software. OPTIONAL Currently supported: PBS, SLURM and local')
parser.add_argument('--threads', '-t', type=str, default="8", help='number of threads; default: 8')
parser.add_argument('--gb_ram', '-gb', type=str, default=None, help='gigabytes of RAM per thread')
parser.add_argument('--install', '-i', type=str, default=None, help='FOR GENOMESFILE: location of file to install as a new genomes.json file, existing genomes.json will be erased')
parser.add_argument('--norm_method', '-n', type=str, default='coverage', choices=['coverage', 'read_count', 'spike_in', 'none'], help='For ALIGN and SCALE: Normalization method, by "read_count", "coverage", or "spike_in". If method is "spike_in", HeniPipe will align to the spike_in reference genome provided in runsheet. OPTIONAL')
parser.add_argument('--user', '-u', type=str, default=None, help='user for submitting jobs - defaults to username. OPTIONAL')
parser.add_argument('--SEACR_norm', '-Sn', type=str, default='non', choices=['non', 'norm'], help='For SEACR: Normalization method; default is "non"-normalized, select "norm" to normalize using SEACR. OPTIONAL')
parser.add_argument('--SEACR_fdr', '-Sf', type=str, default='0.05', help='For SEACR: Used to set FDR threshold when control is not used. OPTIONAL')
parser.add_argument('--SEACR_stringency', '-Ss', type=str, default='stringent', choices=['stringent', 'relaxed'], help='FOR SEACR: Default will run as "stringent", other option is "relaxed". OPTIONAL')
parser.add_argument('--keep_files', '-k', action ='store_true', default=False, help='FOR ALIGN: use this flag to turn off piping (Will generate all files).')
parser.add_argument('--verbose', '-v', default=False, action='store_true', help='Run with some additional ouput - not much though... OPTIONAL')
"""
call = 'henipipe MAKERUNSHEET -fq ../fastq -sf mini -gk heni_hg38 -o .'
call = 'henipipe MACS2 -r ./runsheet.csv -d -mk -s 1:10'
call = 'henipipe GENOMESFILE'
call = 'henipipe MAKERUNSHEET -fq ../fastq'
call = 'henipipe MAKERUNSHEET -fq ../fastq'
call = 'henipipe ALIGN -r runsheet.csv -d'
args = parser.parse_args(call.split(" ")[1:])
"""
args = parser.parse_args()
#deal with user
if args.user is None:
args.user = getpass.getuser()
if args.job=="SC":
if os.path.isabs(args.fastq_folder) is False:
if args.fastq_folder == ".":
args.fastq_folder = os.getcwd()
else :
args.fastq_folder = os.path.abspath(args.fastq_folder)
if os.path.exists(args.fastq_folder) is False:
raise ValueError('Path: '+args.fastq_folder+' not found')
if os.path.isabs(args.output) is False:
if args.output == ".":
args.output = os.getcwd()
else :
args.output = os.path.abspath(args.output)
if os.path.exists(args.output) is False:
raise ValueError('Path: '+args.output+' not found')
LOGGER.info("Running Single Cell processing...")
if args.select is not None:
select = [i for i in list(henipipe.parse_range_list(args.select))]
else:
select = None
SCjob = henipipe.SCAlign(folder=args.fastq_folder, output=args.output,
threads = args.threads, ram = args.gb_ram, debug=args.debug, sample_flag = args.sample_flag,
genome_key = args.genome_key, no_pipe=args.keep_files, cluster=args.cluster,
bowtie_flags=args.bowtie_flags, log=args.log_prefix, user=args.user,
ext=args.ext, r1_char=args.R1_char, strsplit= args.split_char,
r2_char=args.R2_char, fname=args.runsheet, organized_by=args.organized_by,
filter = [args.filter_low, args.filter_high], select = select)
dependency = SCjob.run_dependency_job()
SCjob = henipipe.SCMerge(runsheet_data = SCjob.runsheet_data, output=args.output, debug=args.debug,
dependency = dependency, cluster=SCjob.environs.cluster, log=SCjob.environs.log, user=SCjob.environs.user,
genome_key = args.genome_key, norm=args.SEACR_norm, stringency=args.SEACR_stringency, fdr_thresh = args.SEACR_fdr)
SCjob.run_dependency_job()
exit()
#log
if args.job=="GENOMESFILE":
_ROOT = os.path.abspath(os.path.dirname(__file__))
if args.install is None:
GENOMES_JSON = os.path.join(_ROOT, 'data', 'genomes.json')
print(GENOMES_JSON)
if args.install is not None:
from shutil import copyfile
args.install = os.path.abspath(args.install)
copyfile(args.install, os.path.join(_ROOT, 'data', 'genomes.json'))
exit()
#log
#deal with paths
if args.job=="MAKERUNSHEET":
if os.path.isabs(args.fastq_folder) is False:
if args.fastq_folder == ".":
args.fastq_folder = os.getcwd()
else :
args.fastq_folder = os.path.abspath(args.fastq_folder)
if os.path.exists(args.fastq_folder) is False:
raise ValueError('Path: '+args.fastq_folder+' not found')
if os.path.isabs(args.output) is False:
if args.output == ".":
args.output = os.getcwd()
else :
args.output = os.path.abspath(args.output)
if os.path.exists(args.output) is False:
raise ValueError('Path: '+args.output+' not found')
if args.job != "MAKERUNSHEET":
if os.path.exists(args.runsheet) is False:
raise ValueError('Path: '+args.runsheet+' not found')
args.output = os.path.abspath(args.output)
if args.job=="MAKERUNSHEET":
LOGGER.info("Parsing fastq folder - "+args.fastq_folder+" ...")
LOGGER.info("Writing runsheet to - "+os.path.join(args.output, 'runsheet.csv')+" ...")
LOGGER.info("Using genome_key - "+args.genome_key+" ...")
#henipipe.make_runsheet(folder=args.fastq_folder, output=args.output, sample_flag = args.sample_flag, genome_key = args.genome_key, no_pipe=args.keep_files)
henipipe.make_runsheet(folder=args.fastq_folder, output=args.output, sample_flag = args.sample_flag, genome_key = args.genome_key, \
no_pipe=args.keep_files, ext=args.ext, r1_char=args.R1_char, strsplit= args.split_char, \
r2_char=args.R2_char, fname=args.runsheet, organized_by=args.organized_by)
exit()
    #parse and check runsheet
args.runsheet = os.path.abspath(args.runsheet)
"""
parsed_runsheet = list(parse_runsheet(args.runsheet))
check_runsheet(args, parsed_runsheet, verbose=args.verbose)
"""
parsed_runsheet = list(henipipe.parse_runsheet(args.runsheet))
henipipe.check_runsheet(args, parsed_runsheet, verbose=args.verbose)
#deal with sample selection
if args.select is not None:
parsed_runsheet = [parsed_runsheet[i-1] for i in list(henipipe.parse_range_list(args.select))]
if args.debug == False:
LOGGER.info("Logging to %s... examine this file if samples fail." % args.log_prefix)
if args.job=="FASTQC":
LOGGER.info("Running fastqc on all fastqs in runsheet")
Fastqcjob = henipipe.Fastqc(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, cluster=args.cluster, log=args.log_prefix, user=args.user)
Fastqcjob.run_job()
exit()
if args.job=="TRIM":
if os.path.isabs(args.trim_folder) is False:
if args.trim_folder == ".":
args.trim_folder = os.getcwd()
else :
args.trim_folder = os.path.abspath(args.trim_folder)
if os.path.exists(args.trim_folder) is False:
raise ValueError('Path: '+args.trim_folder+' not found')
        LOGGER.info("Running trimmomatic on all fastqs in runsheet")
Trimjob = henipipe.Trim(runsheet_data = parsed_runsheet, trim_args = args.trim_args, trim_folder = args.trim_folder, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, cluster=args.cluster, log=args.log_prefix, user=args.user)
Trimjob.run_job()
exit()
if args.job=="ALIGN":
#deal with filtering
LOGGER.info("Aligning reads...")
#Alignjob = Align(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, no_pipe=args.keep_files, cluster=args.cluster, bowtie_flags=args.bowtie_flags, log=args.log_prefix, user=args.user, norm_method=args.norm_method, filter = [args.filter_low, args.filter_high])
Alignjob = henipipe.Align(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, no_pipe=args.keep_files, cluster=args.cluster, bowtie_flags=args.bowtie_flags, log=args.log_prefix, user=args.user, norm_method=args.norm_method, filter = [args.filter_low, args.filter_high])
LOGGER.info("Submitting alignment jobs... Debug mode is %s" % args.debug)
Alignjob.run_job()
exit()
if args.job=="SCALE":
LOGGER.info("Calculating %s", args.norm_method)
Scalejob = henipipe.Scale(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, cluster=args.cluster, log=args.log_prefix, norm_method=args.norm_method, user=args.user)
LOGGER.info("Submitting bedgraph jobs... Debug mode is %s" % args.debug)
Scalejob.run_job()
exit()
if args.job=="BIGWIG":
LOGGER.info("Making Bigwigs:")
Bigwigjob = henipipe.Bigwig(runsheet_data = parsed_runsheet, debug=args.debug, cluster=args.cluster, log=args.log_prefix, norm_method=args.norm_method, user=args.user)
LOGGER.info("Submitting bigwig jobs... Debug mode is %s" % args.debug)
Bigwigjob.run_job()
exit()
if args.job=="MERGE":
Mergejob = henipipe.Merge(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, cluster=args.cluster, log=args.log_prefix, norm_method=args.norm_method, user=args.user, out=args.output)
#Mergejob = Merge(runsheet_data = parsed_runsheet, debug=args.debug, cluster=args.cluster, log=args.log_prefix, norm_method=args.norm_method, user=args.user)
LOGGER.info("Submitting merge-bedgraph jobs... Debug mode is %s" % args.debug)
Mergejob.run_job()
exit()
if args.job=="SEACR":
LOGGER.info("Running SEACR using settings: SEACR_norm = %s, SEACR_stringency = %s" % (args.SEACR_norm, args.SEACR_stringency))
SEACRjob = henipipe.SEACR(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, cluster=args.cluster, norm=args.SEACR_norm, stringency=args.SEACR_stringency, user=args.user, log=args.log_prefix)
SEACRjob.run_job()
exit()
if args.job=="MACS2":
LOGGER.info("Running MACS2")
MACS2job = henipipe.MACS2(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, cluster=args.cluster, user=args.user, log=args.log_prefix, out=args.output)
MACS2job.run_job()
exit()
if args.job=="AUC":
LOGGER.info("Running AUC")
AUCjob = henipipe.AUC(runsheet_data = parsed_runsheet, threads = args.threads, gb_ram = args.gb_ram, debug=args.debug, no_pipe=args.keep_files, cluster=args.cluster, user=args.user, log=args.log_prefix, out=args.output, norm=args.SEACR_norm, stringency=args.SEACR_stringency)
AUCjob.run_job()
exit()
if __name__ == "__main__":
run_henipipe()
"""
[parsed_runsheet[i-1] for i in list(parse_range_list("1:4,11,12"))]
"""

the-stack_106_22885
# encoding: utf-8
"""
Created by Kaijun on 2020/9/13
"""
import struct
import serial
import Queue
import threading
import time
# CE FA 03 18 76 0E 5C 03 F0 FF 88 3C E0 FF A2 00 84 00 00 A2 FF E0 00 84 F4 01 20 03 CE FA 03 18 76 0E 34 03 4C FF 70 3C E5 FF 9C 00 88 00 00 9C FF E5 00 88 F4 01 20 03
def do_parse(ext_type,ext_len,ext_data):
    # Re-assemble ext_data into a complete data frame (prepend length, type and the two header bytes)
ext_data.insert(0, ext_len);
ext_data.insert(0, ext_type);
ext_data.insert(0, 0xfa);
ext_data.insert(0,0xce);
# CE FA 03 18 76 0E 5C 03 F0 FF 88 3C E0 FF A2 00 84 00 00 A2 FF E0 00 84 F4 01 20 03
    # Parse according to the frame type: 0x01 linear velocity, 0x02 battery
if ext_type == 0x03:
        # Unpack the payload fields
        # Temperature
temperature = struct.unpack('h',bytearray(ext_data[4:6]))[0]
        # Accelerometer
ax = struct.unpack('h',bytearray(ext_data[6:8]))[0]
ay = struct.unpack('h',bytearray(ext_data[8:10]))[0]
az = struct.unpack('h',bytearray(ext_data[10:12]))[0]
        # Gyroscope (angular rate)
gx = struct.unpack('h', bytearray(ext_data[12:14]))[0]
gy = struct.unpack('h', bytearray(ext_data[14:16]))[0]
gz = struct.unpack('h', bytearray(ext_data[16:18]))[0]
        # Magnetometer
mx = struct.unpack('h', bytearray(ext_data[18:20]))[0]
my = struct.unpack('h', bytearray(ext_data[20:22]))[0]
mz = struct.unpack('h', bytearray(ext_data[22:24]))[0]
        # Linear and angular velocity
velocity = struct.unpack('h', bytearray(ext_data[24:26]))[0]
angular = struct.unpack('h', bytearray(ext_data[26:28]))[0]
print(velocity,angular)
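# Illustrative sketch (not part of the original script): every field above is a 16-bit
# signed integer, so a hypothetical helper like decode_int16 below mirrors what do_parse
# does for each two-byte slice. For example, the payload bytes F4 01 from the sample frame
# in the comment above decode to 500 on a little-endian host, i.e. 0.5 m/s after the
# division by 1000 used elsewhere in this script.
def decode_int16(two_bytes):
    # struct 'h' unpacks a native-endian signed 16-bit integer from the raw bytes
    return struct.unpack('h', bytearray(two_bytes))[0]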
def do_recv():
"""
找到一帧完整数据 从recv_queue中查找
1. 查找帧头0
2. 查找帧头1
3. 查找帧类型
4. 查找帧的长度
5. 根据帧长度,读取帧数据
完整的帧数据
"""
while True:
buff = recv_queue.get();
value = bytearray(buff)[0]
        if value == 0xce:  # found frame header byte 0
            # Header byte 0 found, now check header byte 1
value = bytearray(recv_queue.get())[0]
            if value == 0xfa:  # head0 and head1 both matched, so the frame header is complete
                # Read the frame type
ext_type = bytearray(recv_queue.get())[0]
                # Read the frame payload length
ext_len = bytearray(recv_queue.get())[0]
                # Read the payload
ext_data = []
while len(ext_data) < ext_len:
                    # Keep reading bytes until the payload is complete
value = bytearray(recv_queue.get())[0]
ext_data.append(value);
                # Parse the frame
do_parse(ext_type,ext_len,ext_data);
# Test thread: send a command to the lower-level controller every 1 s
def testSend():
while True:
        # header byte 0, header byte 1, frame type
cmd = [0xce,0xfa,0x05]
        # Append the frame length
cmd.append(0x04);
        # Linear and angular velocity to send down
velocity = 0.05;
angular = 0;
        # Split the values into bytes
velocity_params = bytearray(struct.pack('h',int(velocity*1000)))
angular_params = bytearray(struct.pack('h',int(angular*1000)))
cmd.append(velocity_params[0])
cmd.append(velocity_params[1])
cmd.append(angular_params[0]);
cmd.append(angular_params[1]);
cmd.append(0xad);
        # e.g. [206, 250, 5, 4, 50, 0, 0, 0, 173] for velocity=0.05, angular=0
        # Write the command to the serial port
ser.write(cmd)
        print("Command sent successfully")
time.sleep(1);
if __name__ == '__main__':
    # Create the serial port object
ser = serial.Serial(port="COM25",baudrate=115200)
    # Make sure the serial port is open
if not ser.isOpen():
ser.open();
    # Create a queue dedicated to storing the bytes read from the port
recv_queue = Queue.Queue();
threading.Thread(target=testSend).start();
    # Start a thread dedicated to pulling bytes off the queue and locating frames
threading.Thread(target=do_recv).start();
    # Read data from the serial port
while True:
buff = ser.read();
        # Store each byte as soon as it is read
recv_queue.put(buff);
    # Close the connection
ser.close();

the-stack_106_22889
# Copyright 2016-2020 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import faculty_cli.config
def test_casebook_url(mock_profile):
assert (
faculty_cli.config.casebook_url()
== "https://casebook.services.subdomain.my.faculty.ai"
)
def test_hudson_url(mock_profile):
assert (
faculty_cli.config.hudson_url()
== "https://hudson.services.subdomain.my.faculty.ai"
)
def test_baskerville_url(mock_profile):
assert (
faculty_cli.config.baskerville_url()
== "https://baskerville.services.subdomain.my.faculty.ai"
)

the-stack_106_22890
import os
import math
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
LOG_MPC = os.environ.get('LOG_MPC', False)
LANE_CHANGE_SPEED_MIN = 37 * CV.MPH_TO_MS
LANE_CHANGE_TIME_MAX = 10.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,
},
}
class LateralPlanner():
def __init__(self, CP, use_lanelines=True, wide_camera=False):
self.use_lanelines = use_lanelines
self.LP = LanePlanner(wide_camera)
self.last_cloudlog_t = 0
self.steer_rate_cost = CP.steerRateCost
self.setup_mpc()
self.solution_invalid_cnt = 0
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
self.lane_change_timer = 0.0
self.lane_change_ll_prob = 1.0
self.prev_one_blinker = False
self.desire = log.LateralPlan.Desire.none
self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))
self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
self.t_idxs = np.arange(TRAJECTORY_SIZE)
self.y_pts = np.zeros(TRAJECTORY_SIZE)
def setup_mpc(self):
self.libmpc = libmpc_py.libmpc
self.libmpc.init()
self.mpc_solution = libmpc_py.ffi.new("log_t *")
self.cur_state = libmpc_py.ffi.new("state_t *")
self.cur_state[0].x = 0.0
self.cur_state[0].y = 0.0
self.cur_state[0].psi = 0.0
self.cur_state[0].curvature = 0.0
self.desired_curvature = 0.0
self.safe_desired_curvature = 0.0
self.desired_curvature_rate = 0.0
self.safe_desired_curvature_rate = 0.0
def update(self, sm, CP):
v_ego = sm['carState'].vEgo
active = sm['controlsState'].active
measured_curvature = sm['controlsState'].curvature
md = sm['modelV2']
self.LP.parse_model(sm['modelV2'])
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
self.t_idxs = np.array(md.position.t)
self.plan_yaw = list(md.orientation.z)
if len(md.orientation.xStd) == TRAJECTORY_SIZE:
self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])
# Lane change logic
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
# LaneChangeState.off
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
self.lane_change_ll_prob = 1.0
# LaneChangeState.preLaneChange
elif self.lane_change_state == LaneChangeState.preLaneChange:
# Set lane change direction
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
else: # If there are no blinkers we will go back to LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))
blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied and not blindspot_detected:
self.lane_change_state = LaneChangeState.laneChangeStarting
# LaneChangeState.laneChangeStarting
elif self.lane_change_state == LaneChangeState.laneChangeStarting:
# fade out over .5s
self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
# 98% certainty
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
self.lane_change_state = LaneChangeState.laneChangeFinishing
# LaneChangeState.laneChangeFinishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
# fade in laneline over 1s
self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
if one_blinker and self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.preLaneChange
elif self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]
# Turn off lanes during lane change
if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
self.LP.lll_prob *= self.lane_change_ll_prob
self.LP.rll_prob *= self.lane_change_ll_prob
if self.use_lanelines:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
else:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
# Heading cost is useful at low speed, otherwise end of plan can be off-heading
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
self.y_pts = y_pts
assert len(y_pts) == LAT_MPC_N + 1
assert len(heading_pts) == LAT_MPC_N + 1
# for now CAR_ROTATION_RADIUS is disabled
# to use it, enable it in the MPC
assert abs(CAR_ROTATION_RADIUS) < 1e-3
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
float(v_ego),
CAR_ROTATION_RADIUS,
list(y_pts),
list(heading_pts))
# init state for next
self.cur_state.x = 0.0
self.cur_state.y = 0.0
self.cur_state.psi = 0.0
self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.mpc_solution.curvature)
    # Check for infeasible MPC solution
mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init()
self.cur_state.curvature = measured_curvature
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])
plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
plan_send.lateralPlan.psis = [float(x) for x in self.mpc_solution.psi[0:CONTROL_N]]
plan_send.lateralPlan.curvatures = [float(x) for x in self.mpc_solution.curvature[0:CONTROL_N]]
plan_send.lateralPlan.curvatureRates = [float(x) for x in self.mpc_solution.curvature_rate[0:CONTROL_N-1]] +[0.0]
plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
plan_send.lateralPlan.dProb = float(self.LP.d_prob)
plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.lateralPlan.desire = self.desire
plan_send.lateralPlan.laneChangeState = self.lane_change_state
plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction
pm.send('lateralPlan', plan_send)
if LOG_MPC:
dat = messaging.new_message('liveMpc')
dat.liveMpc.x = list(self.mpc_solution.x)
dat.liveMpc.y = list(self.mpc_solution.y)
dat.liveMpc.psi = list(self.mpc_solution.psi)
dat.liveMpc.curvature = list(self.mpc_solution.curvature)
dat.liveMpc.cost = self.mpc_solution.cost
pm.send('liveMpc', dat)

the-stack_106_22891
# coding:utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import json
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.module.module import runnable
from paddlehub.compat.module.nlp_module import DataFormatError
from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, serving
import argparse
import os
import numpy as np
import paddle.fluid.dygraph as D
from .model.tokenizing_ernie import ErnieTokenizer
from .model.decode import beam_search_infilling
from .model.modeling_ernie_gen import ErnieModelForGeneration
@moduleinfo(
name="ernie_gen_leave",
version="1.0.0",
summary="",
author="彭兆帅,郑博培",
author_email="[email protected],[email protected]",
type="nlp/text_generation",
)
class ErnieGen(hub.NLPPredictionModule):
def _initialize(self):
"""
initialize with the necessary elements
"""
assets_path = os.path.join(self.directory, "assets")
gen_checkpoint_path = os.path.join(assets_path, "ernie_gen")
ernie_cfg_path = os.path.join(assets_path, 'ernie_config.json')
with open(ernie_cfg_path, encoding='utf8') as ernie_cfg_file:
ernie_cfg = dict(json.loads(ernie_cfg_file.read()))
ernie_vocab_path = os.path.join(assets_path, 'vocab.txt')
with open(ernie_vocab_path, encoding='utf8') as ernie_vocab_file:
ernie_vocab = {j.strip().split('\t')[0]: i for i, j in enumerate(ernie_vocab_file.readlines())}
with fluid.dygraph.guard(fluid.CPUPlace()):
with fluid.unique_name.guard():
self.model = ErnieModelForGeneration(ernie_cfg)
finetuned_states, _ = D.load_dygraph(gen_checkpoint_path)
self.model.set_dict(finetuned_states)
self.tokenizer = ErnieTokenizer(ernie_vocab)
self.rev_dict = {v: k for k, v in self.tokenizer.vocab.items()}
self.rev_dict[self.tokenizer.pad_id] = '' # replace [PAD]
        self.rev_dict[self.tokenizer.unk_id] = ''  # replace [UNK]
self.rev_lookup = np.vectorize(lambda i: self.rev_dict[i])
@serving
def generate(self, texts, use_gpu=False, beam_width=5):
"""
Get the predict result from the input texts.
Args:
texts(list): the input texts.
use_gpu(bool): whether use gpu to predict or not
beam_width(int): the beam search width.
Returns:
results(list): the predict result.
"""
if texts and isinstance(texts, list) and all(texts) and all([isinstance(text, str) for text in texts]):
predicted_data = texts
else:
raise ValueError("The input texts should be a list with nonempty string elements.")
if use_gpu and "CUDA_VISIBLE_DEVICES" not in os.environ:
use_gpu = False
logger.warning(
"use_gpu has been set False as you didn't set the environment variable CUDA_VISIBLE_DEVICES while using use_gpu=True"
)
if use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
self.model.eval()
results = []
for text in predicted_data:
sample_results = []
ids, sids = self.tokenizer.encode(text)
src_ids = D.to_variable(np.expand_dims(ids, 0))
src_sids = D.to_variable(np.expand_dims(sids, 0))
output_ids = beam_search_infilling(
self.model,
src_ids,
src_sids,
eos_id=self.tokenizer.sep_id,
sos_id=self.tokenizer.cls_id,
attn_id=self.tokenizer.vocab['[MASK]'],
max_decode_len=50,
max_encode_len=50,
beam_width=beam_width,
tgt_type_id=1)
output_str = self.rev_lookup(output_ids[0].numpy())
for ostr in output_str.tolist():
if '[SEP]' in ostr:
ostr = ostr[:ostr.index('[SEP]')]
sample_results.append("".join(ostr))
results.append(sample_results)
return results
def add_module_config_arg(self):
"""
Add the command config options
"""
self.arg_config_group.add_argument(
'--use_gpu', type=ast.literal_eval, default=False, help="whether use GPU for prediction")
self.arg_config_group.add_argument('--beam_width', type=int, default=5, help="the beam search width")
@runnable
def run_cmd(self, argvs):
"""
Run as a command
"""
self.parser = argparse.ArgumentParser(
description='Run the %s module.' % self.name,
prog='hub run %s' % self.name,
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, optional.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
try:
input_data = self.check_input_data(args)
        except (DataFormatError, RuntimeError):
self.parser.print_help()
return None
results = self.generate(texts=input_data, use_gpu=args.use_gpu, beam_width=args.beam_width)
return results
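
# Usage sketch (an assumption, not part of the original module): once this module is
# installed into PaddleHub under the name "ernie_gen_leave", it could be loaded and
# queried roughly as below; the input text is only a placeholder for a Chinese prompt.
if __name__ == "__main__":
    module = hub.Module(name="ernie_gen_leave")
    results = module.generate(texts=["请假条"], use_gpu=False, beam_width=5)
    print(results)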

the-stack_106_22892
# Copyright (c) 2008-2009 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# package is named greentest, not test, so it won't be confused with test in stdlib
# pylint:disable=broad-except,unused-argument,no-member,too-many-branches,unused-variable
# pylint:disable=attribute-defined-outside-init,abstract-method
import sys
import types
import unittest
from unittest import TestCase as BaseTestCase
import time
import os
from os.path import basename, splitext
import gevent
import gevent.core
from patched_tests_setup import get_switch_expected
from gevent.hub import _get_hub
from functools import wraps
import contextlib
import gc
import _six as six
PYPY = hasattr(sys, 'pypy_version_info')
VERBOSE = sys.argv.count('-v') > 1
if '--debug-greentest' in sys.argv:
sys.argv.remove('--debug-greentest')
DEBUG = True
else:
DEBUG = False
RUN_LEAKCHECKS = os.getenv('GEVENTTEST_LEAKCHECK')
OPTIONAL_MODULES = ['resolver_ares']
# Generally, ignore the portions that are only implemented
# on particular platforms; they generally contain partial
# implementations completed in different modules.
PLATFORM_SPECIFIC_SUFFIXES = ['2', '279', '3']
if sys.platform.startswith('win'):
PLATFORM_SPECIFIC_SUFFIXES.append('posix')
PY2 = None
PY3 = None
PY34 = None
PY36 = None
NON_APPLICABLE_SUFFIXES = []
if sys.version_info[0] == 3:
# Python 3
NON_APPLICABLE_SUFFIXES.extend(('2', '279'))
PY2 = False
PY3 = True
if sys.version_info[1] >= 4:
PY34 = True
if sys.version_info[1] >= 6:
PY36 = True
elif sys.version_info[0] == 2:
# Any python 2
PY3 = False
PY2 = True
NON_APPLICABLE_SUFFIXES.append('3')
if (sys.version_info[1] < 7
or (sys.version_info[1] == 7 and sys.version_info[2] < 9)):
# Python 2, < 2.7.9
NON_APPLICABLE_SUFFIXES.append('279')
PYPY3 = PYPY and PY3
if sys.platform.startswith('win'):
NON_APPLICABLE_SUFFIXES.append("posix")
# This is intimately tied to FileObjectPosix
NON_APPLICABLE_SUFFIXES.append("fileobject2")
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS')
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
def _do_not_skip(reason):
def dec(f):
return f
return dec
if RUNNING_ON_APPVEYOR:
# See comments scattered around about timeouts and the timer
# resolution available on appveyor (lots of jitter). this
    # seems worse with the 64-bit builds.
# Note that we skip/adjust these tests only on AppVeyor, not
# win32---we don't think there's gevent related problems but
# environment related problems. These can be tested and debugged
# separately on windows in a more stable environment.
skipOnAppVeyor = unittest.skip
# We can't exec corecext on appveyor if we haven't run setup.py in
# 'develop' mode (i.e., we install)
NON_APPLICABLE_SUFFIXES.append('corecext')
else:
skipOnAppVeyor = _do_not_skip
if PYPY3 and RUNNING_ON_CI:
# Same as above, for PyPy3.3-5.5-alpha
skipOnPyPy3OnCI = unittest.skip
else:
skipOnPyPy3OnCI = _do_not_skip
if PYPY:
skipOnPyPy = unittest.skip
else:
skipOnPyPy = _do_not_skip
EXPECT_POOR_TIMER_RESOLUTION = PYPY3 or RUNNING_ON_APPVEYOR
class ExpectedException(Exception):
"""An exception whose traceback should be ignored"""
def wrap_switch_count_check(method):
@wraps(method)
def wrap_switch_count_check(self, *args, **kwargs):
initial_switch_count = getattr(_get_hub(), 'switch_count', None)
self.switch_expected = getattr(self, 'switch_expected', True)
if initial_switch_count is not None:
fullname = getattr(self, 'fullname', None)
if self.switch_expected == 'default' and fullname:
self.switch_expected = get_switch_expected(fullname)
result = method(self, *args, **kwargs)
if initial_switch_count is not None and self.switch_expected is not None:
switch_count = _get_hub().switch_count - initial_switch_count
if self.switch_expected is True:
assert switch_count >= 0
if not switch_count:
raise AssertionError('%s did not switch' % fullname)
elif self.switch_expected is False:
if switch_count:
raise AssertionError('%s switched but not expected to' % fullname)
else:
raise AssertionError('Invalid value for switch_expected: %r' % (self.switch_expected, ))
return result
return wrap_switch_count_check
def wrap_timeout(timeout, method):
if timeout is None:
return method
@wraps(method)
def wrap_timeout(self, *args, **kwargs):
with gevent.Timeout(timeout, 'test timed out', ref=False):
return method(self, *args, **kwargs)
return wrap_timeout
def ignores_leakcheck(func):
func.ignore_leakcheck = True
return func
def wrap_refcount(method):
if not RUN_LEAKCHECKS:
return method
if getattr(method, 'ignore_leakcheck', False):
return method
# Some builtin things that we ignore
IGNORED_TYPES = (tuple, dict, types.FrameType, types.TracebackType)
def type_hist():
import collections
d = collections.defaultdict(int)
for x in gc.get_objects():
k = type(x)
if k in IGNORED_TYPES:
continue
if k == gevent.core.callback and x.callback is None and x.args is None:
# these represent callbacks that have been stopped, but
# the event loop hasn't cycled around to run them. The only
# known cause of this is killing greenlets before they get a chance
# to run for the first time.
continue
d[k] += 1
return d
def report_diff(a, b):
diff_lines = []
for k, v in sorted(a.items(), key=lambda i: i[0].__name__):
if b[k] != v:
diff_lines.append("%s: %s != %s" % (k, v, b[k]))
if not diff_lines:
return None
diff = '\n'.join(diff_lines)
return diff
@wraps(method)
def wrap_refcount(self, *args, **kwargs):
gc.collect()
gc.collect()
gc.collect()
deltas = []
d = None
gc.disable()
try:
while True:
# Grab current snapshot
hist_before = type_hist()
d = sum(hist_before.values())
self.setUp()
method(self, *args, **kwargs)
self.tearDown()
# Grab post snapshot
if 'urlparse' in sys.modules:
sys.modules['urlparse'].clear_cache()
if 'urllib.parse' in sys.modules:
sys.modules['urllib.parse'].clear_cache()
hist_after = type_hist()
d = sum(hist_after.values()) - d
deltas.append(d)
# Reset and check for cycles
gc.collect()
if gc.garbage:
raise AssertionError("Generated uncollectable garbage %r" % (gc.garbage,))
# the following configurations are classified as "no leak"
# [0, 0]
# [x, 0, 0]
# [... a, b, c, d] where a+b+c+d = 0
#
# the following configurations are classified as "leak"
# [... z, z, z] where z > 0
if deltas[-2:] == [0, 0] and len(deltas) in (2, 3):
break
elif deltas[-3:] == [0, 0, 0]:
break
elif len(deltas) >= 4 and sum(deltas[-4:]) == 0:
break
elif len(deltas) >= 3 and deltas[-1] > 0 and deltas[-1] == deltas[-2] and deltas[-2] == deltas[-3]:
diff = report_diff(hist_before, hist_after)
raise AssertionError('refcount increased by %r\n%s' % (deltas, diff))
# OK, we don't know for sure yet. Let's search for more
if sum(deltas[-3:]) <= 0 or sum(deltas[-4:]) <= 0 or deltas[-4:].count(0) >= 2:
# this is suspicious, so give a few more runs
limit = 11
else:
limit = 7
if len(deltas) >= limit:
raise AssertionError('refcount increased by %r\n%s' % (deltas, report_diff(hist_before, hist_after)))
finally:
gc.enable()
self.skipTearDown = True
return wrap_refcount
def wrap_error_fatal(method):
@wraps(method)
def wrap_error_fatal(self, *args, **kwargs):
# XXX should also be able to do gevent.SYSTEM_ERROR = object
# which is a global default to all hubs
SYSTEM_ERROR = gevent.get_hub().SYSTEM_ERROR
gevent.get_hub().SYSTEM_ERROR = object
try:
return method(self, *args, **kwargs)
finally:
gevent.get_hub().SYSTEM_ERROR = SYSTEM_ERROR
return wrap_error_fatal
def wrap_restore_handle_error(method):
@wraps(method)
def wrap_restore_handle_error(self, *args, **kwargs):
old = gevent.get_hub().handle_error
try:
return method(self, *args, **kwargs)
finally:
gevent.get_hub().handle_error = old
if self.peek_error()[0] is not None:
gevent.getcurrent().throw(*self.peek_error()[1:])
return wrap_restore_handle_error
def _get_class_attr(classDict, bases, attr, default=AttributeError):
NONE = object()
value = classDict.get(attr, NONE)
if value is not NONE:
return value
for base in bases:
value = getattr(bases[0], attr, NONE)
if value is not NONE:
return value
if default is AttributeError:
raise AttributeError('Attribute %r not found\n%s\n%s\n' % (attr, classDict, bases))
return default
class TestCaseMetaClass(type):
# wrap each test method with
# a) timeout check
# b) fatal error check
# c) restore the hub's error handler (see expect_one_error)
# d) totalrefcount check
def __new__(cls, classname, bases, classDict):
# pylint and pep8 fight over what this should be called (mcs or cls).
        # pylint gets it right, but we can't scope-disable pep8, so we go with
# its convention.
# pylint: disable=bad-mcs-classmethod-argument
timeout = classDict.get('__timeout__', 'NONE')
if timeout == 'NONE':
timeout = getattr(bases[0], '__timeout__', None)
if RUN_LEAKCHECKS and timeout is not None:
timeout *= 6
check_totalrefcount = _get_class_attr(classDict, bases, 'check_totalrefcount', True)
error_fatal = _get_class_attr(classDict, bases, 'error_fatal', True)
# Python 3: must copy, we mutate the classDict. Interestingly enough,
# it doesn't actually error out, but under 3.6 we wind up wrapping
# and re-wrapping the same items over and over and over.
for key, value in list(classDict.items()):
if key.startswith('test') and callable(value):
classDict.pop(key)
#value = wrap_switch_count_check(value)
value = wrap_timeout(timeout, value)
my_error_fatal = getattr(value, 'error_fatal', None)
if my_error_fatal is None:
my_error_fatal = error_fatal
if my_error_fatal:
value = wrap_error_fatal(value)
value = wrap_restore_handle_error(value)
if check_totalrefcount:
value = wrap_refcount(value)
classDict[key] = value
return type.__new__(cls, classname, bases, classDict)
class TestCase(TestCaseMetaClass("NewBase", (BaseTestCase,), {})):
# Travis is slow and overloaded; Appveyor used to be faster, but
# as of Dec 2015 it's almost always slower and/or has much worse timer
# resolution
__timeout__ = 1 if not RUNNING_ON_CI else 7
switch_expected = 'default'
error_fatal = True
close_on_teardown = ()
def run(self, *args, **kwargs):
if self.switch_expected == 'default':
self.switch_expected = get_switch_expected(self.fullname)
return BaseTestCase.run(self, *args, **kwargs)
def tearDown(self):
if getattr(self, 'skipTearDown', False):
return
if hasattr(self, 'cleanup'):
self.cleanup()
self._error = self._none
for x in self.close_on_teardown:
close = getattr(x, 'close', x)
try:
close()
except Exception:
pass
try:
del self.close_on_teardown
except AttributeError:
pass
def _close_on_teardown(self, resource):
if 'close_on_teardown' not in self.__dict__:
self.close_on_teardown = []
self.close_on_teardown.append(resource)
return resource
@property
def testname(self):
return getattr(self, '_testMethodName', '') or getattr(self, '_TestCase__testMethodName')
@property
def testcasename(self):
return self.__class__.__name__ + '.' + self.testname
@property
def modulename(self):
return os.path.basename(sys.modules[self.__class__.__module__].__file__).rsplit('.', 1)[0]
@property
def fullname(self):
return splitext(basename(self.modulename))[0] + '.' + self.testcasename
_none = (None, None, None)
_error = _none
def expect_one_error(self):
assert self._error == self._none, self._error
self._old_handle_error = gevent.get_hub().handle_error
gevent.get_hub().handle_error = self._store_error
def _store_error(self, where, type, value, tb):
del tb
if self._error != self._none:
gevent.get_hub().parent.throw(type, value)
else:
self._error = (where, type, value)
def peek_error(self):
return self._error
def get_error(self):
try:
return self._error
finally:
self._error = self._none
def assert_error(self, type=None, value=None, error=None, where_type=None):
if error is None:
error = self.get_error()
if type is not None:
assert issubclass(error[1], type), error
if value is not None:
if isinstance(value, str):
assert str(error[2]) == value, error
else:
assert error[2] is value, error
if where_type is not None:
self.assertIsInstance(error[0], where_type)
return error
if RUNNING_ON_APPVEYOR:
# appveyor timeouts are unreliable; seems to be very slow wakeups
def assertTimeoutAlmostEqual(self, *args, **kwargs):
return
def assertTimeWithinRange(self, delay, min_time, max_time):
return
else:
def assertTimeoutAlmostEqual(self, *args, **kwargs):
self.assertAlmostEqual(*args, **kwargs)
def assertTimeWithinRange(self, delay, min_time, max_time):
self.assertLessEqual(delay, max_time)
self.assertGreaterEqual(delay, min_time)
main = unittest.main
_original_Hub = gevent.hub.Hub
class CountingHub(_original_Hub):
EXPECTED_TEST_ERROR = (ExpectedException,)
switch_count = 0
def switch(self, *args):
self.switch_count += 1
return _original_Hub.switch(self, *args)
def handle_error(self, context, type, value, tb):
if issubclass(type, self.EXPECTED_TEST_ERROR):
# Don't print these to cut down on the noise in the test logs
return
return _original_Hub.handle_error(self, context, type, value, tb)
gevent.hub.Hub = CountingHub
class _DelayWaitMixin(object):
_default_wait_timeout = 0.01
_default_delay_min_adj = 0.001
if not RUNNING_ON_APPVEYOR:
_default_delay_max_adj = 0.11
else:
# Timing resolution is extremely poor on Appveyor
# and subject to jitter.
_default_delay_max_adj = 1.5
def wait(self, timeout):
raise NotImplementedError('override me in subclass')
def _check_delay_bounds(self, timeout, delay,
delay_min_adj=None,
delay_max_adj=None):
delay_min_adj = self._default_delay_min_adj if not delay_min_adj else delay_min_adj
delay_max_adj = self._default_delay_max_adj if not delay_max_adj else delay_max_adj
self.assertGreaterEqual(delay, timeout - delay_min_adj)
self.assertLess(delay, timeout + delay_max_adj)
def _wait_and_check(self, timeout=None):
if timeout is None:
timeout = self._default_wait_timeout
# gevent.timer instances have a 'seconds' attribute,
# otherwise it's the raw number
seconds = getattr(timeout, 'seconds', timeout)
start = time.time()
try:
result = self.wait(timeout)
finally:
self._check_delay_bounds(seconds, time.time() - start,
self._default_delay_min_adj,
self._default_delay_max_adj)
return result
def test_outer_timeout_is_not_lost(self):
timeout = gevent.Timeout.start_new(0.001, ref=False)
try:
try:
result = self.wait(timeout=1)
except gevent.Timeout as ex:
assert ex is timeout, (ex, timeout)
else:
raise AssertionError('must raise Timeout (returned %r)' % (result, ))
finally:
timeout.cancel()
class GenericWaitTestCase(_DelayWaitMixin, TestCase):
_default_wait_timeout = 0.2
_default_delay_min_adj = 0.1
if not RUNNING_ON_APPVEYOR:
_default_delay_max_adj = 0.11
else:
# Timing resolution is very poor on Appveyor
# and subject to jitter
_default_delay_max_adj = 1.5
def test_returns_none_after_timeout(self):
result = self._wait_and_check()
# join and wait simply return after timeout expires
assert result is None, repr(result)
class GenericGetTestCase(_DelayWaitMixin, TestCase):
Timeout = gevent.Timeout
def cleanup(self):
pass
def test_raises_timeout_number(self):
self.assertRaises(self.Timeout, self._wait_and_check, timeout=0.01)
# get raises Timeout after timeout expired
self.cleanup()
def test_raises_timeout_Timeout(self):
timeout = gevent.Timeout(self._default_wait_timeout)
try:
self._wait_and_check(timeout=timeout)
except gevent.Timeout as ex:
assert ex is timeout, (ex, timeout)
self.cleanup()
def test_raises_timeout_Timeout_exc_customized(self):
error = RuntimeError('expected error')
timeout = gevent.Timeout(self._default_wait_timeout, exception=error)
try:
self._wait_and_check(timeout=timeout)
except RuntimeError as ex:
assert ex is error, (ex, error)
self.cleanup()
def walk_modules(basedir=None, modpath=None, include_so=False, recursive=False):
if PYPY:
include_so = False
if basedir is None:
basedir = os.path.dirname(gevent.__file__)
if modpath is None:
modpath = 'gevent.'
else:
if modpath is None:
modpath = ''
for fn in sorted(os.listdir(basedir)):
path = os.path.join(basedir, fn)
if os.path.isdir(path):
if not recursive:
continue
pkg_init = os.path.join(path, '__init__.py')
if os.path.exists(pkg_init):
yield pkg_init, modpath + fn
for p, m in walk_modules(path, modpath + fn + "."):
yield p, m
continue
if fn.endswith('.py'):
x = fn[:-3]
if x.endswith('_d'):
x = x[:-2]
if x in ['__init__', 'core', 'ares', '_util', '_semaphore',
'corecffi', '_corecffi', '_corecffi_build']:
continue
if x in OPTIONAL_MODULES:
try:
six.exec_("import %s" % x, {})
except ImportError:
continue
yield path, modpath + x
elif include_so and fn.endswith('.so'):
if '.pypy-' in fn:
continue
if fn.endswith('_d.so'):
yield path, modpath + fn[:-5]
else:
yield path, modpath + fn[:-3]
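# Illustrative helper (an addition, not part of the original utilities): collect the
# module names that walk_modules() discovers, e.g. to drive an import-check loop.
def list_walkable_modules(include_so=False, recursive=True):
    return [modname for _path, modname in
            walk_modules(include_so=include_so, recursive=recursive)]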
def bind_and_listen(sock, address=('', 0), backlog=50, reuse_addr=True):
from socket import SOL_SOCKET, SO_REUSEADDR, error
if reuse_addr:
try:
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, sock.getsockopt(SOL_SOCKET, SO_REUSEADDR) | 1)
except error:
pass
sock.bind(address)
sock.listen(backlog)
def tcp_listener(address, backlog=50, reuse_addr=True):
"""A shortcut to create a TCP socket, bind it and put it into listening state."""
from gevent import socket
sock = socket.socket()
    bind_and_listen(sock, address, backlog=backlog, reuse_addr=reuse_addr)
return sock
@contextlib.contextmanager
def disabled_gc():
was_enabled = gc.isenabled()
gc.disable()
try:
yield
finally:
if was_enabled:
gc.enable()
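# Typical use (illustrative): wrap refcount- or leak-sensitive assertions, e.g.
# ``with disabled_gc(): check_refs()``, so the collector cannot run mid-check.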
import re
# Linux/OS X/BSD platforms can implement this by calling out to lsof
def _run_lsof():
import tempfile
pid = os.getpid()
fd, tmpname = tempfile.mkstemp('get_open_files')
os.close(fd)
lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
if os.system(lsof_command):
raise OSError("lsof failed")
with open(tmpname) as fobj:
data = fobj.read().strip()
os.remove(tmpname)
return data
def get_open_files(pipes=False):
data = _run_lsof()
results = {}
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("COMMAND"):
# Skip header and blank lines
continue
split = re.split(r'\s+', line)
command, pid, user, fd = split[:4]
# Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
if fd[:-1].isdigit() or fd.isdigit():
if not pipes and fd[-1].isdigit():
continue
fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
if fd in results:
params = (fd, line, split, results.get(fd), data)
raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
results[fd] = line
if not results:
raise AssertionError('failed to parse lsof:\n%s' % (data, ))
results['data'] = data
return results
def get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
fd_directory = '/proc/%d/fd' % os.getpid()
return len(os.listdir(fd_directory))
else:
try:
return len(get_open_files(pipes=True)) - 1
except (OSError, AssertionError):
return 0
lsof_get_open_files = get_open_files
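# Keep a handle on the lsof-based implementation so it can be restored below when the
# psutil variant is unsuitable (e.g. on Travis).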
try:
import psutil
except ImportError:
pass
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
def get_number_open_files():
process = psutil.Process()
try:
return process.num_fds()
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
if RUNNING_ON_TRAVIS:
# XXX: Note: installing psutil on the travis linux vm caused failures in test__makefile_refs.
get_open_files = lsof_get_open_files
if PYPY:
def getrefcount(*args):
pass
else:
def getrefcount(*args):
return sys.getrefcount(*args)
|
the-stack_106_22894 | # Copyright 2019 Ross Wightman
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from sotabencheval.image_classification import ImageNetEvaluator
from sotabencheval.utils import is_server
from timm import create_model
from timm.data import resolve_data_config, create_loader, DatasetTar
from timm.models import apply_test_time_pool
from tqdm import tqdm
import os
NUM_GPU = 1
BATCH_SIZE = 256 * NUM_GPU
def _entry(model_name, paper_model_name, paper_arxiv_id, batch_size=BATCH_SIZE,
ttp=False, args=dict(), model_desc=None):
return dict(
model=model_name,
model_description=model_desc,
paper_model_name=paper_model_name,
paper_arxiv_id=paper_arxiv_id,
batch_size=batch_size,
ttp=ttp,
args=args)
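# For example (illustrative), _entry('resnet50', 'ResNet-50', '1812.01187') expands to
# dict(model='resnet50', model_description=None, paper_model_name='ResNet-50',
#      paper_arxiv_id='1812.01187', batch_size=BATCH_SIZE, ttp=False, args={}).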
# NOTE For any original PyTorch models, I'll remove from this list when you add to sotabench to
# avoid overlap and confusion. Please contact me.
model_list = [
## Weights ported by myself from other frameworks or trained myself in PyTorch
_entry('adv_inception_v3', 'Adversarial Inception V3', '1611.01236',
model_desc='Ported from official Tensorflow weights'),
    _entry('ens_adv_inception_resnet_v2', 'Ensemble Adversarial Inception ResNet V2', '1705.07204',
model_desc='Ported from official Tensorflow weights'),
_entry('dpn68', 'DPN-68 (224x224)', '1707.01629'),
_entry('dpn68b', 'DPN-68b (224x224)', '1707.01629'),
_entry('dpn92', 'DPN-92 (224x224)', '1707.01629'),
_entry('dpn98', 'DPN-98 (224x224)', '1707.01629'),
_entry('dpn107', 'DPN-107 (224x224)', '1707.01629'),
_entry('dpn131', 'DPN-131 (224x224)', '1707.01629'),
_entry('dpn68', 'DPN-68 (320x320, Mean-Max Pooling)', '1707.01629', ttp=True, args=dict(img_size=320)),
_entry('dpn68b', 'DPN-68b (320x320, Mean-Max Pooling)', '1707.01629', ttp=True, args=dict(img_size=320)),
_entry('dpn92', 'DPN-92 (320x320, Mean-Max Pooling)', '1707.01629',
ttp=True, args=dict(img_size=320), batch_size=BATCH_SIZE//2),
_entry('dpn98', 'DPN-98 (320x320, Mean-Max Pooling)', '1707.01629',
ttp=True, args=dict(img_size=320), batch_size=BATCH_SIZE//2),
_entry('dpn107', 'DPN-107 (320x320, Mean-Max Pooling)', '1707.01629',
ttp=True, args=dict(img_size=320), batch_size=BATCH_SIZE//4),
_entry('dpn131', 'DPN-131 (320x320, Mean-Max Pooling)', '1707.01629',
ttp=True, args=dict(img_size=320), batch_size=BATCH_SIZE//4),
_entry('efficientnet_b0', 'EfficientNet-B0', '1905.11946'),
_entry('efficientnet_b1', 'EfficientNet-B1', '1905.11946'),
_entry('efficientnet_b2', 'EfficientNet-B2', '1905.11946',
model_desc='Trained from scratch in PyTorch w/ RandAugment'),
_entry('efficientnet_b2a', 'EfficientNet-B2 (288x288, 1.0 crop)', '1905.11946',
model_desc='Trained from scratch in PyTorch w/ RandAugment'),
_entry('efficientnet_b3', 'EfficientNet-B3', '1905.11946',
model_desc='Trained from scratch in PyTorch w/ RandAugment'),
_entry('efficientnet_b3a', 'EfficientNet-B3 (320x320, 1.0 crop)', '1905.11946',
model_desc='Trained from scratch in PyTorch w/ RandAugment'),
_entry('efficientnet_es', 'EfficientNet-EdgeTPU-S', '1905.11946',
model_desc='Trained from scratch in PyTorch w/ RandAugment'),
_entry('efficientnet_em', 'EfficientNet-EdgeTPU-M', '1905.11946',
model_desc='Trained from scratch in PyTorch w/ RandAugment'),
_entry('gluon_inception_v3', 'Inception V3', '1512.00567', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet18_v1b', 'ResNet-18', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet34_v1b', 'ResNet-34', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet50_v1b', 'ResNet-50', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet50_v1c', 'ResNet-50-C', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet50_v1d', 'ResNet-50-D', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet50_v1s', 'ResNet-50-S', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet101_v1b', 'ResNet-101', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet101_v1c', 'ResNet-101-C', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet101_v1d', 'ResNet-101-D', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet101_v1s', 'ResNet-101-S', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet152_v1b', 'ResNet-152', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet152_v1c', 'ResNet-152-C', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet152_v1d', 'ResNet-152-D', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnet152_v1s', 'ResNet-152-S', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnext50_32x4d', 'ResNeXt-50 32x4d', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnext101_32x4d', 'ResNeXt-101 32x4d', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_resnext101_64x4d', 'ResNeXt-101 64x4d', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_senet154', 'SENet-154', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_seresnext50_32x4d', 'SE-ResNeXt-50 32x4d', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_seresnext101_32x4d', 'SE-ResNeXt-101 32x4d', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_seresnext101_64x4d', 'SE-ResNeXt-101 64x4d', '1812.01187', model_desc='Ported from GluonCV Model Zoo'),
_entry('gluon_xception65', 'Modified Aligned Xception', '1802.02611', batch_size=BATCH_SIZE//2,
model_desc='Ported from GluonCV Model Zoo'),
_entry('mixnet_xl', 'MixNet-XL', '1907.09595', model_desc="My own scaling beyond paper's MixNet Large"),
_entry('mixnet_l', 'MixNet-L', '1907.09595'),
_entry('mixnet_m', 'MixNet-M', '1907.09595'),
_entry('mixnet_s', 'MixNet-S', '1907.09595'),
_entry('fbnetc_100', 'FBNet-C', '1812.03443',
model_desc='Trained in PyTorch with RMSProp, exponential LR decay'),
_entry('mnasnet_100', 'MnasNet-B1', '1807.11626'),
_entry('semnasnet_100', 'MnasNet-A1', '1807.11626'),
_entry('spnasnet_100', 'Single-Path NAS', '1904.02877',
model_desc='Trained in PyTorch with SGD, cosine LR decay'),
_entry('mobilenetv3_large_100', 'MobileNet V3-Large 1.0', '1905.02244',
model_desc='Trained in PyTorch with RMSProp, exponential LR decay, and hyper-params matching '
'paper as closely as possible.'),
_entry('resnet18', 'ResNet-18', '1812.01187'),
_entry('resnet26', 'ResNet-26', '1812.01187', model_desc='Block cfg of ResNet-34 w/ Bottleneck'),
_entry('resnet26d', 'ResNet-26-D', '1812.01187',
model_desc='Block cfg of ResNet-34 w/ Bottleneck, deep stem, and avg-pool in downsample layers.'),
_entry('resnet34', 'ResNet-34', '1812.01187'),
_entry('resnet50', 'ResNet-50', '1812.01187', model_desc='Trained with AugMix + JSD loss'),
_entry('resnet50', 'ResNet-50 (288x288 Mean-Max Pooling)', '1812.01187',
ttp=True, args=dict(img_size=288),
model_desc='Trained with AugMix + JSD loss'),
_entry('resnext50_32x4d', 'ResNeXt-50 32x4d', '1812.01187'),
_entry('resnext50d_32x4d', 'ResNeXt-50-D 32x4d', '1812.01187',
model_desc="'D' variant (3x3 deep stem w/ avg-pool downscale). Trained with "
"SGD w/ cosine LR decay, random-erasing (gaussian per-pixel noise) and label-smoothing"),
_entry('wide_resnet50_2', 'Wide-ResNet-50', '1605.07146'),
_entry('seresnet50', 'SE-ResNet-50', '1709.01507'),
_entry('seresnext26d_32x4d', 'SE-ResNeXt-26-D 32x4d', '1812.01187',
model_desc='Block cfg of SE-ResNeXt-34 w/ Bottleneck, deep stem, and avg-pool in downsample layers.'),
_entry('seresnext26t_32x4d', 'SE-ResNeXt-26-T 32x4d', '1812.01187',
model_desc='Block cfg of SE-ResNeXt-34 w/ Bottleneck, deep tiered stem, and avg-pool in downsample layers.'),
_entry('seresnext50_32x4d', 'SE-ResNeXt-50 32x4d', '1709.01507'),
_entry('skresnet18', 'SK-ResNet-18', '1903.06586'),
_entry('skresnet34', 'SK-ResNet-34', '1903.06586'),
_entry('skresnext50_32x4d', 'SKNet-50', '1903.06586'),
_entry('ecaresnetlight', 'ECA-ResNet-Light', '1910.03151',
model_desc='A tweaked ResNet50d with ECA attn.'),
_entry('ecaresnet50d', 'ECA-ResNet-50d', '1910.03151',
model_desc='A ResNet50d with ECA attn'),
_entry('ecaresnet101d', 'ECA-ResNet-101d', '1910.03151',
model_desc='A ResNet101d with ECA attn'),
_entry('resnetblur50', 'ResNet-Blur-50', '1904.11486'),
_entry('densenet121', 'DenseNet-121', '1608.06993'),
_entry('densenetblur121d', 'DenseNet-Blur-121D', '1904.11486',
model_desc='DenseNet with blur pooling and deep stem'),
_entry('ese_vovnet19b_dw', 'VoVNet-19-DW-V2', '1911.06667'),
_entry('ese_vovnet39b', 'VoVNet-39-V2', '1911.06667'),
_entry('cspresnet50', 'CSPResNet-50', '1911.11929'),
_entry('cspresnext50', 'CSPResNeXt-50', '1911.11929'),
_entry('cspdarknet53', 'CSPDarkNet-53', '1911.11929'),
_entry('tf_efficientnet_b0', 'EfficientNet-B0 (AutoAugment)', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b1', 'EfficientNet-B1 (AutoAugment)', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b2', 'EfficientNet-B2 (AutoAugment)', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b3', 'EfficientNet-B3 (AutoAugment)', '1905.11946', batch_size=BATCH_SIZE//2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b4', 'EfficientNet-B4 (AutoAugment)', '1905.11946', batch_size=BATCH_SIZE//2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b5', 'EfficientNet-B5 (RandAugment)', '1905.11946', batch_size=BATCH_SIZE//4,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b6', 'EfficientNet-B6 (AutoAugment)', '1905.11946', batch_size=BATCH_SIZE//8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b7', 'EfficientNet-B7 (RandAugment)', '1905.11946', batch_size=BATCH_SIZE//8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b8', 'EfficientNet-B8 (RandAugment)', '1905.11946', batch_size=BATCH_SIZE // 8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b0_ap', 'EfficientNet-B0 (AdvProp)', '1911.09665',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b1_ap', 'EfficientNet-B1 (AdvProp)', '1911.09665',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b2_ap', 'EfficientNet-B2 (AdvProp)', '1911.09665',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b3_ap', 'EfficientNet-B3 (AdvProp)', '1911.09665', batch_size=BATCH_SIZE // 2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b4_ap', 'EfficientNet-B4 (AdvProp)', '1911.09665', batch_size=BATCH_SIZE // 2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b5_ap', 'EfficientNet-B5 (AdvProp)', '1911.09665', batch_size=BATCH_SIZE // 4,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b6_ap', 'EfficientNet-B6 (AdvProp)', '1911.09665', batch_size=BATCH_SIZE // 8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b7_ap', 'EfficientNet-B7 (AdvProp)', '1911.09665', batch_size=BATCH_SIZE // 8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b8_ap', 'EfficientNet-B8 (AdvProp)', '1911.09665', batch_size=BATCH_SIZE // 8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b0_ns', 'EfficientNet-B0 (NoisyStudent)', '1911.04252',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b1_ns', 'EfficientNet-B1 (NoisyStudent)', '1911.04252',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b2_ns', 'EfficientNet-B2 (NoisyStudent)', '1911.04252',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b3_ns', 'EfficientNet-B3 (NoisyStudent)', '1911.04252', batch_size=BATCH_SIZE // 2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b4_ns', 'EfficientNet-B4 (NoisyStudent)', '1911.04252', batch_size=BATCH_SIZE // 2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b5_ns', 'EfficientNet-B5 (NoisyStudent)', '1911.04252', batch_size=BATCH_SIZE // 4,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b6_ns', 'EfficientNet-B6 (NoisyStudent)', '1911.04252', batch_size=BATCH_SIZE // 8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_b7_ns', 'EfficientNet-B7 (NoisyStudent)', '1911.04252', batch_size=BATCH_SIZE // 8,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_l2_ns_475', 'EfficientNet-L2 475 (NoisyStudent)', '1911.04252', batch_size=BATCH_SIZE // 16,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_l2_ns', 'EfficientNet-L2 (NoisyStudent)', '1911.04252', batch_size=BATCH_SIZE // 64,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_cc_b0_4e', 'EfficientNet-CondConv-B0 4 experts', '1904.04971',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_cc_b0_8e', 'EfficientNet-CondConv-B0 8 experts', '1904.04971',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_cc_b1_8e', 'EfficientNet-CondConv-B1 8 experts', '1904.04971',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_es', 'EfficientNet-EdgeTPU-S', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_em', 'EfficientNet-EdgeTPU-M', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_el', 'EfficientNet-EdgeTPU-L', '1905.11946', batch_size=BATCH_SIZE//2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_lite0', 'EfficientNet-Lite0', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_lite1', 'EfficientNet-Lite1', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_lite2', 'EfficientNet-Lite2', '1905.11946',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_lite3', 'EfficientNet-Lite3', '1905.11946', batch_size=BATCH_SIZE // 2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_efficientnet_lite4', 'EfficientNet-Lite4', '1905.11946', batch_size=BATCH_SIZE // 2,
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_inception_v3', 'Inception V3', '1512.00567', model_desc='Ported from official Tensorflow weights'),
_entry('tf_mixnet_l', 'MixNet-L', '1907.09595', model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mixnet_m', 'MixNet-M', '1907.09595', model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mixnet_s', 'MixNet-S', '1907.09595', model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mobilenetv3_large_100', 'MobileNet V3-Large 1.0', '1905.02244',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mobilenetv3_large_075', 'MobileNet V3-Large 0.75', '1905.02244',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mobilenetv3_large_minimal_100', 'MobileNet V3-Large Minimal 1.0', '1905.02244',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mobilenetv3_small_100', 'MobileNet V3-Small 1.0', '1905.02244',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mobilenetv3_small_075', 'MobileNet V3-Small 0.75', '1905.02244',
model_desc='Ported from official Google AI Tensorflow weights'),
_entry('tf_mobilenetv3_small_minimal_100', 'MobileNet V3-Small Minimal 1.0', '1905.02244',
model_desc='Ported from official Google AI Tensorflow weights'),
## Cadene ported weights (to remove if Cadene adds sotabench)
_entry('inception_resnet_v2', 'Inception ResNet V2', '1602.07261'),
_entry('inception_v4', 'Inception V4', '1602.07261'),
_entry('nasnetalarge', 'NASNet-A Large', '1707.07012', batch_size=BATCH_SIZE // 4),
_entry('pnasnet5large', 'PNASNet-5', '1712.00559', batch_size=BATCH_SIZE // 4),
_entry('xception', 'Xception', '1610.02357', batch_size=BATCH_SIZE//2),
_entry('legacy_seresnet18', 'SE-ResNet-18', '1709.01507'),
_entry('legacy_seresnet34', 'SE-ResNet-34', '1709.01507'),
_entry('legacy_seresnet50', 'SE-ResNet-50', '1709.01507'),
_entry('legacy_seresnet101', 'SE-ResNet-101', '1709.01507'),
_entry('legacy_seresnet152', 'SE-ResNet-152', '1709.01507'),
_entry('legacy_seresnext26_32x4d', 'SE-ResNeXt-26 32x4d', '1709.01507',
model_desc='Block cfg of SE-ResNeXt-34 w/ Bottleneck'),
_entry('legacy_seresnext50_32x4d', 'SE-ResNeXt-50 32x4d', '1709.01507'),
_entry('legacy_seresnext101_32x4d', 'SE-ResNeXt-101 32x4d', '1709.01507'),
_entry('legacy_senet154', 'SENet-154', '1709.01507'),
## Torchvision weights
# _entry('densenet121'),
# _entry('densenet161'),
# _entry('densenet169'),
# _entry('densenet201'),
# _entry('inception_v3', paper_model_name='Inception V3', ),
# _entry('tv_resnet34', , ),
# _entry('tv_resnet50', , ),
# _entry('resnet101', , ),
# _entry('resnet152', , ),
# _entry('tv_resnext50_32x4d', , ),
# _entry('resnext101_32x8d', ),
# _entry('wide_resnet50_2' , ),
# _entry('wide_resnet101_2', , ),
## Facebook WSL weights
_entry('ig_resnext101_32x8d', 'ResNeXt-101 32x8d', '1805.00932',
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
_entry('ig_resnext101_32x16d', 'ResNeXt-101 32x16d', '1805.00932',
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
_entry('ig_resnext101_32x32d', 'ResNeXt-101 32x32d', '1805.00932', batch_size=BATCH_SIZE // 2,
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
_entry('ig_resnext101_32x48d', 'ResNeXt-101 32x48d', '1805.00932', batch_size=BATCH_SIZE // 4,
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
_entry('ig_resnext101_32x8d', 'ResNeXt-101 32x8d (288x288 Mean-Max Pooling)', '1805.00932',
ttp=True, args=dict(img_size=288),
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
_entry('ig_resnext101_32x16d', 'ResNeXt-101 32x16d (288x288 Mean-Max Pooling)', '1805.00932',
ttp=True, args=dict(img_size=288), batch_size=BATCH_SIZE // 2,
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
_entry('ig_resnext101_32x32d', 'ResNeXt-101 32x32d (288x288 Mean-Max Pooling)', '1805.00932',
ttp=True, args=dict(img_size=288), batch_size=BATCH_SIZE // 4,
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
_entry('ig_resnext101_32x48d', 'ResNeXt-101 32x48d (288x288 Mean-Max Pooling)', '1805.00932',
ttp=True, args=dict(img_size=288), batch_size=BATCH_SIZE // 8,
model_desc='Weakly-Supervised pre-training on 1B Instagram hashtag dataset by Facebook Research'),
## Facebook SSL weights
_entry('ssl_resnet18', 'ResNet-18', '1905.00546',
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnet50', 'ResNet-50', '1905.00546',
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext50_32x4d', 'ResNeXt-50 32x4d', '1905.00546',
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext101_32x4d', 'ResNeXt-101 32x4d', '1905.00546',
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext101_32x8d', 'ResNeXt-101 32x8d', '1905.00546',
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext101_32x16d', 'ResNeXt-101 32x16d', '1905.00546',
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnet50', 'ResNet-50 (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext50_32x4d', 'ResNeXt-50 32x4d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext101_32x4d', 'ResNeXt-101 32x4d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext101_32x8d', 'ResNeXt-101 32x8d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
_entry('ssl_resnext101_32x16d', 'ResNeXt-101 32x16d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288), batch_size=BATCH_SIZE // 2,
model_desc='Semi-Supervised pre-training on YFCC100M dataset by Facebook Research'),
## Facebook SWSL weights
_entry('swsl_resnet18', 'ResNet-18', '1905.00546',
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnet50', 'ResNet-50', '1905.00546',
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext50_32x4d', 'ResNeXt-50 32x4d', '1905.00546',
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext101_32x4d', 'ResNeXt-101 32x4d', '1905.00546',
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext101_32x8d', 'ResNeXt-101 32x8d', '1905.00546',
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext101_32x16d', 'ResNeXt-101 32x16d', '1905.00546',
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnet50', 'ResNet-50 (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext50_32x4d', 'ResNeXt-50 32x4d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext101_32x4d', 'ResNeXt-101 32x4d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext101_32x8d', 'ResNeXt-101 32x8d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288),
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
_entry('swsl_resnext101_32x16d', 'ResNeXt-101 32x16d (288x288 Mean-Max Pooling)', '1905.00546',
ttp=True, args=dict(img_size=288), batch_size=BATCH_SIZE // 2,
model_desc='Semi-Weakly-Supervised pre-training on 1 billion unlabelled dataset by Facebook Research'),
## DLA official impl weights (to remove if sotabench added to source)
_entry('dla34', 'DLA-34', '1707.06484'),
_entry('dla46_c', 'DLA-46-C', '1707.06484'),
_entry('dla46x_c', 'DLA-X-46-C', '1707.06484'),
_entry('dla60x_c', 'DLA-X-60-C', '1707.06484'),
_entry('dla60', 'DLA-60', '1707.06484'),
_entry('dla60x', 'DLA-X-60', '1707.06484'),
_entry('dla102', 'DLA-102', '1707.06484'),
_entry('dla102x', 'DLA-X-102', '1707.06484'),
_entry('dla102x2', 'DLA-X-102 64', '1707.06484'),
_entry('dla169', 'DLA-169', '1707.06484'),
## Res2Net official impl weights (to remove if sotabench added to source)
_entry('res2net50_26w_4s', 'Res2Net-50 26x4s', '1904.01169'),
_entry('res2net50_14w_8s', 'Res2Net-50 14x8s', '1904.01169'),
_entry('res2net50_26w_6s', 'Res2Net-50 26x6s', '1904.01169'),
_entry('res2net50_26w_8s', 'Res2Net-50 26x8s', '1904.01169'),
_entry('res2net50_48w_2s', 'Res2Net-50 48x2s', '1904.01169'),
    _entry('res2net101_26w_4s', 'Res2Net-101 26x4s', '1904.01169'),
_entry('res2next50', 'Res2NeXt-50', '1904.01169'),
_entry('dla60_res2net', 'Res2Net-DLA-60', '1904.01169'),
_entry('dla60_res2next', 'Res2NeXt-DLA-60', '1904.01169'),
## HRNet official impl weights
_entry('hrnet_w18_small', 'HRNet-W18-C-Small-V1', '1908.07919'),
_entry('hrnet_w18_small_v2', 'HRNet-W18-C-Small-V2', '1908.07919'),
_entry('hrnet_w18', 'HRNet-W18-C', '1908.07919'),
_entry('hrnet_w30', 'HRNet-W30-C', '1908.07919'),
_entry('hrnet_w32', 'HRNet-W32-C', '1908.07919'),
_entry('hrnet_w40', 'HRNet-W40-C', '1908.07919'),
_entry('hrnet_w44', 'HRNet-W44-C', '1908.07919'),
_entry('hrnet_w48', 'HRNet-W48-C', '1908.07919'),
_entry('hrnet_w64', 'HRNet-W64-C', '1908.07919'),
## SelecSLS official impl weights
_entry('selecsls42b', 'SelecSLS-42_B', '1907.00837',
model_desc='Originally from https://github.com/mehtadushy/SelecSLS-Pytorch'),
_entry('selecsls60', 'SelecSLS-60', '1907.00837',
model_desc='Originally from https://github.com/mehtadushy/SelecSLS-Pytorch'),
_entry('selecsls60b', 'SelecSLS-60_B', '1907.00837',
model_desc='Originally from https://github.com/mehtadushy/SelecSLS-Pytorch'),
## ResNeSt official impl weights
_entry('resnest14d', 'ResNeSt-14', '2004.08955',
model_desc='Originally from GluonCV'),
_entry('resnest26d', 'ResNeSt-26', '2004.08955',
model_desc='Originally from GluonCV'),
_entry('resnest50d', 'ResNeSt-50', '2004.08955',
model_desc='Originally from https://github.com/zhanghang1989/ResNeSt'),
_entry('resnest101e', 'ResNeSt-101', '2004.08955',
model_desc='Originally from https://github.com/zhanghang1989/ResNeSt'),
_entry('resnest200e', 'ResNeSt-200', '2004.08955',
model_desc='Originally from https://github.com/zhanghang1989/ResNeSt'),
_entry('resnest269e', 'ResNeSt-269', '2004.08955', batch_size=BATCH_SIZE // 2,
model_desc='Originally from https://github.com/zhanghang1989/ResNeSt'),
_entry('resnest50d_4s2x40d', 'ResNeSt-50 4s2x40d', '2004.08955',
model_desc='Originally from https://github.com/zhanghang1989/ResNeSt'),
_entry('resnest50d_1s4x24d', 'ResNeSt-50 1s4x24d', '2004.08955',
model_desc='Originally from https://github.com/zhanghang1989/ResNeSt'),
## RegNet official impl weighs
_entry('regnetx_002', 'RegNetX-200MF', '2003.13678'),
_entry('regnetx_004', 'RegNetX-400MF', '2003.13678'),
_entry('regnetx_006', 'RegNetX-600MF', '2003.13678'),
_entry('regnetx_008', 'RegNetX-800MF', '2003.13678'),
_entry('regnetx_016', 'RegNetX-1.6GF', '2003.13678'),
_entry('regnetx_032', 'RegNetX-3.2GF', '2003.13678'),
_entry('regnetx_040', 'RegNetX-4.0GF', '2003.13678'),
_entry('regnetx_064', 'RegNetX-6.4GF', '2003.13678'),
_entry('regnetx_080', 'RegNetX-8.0GF', '2003.13678'),
_entry('regnetx_120', 'RegNetX-12GF', '2003.13678'),
_entry('regnetx_160', 'RegNetX-16GF', '2003.13678'),
_entry('regnetx_320', 'RegNetX-32GF', '2003.13678', batch_size=BATCH_SIZE // 2),
_entry('regnety_002', 'RegNetY-200MF', '2003.13678'),
_entry('regnety_004', 'RegNetY-400MF', '2003.13678'),
_entry('regnety_006', 'RegNetY-600MF', '2003.13678'),
_entry('regnety_008', 'RegNetY-800MF', '2003.13678'),
_entry('regnety_016', 'RegNetY-1.6GF', '2003.13678'),
_entry('regnety_032', 'RegNetY-3.2GF', '2003.13678'),
_entry('regnety_040', 'RegNetY-4.0GF', '2003.13678'),
_entry('regnety_064', 'RegNetY-6.4GF', '2003.13678'),
_entry('regnety_080', 'RegNetY-8.0GF', '2003.13678'),
_entry('regnety_120', 'RegNetY-12GF', '2003.13678'),
_entry('regnety_160', 'RegNetY-16GF', '2003.13678'),
_entry('regnety_320', 'RegNetY-32GF', '2003.13678', batch_size=BATCH_SIZE // 2),
_entry('rexnet_100', 'ReXNet-1.0x', '2007.00992'),
_entry('rexnet_130', 'ReXNet-1.3x', '2007.00992'),
_entry('rexnet_150', 'ReXNet-1.5x', '2007.00992'),
_entry('rexnet_200', 'ReXNet-2.0x', '2007.00992'),
_entry('vit_small_patch16_224', 'ViT-S/16', None),
_entry('vit_base_patch16_224', 'ViT-B/16', None),
]
if is_server():
DATA_ROOT = './.data/vision/imagenet'
else:
# local settings
DATA_ROOT = './'
DATA_FILENAME = 'ILSVRC2012_img_val.tar'
TAR_PATH = os.path.join(DATA_ROOT, DATA_FILENAME)
for m in model_list:
model_name = m['model']
# create model from name
model = create_model(model_name, pretrained=True)
param_count = sum([m.numel() for m in model.parameters()])
print('Model %s, %s created. Param count: %d' % (model_name, m['paper_model_name'], param_count))
dataset = DatasetTar(TAR_PATH)
filenames = [os.path.splitext(f)[0] for f in dataset.filenames()]
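    # sotabench's ImageNetEvaluator keys predictions by validation image id, i.e. the
    # tar member name without its extension.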
# get appropriate transform for model's default pretrained config
data_config = resolve_data_config(m['args'], model=model, verbose=True)
test_time_pool = False
if m['ttp']:
model, test_time_pool = apply_test_time_pool(model, data_config)
data_config['crop_pct'] = 1.0
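    # (Mean-max pooling evaluates the full, uncropped image, hence crop_pct=1.0 above.)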
batch_size = m['batch_size']
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=batch_size,
use_prefetcher=True,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=6,
crop_pct=data_config['crop_pct'],
pin_memory=True)
evaluator = ImageNetEvaluator(
root=DATA_ROOT,
model_name=m['paper_model_name'],
paper_arxiv_id=m['paper_arxiv_id'],
model_description=m.get('model_description', None),
)
model.npu()
model.eval()
with torch.no_grad():
# warmup
input = torch.randn((batch_size,) + tuple(data_config['input_size'])).npu()
model(input)
bar = tqdm(desc="Evaluation", mininterval=5, total=50000)
evaluator.reset_time()
sample_count = 0
for input, target in loader:
output = model(input)
num_samples = len(output)
image_ids = [filenames[i] for i in range(sample_count, sample_count + num_samples)]
output = output.cpu().numpy()
evaluator.add(dict(zip(image_ids, list(output))))
sample_count += num_samples
bar.update(num_samples)
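            # sotabench caches results per model run; once a cached evaluation is
            # detected we can stop early instead of scoring all 50k images.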
if evaluator.cache_exists:
break
bar.close()
evaluator.save()
for k, v in evaluator.results.items():
print(k, v)
for k, v in evaluator.speed_mem_metrics.items():
print(k, v)
torch.npu.empty_cache()
|
the-stack_106_22895 | from tkinter import *
from tkinter import ttk
def calculate(*args):
try:
value = float(feet.get())
        meters.set(int(0.3048 * value * 10000.0 + 0.5)/10000.0)
except ValueError:
pass
root = Tk()
root.title("Feet to Meters")
root.geometry("400x300+0+0")
mainframe = ttk.Frame(root, padding="3 3 3 3")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
Label(text="Select Your Choice", font=("Calibri", 13)).pack()
Label(text="").pack()
Button(text="Login", command = login).pack()
Label(text="").pack()
Button(text="Register",command=register).pack()
feet = StringVar()
meters = StringVar()
feet_entry = ttk.Entry(mainframe, width=7, textvariable=feet)
feet_entry.grid(column=2, row=1, sticky=(W, E))
ttk.Label(mainframe, textvariable=meters).grid(column=2, row=2, sticky=(W, E))
ttk.Button(mainframe, text="Calculate", command=calculate).grid(column=3, row=3, sticky=W)
ttk.Label(mainframe, text="feet").grid(column=3, row=1, sticky=W)
ttk.Label(mainframe, text="is equivalent to").grid(column=1, row=2, sticky=E)
ttk.Label(mainframe, text="meters").grid(column=3, row=2, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
feet_entry.focus()
root.bind('<Return>', calculate)
root.mainloop() |
the-stack_106_22896 | from Calc2D.CalculationClass import Calculation
import time
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from tornado.ioloop import IOLoop
from tornado import gen
import tornado.web
import tornado.websocket
import os
import os.path
import json
import unicodedata
import logging
import base64
import traceback
import sys
import config
pool = ThreadPoolExecutor(max_workers=config.MAX_THREADPOOL_WORKERS)
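# Long-running calculations are submitted to this pool (see the pool.submit calls below)
# so the Tornado IOLoop stays responsive to websocket traffic while a computation runs.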
def generate_redshifts(redshift_config):
logging.info(redshift_config)
arrs = []
for conf in redshift_config:
log = conf["log"]
func = np.logspace if log else np.linspace
start = np.log10(conf["from"]) if log else conf["from"]
stop = np.log10(conf["to"]) if log else conf["to"]
arrs.append(func(start, stop, conf["points"]))
# Remove duplicates
return np.flip(np.unique(np.concatenate(arrs)), axis=0)
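# Example (illustrative) value for `redshift_config` as sent by the client:
#   [{"from": 10000, "to": 100, "points": 50, "log": True},
#    {"from": 100, "to": 0, "points": 20, "log": False}]
# i.e. a list of segments, each log- or linearly spaced, which are merged, deduplicated
# and returned from the highest redshift down to the lowest.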
# Load available colormaps
def get_colormaps(path=config.COLORMAP_PATH):
categories = []
maps = []
order = {'Default': 1, 'Uniform': 2, 'Diverging': 3, 'Miscellaneous': 4}
cmap_directories = list(sorted(
os.listdir(os.path.join("static", path)),
key=lambda d: order[d]
))
for directory in cmap_directories:
categories.append(directory)
maps_for_category = []
for cmap in os.listdir(os.path.join("static", path, directory)):
maps_for_category.append({
'label': cmap[:cmap.rfind(".")],
'src': os.path.join(os.path.join(config.COLORMAP_PATH, directory, cmap)),
})
maps.append(maps_for_category)
return categories, maps
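# Directory layout assumed by get_colormaps(): static/<COLORMAP_PATH>/<Category>/<name>.<ext>,
# with one subdirectory per category named in `order` above.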
class SimulationHandler(tornado.web.RequestHandler):
def get(self):
categories, colormaps = get_colormaps()
self.render('RSI.html', categories=categories, colormaps=colormaps)
class DataConnection(tornado.websocket.WebSocketHandler):
def open(self):
logging.info("Client connected!")
self.calc = Calculation(kbins=config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
# Send list of `k` values only once
logging.info("Sending k range to client");
self.write_message(json.dumps({
"type": "krange",
"k": self.calc.krange.tolist()
}))
def on_close(self):
logging.info("Connection was closed")
@gen.coroutine
def on_message(self, message):
message = json.loads(message)
param_type = message['type']
logging.debug("Received message from client: {}".format(message))
params = message['params']
if param_type == "Initial":
initialDataType = str(params['initialDataType'])
size = params["xScale"]
resolution = int(params["resolution"])
self.calc.resolution = resolution
self.calc.size = size
logging.info("Size: {} x {} Mpc^2, resolution: {} x {}".format(size, size, resolution, resolution))
SIlimit = params['SILimit']
if SIlimit == "None":
SIlimit = None
sigma = float(params['sigma'])
SI_ns = params['n_s']
if initialDataType == "SI":
A_s = 2.214 * 10**(-9)
else:
A_s = 1
redshift = generate_redshifts(params["redshift"])
self.calc.redshift = redshift
self.write_message(
json.dumps({
'type': 'redshift',
'redshift': redshift.tolist()
}))
logging.info("Submitting initial state generation to ThreadPoolExecutor")
yield pool.submit(self.set_initial_condition, sigma, initialDataType,
SIlimit, SI_ns, A_s)
self.send_initial_state()
self.write_message(json.dumps({'type': 'success', 'sort': 'Initial'}))
elif param_type == "Cosmo":
logging.info("Received cosmological parameters")
cosmological_parameters = params
logging.info("Submitting calculation to ThreadPoolExecutor")
messages = yield pool.submit(self.set_cosmological_parameters, cosmological_parameters)
for message in messages:
self.write_message(json.dumps(message))
elif param_type == "Start":
logging.info("Starting propagation...")
try:
for redindex, z in enumerate(self.calc.redshift):
self.send_frame(redindex)
self.write_message(json.dumps({'type': 'success', 'sort': 'Data'}))
except Exception as e:
logging.exception(e)
self.send_exception(e)
def send_frame(self, redindex):
# `extrema`: (minimum, maximum) of (real space) data
Valuenew, FValuenew, extrema = self.calc.getData(redindex)
logging.info("Sending data for redshift = {}".format(self.calc.redshift[redindex]))
# Create data to be displayed in transfer function window
TransferData, _ = self.calc.getTransferData(redindex)
self.write_message(json.dumps({'type': 'extrema', 'extrema': extrema}))
progress = float(redindex) / len(self.calc.redshift)
real = {quantity: base64.b64encode(data.astype(np.float32)) for quantity, data in Valuenew.iteritems()}
transfer = {quantity: base64.b64encode(data.astype(np.float32)) for quantity, data in TransferData.iteritems()}
self.write_message(
json.dumps({
'type': 'data',
'progress': progress,
'real': real,
'fourier': [],
'transfer': transfer,
}))
def send_initial_state(self):
Value, FValue, extrema = self.calc.getInitialData()
TransferData = np.ones(config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
krange = np.zeros(config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
logging.info("Sending initial data to client.")
self.write_message({
"type": "resolution",
"value": self.calc.resolution
})
extremastring = json.dumps({'type': 'extrema', 'extrema': extrema})
datastring = json.dumps({
'type': 'data',
'real': base64.b64encode(Value.astype(np.float32)),
'fourier': [],
'transfer': base64.b64encode(TransferData.astype(np.float32)),
'k': krange.tolist()
})
self.write_message(extremastring)
self.write_message(datastring)
def set_initial_condition(self, sigma, initialDataType, SIlimit, SI_ns, A_s):
try:
self.calc.setInitialConditions(
sigma=sigma,
initialDataType=initialDataType,
SIlimit=SIlimit,
SI_ns=SI_ns,
A=A_s
)
except Exception as e:
logging.exception(e)
self.send_exception(e)
def send_exception(self, e):
self.write_message(json.dumps({'type': 'exception', 'exception': traceback.format_exc()}))
def set_cosmological_parameters(self, cosmologicalParameters):
try:
messages = []
logging.info("Starting calculation...")
self.calc.setCosmologialParameters(cosmologicalParameters=cosmologicalParameters)
logging.info("Finished calculation!")
messages.append({'type': 'success', 'sort': 'Cosmo'})
messages.append({
'type': 'Cl',
'l': self.calc.tCl.l.tolist(),
'tCl': self.calc.tCl.tCl.tolist()
})
messages.append({
'type': 'mPk',
'kh': self.calc.mPk.kh.tolist(),
'Pkh': self.calc.mPk.Pkh.tolist()
})
z_of_decoupling = self.calc.z_dec
frame_of_decoupling = np.argmin(np.abs(z_of_decoupling - self.calc.redshift))
if self.calc.redshift[frame_of_decoupling] > z_of_decoupling:
frame_of_decoupling -= 1
messages.append({
'type': 'decoupling',
'frame': frame_of_decoupling,
'z': z_of_decoupling})
except Exception as e:
logging.exception(e)
self.send_exception(e)
else:
return messages
def main():
logging.getLogger().setLevel(logging.DEBUG)
application = tornado.web.Application(
[
(r"/", SimulationHandler),
(r"/datasocket", DataConnection),
],
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
debug=True,
)
PORT = config.PORT if len(sys.argv) == 1 else int(sys.argv[1])
application.listen(PORT)
logging.info("Application launched on http://localhost:{}".format(PORT))
IOLoop.instance().current().start()
if __name__ == '__main__':
main()
|
the-stack_106_22897 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import standalone
"""
This test ensures that an `inherit_id` update is correctly replicated on cow views.
The view receiving the `inherit_id` update is either:
1. in a module loaded before `website`. In that case, `website` code is not
loaded yet, so we store the updates to replay the changes on the cow views
once `website` module is loaded (see `_check()`). This test is testing that
part.
2. in a module loaded after `website`. In that case, the `inherit_id` update is
directly replicated on the cow views. That behavior is tested with
`test_module_new_inherit_view_on_parent_already_forked` and
`test_specific_view_module_update_inherit_change` in `website` module.
"""
@standalone('cow_views_inherit')
def test_01_cow_views_inherit_on_module_update(env):
    #   A   B               A   B
    #  / \          =>         / \
    # D   D'                  D   D'
# 1. Setup hierarchy as comment above
View = env['ir.ui.view']
View.with_context(_force_unlink=True, active_test=False).search([('website_id', '=', 1)]).unlink()
child_view = env.ref('portal.footer_language_selector')
parent_view = env.ref('portal.portal_back_in_edit_mode')
# Change `inherit_id` so the module update will set it back to the XML value
child_view.write({'inherit_id': parent_view.id, 'arch': child_view.arch_db.replace('o_footer_copyright_name', 'text-center')})
# Trigger COW on view
child_view.with_context(website_id=1).write({'name': 'COW Website 1'})
child_cow_view = child_view._get_specific_views()
# 2. Ensure setup is as expected
assert child_cow_view.inherit_id == parent_view, "Ensure test is setup as expected."
# 3. Upgrade the module
portal_module = env['ir.module.module'].search([('name', '=', 'portal')])
portal_module.button_immediate_upgrade()
env.reset() # clear the set of environments
env = env() # get an environment that refers to the new registry
# 4. Ensure cow view also got its inherit_id updated
expected_parent_view = env.ref('portal.frontend_layout') # XML data
assert child_view.inherit_id == expected_parent_view, "Generic view security check."
assert child_cow_view.inherit_id == expected_parent_view, "COW view should also have received the `inherit_id` update."
@standalone('cow_views_inherit')
def test_02_cow_views_inherit_on_module_update(env):
    #   A   B   B'           A   B   B'
    #  / \            =>         |   |
    # D   D'                     D   D'
# 1. Setup hierarchy as comment above
View = env['ir.ui.view']
View.with_context(_force_unlink=True, active_test=False).search([('website_id', '=', 1)]).unlink()
view_D = env.ref('portal.my_account_link')
view_A = env.ref('portal.message_thread')
# Change `inherit_id` so the module update will set it back to the XML value
view_D.write({'inherit_id': view_A.id, 'arch_db': view_D.arch_db.replace('o_logout_divider', 'discussion')})
# Trigger COW on view
view_B = env.ref('portal.user_dropdown') # XML data
view_D.with_context(website_id=1).write({'name': 'D Website 1'})
view_B.with_context(website_id=1).write({'name': 'B Website 1'})
view_Dcow = view_D._get_specific_views()
# 2. Ensure setup is as expected
view_Bcow = view_B._get_specific_views()
assert view_Dcow.inherit_id == view_A, "Ensure test is setup as expected."
assert len(view_Bcow) == len(view_Dcow) == 1, "Ensure test is setup as expected (2)."
assert view_B != view_Bcow, "Security check to ensure `_get_specific_views` return what it should."
# 3. Upgrade the module
portal_module = env['ir.module.module'].search([('name', '=', 'portal')])
portal_module.button_immediate_upgrade()
env.reset() # clear the set of environments
env = env() # get an environment that refers to the new registry
# 4. Ensure cow view also got its inherit_id updated
assert view_D.inherit_id == view_B, "Generic view security check."
assert view_Dcow.inherit_id == view_Bcow, "COW view should also have received the `inherit_id` update."
|
the-stack_106_22898 | """A connector for Twitch."""
import asyncio
import os
import re
import logging
import aiohttp
import json
import secrets
import hashlib
import hmac
from voluptuous import Required
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message, JoinRoom, DeleteMessage, LeaveRoom, BanUser
from opsdroid.const import (
TWITCH_API_ENDPOINT,
TWITCH_OAUTH_ENDPOINT,
TWITCH_WEBHOOK_ENDPOINT,
TWITCH_IRC_MESSAGE_REGEX,
TWITCH_JSON,
)
from . import events as twitch_event
CONFIG_SCHEMA = {
Required("code"): str,
Required("client-id"): str,
Required("client-secret"): str,
Required("channel"): str,
"redirect": str,
"forward-url": str,
"always-listening": bool,
}
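# Example (illustrative) connector entry for opsdroid's configuration.yaml; all values
# below are placeholders, not real credentials:
#   twitch:
#     code: "<code returned by the Twitch authorize redirect>"
#     client-id: "<twitch app client id>"
#     client-secret: "<twitch app client secret>"
#     channel: "mychannel"
#     forward-url: "https://my-public-url.example.com"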
_LOGGER = logging.getLogger(__name__)
class ConnectorTwitch(Connector):
"""A connector for Twitch."""
def __init__(self, config, opsdroid=None):
"""Set up all the needed things for the connector."""
super().__init__(config, opsdroid=opsdroid)
_LOGGER.debug(_("Starting Twitch connector."))
self.name = "twitch"
self.opsdroid = opsdroid
self.is_live = config.get("always-listening", False)
self.default_target = config["channel"]
self.token = None
self.code = config["code"]
self.client_id = config["client-id"]
self.client_secret = config["client-secret"]
self.redirect = config.get("redirect", "http://localhost")
self.bot_name = config.get("bot-name", "opsdroid")
self.websocket = None
self.user_id = None
self.webhook_secret = secrets.token_urlsafe(18)
# TODO: Allow usage of SSL connection
self.server = "ws://irc-ws.chat.twitch.tv"
self.port = "80"
self.base_url = config.get("base-url")
self.loop = asyncio.get_event_loop()
self.reconnections = 0
self.auth_file = TWITCH_JSON
async def validate_request(self, request, secret):
"""Compute sha256 hash of request and secret.
Twitch suggests that we should always validate the requests made to our webhook callback url,
that way we protect ourselves from received an event that wasn't sent by Twitch. After sending
``hub.secret`` on our webhook subscribe, Twitch will use that secret to send the ``x-hub-signature``
header, that is the hash that we should compare with our own computed one, if they don't match
then the request is not valid and shouldn't be parsed.
"""
signature = request.headers.get("x-hub-signature")
if signature:
signature = signature.replace("sha256=", "")
payload = await request.read()
computed_hash = hmac.new(
secret.encode(), msg=payload, digestmod=hashlib.sha256
).hexdigest()
return signature == computed_hash
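    # Note: hmac.compare_digest(signature, computed_hash) would make the comparison
    # constant-time; the plain equality check above mirrors the original behaviour.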
async def get_user_id(self, channel, token, client_id):
"""Call twitch api to get broadcaster user id.
A lot of webhooks expect you to pass your user id in order to get the
        notification when a user subscribes or follows the broadcaster
channel.
Since we are calling the Twitch API to get our ``self.user_id`` on connect,
we will use this method to handle when a token has expired, so if we get a
401 status back from Twitch we will raise a ClientResponseError and send back
the status and the message Unauthorized, that way we can refresh the oauth token
on connect if the exception is raised.
Args:
channel (string): Channel that we wish to get the broadcaster id from.
token (string): OAuth token obtained from previous authentication.
client_id (string): Client ID obtained from creating a Twitch App to iteract with opsdroid.
Return:
string: Broadcaster/user id received from Twitch
Raises:
ConnectionError: Raised exception if we got an unauthorized code from twitch. Our
oauth token probably expired.
"""
async with aiohttp.ClientSession() as session:
response = await session.get(
f"{TWITCH_API_ENDPOINT}/users",
headers={"Authorization": f"Bearer {token}", "Client-ID": client_id},
params={"login": channel},
)
if response.status == 401:
raise ConnectionError("Unauthorized")
if response.status >= 400:
_LOGGER.warning(
_("Unable to receive broadcaster id - Error: %s, %s."),
response.status,
response.text,
)
response = await response.json()
return response["data"][0]["id"]
async def send_message(self, message):
"""Send message throught websocket.
        To send a message to the Twitch IRC server through the websocket we follow the
        IRC format: the `PRIVMSG` command, the channel we want to send the message to,
        and the message text itself after a colon (`:`).
Args:
message(string): Text message that should be sent to Twitch chat.
"""
await self.websocket.send_str(f"PRIVMSG #{self.default_target} :{message}")
def save_authentication_data(self, data):
"""Save data obtained from requesting authentication token."""
with open(self.auth_file, "w") as file:
json.dump(data, file)
def get_authorization_data(self):
"""Open file containing authentication data."""
with open(self.auth_file, "r") as file:
data = json.load(file)
return data
async def request_oauth_token(self):
"""Call Twitch and requests new oauth token.
This method assumes that the user already has the code obtained from
following the first oauth step which is making a get request to the
twitch api endpoint: ``https://id.twitch.tv/oauth2/authorize`` and passing
the needed client id, redirect uri and needed scopes to work with the bot.
This method is the second - and final step - when trying to get the oauth token.
We use the code that the user obtained on step one - check documentation - and
make a post request to Twitch to get the ``access_token`` and ``refresh_token`` so
we can refresh the access_token when needed. Note that the refresh_token doesn't
change with each refresh.
"""
async with aiohttp.ClientSession() as session:
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "authorization_code",
"redirect_uri": self.redirect,
"code": self.code,
}
resp = await session.post(TWITCH_OAUTH_ENDPOINT, params=params)
data = await resp.json()
try:
self.token = data["access_token"]
self.save_authentication_data(data)
except KeyError:
_LOGGER.warning(_("Unable to request oauth token - %s"), data)
async def refresh_token(self):
"""Attempt to refresh the oauth token.
Twitch oauth tokens expire after a day, so we need to do a post request to twitch
to get a new token when ours expires. The refresh token is already saved on the ``twitch.json``
file so we can just open that file, get the appropriate token and then update the file with the
new received data.
"""
_LOGGER.warning(_("Oauth token expired, attempting to refresh token."))
refresh_token = self.get_authorization_data()
async with aiohttp.ClientSession() as session:
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "refresh_token",
"redirect_uri": self.redirect,
"refresh_token": refresh_token["refresh_token"],
}
resp = await session.post(TWITCH_OAUTH_ENDPOINT, params=params)
data = await resp.json()
self.token = data["access_token"]
self.save_authentication_data(data)
async def send_handshake(self):
"""Send needed data to the websockets to be able to make a connection.
If we try to connect to Twitch with an expired oauth token, we need to
request a new token. The problem is that Twitch doesn't close the websocket
and will only notify the user that the login authentication failed after
we sent the ``PASS`` , ``NICK`` and ``JOIN`` command to the websocket.
        So we need to send the initial commands to Twitch and await the next reply from
        the websocket; if the token was rejected, the notification of the failed
        authentication comes in the form of ``:tmi.twitch.tv NOTICE * :Login authentication failed``
This method was created to prevent us from having to copy the same commands
and send them to the websocket. If there is an authentication issue, then we
will have to send the same commands again - just with a new token.
"""
await self.websocket.send_str(f"PASS oauth:{self.token}")
await self.websocket.send_str(f"NICK {self.bot_name}")
await self.websocket.send_str(f"JOIN #{self.default_target}")
await self.websocket.send_str("CAP REQ :twitch.tv/commands")
await self.websocket.send_str("CAP REQ :twitch.tv/tags")
await self.websocket.send_str("CAP REQ :twitch.tv/membership")
async def connect_websocket(self):
"""Connect to the irc chat through websockets.
Our connect method will attempt to make a connection to Twitch through the
websockets server. If the connection is made, any sort of failure received
from the websocket will be in the form of a ``NOTICE``, unless Twitch closes
the websocket connection.
In this method we attempt to connect to the websocket and use the previously
saved oauth token to join a twitch channel.
Once we are logged in and on a Twitch channel, we will request access to special
features from Twitch.
The ``commands`` request is used to allow us to send special commands to the Twitch
IRC server.
        The ``tags`` request is used to receive more information with each message received
        from Twitch. Tags enable us to get metadata such as message ids.
        The ``membership`` request is used to get notifications when a user enters the
        chat server (it doesn't mean that the user is watching the streamer) and also when
        a user leaves the chat channel.
"""
_LOGGER.info(_("Connecting to Twitch IRC Server."))
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f"{self.server}:{self.port}", heartbeat=600
) as websocket:
self.websocket = websocket
await self.send_handshake()
await self.get_messages_loop()
async def webhook(self, topic, mode):
"""Subscribe to a specific webhook.
        Twitch has different webhooks that you can subscribe to. When you subscribe to a
        particular webhook, a ``post`` request needs to be made containing a ``JSON`` payload
        that tells Twitch what subscription you are attempting to do.
        When you submit the ``post`` request to ``TWITCH_WEBHOOK_ENDPOINT``, Twitch will send back
        a ``get`` request to your ``callback`` url (``hub.callback``) with a challenge. Twitch will
        then wait for a response containing only the challenge in plain text.
        This is why we open two routes (``get`` and ``post``) that link
        to ``/connector/<connector name>``.
        The ``hub.topic`` represents the webhook that we want to subscribe to on Twitch.
        The ``hub.lease_seconds`` defines the number of seconds until the subscription expires; the maximum
        is 864000 seconds (10 days), and we request a 9 day lease (see the payload below) so the
        subscription stays just under that limit.
Args:
topic (string): Twitch webhook url to subscribe/unsubscribe to.
            mode (string): subscribe or unsubscribe to the webhook.
"""
_LOGGER.info(_("Attempting to connect to webhook %s."), topic)
if topic == "follows":
topic = f"{TWITCH_API_ENDPOINT}/users/follows?to_id={self.user_id}&first=1"
if topic == "stream changed":
topic = f"{TWITCH_API_ENDPOINT}/streams?user_id={self.user_id}"
if topic == "subscribers":
topic = f"{TWITCH_API_ENDPOINT}/subscriptions/events?broadcaster_id={self.user_id}&first=1"
headers = {"Client-ID": self.client_id, "Authorization": f"Bearer {self.token}"}
async with aiohttp.ClientSession() as session:
payload = {
"hub.callback": f"{self.base_url}/connector/{self.name}",
"hub.mode": mode,
"hub.topic": topic,
"hub.lease_seconds": 60 * 60 * 24 * 9, # Expire after 9 days
"hub.secret": self.webhook_secret,
}
response = await session.post(
TWITCH_WEBHOOK_ENDPOINT, headers=headers, json=payload
)
if response.status >= 400:
_LOGGER.debug(_("Error: %s - %s"), response.status, response.text)
async def handle_challenge(self, request):
"""Challenge handler for get request made by Twitch.
        Upon subscription to a Twitch webhook, Twitch will do a get request to the
        ``callback`` url provided to check if the url exists. That request carries a challenge,
        and Twitch expects the ``callback`` url to return the challenge back in plain text.
        That is what we do here: we get ``hub.challenge`` from the request and return
        it in plain text; if we can't find that challenge we return a status code 500.
Args:
request (aiohttp.web.Request): Request made to the get route created for webhook subscription.
Returns:
aiohttp.web.Response: if request contains ``hub.challenge`` we return it, otherwise return status 500.
"""
challenge = request.rel_url.query.get("hub.challenge")
if challenge:
return aiohttp.web.Response(text=challenge)
_LOGGER.debug(_("Failed to get challenge from GET Request made by Twitch."))
return aiohttp.web.Response(status=500)
async def twitch_webhook_handler(self, request):
"""Handle event from Twitch webhooks.
This method will handle events when they are pushed to the webhook post route. Each webhook will
send a different kind of payload so we can handle each event and trigger the right opsdroid event
for the received payload.
        For follow events the payload will contain ``from_id`` (follower id), ``from_name`` (follower username),
        ``to_id`` (broadcaster id), ``to_name`` (broadcaster name) and ``followed_at`` (timestamp).
        For stream changes a lot more things are returned, but we only really care about ``type`` (live/offline)
        and ``title`` (stream title).
For subscriptions events we will want to know ``event_type`` , ``timestamp`` , ``event_data.plan_name`` , ``event_data.is_gift`` ,
``event_data.tier`` , ``event_data.username`` and ``event_data.gifter_name``.
Args:
request (aiohttp.web.Request): Request made to the post route created for webhook subscription.
        Returns:
aiohttp.web.Response: Send a ``received`` message and status 200 - Twitch will keep sending the event if it doesn't get the 200 status code.
"""
valid = await self.validate_request(request, self.webhook_secret)
payload = await request.json()
if valid:
try:
[data] = payload.get("data")
_LOGGER.debug(_("Got event from Twitch - %s") % data)
if data.get("followed_at"):
_LOGGER.debug(_("Follower event received by Twitch."))
user_followed = twitch_event.UserFollowed(
follower=data["from_name"],
followed_at=data["followed_at"],
connector=self,
)
await self.opsdroid.parse(user_followed)
if data.get("started_at"):
_LOGGER.debug(_("Broadcaster went live event received by Twitch."))
self.is_live = True
await self.listen()
stream_started = twitch_event.StreamStarted(
title=data["title"],
viewers=data["viewer_count"],
started_at=data["started_at"],
connector=self,
)
await self.opsdroid.parse(stream_started)
if data.get("event_type") == "subscriptions.notification":
_LOGGER.debug(_("Subscriber event received by Twitch."))
user_subscription = twitch_event.UserSubscribed(
user=data["event_data"]["user_name"],
message=data["event_data"]["message"],
)
await self.opsdroid.parse(user_subscription)
if data.get("event_type") == "subscriptions.subscribe":
_LOGGER.debug(_("Subscriber event received by Twitch."))
user_subscription = twitch_event.UserSubscribed(
user=data["event_data"]["user_name"], message=None
)
await self.opsdroid.parse(user_subscription)
if data.get("event_type") == "subscriptions.subscribe" and data[
"event_data"
].get("is_gift"):
_LOGGER.debug(_("Gifted subscriber event received by Twitch."))
gifted_subscription = twitch_event.UserGiftedSubscription(
gifter_name=data["event_data"]["gifter_name"],
gifted_named=data["event_data"]["user_name"],
)
await self.opsdroid.parse(gifted_subscription)
except ValueError:
                # When the stream goes offline, Twitch will return ``data: []``,
                # which raises ValueError since an empty list can't be unpacked
stream_ended = twitch_event.StreamEnded(connector=self)
await self.opsdroid.parse(stream_ended)
if not self.config.get("always-listening"):
self.is_live = False
                    await self.disconnect_websockets()
return aiohttp.web.Response(text=json.dumps("Received"), status=200)
return aiohttp.web.Response(text=json.dumps("Unauthorized"), status=401)
async def connect(self):
"""Connect to Twitch services.
        Within our connect method we do a quick check to see if the file ``twitch.json`` exists in
        the application folder. If this file doesn't exist, we assume that it's the first time the
        user is running opsdroid and we do the first request for the oauth token.
If this file exists then we just need to read from the file, get the token in the file and
attempt to connect to the websockets and subscribe to the Twitch events webhook.
"""
if not os.path.isfile(self.auth_file):
_LOGGER.info(
_("No previous authorization data found, requesting new oauth token.")
)
await self.request_oauth_token()
else:
_LOGGER.info(
_(
"Found previous authorization data, getting oauth token and attempting to connect."
)
)
self.token = self.get_authorization_data()["access_token"]
try:
self.user_id = await self.get_user_id(
self.default_target, self.token, self.client_id
)
except ConnectionError:
await self.refresh_token()
self.user_id = await self.get_user_id(
self.default_target, self.token, self.client_id
)
# Setup routes for webhooks subscription
self.opsdroid.web_server.web_app.router.add_get(
f"/connector/{self.name}", self.handle_challenge
)
self.opsdroid.web_server.web_app.router.add_post(
f"/connector/{self.name}", self.twitch_webhook_handler
)
await self.webhook("follows", "subscribe")
await self.webhook("stream changed", "subscribe")
await self.webhook("subscribers", "subscribe")
async def listen(self):
"""Listen method of the connector.
Every connector has to implement the listen method. When an
infinite loop is running, it becomes hard to cancel this task.
        So we create a task and assign it to a variable so that we can
        cancel it later.
        If we need to reconnect to Twitch, Twitch will allow us to reconnect
        immediately on the first attempt and then expects us to back off
        exponentially before reconnecting to the websocket.
"""
while self.is_live:
try:
await self.connect_websocket()
except ConnectionError as e:
_LOGGER.debug(e)
await asyncio.sleep(2 ** self.reconnections)
self.reconnections += 1
await self.connect_websocket()
async def get_messages_loop(self):
"""Listen for and parse messages.
Since we are using aiohttp websockets support we need to manually send a
pong response every time Twitch asks for it. We also need to handle if
the connection was closed and if it was closed but we are still live, then
a ConnectionError exception is raised so we can attempt to reconnect to the
chat server again.
"""
async for msg in self.websocket:
if msg.type == aiohttp.WSMsgType.TEXT:
if "PING" in msg.data:
await self.websocket.send_str("PONG :tmi.twitch.tv")
await self._handle_message(msg.data)
if msg.type == aiohttp.WSMsgType.CLOSED:
await self.websocket.close()
if self.is_live:
raise ConnectionError(
"Connection to Twitch Chat Server dropped, reconnecting..."
)
async def _handle_message(self, message):
"""Handle message from websocket connection.
The message that we get from Twitch contains a lot of metadata, so we are using
regex named groups to get only the data that we need in order to parse a message
received.
We also need to check if whatever we received from the websocket is indeed a text
message or an event that we need to parse. We do a few checks to decide what should
be done with the message.
If opsdroid is running for a long time, the OAuth token will expire and the connection
to the websockets will send us back a ``:tmi.twitch.tv NOTICE * :Login authentication failed``
so if we receive that NOTICE we will attempt to refresh the token.
        Twitch sends all websocket messages as strings, including PINGs, so we will keep
        receiving PINGs as long as our connection is active. These messages tell us nothing
        important, so we hide them from the logs.
Args:
message (string): Message received from websocket.
"""
_LOGGER.debug(_("Got message from Twitch Connector chat - %s"), message)
chat_message = re.match(TWITCH_IRC_MESSAGE_REGEX, message)
join_event = re.match(r":(?P<user>.*)!.*JOIN", message)
left_event = re.match(r":(?P<user>.*)!.*PART ", message)
authentication_failed = re.match(
r":tmi.twitch.tv NOTICE \* :Login authentication failed", message
)
if authentication_failed:
            await self.refresh_token()
            raise ConnectionError(
                "OAuth token expired, need to reconnect to the chat service."
)
if chat_message:
text_message = Message(
text=chat_message.group("message").rstrip(),
user=chat_message.group("user"),
user_id=chat_message.group("user_id"),
raw_event=message,
target=f"#{self.default_target}",
event_id=chat_message.group("message_id"),
connector=self,
)
await self.opsdroid.parse(text_message)
if join_event:
joined_chat = JoinRoom(
user=join_event.group("user"),
raw_event=message,
target=f"#{self.default_target}",
connector=self,
)
await self.opsdroid.parse(joined_chat)
if left_event:
left_chat = LeaveRoom(
user=left_event.group("user"),
raw_event=message,
target=f"#{self.default_target}",
connector=self,
)
await self.opsdroid.parse(left_chat)
@register_event(Message)
async def _send_message(self, message):
"""Send message to twitch.
This method sends a text message to the chat service. We can't use the
default ``send`` method because we are also using different kinds of events
within this connector.
"""
_LOGGER.debug(_("Attempting to send %s to websocket!"), message.text)
await self.send_message(message.text)
@register_event(DeleteMessage)
async def remove_message(self, event):
"""Remove message from the chat.
This event is used when we need to remove a specific message from the chat
        service. We need to pass the message id to remove a specific message, so this
        method calls the ``/delete`` command together with the message id to remove
        that message.
"""
_LOGGER.debug(
_("DeleteMessage event fired - message with the id %s removed from chat"),
event.linked_event.event_id,
)
await self.send_message(f"/delete {event.linked_event.event_id}")
@register_event(BanUser)
async def ban_user(self, event):
"""Ban user from the channel.
This event will be used when we need to ban a specific user from the chat channel.
Banning a user will also remove all the messages sent by that user, so we don't need
        to worry about removing a lot of messages.
"""
_LOGGER.debug(
_("Ban event fired - user %s was banned from channel"), event.user
)
await self.send_message(f"/ban {event.user}")
@register_event(twitch_event.CreateClip)
async def create_clip(self):
"""Create clip from broadcast.
        We send a post request to Twitch to create a clip from the broadcast; Twitch will
        return a response containing a clip ``id`` and ``edit_url``. Twitch mentions that the
        way to check if the clip was created successfully is by making a ``get`` request
        to the ``clips`` API endpoint and querying by the ``id`` obtained from the previous
        request.
"""
async with aiohttp.ClientSession() as session:
headers = {
"Client-ID": self.client_id,
"Authorization": f"Bearer {self.token}",
}
resp = await session.post(
f"{TWITCH_API_ENDPOINT}/clips?broadcaster_id={self.user_id}",
headers=headers,
)
response = await resp.json()
clip_data = await session.get(
f"{TWITCH_API_ENDPOINT}/clips?id={response['data'][0]['id']}",
headers=headers,
)
if clip_data.status == 200:
resp = await clip_data.json()
[data] = resp.get("data")
_LOGGER.debug(_("Twitch clip created successfully."))
await self.send_message(data["embed_url"])
return
_LOGGER.debug(_("Failed to create Twitch clip %s"), response)
@register_event(twitch_event.UpdateTitle)
async def update_stream_title(self, event):
"""Update Twitch title.
        To update the channel details we send a ``patch`` request to the Twitch API ``channels``
        endpoint, passing the new title and the broadcaster id as query parameters. A successful
        update returns status 204.
Args:
event (twitch.events.UpdateTitle): opsdroid event containing ``status`` (your title).
"""
async with aiohttp.ClientSession() as session:
headers = {
"client-id": self.client_id,
"Authorization": f"Bearer {self.token}",
"Content-Type": "application/json",
}
param = {"title": event.status, "broadcaster_id": self.user_id}
resp = await session.patch(
f"{TWITCH_API_ENDPOINT}/channels",
headers=headers,
params=param,
)
if resp.status == 204:
_LOGGER.debug(_("Twitch channel title updated to %s"), event.status)
return
_LOGGER.debug(
_("Failed to update Twitch channel title. Error %s - %s"),
resp.status,
                resp.reason,
)
async def disconnect_websockets(self):
"""Disconnect from the websocket."""
self.is_live = False
close_method = getattr(self.websocket, "close", None)
if callable(close_method):
asyncio.ensure_future(close_method(), loop=self.loop)
self.websocket = None
async def disconnect(self):
"""Disconnect from twitch.
        Before opsdroid exits we want to disconnect the Twitch connector, so we need to
do some clean up. We first set the while loop flag to False to stop the loop and then
try to unsubscribe from all the webhooks that we subscribed to on connect - we want to
do that because when we start opsdroid and the ``connect`` method is called we will send
        another subscribe request to Twitch. Afterwards we send a ``PART`` command to leave the
channel that we joined on connect.
Finally we try to close the websocket connection.
"""
if self.is_live:
await self.disconnect_websockets()
await self.webhook("follows", "unsubscribe")
await self.webhook("stream changed", "unsubscribe")
await self.webhook("subscribers", "unsubscribe")
return
|
the-stack_106_22899 | import numpy as np
class HMMModel(object):
def __init__(self, state_size, observe_size):
self.state_size = state_size
self.observe_size = observe_size
        # State transition matrix: state[i][j] is the probability of moving from state i to state j
self.state = np.zeros((state_size, state_size))
        # Emission probability matrix: observe[i][j] is the probability of emitting observation j in state i
self.observe = np.zeros((state_size, observe_size))
        # Initial state probabilities
self.Pi = np.zeros((state_size))
def train(self, word_lists, tag_lists, word2id, tag2id):
        '''
        Estimate the parameter matrices with maximum likelihood estimation.
        '''
        # The data may have been read incorrectly, or the dataset may be contaminated
assert len(tag_lists) == len(word_lists)
        # Estimate the transition probability matrix
for tag_list in tag_lists:
for i in range(len(tag_list) - 1):
cur_tagid = tag2id[tag_list[i]]
next_tagid = tag2id[tag_list[i + 1]]
self.state[cur_tagid][next_tagid] += 1
        # Apply smoothing to handle zero counts
self.state[self.state == 0] = 1e-10
self.state = self.state / self.state.sum(axis = 1, keepdims = True)
        # Estimate the emission probability matrix
for tag_list, word_list in zip(tag_lists, word_lists):
assert len(tag_list) == len(word_list)
for tag, word in zip(tag_list, word_list):
tag_id = tag2id[tag]
word_id = word2id[word]
self.observe[tag_id][word_id] += 1
        # Smoothing is needed here as well
self.observe[self.observe == 0] = 1e-10
self.observe = self.observe / self.observe.sum(axis = 1, keepdims = True)
        # Estimate the initial state distribution
for tag_list in tag_lists:
init_tagid = tag2id[tag_list[0]]
self.Pi[init_tagid] += 1
self.Pi[self.Pi == 0] = 1e-10
self.Pi = self.Pi / self.Pi.sum()
def viterbi_decoding(self, word_list, word2id, tag2id):
        # Work in log space to avoid underflow from multiplying many probabilities
state = np.log(self.state)
observe = np.log(self.observe)
Pi = np.log(self.Pi)
        # Use dynamic programming to find the maximum-probability path,
        # where viterbi[i, j] is the probability of the best state sequence ending in state i at step j
seq_len = len(word_list)
viterbi = np.zeros((self.state_size, seq_len))
        # backtract stores backpointers used to recover the best path
backtract = np.zeros((self.state_size, seq_len))
        # Initialization: the first step of the DP.
        # observe_T is the transpose of the emission matrix, so observe_T[i] gives, for every state, the probability of emitting word i
start_wordid = word2id.get(word_list[0], None)
observe_T = observe.T
if start_wordid is None:
state_tmp = np.log(np.ones(self.state_size) / self.state_size)
else :
state_tmp = observe_T[start_wordid]
viterbi[:, 0] = Pi + state_tmp
backtract[:, 0] = -1
        # DP transition formula:
#viterbi[tag_id, step] = max(viterbi[:, step - 1] * state.T[tag_id] * observe_T[word])
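        # In log space (as used below) the products become sums:
        # viterbi[tag_id, step] = max(viterbi[:, step - 1] + state[:, tag_id]) + observe_T[word][tag_id]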
for step in range(1, seq_len):
wordid = word2id.get(word_list[step], None)
if wordid is None:
state_tmp = np.log(np.ones(self.state_size) / self.state_size)
else :
state_tmp = observe_T[wordid]
for tag_id in range(len(tag2id)):
                # Because we took the log, multiplication becomes addition
tmp = viterbi[:, step - 1] + state[:, tag_id]
max_prob = np.max(tmp, axis = 0)
max_id = np.argmax(tmp, axis = 0)
viterbi[tag_id, step] = max_prob + state_tmp[tag_id]
backtract[tag_id, step] = max_id
best_prob = np.max(viterbi[:, seq_len - 1], axis=0)
last_path = np.argmax(viterbi[:, seq_len - 1], axis=0)
        # Backtrack to recover the best path
best_path = [last_path, ]
for cur_step in range(seq_len - 1, 0, -1):
last_path = int(backtract[last_path][cur_step])
best_path.append(last_path)
assert len(best_path) == len(word_list)
id2tag = dict((id_, tag) for tag, id_ in tag2id.items())
tag_list = [id2tag[id_] for id_ in reversed(best_path)]
return tag_list
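    # Usage sketch (illustrative only - the toy corpus and id mappings below
    # are invented for the example, not part of the original code):
    #
    #   word_lists = [['the', 'dog', 'barks'], ['a', 'cat', 'sleeps']]
    #   tag_lists = [['DET', 'NOUN', 'VERB'], ['DET', 'NOUN', 'VERB']]
    #   vocab = sorted({w for sent in word_lists for w in sent})
    #   tags = sorted({t for sent in tag_lists for t in sent})
    #   word2id = {w: i for i, w in enumerate(vocab)}
    #   tag2id = {t: i for i, t in enumerate(tags)}
    #   model = HMMModel(len(tag2id), len(vocab))
    #   model.train(word_lists, tag_lists, word2id, tag2id)
    #   predictions = model.test(word_lists, word2id, tag2id)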
    def test(self, word_lists, word2id, tag2id):
        pred_tag_lists = []
        for word_list in word_lists:
pred_tag_lists.append(self.viterbi_decoding(word_list, word2id, tag2id))
return pred_tag_lists |
the-stack_106_22900 | #!/usr/bin/env python
import argparse
import sys
from es_backup.repository import *
from es_backup.snapshot import *
from es_backup.backup import *
from jinja2 import Environment, PackageLoader
def render_template(template, **variables):
env = Environment(loader=PackageLoader('es_backup', 'templates'))
template = env.get_template(template)
print(template.render(**variables))
class Commands(object):
def __init__(self):
parser = argparse.ArgumentParser(description='Elasticsearch backup '
'management utility',
usage=('''es-backup.py <command> [<args>]
Commands
repo-list
repo-details
repo-create
repo-delete
snapshot-list
snapshot-details
snapshot-create
snapshot-delete
scheduled-backup
'''))
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
command = args.command.replace('-', '_')
if not hasattr(self, command):
print('Unrecognized command')
parser.print_help()
sys.exit(1)
func = getattr(self, command)
func()
def __arg_conf(self, argv=None, config=None):
if argv is None:
return config
return argv
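    # Note: __arg_conf gives precedence to a value passed on the command line;
    # only when the CLI argument is absent does the config-file value apply.
    # For example, `es-backup.py scheduled-backup -c 2` overrides the
    # configured full_backup_count.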
def repo_list(self):
repos = list_repos()
render_template('repo_list', repos=repos)
def repo_details(self):
parser = argparse.ArgumentParser(description='Show repository details')
parser.add_argument('name', help='Name of repository')
parser.add_argument('-t', '--type', default='fs', choices=['fs', 's3',
'azure', 'hdfs'], help='Repository type (fs, s3, '
'azure, hdfs)')
args = parser.parse_args(sys.argv[2:])
name = args.name
if args.type == 'fs':
repo = FileRepository(name)
if args.type == 's3':
repo = S3_Repository(name)
if args.type == 'azure':
repo = Azure_Repository(name)
if args.type == 'hdfs':
repo = HDFS_Repository(name)
render_template('repo_details', repo=repo)
def __fs_repo_create(self, args):
compress = self.__arg_conf(args.compress,
config.get('fs', 'compress'))
chunk_size = self.__arg_conf(args.chunk_size,
config.get('fs', 'chunk_size'))
if chunk_size == 'null':
chunk_size = None
restore_rate = self.__arg_conf(args.restore_rate,
config.get('fs', 'restore_rate'))
snapshot_rate = self.__arg_conf(args.snapshot_rate,
config.get('fs', 'snapshot_rate'))
repo = FileRepository(args.name, location=args.location,
compress=compress, chunk_size=chunk_size,
restore_rate=restore_rate,
snapshot_rate=snapshot_rate)
if repo:
print('Filesystem repository %s created at %s' % (repo.name,
repo.location))
def __s3_repo_create(self, args):
pass
def __azure_repo_create(self, args):
pass
def __hdfs_repo_create(self, args):
pass
def repo_create(self):
parser = argparse.ArgumentParser(description='Create snapshot '
'repository')
subparsers = parser.add_subparsers(help='repository type')
# Filesystem
parser_fs = subparsers.add_parser('fs')
parser_fs.add_argument('name', help='Name of repository')
parser_fs.add_argument('-l', '--location', required=True, help='Path '
'of repository')
parser_fs.add_argument('-c', '--compress', action='store_true',
help='Enable metadata file compression '
'(Default: true)')
parser_fs.add_argument('-C', '--chunk-size', help='Break large files '
                               'into smaller chunks (Example: 1g, 10m, 5k)')
parser_fs.add_argument('-r', '--restore-rate', help='Max rate of '
'restore (Default: 20mb)')
parser_fs.add_argument('-s', '--snapshot-rate', help='Max rate of '
'snapshot creation (Default: 20mb)')
parser_fs.set_defaults(func=self.__fs_repo_create)
# S3
parser_s3 = subparsers.add_parser('s3')
parser_s3.add_argument('name', help='Name of repository')
parser_s3.add_argument('-b', '--bucket', required=True, help='S3 '
'bucket for repository')
parser_s3.add_argument('-r', '--region', help='Region (Default: '
'us-east-1)')
parser_s3.add_argument('-e', '--endpoint', help='S3 API endpoint '
'(Default: s3.amazonaws.com)')
parser_s3.add_argument('-p', '--protocol', help='HTTP protocol '
'(Default: https)')
parser_s3.add_argument('-B', '--base-path', help='Path for the '
'repository within the bucket')
parser_s3.add_argument('-a', '--access-key', help='Access key for '
'auth')
parser_s3.add_argument('-s', '--secret-key', help='Secret key for '
'auth')
parser_s3.add_argument('-c', '--compress', action='store_true',
help='Enable metadata file compression')
parser_s3.add_argument('-C', '--chunk-size', help='Splits large files '
'into chunks (Default: 100m)')
parser_s3.add_argument('-E', '--server-side-encryption',
action='store_true', help='Enable AES256 '
'encryption in repo')
parser_s3.add_argument('--buffer-size', help='Minimum threshold below '
'which the chunk is uploaded using a single '
'request (Default 5mb)')
parser_s3.add_argument('--max-retries', help='Number of retries in '
'case of S3 error')
parser_s3.set_defaults(func=self.__s3_repo_create)
# Azure
parser_azure = subparsers.add_parser('azure')
parser_azure.add_argument('name', help='Name of repository')
parser_azure.set_defaults(func=self.__azure_repo_create)
# HDFS
parser_hdfs = subparsers.add_parser('hdfs')
parser_hdfs.add_argument('name', help='Name of repository')
parser_hdfs.set_defaults(func=self.__hdfs_repo_create)
args = parser.parse_args(sys.argv[2:])
args.func(args)
def repo_delete(self):
parser = argparse.ArgumentParser(description='Delete a repository')
parser.add_argument('name', help='Name of repository')
args = parser.parse_args(sys.argv[2:])
repo = Repository(args.name)
repo.delete()
print('Repository %s deleted' % repo.name)
def snapshot_list(self):
parser = argparse.ArgumentParser(description='List snapshots in a '
'repository')
parser.add_argument('repo', help='Name of repository')
args = parser.parse_args(sys.argv[2:])
repo = Repository(args.repo)
snapshots = repo.list_snapshots()
render_template('snapshot_list', repo=repo, snapshots=snapshots)
def snapshot_details(self):
parser = argparse.ArgumentParser(description='Show details of a '
'snapshot')
parser.add_argument('repo', help='Name of repository')
parser.add_argument('snapshot', help='Name of snapshot')
args = parser.parse_args(sys.argv[2:])
repo = Repository(args.repo)
snapshot = Snapshot(args.snapshot, repo)
render_template('snapshot_details', snapshot=snapshot)
def snapshot_create(self):
parser = argparse.ArgumentParser(description='Create a snapshot')
parser.add_argument('repo', help='Name of repository')
parser.add_argument('snapshot', help='Name of snapshot')
parser.add_argument('-i', '--indices', default='_all',
help='Multi-index syntax formatted list of '
'indices (Default: _all)')
parser.add_argument('--ignore-unavailable', action='store_true',
help='Allow snapshot creation to continue if an '
'index does not exist')
parser.add_argument('--include-global-state', action='store_false',
help='Prevent the inclusion of the cluster global '
'state')
parser.add_argument('--partial', action='store_true', help='Permit '
'snapshot creation when not all primary shards '
'are available')
args = parser.parse_args(sys.argv[2:])
repo = Repository(args.repo)
ign_unavail = self.__arg_conf(args.ignore_unavailable,
config.get('default',
'ignore_unavailable'))
inc_glob_state = self.__arg_conf(args.include_global_state,
config.get('default',
'include_global_state'))
partial = self.__arg_conf(args.partial, config.get('default',
'partial'))
snapshot = Snapshot(args.snapshot, repo, indices=args.indices,
ignore_unavailable=ign_unavail,
include_global_state=inc_glob_state,
partial=partial)
print('Snapshot %s created in repository %s' % (snapshot.name,
repo.name))
def snapshot_delete(self):
parser = argparse.ArgumentParser(description='Delete a snapshot')
parser.add_argument('repo', help='Name of repository')
parser.add_argument('snapshot', help='Name of snapshot')
args = parser.parse_args(sys.argv[2:])
repo = Repository(args.repo)
snapshot = Snapshot(args.snapshot, repo)
snapshot.delete()
print('Snapshot %s deleted from repository %s' % (snapshot.name,
repo.name))
def scheduled_backup(self):
parser = argparse.ArgumentParser(description='Create a backup '
'according to configured schedule')
parser.add_argument('-t', '--type', default='fs', choices=['fs', 's3',
'azure', 'hdfs'], help='Backup type')
parser.add_argument('-c', '--count', help='Full backup count to '
'retain (Default: 4)')
parser.add_argument('-l', '--life', help='Life time of a backup in '
'days before a new full backup will be taken '
'(Default: 7)')
parser.add_argument('-b', '--base-path', help='Base path of backup '
'repositories (Default: '
                            '/var/backups/elasticsearch)')
parser.add_argument('-p', '--prefix', help='Backup repository name '
'prefix (Default: backup)')
parser.add_argument('-i', '--indices', default='_all',
help='Multi-index syntax formatted list of '
'indices (Default: _all)')
parser.add_argument('--ignore-unavailable', action='store_true',
help='Allow snapshot creation to continue if an '
'index does not exist')
parser.add_argument('--include-global-state', action='store_false',
help='Prevent the inclusion of the cluster global '
'state')
parser.add_argument('--partial', action='store_true', help='Permit '
'snapshot creation when not all primary shards '
'are available')
args = parser.parse_args(sys.argv[2:])
repo_type = self.__arg_conf(args.type, config.get('backup',
'backup_type'))
count = self.__arg_conf(args.count, config.getint('backup',
'full_backup_count'))
life = self.__arg_conf(args.life, config.getint('backup',
'full_backup_life'))
path = self.__arg_conf(args.base_path, config.get(repo_type,
'backup_base_path'))
prefix = self.__arg_conf(args.prefix, config.get('backup', 'prefix'))
indices = self.__arg_conf(args.indices, config.get('backup',
'indices'))
ign_unavail = self.__arg_conf(args.ignore_unavailable,
config.getboolean('default',
'ignore_unavailable'))
inc_glob_state = self.__arg_conf(args.include_global_state,
config.getboolean('default',
'include_global_state'))
partial = self.__arg_conf(args.partial, config.getboolean('default',
'partial'))
backup = get_backup_repo(repo_type=repo_type, count=count, life=life,
base_path=path, prefix=prefix)
create_backup(backup, indices=indices, ignore_unavailable=ign_unavail,
include_global_state=inc_glob_state, partial=partial)
remove_old_backups(prefix)
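# Example invocations (illustrative; the repository names, paths and values
# shown here are made up):
#
#   es-backup.py repo-create fs nightly -l /var/backups/elasticsearch/nightly
#   es-backup.py snapshot-create nightly snap-2019-01-01 -i "logs-*"
#   es-backup.py scheduled-backup -t fs -c 4 -l 7   # e.g. from a daily cron job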
if __name__ == '__main__':
Commands()
|
the-stack_106_22901 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleanup task for cleaning up unneeded testcases."""
import datetime
import json
import random
from googleapiclient.errors import HttpError
from base import dates
from base import errors
from base import memoize
from base import utils
from chrome import build_info
from crash_analysis import crash_comparer
from crash_analysis import severity_analyzer
from datastore import data_handler
from datastore import data_types
from datastore import ndb
from datastore import ndb_utils
from fuzzing import leak_blacklist
from handlers import base_handler
from libs import handler
from libs import mail
from libs.issue_management import issue_filer
from libs.issue_management import issue_tracker_policy
from libs.issue_management import issue_tracker_utils
from metrics import crash_stats
from metrics import logs
GENERIC_INCORRECT_COMMENT = '\n\nIf this is incorrect, please add {label} label'
OSS_FUZZ_INCORRECT_COMMENT = ('\n\nIf this is incorrect, please file a bug on '
'https://github.com/google/oss-fuzz/issues/new')
AUTO_CC_LIMIT = 5
TOP_CRASHES_LIMIT = 5
TOP_CRASHES_DAYS_LOOKBEHIND = 7
TOP_CRASHES_MIN_THRESHOLD = 50 * TOP_CRASHES_DAYS_LOOKBEHIND
TOP_CRASHES_IGNORE_CRASH_TYPES = [
'Hang', 'Out-of-memory', 'Stack-overflow', 'Timeout'
]
TOP_CRASHES_IGNORE_CRASH_STATES = ['NULL']
# FIXME: Remove from this list once these crashes are fixed.
TOP_CRASHES_IGNORE_CRASH_STATE_KEYWORDS = [
'Zygote', '__printf_chk', 'gtk_', 'sandbox::'
]
FUZZ_TARGET_UNUSED_THRESHOLD = 7
UNUSED_HEARTBEAT_THRESHOLD = 15
def _get_predator_result_item(testcase, key, default=None):
"""Return the suspected components for a test case."""
predator_result = testcase.get_metadata('predator_result')
if not predator_result:
return default
return predator_result['result'].get(key, default)
def _append_generic_incorrect_comment(comment, policy, suffix):
"""Get the generic incorrect comment."""
wrong_label = policy.label('wrong')
if not wrong_label:
return comment
return comment + GENERIC_INCORRECT_COMMENT.format(label=wrong_label) + suffix
def job_platform_to_real_platform(job_platform):
"""Get real platform from job platform."""
for platform in data_types.PLATFORMS:
if platform in job_platform:
return platform
raise ValueError('Unknown platform: ' + job_platform)
def cleanup_reports_metadata():
"""Delete ReportMetadata for uploaded reports."""
uploaded_reports = ndb_utils.get_all_from_query(
data_types.ReportMetadata.query(
ndb_utils.is_true(data_types.ReportMetadata.is_uploaded)),
keys_only=True)
ndb.delete_multi(uploaded_reports)
def cleanup_testcases_and_issues():
"""Clean up unneeded open testcases and their associated issues."""
jobs = data_handler.get_all_job_type_names()
testcase_keys = ndb_utils.get_all_from_query(
data_types.Testcase.query(
ndb_utils.is_false(data_types.Testcase.triaged)),
keys_only=True)
top_crashes_by_project_and_platform_map = (
get_top_crashes_for_all_projects_and_platforms())
for testcase_key in testcase_keys:
try:
testcase = data_handler.get_testcase_by_id(testcase_key.id())
except errors.InvalidTestcaseError:
# Already deleted.
continue
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(testcase)
if not policy:
policy = issue_tracker_policy.get_empty()
# Issue updates.
update_os_labels(policy, testcase, issue)
update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map)
update_component_labels(testcase, issue)
update_issue_ccs_from_owners_file(policy, testcase, issue)
update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue)
update_issue_labels_for_flaky_testcase(policy, testcase, issue)
# Testcase marking rules.
mark_duplicate_testcase_as_closed_with_no_issue(testcase)
mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue)
mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue)
# Notification, to be done at end after testcase state is updated from
# previous rules.
notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
notify_issue_if_testcase_is_invalid(policy, testcase, issue)
notify_uploader_when_testcase_is_processed(policy, testcase, issue)
# Mark testcase as triage complete if both testcase and associated issue
# are closed. This also need to be done before the deletion rules.
mark_testcase_as_triaged_if_needed(testcase, issue)
# Testcase deletion rules.
delete_unreproducible_testcase_with_no_issue(testcase)
def cleanup_unused_fuzz_targets_and_jobs():
"""Clean up unused FuzzTarget and FuzzTargetJob entities."""
last_run_cutoff = utils.utcnow() - datetime.timedelta(
days=FUZZ_TARGET_UNUSED_THRESHOLD)
unused_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run < last_run_cutoff)
valid_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run >= last_run_cutoff)
to_delete = [t.key for t in unused_target_jobs]
valid_fuzz_targets = set(t.fuzz_target_name for t in valid_target_jobs)
for fuzz_target in ndb_utils.get_all_from_model(data_types.FuzzTarget):
if fuzz_target.fully_qualified_name() not in valid_fuzz_targets:
to_delete.append(fuzz_target.key)
ndb.delete_multi(to_delete)
def get_jobs_and_platforms_for_top_crashes():
"""Return list of jobs and platforms to use for picking top crashes."""
jobs = set()
platforms = set()
all_jobs = ndb_utils.get_all_from_model(data_types.Job)
for job in all_jobs:
job_environment = job.get_environment()
# Skip experimental jobs.
if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
continue
# Skip custom binary jobs.
if (utils.string_is_true(job_environment.get('CUSTOM_BINARY')) or
job_environment.get('SYSTEM_BINARY_DIR')):
continue
# Skip if explicitly excluded using flag.
if utils.string_is_true(job_environment.get('EXCLUDE_FROM_TOP_CRASHES')):
continue
jobs.add(job.name)
platforms.add(job_platform_to_real_platform(job.platform))
return jobs, platforms
@memoize.wrap(memoize.Memcache(12 * 60 * 60))
def _get_crash_occurrence_platforms_from_crash_parameters(
crash_type, crash_state, security_flag, project_name, lookbehind_days):
"""Get platforms from crash stats based on crash parameters."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return []
where_clause = ('crash_type = {crash_type} AND '
'crash_state = {crash_state} AND '
'security_flag = {security_flag} AND '
'project = {project}').format(
crash_type=json.dumps(crash_type),
crash_state=json.dumps(crash_state),
security_flag=json.dumps(security_flag),
project=json.dumps(project_name),
)
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=lookbehind_days,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=1)
platforms = set()
for row in rows:
for group in row['groups']:
platform = group['name'].split(':')[0].capitalize()
platforms.add(platform)
return list(platforms)
def get_crash_occurrence_platforms(testcase, lookbehind_days=1):
"""Get platforms from crash stats for a testcase."""
return _get_crash_occurrence_platforms_from_crash_parameters(
testcase.crash_type, testcase.crash_state, testcase.security_flag,
testcase.project_name, lookbehind_days)
def get_top_crashes_for_all_projects_and_platforms():
"""Return top crashes for all projects and platforms."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return {}
jobs, platforms = get_jobs_and_platforms_for_top_crashes()
project_names = data_handler.get_all_project_names()
top_crashes_by_project_and_platform_map = {}
for project_name in project_names:
top_crashes_by_project_and_platform_map[project_name] = {}
for platform in platforms:
where_clause = (
'crash_type NOT IN UNNEST(%s) AND '
'crash_state NOT IN UNNEST(%s) AND '
'job_type IN UNNEST(%s) AND '
'platform LIKE %s AND '
'project = %s' %
(json.dumps(TOP_CRASHES_IGNORE_CRASH_TYPES),
json.dumps(TOP_CRASHES_IGNORE_CRASH_STATES), json.dumps(list(jobs)),
json.dumps(platform.lower() + '%'), json.dumps(project_name)))
for keyword in TOP_CRASHES_IGNORE_CRASH_STATE_KEYWORDS:
where_clause += ' AND crash_state NOT LIKE "%%%s%%"' % keyword
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=TOP_CRASHES_DAYS_LOOKBEHIND,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=TOP_CRASHES_LIMIT)
if rows:
rows = [s for s in rows if s['totalCount'] >= TOP_CRASHES_MIN_THRESHOLD]
top_crashes_by_project_and_platform_map[project_name][platform] = (
rows or [])
return top_crashes_by_project_and_platform_map
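# For reference (inferred from how the map is consumed below, e.g. in
# get_top_crash_platforms), the returned structure looks roughly like:
#   {project_name: {platform: [{'crashState': ..., 'crashType': ...,
#                               'isSecurity': ..., 'totalCount': ...}, ...]}}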
def get_top_crash_platforms(testcase, top_crashes_by_project_and_platform_map):
"""Return list of platforms where this testcase is a top crasher."""
if testcase.project_name not in top_crashes_by_project_and_platform_map:
return []
top_crashes_by_platform_map = top_crashes_by_project_and_platform_map[
testcase.project_name]
top_crash_platforms = set()
for platform in list(top_crashes_by_platform_map.keys()):
top_crashes = top_crashes_by_platform_map[platform]
if not top_crashes:
continue
for top_crash in top_crashes:
crash_state_comparer = crash_comparer.CrashComparer(
top_crash['crashState'], testcase.crash_state)
crash_type_comparer = crash_comparer.CrashComparer(
top_crash['crashType'], testcase.crash_type)
if (crash_state_comparer.is_similar() and
top_crash['isSecurity'] == testcase.security_flag and
(top_crash['isSecurity'] or crash_type_comparer.is_similar())):
top_crash_platforms.add(platform.lower())
return sorted(list(top_crash_platforms))
def delete_unreproducible_testcase_with_no_issue(testcase):
"""Delete an unreproducible testcase if it has no associated issue and has
been open for a certain time interval."""
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
  # Make sure that testcase is at least older than
# |UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE|, otherwise it will be seen in
# crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE)):
return
# Make sure that testcase is not seen in crash stats for a certain time
# interval.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE):
return
testcase.key.delete()
logs.log(
'Deleted unreproducible testcase %d with no issue.' % testcase.key.id())
def mark_duplicate_testcase_as_closed_with_no_issue(testcase):
"""Closes a duplicate testcase if it has no associated issue and has been open
for a certain time interval."""
# Make sure that this testcase is a duplicate bug. If not, bail out.
if testcase.status != 'Duplicate':
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
# Make sure that testcase has been open for a certain time interval. We do
# a null timestamp check since some older testcases could be missing it.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp, days=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE)):
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed duplicate testcase %d with no issue.' % testcase.key.id())
def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
"""Mark an issue as fixed if all of its associated reproducible testcase are
fixed."""
verified_label = policy.label('verified')
if not verified_label:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is closed in a status other than Fixed, like Duplicate, WontFix
# or Archived, we shouldn't change it. Bail out.
if not issue.is_open and issue.status != policy.status('fixed'):
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If the testcase is still open, no work needs to be done. Bail out.
if testcase.open:
return
# FIXME: Find a better solution to skip over reproducible tests that are now
  # showing up as flaky (esp. when we are unable to reproduce crash in original
# crash revision).
if testcase.fixed == 'NA':
return
# We can only verify fixed issues for reproducible testcases. If the testcase
# is unreproducible, bail out. Exception is if we explicitly marked this as
# fixed.
if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
return
# Make sure that no other testcases associated with this issue are open.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
# As a last check, do the expensive call of actually checking all issue
# comments to make sure we didn't do the verification already and we didn't
# get called out on issue mistriage.
if (issue_tracker_utils.was_label_added(issue, verified_label) or
issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
return
issue.labels.add(verified_label)
comment = 'ClusterFuzz testcase %d is verified as fixed' % testcase.key.id()
fixed_range_url = data_handler.get_fixed_range_url(testcase)
if fixed_range_url:
comment += ' in ' + fixed_range_url
else:
comment += '.'
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy,
' and re-open the issue.')
issue.status = policy.status('verified')
issue.save(new_comment=comment, notify=True)
logs.log(
'Closed issue %d for fixed testcase %d.' % (issue.id, testcase.key.id()))
def mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue):
"""Mark an unreproducible testcase as fixed if the associated issue is
closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# Make sure that there is an associated bug and it is in closed state.
if not issue or issue.is_open:
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed unreproducible testcase %d with issue closed.' %
testcase.key.id())
def mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue):
"""Closes an unreproducible testcase and its associated issue after a certain
time period."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# If this testcase was manually uploaded, don't change issue state as our
# reproduction result might be incorrect.
if testcase.uploader_email:
return
# Make sure that there is an associated bug and it is in open state.
if not issue or not issue.is_open:
return
# Check if there are any reproducible open testcases are associated with
# this bug. If yes, return.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
  # Make sure that testcase is at least older than
# |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE|, otherwise it will be seen in
# crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
# Handle testcase that turned from reproducible to unreproducible. Account
# for the recent progression task run time.
last_tested_crash_time = testcase.get_metadata('last_tested_crash_time')
if (last_tested_crash_time and not dates.time_has_expired(
last_tested_crash_time,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
  # Make sure that there is no crash seen in the deadline period.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
return
# As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't get called out on issue mistriage.
if issue_tracker_utils.was_label_added(issue, policy.label('wrong')):
return
# Close associated issue and testcase.
comment = ('ClusterFuzz testcase %d is flaky and no longer crashes, '
'so closing issue.' % testcase.key.id())
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy,
' and re-open the issue.')
issue.status = policy.status('wontfix')
issue.save(new_comment=comment, notify=True)
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed unreproducible testcase %d and associated issue.' %
testcase.key.id())
def mark_testcase_as_triaged_if_needed(testcase, issue):
"""Mark testcase as triage complete if both testcase and associated issue
are closed."""
# Check if testcase is open. If yes, bail out.
if testcase.open:
return
# Check if there is an associated bug in open state. If yes, bail out.
if issue:
# Get latest issue object to ensure our update went through.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
if issue.is_open:
return
testcase.triaged = True
testcase.put()
def mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue):
"""Mark testcase as closed if the associated issue is closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# Make sure we passed our deadline based on issue closed timestamp.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE)):
return
# If the issue has an ignore label, don't close the testcase and bail out.
# This helps to prevent new bugs from getting filed for legit WontFix cases.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log('Closed testcase %d with issue closed.' % testcase.key.id())
def mark_testcase_as_closed_if_job_is_invalid(testcase, jobs):
"""Mark testcase as closed if the associated job type does not exist."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check if the testcase job name is in the list of jobs.
if testcase.job_type in jobs:
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log('Closed testcase %d with invalid job.' % testcase.key.id())
def notify_closed_issue_if_testcase_is_open(policy, testcase, issue):
"""Notify closed issue if associated testcase is still open after a certain
time period."""
needs_feedback_label = policy.label('needs_feedback')
if not needs_feedback_label:
return
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# If we have already passed our deadline based on issue closed timestamp,
# no need to notify. We will close the testcase instead.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE)):
return
# Check if there is ignore label on issue already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
# Check if we did add the notification comment already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, needs_feedback_label):
return
issue.labels.add(needs_feedback_label)
if issue.status in [policy.status('fixed'), policy.status('verified')]:
issue_comment = (
'ClusterFuzz testcase {id} is still reproducing on tip-of-tree build '
'(trunk).\n\nPlease re-test your fix against this testcase and if the '
'fix was incorrect or incomplete, please re-open the bug.'
).format(id=testcase.key.id())
wrong_label = policy.label('wrong')
if wrong_label:
issue_comment += (
' Otherwise, ignore this notification and add {label} label.'.format(
label=wrong_label))
else:
# Covers WontFix, Archived cases.
issue_comment = (
'ClusterFuzz testcase {id} is still reproducing on tip-of-tree build '
'(trunk).\n\nIf this testcase was not reproducible locally or '
'unworkable, ignore this notification and we will file another '
'bug soon with hopefully a better and workable testcase.\n\n'.format(
id=testcase.key.id()))
ignore_label = policy.label('ignore')
if ignore_label:
issue_comment += (
'Otherwise, if this is not intended to be fixed (e.g. this is an '
'intentional crash), please add {label} label to prevent future bug '
'filing with similar crash stacktrace.'.format(label=ignore_label))
issue.save(new_comment=issue_comment, notify=True)
logs.log('Notified closed issue for open testcase %d.' % testcase.key.id())
def notify_issue_if_testcase_is_invalid(policy, testcase, issue):
"""Leave comments on associated issues when test cases are no longer valid."""
invalid_fuzzer_label = policy.label('invalid_fuzzer')
if not invalid_fuzzer_label:
return
if not issue or not testcase.bug_information:
return
# If the issue is closed, there's no work to do.
if not issue.is_open:
return
# Currently, this only happens if a test case relies on a fuzzer that has
# been deleted. This can be modified if more cases are needed in the future.
if not testcase.get_metadata('fuzzer_was_deleted'):
return
# Check if we added this message once. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, invalid_fuzzer_label):
return
issue_comment = (
'ClusterFuzz testcase %d is associated with an obsolete fuzzer and can '
'no longer be processed. Please close the issue if it is no longer '
'actionable.') % testcase.key.id()
issue.labels.add(invalid_fuzzer_label)
issue.save(new_comment=issue_comment, notify=True)
logs.log('Closed issue %d for invalid testcase %d.' % (issue.id,
testcase.key.id()))
def _send_email_to_uploader(testcase_id, to_email, content):
"""Send email to uploader when all the testcase tasks are finished."""
subject = 'Your testcase upload %d analysis is complete.' % testcase_id
content_with_footer = (
'%s\n\n'
'If you suspect that the result above is incorrect, '
'try re-doing that job on the testcase report page.') % content.strip()
html_content = content_with_footer.replace('\n', '<br>')
mail.send(to_email, subject, html_content)
def _get_severity_from_labels(security_severity_label, labels):
"""Get the severity from the label list."""
pattern = issue_filer.get_label_pattern(security_severity_label)
for label in labels:
match = pattern.match(label)
if match:
return severity_analyzer.string_to_severity(match.group(1))
return data_types.SecuritySeverity.MISSING
def _update_issue_security_severity_and_get_comment(policy, testcase, issue):
"""Apply a new security severity label if none exists on issue already
and return a comment on this addition. If a label already exists and does
not match security severity label on issue, then just return a comment on
what the recommended severity is."""
security_severity_label = policy.label('security_severity')
if not security_severity_label:
return ''
if not data_types.SecuritySeverity.is_valid(testcase.security_severity):
return ''
issue_severity = _get_severity_from_labels(security_severity_label,
issue.labels)
recommended_severity = issue_filer.apply_substitutions(
policy, security_severity_label, testcase)
if not recommended_severity:
return ''
recommended_severity = recommended_severity[0]
if issue_severity == data_types.SecuritySeverity.MISSING:
issue.labels.add(recommended_severity)
return ('\n\nA recommended severity was added to this bug. '
'Please change the severity if it is inaccurate.')
elif issue_severity != testcase.security_severity:
return (
'\n\nThe recommended severity (%s) is different from what was assigned '
'to the bug. Please double check the accuracy of the assigned '
'severity.' % recommended_severity)
return ''
def _update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, description, upload_metadata):
"""Add issue comment when uploaded testcase is processed."""
if upload_metadata.bug_summary_update_flag and testcase.is_crash():
issue.title = data_handler.get_issue_summary(testcase)
# Impact labels like impacting head/beta/stable only apply for Chromium.
if testcase.project_name == 'chromium':
issue_filer.update_issue_impact_labels(testcase, issue)
# Add severity labels for all project types.
comment = description + _update_issue_security_severity_and_get_comment(
policy, testcase, issue)
issue.save(new_comment=comment)
def notify_uploader_when_testcase_is_processed(policy, testcase, issue):
"""Notify uploader by email when all the testcase tasks are finished."""
testcase_id = testcase.key.id()
# Check if this is a user upload. If not, bail out.
upload_metadata = data_types.TestcaseUploadMetadata.query(
data_types.TestcaseUploadMetadata.testcase_id == testcase_id).get()
if not upload_metadata:
return
# Check that we have a valid email to send the notification. If not, bail out.
to_email = upload_metadata.uploader_email
if not to_email:
return
# If this is a bundled archive with multiple testcases, then don't send email
# for individual testcases.
if upload_metadata.bundled:
return
# Check if the notification is already sent once. If yes, bail out.
if data_handler.is_notification_sent(testcase_id, to_email):
return
  # Make sure all testcase tasks are done (e.g. minimization, regression, etc).
if not data_handler.critical_tasks_completed(testcase):
return
issue_description = data_handler.get_issue_description(testcase)
if issue:
_update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, issue_description, upload_metadata)
issue_description_without_crash_state = data_handler.get_issue_description(
testcase, hide_crash_state=True)
_send_email_to_uploader(testcase_id, to_email,
issue_description_without_crash_state)
data_handler.create_notification_entry(testcase_id, to_email)
def update_os_labels(policy, testcase, issue):
"""Add OS labels to issue."""
os_label = policy.label('os')
if not os_label:
return
if not issue:
return
platforms = get_crash_occurrence_platforms(testcase)
logs.log(
'Found %d platforms for the testcase %d.' % (len(platforms),
testcase.key.id()),
platforms=platforms)
for platform in platforms:
label = os_label.replace('%PLATFORM%', platform)
if not issue_tracker_utils.was_label_added(issue, label):
issue.labels.add(label)
issue.save(notify=False)
logs.log('Updated labels of issue %d.' % issue.id, labels=issue.labels)
def update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map):
"""Add top crash label to issue."""
fuzz_blocker_label = policy.label('fuzz_blocker')
if not fuzz_blocker_label:
return
if not issue:
return
if not testcase.open:
return
top_crash_platforms = get_top_crash_platforms(
testcase, top_crashes_by_project_and_platform_map)
if not top_crash_platforms:
# Not a top crasher, bail out.
return
if issue_tracker_utils.was_label_added(issue, fuzz_blocker_label):
# Issue was already marked a top crasher, bail out.
return
if len(top_crash_platforms) == 1:
platform_message = '%s platform' % top_crash_platforms[0]
else:
platform_message = '%s and %s platforms' % (', '.join(
top_crash_platforms[:-1]), top_crash_platforms[-1])
fuzzer_name = (
testcase.get_metadata('fuzzer_binary_name') or testcase.fuzzer_name)
update_message = (
'This crash occurs very frequently on %s and is likely preventing the '
'fuzzer %s from making much progress. Fixing this will allow more bugs '
'to be found.' % (platform_message, fuzzer_name))
if utils.is_oss_fuzz():
update_message += OSS_FUZZ_INCORRECT_COMMENT
elif utils.is_chromium():
update_message += '\n\nMarking this bug as a blocker for next Beta release.'
update_message = _append_generic_incorrect_comment(
update_message, policy, ' and remove the %s label.' %
data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)
issue.labels.add(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)
# Update with the next beta for trunk, and remove existing milestone label.
beta_milestone_label = (
'M-%d' % build_info.get_release_milestone('head', testcase.platform))
if beta_milestone_label not in issue.labels:
issue.labels.remove_by_prefix('M-')
issue.labels.add(beta_milestone_label)
logs.log(update_message)
issue.labels.add(fuzz_blocker_label)
issue.save(new_comment=update_message, notify=True)
def update_component_labels(testcase, issue):
"""Add components to the issue if needed."""
if not issue:
return
components = _get_predator_result_item(
testcase, 'suspected_components', default=[])
# Remove components already in issue or whose more specific variants exist.
filtered_components = []
for component in components:
found_component_in_issue = any(
component == issue_component or issue_component.startswith(component +
'>')
for issue_component in issue.components)
if not found_component_in_issue:
filtered_components.append(component)
if not filtered_components:
# If there are no new components to add, then we shouldn't make any changes
# to issue.
return
# Don't run on issues we've already applied automatic components to in case
# labels are removed manually. This may cause issues in the event that we
# rerun a test case, but it seems like a reasonable tradeoff to avoid spam.
if issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL):
return
for filtered_component in filtered_components:
issue.components.add(filtered_component)
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL)
issue_comment = (
'Automatically applying components based on crash stacktrace and '
'information from OWNERS files.\n\n'
'If this is incorrect, please apply the %s label.' %
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_COMPONENTS_LABEL)
issue.save(new_comment=issue_comment, notify=True)
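def _example_component_filtering():
  # A minimal sketch (editor illustration) of the filtering rule used in
  # update_component_labels above; the component names below are hypothetical.
  issue_components = {'Blink>JavaScript'}
  suspected_components = ['Blink', 'Internals>GPU']
  filtered = [
      component for component in suspected_components
      if not any(component == issue_component or
                 issue_component.startswith(component + '>')
                 for issue_component in issue_components)
  ]
  # 'Blink' is dropped because the more specific 'Blink>JavaScript' is already on the
  # issue, so only 'Internals>GPU' would be added.
  return filtered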
def update_issue_ccs_from_owners_file(policy, testcase, issue):
"""Add cc to an issue based on owners list from owners file. This is
currently applicable to fuzz targets only."""
auto_cc_label = policy.label('auto_cc_from_owners')
if not auto_cc_label:
return
if not issue or not issue.is_open:
return
# If we've assigned the ccs before, it likely means we were incorrect.
# Don't try again for this particular issue.
if issue_tracker_utils.was_label_added(issue, auto_cc_label):
return
if testcase.get_metadata('has_issue_ccs_from_owners_file'):
return
ccs_list = utils.parse_delimited(
testcase.get_metadata('issue_owners', ''),
delimiter=',',
strip=True,
remove_empty=True)
if not ccs_list:
return
ccs_added = False
actions = list(issue.actions)
for cc in random.sample(ccs_list, min(AUTO_CC_LIMIT, len(ccs_list))):
if cc in issue.ccs:
continue
# If cc was previously manually removed from the cc list, we assume that
# they were incorrectly added. Don't try to add them again.
cc_was_removed = any(cc in action.ccs.removed for action in actions)
if cc_was_removed:
continue
issue.ccs.add(cc)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
testcase.set_metadata('has_issue_ccs_from_owners_file', True)
return
issue_comment = (
'Automatically adding ccs based on OWNERS file / target commit history.')
if utils.is_oss_fuzz():
issue_comment += OSS_FUZZ_INCORRECT_COMMENT + '.'
else:
issue_comment = _append_generic_incorrect_comment(issue_comment, policy,
'.')
issue.labels.add(auto_cc_label)
issue.save(new_comment=issue_comment, notify=True)
def update_issue_labels_for_flaky_testcase(policy, testcase, issue):
"""Update issue reproducibility label when testcase becomes flaky or
unreproducible."""
if not issue or not issue.is_open:
return
# If the testcase is reproducible, then no change is needed. Bail out.
if not testcase.one_time_crasher_flag:
return
reproducible_label = policy.label('reproducible')
unreproducible_label = policy.label('unreproducible')
if not reproducible_label or not unreproducible_label:
return
# Make sure that this issue is not already marked Unreproducible.
if unreproducible_label in issue.labels:
return
issue.labels.remove(reproducible_label)
issue.labels.add(unreproducible_label)
comment = ('ClusterFuzz testcase %d appears to be flaky, '
'updating reproducibility label.' % testcase.key.id())
issue.save(new_comment=comment)
def update_issue_owner_and_ccs_from_predator_results(policy,
testcase,
issue,
only_allow_ccs=False):
"""Assign the issue to an appropriate owner if possible."""
if not issue or not issue.is_open:
return
# If the issue already has an owner, we don't need to update the bug.
if issue.assignee:
return
# If we've assigned an owner or cc once before, it likely means we were
# incorrect. Don't try again for this particular issue.
if (issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL) or
issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL)):
return
# If there are more than 3 suspected CLs, we can't be confident in the
# results. Just skip any sort of notification to CL authors in this case.
suspected_cls = _get_predator_result_item(testcase, 'suspected_cls')
if not suspected_cls or len(suspected_cls) > 3:
return
# Validate that the suspected CLs have all of the information we need before
# continuing. This allows us to assume that they are well-formed later,
# avoiding any potential exceptions that would interrupt this task.
for suspected_cl in suspected_cls:
url = suspected_cl.get('url')
description = suspected_cl.get('description')
author = suspected_cl.get('author')
if not url or not description or not author:
logs.log_error(
'Suspected CL for testcase %d is missing required information.' %
testcase.key.id())
return
if len(suspected_cls) == 1 and not only_allow_ccs:
suspected_cl = suspected_cls[0]
# If this owner has already been assigned before but has since been removed,
# don't assign it to them again.
for action in issue.actions:
if action.assignee == suspected_cls[0]['author']:
return
# We have high confidence for the single-CL case, so we assign the owner.
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL)
issue.assignee = suspected_cl['author']
issue.status = policy.status('assigned')
issue_comment = (
'Automatically assigning owner based on suspected regression '
'changelist %s (%s).\n\n'
'If this is incorrect, please let us know why and apply the %s '
'label. If you aren\'t the correct owner for this issue, please '
'unassign yourself as soon as possible so it can be re-triaged.' %
(suspected_cl['url'], suspected_cl['description'],
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL))
else:
if testcase.get_metadata('has_issue_ccs_from_predator_results'):
return
issue_comment = (
'Automatically adding ccs based on suspected regression changelists:'
'\n\n')
ccs_added = False
for suspected_cl in suspected_cls:
# Update the comment with the suspected CL, regardless of whether or not
# we're ccing the author. This might, for example, catch the attention of
# someone who has already been cced.
author = suspected_cl['author']
issue_comment += '%s by %s - %s\n\n' % (suspected_cl['description'],
author, suspected_cl['url'])
if author in issue.ccs:
continue
# If an author has previously been manually removed from the cc list,
# we assume they were incorrectly added. Don't try to add them again.
author_was_removed = False
for action in issue.actions:
if author in action.ccs.removed:
author_was_removed = True
break
if author_was_removed:
continue
issue.ccs.add(author)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
testcase.set_metadata('has_issue_ccs_from_predator_results', True)
return
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL)
issue_comment += (
'If this is incorrect, please let us know why and apply the %s label.' %
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL)
try:
issue.save(new_comment=issue_comment, notify=True)
except HttpError:
# If we see such an error when we aren't setting an owner, it's unexpected.
if only_allow_ccs or not issue.assignee:
logs.log_error(
'Unable to update issue for test case %d.' % testcase.key.id())
return
# Retry without setting the owner. They may not be a chromium project
# member, in which case we can try falling back to cc.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
update_issue_owner_and_ccs_from_predator_results(
policy, testcase, issue, only_allow_ccs=True)
def cleanup_unused_heartbeats():
"""Clean up unused heartbeat entities."""
cutoff_time = utils.utcnow() - datetime.timedelta(
days=UNUSED_HEARTBEAT_THRESHOLD)
unused_heartbeats = ndb_utils.get_all_from_query(
data_types.Heartbeat.query(
data_types.Heartbeat.last_beat_time < cutoff_time),
keys_only=True)
ndb.delete_multi(unused_heartbeats)
class Handler(base_handler.Handler):
"""Cleanup."""
@handler.check_cron()
def get(self):
cleanup_testcases_and_issues()
cleanup_reports_metadata()
leak_blacklist.cleanup_global_blacklist()
cleanup_unused_fuzz_targets_and_jobs()
cleanup_unused_heartbeats()
|
the-stack_106_22904 | import datetime
import json
import jwt
import requests
from django.conf import settings
def request_file_list(job, path, recursive, user_id=None):
"""
Requests the file list for a job
:param job: The CwFollowupJob instance to get the status of
:param user_id: On optional user id to make the request as
:param path: The relative path to the job to fetch the file list for
:param recursive: If the file list should be recursive or not
"""
# Make sure that the job was actually submitted (Might be in a draft state?)
if not job.job_controller_id:
return False, "Job has not been submitted"
# Create the jwt token
jwt_enc = jwt.encode(
{
'userId': user_id or job.user_id,
'exp': datetime.datetime.now() + datetime.timedelta(days=30)
},
settings.JOB_CONTROLLER_JWT_SECRET,
algorithm='HS256'
)
# Build the data object
data = {
'jobId': job.job_controller_id,
'recursive': recursive,
'path': path
}
try:
# Initiate the request to the job controller
result = requests.request(
"PATCH", f"{settings.GWCLOUD_JOB_CONTROLLER_API_URL}/file/",
data=json.dumps(data),
headers={
"Authorization": jwt_enc
}
)
# Check that the request was successful
if result.status_code != 200:
# Oops
msg = f"Error getting job file list, got error code: " \
f"{result.status_code}\n\n{result.headers}\n\n{result.content}"
print(msg)
raise Exception(msg)
# Parse the response from the job controller
result = json.loads(result.content)
return True, result["files"]
except Exception:
return False, "Error getting job file list"
|
the-stack_106_22908 |
# jsc is a JSON-based file format that extends JSON with support for several extra features.
# This module parses jsc and converts it into plain JSON.
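#
# An illustrative jsc snippet (editor sketch inferred from the parser below; the keys
# and values are hypothetical). It shows the extensions this module handles: comments,
# single quotes, unquoted keys, trailing commas, `import`, inheritance via child(parent),
# variables via jcs_def / ${...}, and per-platform keys such as key<Windows>:
#
#   import "common.jsc"
#   {
#       base: { timeout: 30 },
#       debug(base): {
#           // inherits timeout from base
#           log_level: 'verbose',
#       },
#       jcs_def: { out_dir: "build" },
#       output<Windows>: "${out_dir}/win",
#       output<Linux>: "${out_dir}/linux",
#   }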
import json
import sys
import os
import traceback
import platform
# Currently supported platforms
support_platforms = [
"Windows",
"Linux"
]
#########################################################################################################
# utils
#########################################################################################################
def change_path_extension(dir, ext):
return os.path.splitext(dir)[0] + ext
def check_path_directory(dir):
return len(os.path.splitext(dir)[1]) <= 0
def put_stirng_in_quotes(string):
if len(string) > 0:
if string[0] == "\"":
return string
return "\"" + string + "\""
def get_value_type(value):
value = value.strip()
if len(value) == 0:
return "string"
if value[0] == "\"":
return "string"
if value[0] == "{":
return "object"
if value[0] == "[":
return "array"
if value == 'true' or value == 'false':
return "bool"
if value.find(".") != -1:
try:
float(value)
return "float"
except ValueError:
pass
if value.find("0x") != -1:
try:
int(value[2:], 16)
return "hex"
except ValueError:
pass
if value.find("0b") != -1:
try:
int(value[2:], 2)
return "binary"
except ValueError:
pass
try:
int(value)
return "int"
except ValueError:
return "string"
def to_uint(v):
if v < 0:
return sys.maxsize
return v
def find_chars_first(string, pos, chars):
ret = to_uint(-1)
for char in chars:
ret = min(ret, to_uint(string.find(char, pos)))
return ret
def get_closed_brackets_end_pos(open, close, string, start_pos):
start_pos = string.find(open, start_pos)
if start_pos == -1:
return -1
start_pos += 1
stack = [open]
while len(stack) > 0 and start_pos < len(string):
char = string[start_pos]
if char == open:
stack.append(open)
elif char == close:
stack.pop()
start_pos += 1
return start_pos
#########################################################################################################
# object
#########################################################################################################
def quoted_value(value, pos, next_pos):
quoted = ""
value_type = get_value_type(value)
if value_type == "string":
quoted += put_stirng_in_quotes(value)
pos = next_pos
elif value_type == "hex":
hex_value = int(value[2:], 16)
quoted = str(hex_value)
pos = next_pos
elif value_type == "binary":
binary_value = int(value[2:], 2)
quoted = str(binary_value)
pos = next_pos
elif value_type == "float":
quoted = value
pos = next_pos
elif value_type == "int":
quoted = value
pos = next_pos
return (quoted, pos)
def quoted_array(array_string):
ret = ""
pos = 0
while True:
item_end = array_string.find(",", pos)
if item_end == -1:
item_end = len(array_string)
item = array_string[pos:item_end].strip()
if len(item) == 0:
break
item_type = get_value_type(item)
if item_type == "object":
# a "," may appear inside {}, so re-locate item_end
item_end = get_closed_brackets_end_pos("{", "}", array_string, pos)
if item_end == -1:
break
ret += quoted_object(array_string[pos+1:item_end-1])
elif item_type == "array":
# a "," may appear inside [], so re-locate item_end
item_end = get_closed_brackets_end_pos("[", "]", array_string, pos)
if item_end == -1:
break
ret += quoted_array(array_string[pos+1:item_end-1])
elif item[0] == "\"":
# a "," may appear inside a string, so re-locate item_end
item_end = get_closed_brackets_end_pos("\"", "\"", array_string, pos)
if item_end == -1:
break
ret += quoted_value(array_string[pos:item_end], 0, 0)[0]
else:
ret += quoted_value(item, 0, 0)[0]
pos = item_end + 1
if pos >= len(array_string):
break
ret += ","
return "[" + ret + "]"
def quoted_object_key(key, value, pos):
quoted = ""
if get_value_type(value) != "object":
return quoted
quoted += "{"
# get the object's inherit list
inherits = []
bp = key.find("(")
if bp != -1:
ep = key.find(")")
inherit_str = key[bp+1:ep]
inherits = inherit_str.split(",")
key = key[:bp]
if len(inherits) > 0:
quoted += "\"jsc_inherit\":["
for i in range(0, len(inherits)):
if i > 0:
quoted += ", "
quoted += put_stirng_in_quotes(inherits[i])
quoted += "],"
return key, quoted
def quoted_object(jsc):
delimiters = [",", "{"]
ret = ""
pos = 0
string_list = jsc_get_string_index_list(jsc)
while True:
cur_pos = pos
pos = jsc.find(":", pos)
if pos == -1:
ret += jsc[cur_pos:]
break
# ignore inside quote
in_string = jsc_check_in_string_index_list(string_list, pos)
if in_string:
ret += jsc[cur_pos:in_string]
pos = in_string
continue
# get key
key_start = 0
for d in delimiters:
dd = jsc[:pos].rfind(d)
if dd != -1:
key_start = max(key_start, dd)
key = jsc[key_start+1:pos].strip()
# if key_start falls inside (), search for key_start again
if key.find(")") != -1:
sp = jsc[:pos].rfind("(")
ep = jsc.find(")", key_start)
if sp < key_start < ep:
key_start = 0
for d in delimiters:
dd = jsc[:sp].rfind(d)
if dd != -1:
key_start = max(key_start, dd)
key = jsc[key_start+1:pos].strip()
# get first value pos
pos += 1 # skip ":"
value_end_pos = find_chars_first(jsc, pos, [",", "}", "]"])
while jsc_check_in_string_index_list(string_list, value_end_pos):
value_end_pos = find_chars_first(jsc, value_end_pos + 1, [",", "}", "]"])
value = jsc[pos:value_end_pos]
# an object with inheritance affects the key, so handle it specially first
object_inherits = ""
if get_value_type(value) == "object":
key, object_inherits = quoted_object_key(key, value, pos)
pos += 1
# put key in quotes
ret += jsc[cur_pos:key_start+1]
ret += put_stirng_in_quotes(key)
ret += ":"
ret += object_inherits
# put value in quotes and connect value
if get_value_type(value) == "array":
end = get_closed_brackets_end_pos("[", "]", jsc, pos)
if end == -1:
print("error: invalid array")
exit(1)
ret += quoted_array(jsc[pos + 1:end - 1])
pos = end
else:
info = quoted_value(value, pos, value_end_pos)
ret += info[0]
pos = info[1]
return ret
def process_inherit(child, parent):
for k, v in parent.items():
if type(v) != dict:
if not k in child:
child[k] = v
else:
# if the child's attribute is not a dict, the child's value takes precedence
if k in child and type(child[k]) != dict:
continue
if k not in child:
child[k] = dict()
process_inherit(child[k], v)
def process_inherit_recursive(jsn, jsn_parent):
inherits = []
for k, v in jsn.items():
if k == "jsc_inherit":
for i in v:
inherits.append(i)
if len(inherits) > 0:
jsn.pop("jsc_inherit", None)
for inherit in inherits:
if inherit in jsn_parent.keys():
process_inherit(jsn, jsn_parent[inherit])
for k, v in jsn.items():
if type(v) == dict:
process_inherit_recursive(v, jsn)
def process_variable(var, def_vars):
variables = []
# get variables
string = str(var)
pos = 0
while True:
sp = string.find("${", pos)
if sp == -1:
break
else:
ep = string.find("}", sp)
variables.append(string[sp:ep+1])
pos = sp + 2
# get variables from def_vars
for variable_index in range(0, len(variables)):
variable = variables[variable_index]
variable_name = variable[2:len(variable)-1]
if variable_name in def_vars.keys():
# var is either a list or a single value here
if type(var) == list:
for i in range(0, len(var)):
list_v = var[i]
r_list_v = process_variable(list_v, def_vars)
if r_list_v:
var[i] = r_list_v
return var
else:
if type(def_vars[variable_name]) == str:
var = var.replace(variable, str(def_vars[variable_name]))
if variable_index == len(variables) - 1:
return var
else:
return def_vars[variable_name]
else:
print("error: undefined variable:" + variable_name)
print("current def vars:")
print(json.dumps(def_vars, indent=4))
exit(1)
return None
def process_variables_recursive(jsn, def_vars):
# copy to avoid passing by reference, so variable declarations at deeper levels do not affect this level
current_def_vars = def_vars.copy()
def_key_word = "jcs_def"
if def_key_word in jsn.keys():
for k, v in jsn[def_key_word].items():
current_def_vars[k] = v
# recursively traverse all key/value pairs
for k, v in jsn.items():
if type(v) == dict:
process_variables_recursive(v, current_def_vars)
elif type(v) == list:
for i in range(0, len(v)):
list_v = v[i]
r_list_v = process_variable(list_v, current_def_vars)
if r_list_v:
v[i] = r_list_v
else:
var = process_variable(v, current_def_vars)
if var:
jsn[k] = var
if def_key_word in jsn.keys():
jsn.pop(def_key_word, None)
def process_platform_keys_recursive(jsn, platform_name):
to_removed_keys = []
platform_dict = dict()
for key in jsn.keys():
value = jsn[key]
bp = key.find("<")
ep = key.find(">")
if bp != -1 and ep != -1:
key_platform = key[bp + 1:ep]
real_key = key[:bp]
if key_platform == platform_name:
platform_dict[real_key] = value
to_removed_keys.append(key)
if type(value) == dict:
process_platform_keys_recursive(value, platform_name)
# write the keys with the platform marker removed back into the current jsn
for key in to_removed_keys:
jsn.pop(key)
if len(platform_dict) > 0:
process_inherit(jsn, platform_dict)
def process_platform_keys(jsn):
platform_name = platform.system()
if platform_name in support_platforms:
process_platform_keys_recursive(jsn, platform_name)
else:
print("warning: unknown platform systme:" + platform_name)
platform_name = "Unknown"
#########################################################################################################
# # jsc parser
#########################################################################################################
def jsc_get_string_index_list(string):
prev_c = ""
quote = ""
index_start = 0
str_list = []
for ic in range(0, len(string)):
c = string[ic]
if c == "'" or c == "\"":
if quote == "":
quote = c
index_start = ic
# \" 和 \' 视为字符串
elif quote == c and prev_c != "\\":
quote = ""
str_list.append((index_start, ic))
# \\ is not treated as an escape character
if prev_c == "\\" and c == "\\":
prev_c = ""
else:
prev_c = c
return str_list
def jsc_check_in_string_index_list(string_list, index):
for index_slice in string_list:
if index <= index_slice[0]:
break
elif index < index_slice[1]:
return index_slice[1] + 1
return 0
def jsc_remove_comments(jsc):
ret = ""
lines = jsc.split("\n")
in_comment = False
for line in lines:
string_list = jsc_get_string_index_list(line)
# check for /* */ block comments
if in_comment:
spos_end = line.find("*/")
if spos_end != -1:
line = line[spos_end + 2:]
in_comment = False
else:
continue
cpos = line.find("//")
spos = line.find("/*")
# ignore comment markers that appear inside strings
if jsc_check_in_string_index_list(string_list, cpos):
cpos = -1
if jsc_check_in_string_index_list(string_list, spos):
spos = -1
if cpos != -1:
ret += line[:cpos] + "\n"
elif spos != -1:
ret += line[:spos] + "\n"
spos_end = line.find("*/")
if spos_end == -1 or spos_end < spos:
in_comment = True
else:
ret += line + "\n"
return ret
def jsc_change_quotes(jsc):
ret = ""
string_list = jsc_get_string_index_list(jsc)
for ic in range(0, len(jsc)):
c = jsc[ic]
if c == "'":
if not jsc_check_in_string_index_list(string_list, ic):
ret += "\""
continue
ret += c
return ret
def jsc_clean_src(jsc):
ret = ""
in_string = False
for c in jsc:
if c == "\"":
in_string = not in_string
if not in_string:
new_c = c.strip()
else:
new_c = c
ret += new_c
return ret
def jsc_get_imports(jsc, import_dir):
imports = []
bp = jsc.find("{")
heads = jsc[:bp].split("\n")
has_imports = False
for head in heads:
if head.find("import") != -1:
has_imports = True
break
if not has_imports:
return jsc, imports
if not import_dir:
import_dir = os.getcwd()
for head in heads:
if head.find("import") != -1:
import_file = head[len("import"):].strip().strip("\"")
import_file = os.path.join(import_dir, import_file)
if os.path.exists(import_file):
imports.append(import_file)
else:
print("ERROR: failed to import file " + import_file)
jsc = jsc[bp:]
return jsc, imports
def jsc_remove_trail_comma(jsc):
ret = ""
for ic in range(0, len(jsc)):
c = jsc[ic]
if ic < len(jsc) - 1:
next_c = jsc[ic + 1]
if c == "," and (next_c == "}" or next_c == "]"):
continue
ret += c
return ret
#########################################################################################################
def jsc_parse_jsc(jsc):
# JSON does not support comments, so remove them first
jsc = jsc_remove_comments(jsc)
# convert ' into "
jsc = jsc_change_quotes(jsc)
# strip src
jsc = jsc_clean_src(jsc)
# add quotes to unquoted keys
jsc = quoted_object(jsc)
# remove trailing commas
jsc = jsc_remove_trail_comma(jsc)
return jsc
class ArgsInfo:
input_files = []
output_dir = ""
import_dir = ""
def PrintHelp():
print("usage: [cmd] [params]")
print("cmd arguments:")
print("-f : list of input files")
print("-o : ouput directory")
print("-i : import directory")
def parse_jsc(jsc, import_dir=None, parse_var=True):
# get imports
jsc, imports = jsc_get_imports(jsc, import_dir)
# first preprocess the jsc buffer
jsc = jsc_parse_jsc(jsc)
# load JSON from the processed jsc buffer
try:
print(jsc)
jsn = json.loads(jsc)
except:
traceback.print_exc()
exit(1)
# load the imported files
for import_file in imports:
import_jsn = load_jsc(import_file, import_dir, False)
if import_jsn:
process_inherit(jsn, import_jsn)
# process platform keys
process_platform_keys(jsn)
# process inheritance
process_inherit_recursive(jsn, jsn)
# process variables
if parse_var:
process_variables_recursive(jsn, dict())
return jsn
def load_jsc(file_name, import_dir=None, parse_var=True):
if not file_name:
return
jsc = open(file_name).read()
if not jsc:
return
return parse_jsc(jsc, import_dir, parse_var)
def parse_args():
args_info = ArgsInfo()
if len(sys.argv) == 1:
PrintHelp()
return args_info
for i in range(1, len(sys.argv)):
if sys.argv[i] == "-f":
file_index = i + 1
while file_index < len(sys.argv) and sys.argv[file_index][0] != "-":
args_info.input_files.append(sys.argv[file_index])
file_index = file_index + 1
i = file_index
elif sys.argv[i] == "-o":
args_info.output_dir = sys.argv[i + 1]
elif sys.argv[i] == "-i":
args_info.import_dir = sys.argv[i + 1]
return args_info
def generate(args_info, input_file_name, output_file_name):
print("start to generate:", input_file_name, output_file_name)
out_file = open(output_file_name, "w+")
json_obj = load_jsc(input_file_name, args_info.import_dir, True)
if json_obj:
out_file.write(json.dumps(json_obj, indent=4))
out_file.close()
print("finish generating:", input_file_name)
def main():
args_info = parse_args()
output_dir = args_info.output_dir
if not output_dir or not check_path_directory(output_dir) or len(args_info.input_files) == 0:
exit(1)
for i in args_info.input_files:
# create the output directory if it does not exist yet
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_file_name = os.path.join(output_dir, i)
output_file_name = change_path_extension(output_file_name, ".json")
generate(args_info, i, output_file_name)
if __name__ == '__main__':
main() |
the-stack_106_22910 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from tornado.httpclient import HTTPError, HTTPRequest
from opentelemetry import trace
from opentelemetry.instrumentation.utils import http_status_to_status_code
from opentelemetry.propagate import inject
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace.status import Status
from opentelemetry.util._time import _time_ns
from opentelemetry.util.http import remove_url_credentials
def _normalize_request(args, kwargs):
req = args[0]
if not isinstance(req, str):
return (args, kwargs)
new_kwargs = {}
for param in ("callback", "raise_error"):
if param in kwargs:
new_kwargs[param] = kwargs.pop(param)
req = HTTPRequest(req, **kwargs)
new_args = [req]
new_args.extend(args[1:])
return (new_args, new_kwargs)
def fetch_async(tracer, request_hook, response_hook, func, _, args, kwargs):
start_time = _time_ns()
# Return immediately if no args were provided (error)
# or original_request is set (meaning we are in a redirect step).
if len(args) == 0 or hasattr(args[0], "original_request"):
return func(*args, **kwargs)
# Force the creation of a HTTPRequest object if needed,
# so we can inject the context into the headers.
args, kwargs = _normalize_request(args, kwargs)
request = args[0]
span = tracer.start_span(
request.method, kind=trace.SpanKind.CLIENT, start_time=start_time,
)
if request_hook:
request_hook(span, request)
if span.is_recording():
attributes = {
SpanAttributes.HTTP_URL: remove_url_credentials(request.url),
SpanAttributes.HTTP_METHOD: request.method,
}
for key, value in attributes.items():
span.set_attribute(key, value)
with trace.use_span(span):
inject(request.headers)
future = func(*args, **kwargs)
future.add_done_callback(
functools.partial(
_finish_tracing_callback,
span=span,
response_hook=response_hook,
)
)
return future
def _finish_tracing_callback(future, span, response_hook):
status_code = None
description = None
exc = future.exception()
if span.is_recording() and exc:
if isinstance(exc, HTTPError):
status_code = exc.code
description = f"{type(exc).__name__}: {exc}"
else:
status_code = future.result().code
if status_code is not None:
span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)
span.set_status(
Status(
status_code=http_status_to_status_code(status_code),
description=description,
)
)
if response_hook:
response_hook(span, future)
span.end()
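def _instrument_fetch(tracer, request_hook=None, response_hook=None):
    # A minimal sketch (editor illustration, not part of the original module) of how
    # fetch_async is typically attached to Tornado's client; it assumes the `wrapt`
    # package is available.
    from wrapt import wrap_function_wrapper

    wrap_function_wrapper(
        "tornado.httpclient",
        "AsyncHTTPClient.fetch",
        functools.partial(fetch_async, tracer, request_hook, response_hook),
    )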
|
the-stack_106_22911 | import logging
from .what_if_index_creation import WhatIfIndexCreation
class CostEvaluation:
def __init__(self, db_connector, cost_estimation="whatif"):
logging.debug("Init cost evaluation")
self.db_connector = db_connector
self.cost_estimation = cost_estimation
logging.info("Cost estimation with " + self.cost_estimation)
self.what_if = WhatIfIndexCreation(db_connector)
self.current_indexes = set()
self.cost_requests = 0
self.cache_hits = 0
# Cache structure:
# {(query_object, relevant_indexes): cost}
self.cache = {}
self.completed = False
# It is not necessary to drop hypothetical indexes during __init__().
# These are only created per connection. Hence, none should be present.
self.relevant_indexes_cache = {}
def estimate_size(self, index):
# TODO: Refactor: It is currently too complicated to compute
# We must search in current indexes to get an index object with .hypopg_oid
result = None
for i in self.current_indexes:
if index == i:
result = i
break
if result:
# Index does currently exist and size can be queried
if not index.estimated_size:
index.estimated_size = self.what_if.estimate_index_size(result.hypopg_oid)
else:
self._simulate_or_create_index(index, store_size=True)
def which_indexes_utilized_and_cost(self, query, indexes):
self._prepare_cost_calculation(indexes, store_size=True)
plan = self.db_connector.get_plan(query)
cost = plan["Total Cost"]
plan_str = str(plan)
recommended_indexes = set()
# We are iterating over the CostEvaluation's indexes and not over `indexes`
# because it is not guaranteed that hypopg_name is set for all items in
# `indexes`. This is caused by _prepare_cost_calculation that only creates
# indexes which are not yet existing. If there is no hypothetical index
# created for an index object, there is no hypopg_name assigned to it. However,
# all items in current_indexes must also have an equivalent in `indexes`.
for index in self.current_indexes:
assert (
index in indexes
), "Something went wrong with _prepare_cost_calculation."
if index.hypopg_name not in plan_str:
continue
recommended_indexes.add(index)
return recommended_indexes, cost
def calculate_cost(self, workload, indexes, store_size=False):
assert (
self.completed is False
), "Cost Evaluation is completed and cannot be reused."
self._prepare_cost_calculation(indexes, store_size=store_size)
total_cost = 0
# TODO: Make query cost higher for queries which are running often
for query in workload.queries:
self.cost_requests += 1
total_cost += self._request_cache(query, indexes)
return total_cost
# Creates the current index combination by simulating/creating
# missing indexes and unsimulating/dropping indexes
# that exist but are not in the combination.
def _prepare_cost_calculation(self, indexes, store_size=False):
for index in set(indexes) - self.current_indexes:
self._simulate_or_create_index(index, store_size=store_size)
for index in self.current_indexes - set(indexes):
self._unsimulate_or_drop_index(index)
assert self.current_indexes == set(indexes)
def _simulate_or_create_index(self, index, store_size=False):
if self.cost_estimation == "whatif":
self.what_if.simulate_index(index, store_size=store_size)
elif self.cost_estimation == "actual_runtimes":
self.db_connector.create_index(index)
self.current_indexes.add(index)
def _unsimulate_or_drop_index(self, index):
if self.cost_estimation == "whatif":
self.what_if.drop_simulated_index(index)
elif self.cost_estimation == "actual_runtimes":
self.db_connector.drop_index(index)
self.current_indexes.remove(index)
def _get_cost(self, query):
if self.cost_estimation == "whatif":
return self.db_connector.get_cost(query)
elif self.cost_estimation == "actual_runtimes":
runtime = self.db_connector.exec_query(query)[0]
return runtime
def complete_cost_estimation(self):
self.completed = True
for index in self.current_indexes.copy():
self._unsimulate_or_drop_index(index)
assert self.current_indexes == set()
def _request_cache(self, query, indexes):
# used to reduce the number of cost computations
q_i_hash = (query, frozenset(indexes))
if q_i_hash in self.relevant_indexes_cache:
relevant_indexes = self.relevant_indexes_cache[q_i_hash]
else:
relevant_indexes = self._relevant_indexes(query, indexes)
self.relevant_indexes_cache[q_i_hash] = relevant_indexes
# Check if query and corresponding relevant indexes in cache
if (query, relevant_indexes) in self.cache:
self.cache_hits += 1
return self.cache[(query, relevant_indexes)]
# If no cache hit request cost from database system
else:
cost = self._get_cost(query)
self.cache[(query, relevant_indexes)] = cost
return cost
@staticmethod
def _relevant_indexes(query, indexes):
relevant_indexes = [
x for x in indexes if any(c in query.columns for c in x.columns)
]
return frozenset(relevant_indexes)
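def _example_cost_evaluation(db_connector, workload, candidate_indexes):
    # Minimal usage sketch (editor illustration): estimate the workload cost under a
    # candidate index configuration via hypothetical (what-if) indexes, then clean up.
    cost_evaluation = CostEvaluation(db_connector)
    cost = cost_evaluation.calculate_cost(workload, candidate_indexes, store_size=True)
    cost_evaluation.complete_cost_estimation()
    return cost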
|
the-stack_106_22912 | import os
import logging
import pathlib
# from importlib import resources
# for now using importlib_resources instead of importlib
# for compatibility with python 3.8
import importlib_resources as resources
import click
from . import config
DATEFMT = "%Y/%m/%d %H:%M:%S"
# https://stackoverflow.com/a/56944256/13333330
class CustomFormatter(logging.Formatter):
grey = "\x1b[38m"
yellow = "\x1b[33m"
red = "\x1b[31m"
bold_red = "\x1b[31;21m"
reset = "\x1b[0m"
format = "%(message)s"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset,
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt, DATEFMT)
return formatter.format(record)
class AppmanLogger:
datefmt = "%Y/%m/%d %H:%M:%S"
def __init__(self, script_path, file_log_level=None, console_log_level="DEBUG"):
name = pathlib.Path(script_path).stem
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
if console_log_level is not None:
ch = logging.StreamHandler()
chlevel = getattr(logging, console_log_level.upper())
ch.setLevel(chlevel)
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
if file_log_level is not None:
fhformat = "%(asctime)s - %(levelname)s - %(message)s"
path = resources.files(config.LOGS_PKG)
log_path = os.path.join(path, f"{name}.log")
fh = logging.FileHandler(log_path)
fhlevel = getattr(logging, file_log_level.upper())
fh.setLevel(fhlevel)
fh.setFormatter(logging.Formatter(fhformat, DATEFMT))
logger.addHandler(fh)
self.logger = logger
def console(self, msg):
click.echo(msg)
def info(self, msg):
self.logger.info(msg)
def success(self, msg):
click.secho(msg, fg="green")
def warning(self, msg):
self.logger.warning(msg)
def error(self, msg):
self.logger.error(msg)
def critical(self, msg):
self.logger.critical(msg)
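def _example_usage(script_path=__file__):
    # Minimal usage sketch (editor illustration): a console-only logger at INFO level;
    # pass file_log_level (e.g. "debug") to also write <name>.log into the logs package.
    logger = AppmanLogger(script_path, file_log_level=None, console_log_level="info")
    logger.info("starting")
    logger.warning("something odd happened")
    logger.success("done")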
|
the-stack_106_22913 | import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
import tensorflow.keras as keras
import numpy as np
import utils
import sys
import absl
import bert
import functools
import os
from rb.processings.encoders.bert import BertWrapper
from rb.core.lang import Lang
FLAGS = absl.flags.FLAGS
absl.flags.DEFINE_integer('bert_max_seq_len', 128, 'Maximum sequence length')
absl.flags.DEFINE_string('model_folder_path', '../Models/raw_old/full/clean/trained_128_512/ro0/', 'Path to bert model folder')
absl.flags.DEFINE_float('learning_rate', 1e-5, 'Learning Rate used for optimization')
absl.flags.DEFINE_integer('batch_size', 32, 'Batch size to use during training')
absl.flags.DEFINE_integer('epochs', 1, 'Number of epochs to train')
absl.flags.DEFINE_float('dropout_rate', 0.5, 'Dropout rate')
absl.flags.DEFINE_integer('num_classes', 4, "Number of classes for clasification task")
absl.flags.DEFINE_integer('experiment_index', 1, 'Index of current experiment. Will be appended to weights file')
absl.flags.DEFINE_string('save_folder_path',".", "Save folder prefix")
absl.flags.DEFINE_bool("use_tpu", False, "Use TPU or not")
absl.flags.DEFINE_string("tpu_name", None, "Name of TPU instance")
def gen_data(features, labels):
for index in range(len(features[0])):
yield ({'input_ids': features[0][index], 'segment_ids': features[1][index]}, labels[index])
def create_model():
# define input
input_ids = tf.keras.layers.Input(shape=(FLAGS.bert_max_seq_len), dtype=tf.int32, name="input_ids")
segment_ids = tf.keras.layers.Input(shape=(FLAGS.bert_max_seq_len), dtype=tf.int32, name="segment_ids")
# bert_wrapper
bert_wrapper = BertWrapper(Lang.RO, max_seq_len=FLAGS.bert_max_seq_len, model_name=FLAGS.bert_model_type)
sys.exit()
# define model
bert_model = BertModel.BertModel(FLAGS.model_folder_path, FLAGS.max_seq_len, do_lower_case)
bert_output = bert_model.bert_layer([input_ids, segment_ids])
cls_output = keras.layers.Lambda(lambda seq: seq[:, 0, :])(bert_output)
cls_drop = keras.layers.Dropout(FLAGS.dropout_rate)(cls_output)
fc1 = keras.layers.Dense(units=100, activation="relu")(cls_drop)
prediction = keras.layers.Dense(units=FLAGS.num_classes, activation="softmax")(fc1)
# build model
model = keras.Model(inputs=[input_ids, segment_ids], outputs=prediction)
model.build(input_shape=[(None, FLAGS.max_seq_len), (None, FLAGS.max_seq_len)])
# load pretrained
bert.load_bert_weights(bert_model.bert_layer, FLAGS.model_folder_path+"bert_model.ckpt")
model.compile(optimizer=keras.optimizers.Adam(lr=FLAGS.learning_rate), loss = 'categorical_crossentropy', metrics = [tf.keras.metrics.categorical_accuracy])
model.summary()
print("Do lower case =", do_lower_case)
return model, bert_model
def main(argv):
del argv
model, bert = create_model()
# load data
train_data = utils.readJson("../Dataset/Reviews/4Classes/train.json")
dev_data = utils.readJson("../Dataset/Reviews/4Classes/dev.json")
# test_data = utils.readJson("../Dataset/Reviews/4Classes/test.json")
train_features = utils.getFeatures(train_data, sample_majority=True, sample_count=20000, seed=12345)
train_features, train_labels, train_weights_dict = utils.processFeatures(train_features, bert)
print(len(train_features[0]), len(train_labels), train_weights_dict)
dev_features = utils.getFeatures(dev_data)
dev_features, dev_labels, _ = utils.processFeatures(dev_features, bert)
print(len(dev_features[0]), len(dev_labels))
# test_features = utils.getFeatures(test_data)
# test_features, test_labels, _ = utils.processFeatures(test_features, bert)
# print(len(test_features[0]), len(test_labels))
t1 = []
t2 = []
for i in range(len(train_features[0])):
t1.append(train_features[0][i])
t2.append(train_features[1][i])
train_dataset = tf.data.Dataset.from_tensor_slices(((t1, t2), train_labels))
train_dataset = train_dataset.shuffle(70000)
train_dataset = train_dataset.repeat(-1).batch(FLAGS.batch_size)
d1 = []
d2 = []
for i in range(len(dev_features[0])):
d1.append(dev_features[0][i])
d2.append(dev_features[1][i])
dev_dataset = tf.data.Dataset.from_tensor_slices(((d1, d2), dev_labels))
dev_dataset = dev_dataset.batch(32)
# dataset for metric :(
# dev_dataset_metric = tf.data.Dataset.from_generator(functools.partial(gen_data, dev_features, dev_labels), ({'input_ids': tf.int32, 'segment_ids': tf.int32}, tf.int32),
# ({'input_ids': tf.TensorShape([FLAGS.max_seq_len]), 'segment_ids': tf.TensorShape([FLAGS.max_seq_len])}, tf.TensorShape([None])))
# dev_dataset_metric = dev_dataset_metric.batch(FLAGS.batch_size)
fmetric = utils.FScoreCallback(dev_dataset, len(dev_labels)//32, dev_labels)
folder_name = FLAGS.model_folder_path.split("/")[-2]+"_"+str(FLAGS.experiment_index)
os.makedirs(FLAGS.save_folder_path+"/{0}/".format(folder_name))
results = []
for i in range(FLAGS.epochs):
print("EPOCH ", i+1)
_= model.fit(train_dataset, steps_per_epoch=len(train_labels)//FLAGS.batch_size, epochs=1, verbose=1)
model.evaluate(dev_dataset, callbacks=[fmetric])
model.save(FLAGS.save_folder_path+"/{0}/model{1}.h5".format(folder_name, str(i+1)))
if __name__ == "__main__":
absl.flags.mark_flag_as_required('model_folder_path')
absl.flags.mark_flag_as_required('experiment_index')
absl.app.run(main) |
the-stack_106_22914 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Marc Sensenich <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
module: office_365_connector_card
short_description: Use webhooks to create Connector Card messages within an Office 365 group
description:
- Creates Connector Card messages through
- Office 365 Connectors U(https://dev.outlook.com/Connectors)
author: "Marc Sensenich (@marc-sensenich)"
notes:
- This module is not idempotent, therefore if the same task is run twice
there will be two Connector Cards created
options:
webhook:
type: str
description:
- The webhook URL is given to you when you create a new Connector.
required: true
summary:
type: str
description:
- A string used for summarizing card content.
- This will be shown as the message subject.
- This is required if the text parameter isn't populated.
color:
type: str
description:
- Accent color used for branding or indicating status in the card.
title:
type: str
description:
- A title for the Connector message. Shown at the top of the message.
text:
type: str
description:
- The main text of the card.
- This will be rendered below the sender information and optional title,
- and above any sections or actions present.
actions:
type: list
elements: dict
description:
- This array of objects will power the action links
- found at the bottom of the card.
sections:
type: list
elements: dict
description:
- Contains a list of sections to display in the card.
- For more information see https://dev.outlook.com/Connectors/reference.
'''
EXAMPLES = """
- name: Create a simple Connector Card
community.general.office_365_connector_card:
webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
text: 'Hello, World!'
- name: Create a Connector Card with the full format
community.general.office_365_connector_card:
webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
summary: This is the summary property
title: This is the **card's title** property
text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
color: E81123
sections:
- title: This is the **section's title** property
activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
activity_title: This is the section's **activityTitle** property
activity_subtitle: This is the section's **activitySubtitle** property
activity_text: This is the section's **activityText** property.
hero_image:
image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
title: This is the image's alternate text
text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
facts:
- name: This is a fact name
value: This is a fact value
- name: This is a fact name
value: This is a fact value
- name: This is a fact name
value: This is a fact value
images:
- image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
title: This is the image's alternate text
- image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
title: This is the image's alternate text
- image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
title: This is the image's alternate text
actions:
- "@type": ActionCard
name: Comment
inputs:
- "@type": TextInput
id: comment
is_multiline: true
title: Input's title property
actions:
- "@type": HttpPOST
name: Save
target: http://...
- "@type": ActionCard
name: Due Date
inputs:
- "@type": DateInput
id: dueDate
title: Input's title property
actions:
- "@type": HttpPOST
name: Save
target: http://...
- "@type": HttpPOST
name: Action's name prop.
target: http://...
- "@type": OpenUri
name: Action's name prop
targets:
- os: default
uri: http://...
- start_group: true
title: This is the title of a **second section**
text: This second section is visually separated from the first one by setting its
**startGroup** property to true.
"""
RETURN = """
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions"
OFFICE_365_CARD_TYPE = "MessageCard"
OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required."
OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable."
def build_actions(actions):
action_items = []
for action in actions:
action_item = snake_dict_to_camel_dict(action)
action_items.append(action_item)
return action_items
def build_sections(sections):
sections_created = []
for section in sections:
sections_created.append(build_section(section))
return sections_created
def build_section(section):
section_payload = dict()
if 'title' in section:
section_payload['title'] = section['title']
if 'start_group' in section:
section_payload['startGroup'] = section['start_group']
if 'activity_image' in section:
section_payload['activityImage'] = section['activity_image']
if 'activity_title' in section:
section_payload['activityTitle'] = section['activity_title']
if 'activity_subtitle' in section:
section_payload['activitySubtitle'] = section['activity_subtitle']
if 'activity_text' in section:
section_payload['activityText'] = section['activity_text']
if 'hero_image' in section:
section_payload['heroImage'] = section['hero_image']
if 'text' in section:
section_payload['text'] = section['text']
if 'facts' in section:
section_payload['facts'] = section['facts']
if 'images' in section:
section_payload['images'] = section['images']
if 'actions' in section:
section_payload['potentialAction'] = build_actions(section['actions'])
return section_payload
def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None):
payload = dict()
payload['@context'] = OFFICE_365_CARD_CONTEXT
payload['@type'] = OFFICE_365_CARD_TYPE
if summary is not None:
payload['summary'] = summary
if color is not None:
payload['themeColor'] = color
if title is not None:
payload['title'] = title
if text is not None:
payload['text'] = text
if actions:
payload['potentialAction'] = build_actions(actions)
if sections:
payload['sections'] = build_sections(sections)
payload = module.jsonify(payload)
return payload
def do_notify_connector_card_webhook(module, webhook, payload):
headers = {
'Content-Type': 'application/json'
}
response, info = fetch_url(
module=module,
url=webhook,
headers=headers,
method='POST',
data=payload
)
if info['status'] == 200:
module.exit_json(changed=True)
elif info['status'] == 400 and module.check_mode:
if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG:
module.exit_json(changed=True)
else:
module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
else:
module.fail_json(
msg="failed to send %s as a connector card to Incoming Webhook: %s"
% (payload, info['msg'])
)
def main():
module = AnsibleModule(
argument_spec=dict(
webhook=dict(required=True, no_log=True),
summary=dict(type='str'),
color=dict(type='str'),
title=dict(type='str'),
text=dict(type='str'),
actions=dict(type='list', elements='dict'),
sections=dict(type='list', elements='dict')
),
supports_check_mode=True
)
webhook = module.params['webhook']
summary = module.params['summary']
color = module.params['color']
title = module.params['title']
text = module.params['text']
actions = module.params['actions']
sections = module.params['sections']
payload = build_payload_for_connector_card(
module,
summary,
color,
title,
text,
actions,
sections)
if module.check_mode:
# In check mode, send an empty payload to validate connection
check_mode_payload = build_payload_for_connector_card(module)
do_notify_connector_card_webhook(module, webhook, check_mode_payload)
do_notify_connector_card_webhook(module, webhook, payload)
if __name__ == '__main__':
main()
|
the-stack_106_22916 | #!/usr/bin/env python
import os
import sagemaker
import sys
import time
govuk_environment = os.environ["GOVUK_ENVIRONMENT"]
image = os.environ["IMAGE"]
role = os.environ["ROLE_ARN"]
training_data = os.environ["SCRIPT_INPUT_DATA"].strip()
image_tag = os.getenv("IMAGE_TAG", "latest")
s3_bucket = os.getenv("S3_BUCKET", f"govuk-{govuk_environment}-search-relevancy")
instance_count = os.getenv("INSTANCE_COUNT", 1)
instance_size = os.getenv("INSTANCE_SIZE", "ml.c5.xlarge")
session = sagemaker.Session()
train_key = f"data/{training_data}/train.txt"
test_key = f"data/{training_data}/test.txt"
validate_key = f"data/{training_data}/validate.txt"
model_name = f"{training_data}-{str(time.time())}"
# train model
estimator = sagemaker.estimator.Estimator(
f"{image}:{image_tag}",
role,
instance_count,
instance_size,
output_path=f"s3://{s3_bucket}/model/{model_name}",
sagemaker_session=session,
)
estimator.fit(
inputs={
"train": f"s3://{s3_bucket}/{train_key}",
"test": f"s3://{s3_bucket}/{test_key}",
"validate": f"s3://{s3_bucket}/{validate_key}",
}
)
print(model_name)
print("done", file=sys.stderr)
|
the-stack_106_22917 | # System libs
import os
import time
# import math
import random
import argparse
from distutils.version import LooseVersion
# Numerical libs
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
# Our libs
from config import cfg
from dataset import TrainDataset
from models import ModelBuilder, SegmentationModule
from utils import AverageMeter, parse_devices, setup_logger
from lib.nn import UserScatteredDataParallel, user_scattered_collate, patch_replication_callback
# train one epoch
def train(segmentation_module, iterator, optimizers, history, epoch, cfg):
batch_time = AverageMeter()
data_time = AverageMeter()
ave_total_loss = AverageMeter()
ave_acc = AverageMeter()
segmentation_module.train(not cfg.TRAIN.fix_bn)
# main loop
tic = time.time()
for i in range(cfg.TRAIN.epoch_iters):
# load a batch of data
batch_data = next(iterator)
data_time.update(time.time() - tic)
segmentation_module.zero_grad()
# adjust learning rate
cur_iter = i + (epoch - 1) * cfg.TRAIN.epoch_iters
adjust_learning_rate(optimizers, cur_iter, cfg)
# forward pass
loss, acc = segmentation_module(batch_data)
loss = loss.mean()
acc = acc.mean()
# Backward
loss.backward()
for optimizer in optimizers:
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - tic)
tic = time.time()
# update average loss and acc
ave_total_loss.update(loss.data.item())
ave_acc.update(acc.data.item()*100)
# calculate accuracy, and display
if i % cfg.TRAIN.disp_iter == 0:
print('Epoch: [{}][{}/{}], Time: {:.2f}, Data: {:.2f}, '
'lr_encoder: {:.6f}, lr_decoder: {:.6f}, '
'Accuracy: {:4.2f}, Loss: {:.6f}'
.format(epoch, i, cfg.TRAIN.epoch_iters,
batch_time.average(), data_time.average(),
cfg.TRAIN.running_lr_encoder, cfg.TRAIN.running_lr_decoder,
ave_acc.average(), ave_total_loss.average()))
fractional_epoch = epoch - 1 + 1. * i / cfg.TRAIN.epoch_iters
history['train']['epoch'].append(fractional_epoch)
history['train']['loss'].append(loss.data.item())
history['train']['acc'].append(acc.data.item())
def checkpoint(nets, history, cfg, epoch):
print('Saving checkpoints...')
(net_encoder, net_decoder, crit) = nets
dict_encoder = net_encoder.state_dict()
dict_decoder = net_decoder.state_dict()
torch.save(
history,
'{}/history_epoch_{}.pth'.format(cfg.DIR, epoch))
torch.save(
dict_encoder,
'{}/encoder_epoch_{}.pth'.format(cfg.DIR, epoch))
torch.save(
dict_decoder,
'{}/decoder_epoch_{}.pth'.format(cfg.DIR, epoch))
def group_weight(module):
group_decay = []
group_no_decay = []
for m in module.modules():
if isinstance(m, nn.Linear):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.modules.conv._ConvNd):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.modules.batchnorm._BatchNorm):
if m.weight is not None:
group_no_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
assert len(list(module.parameters())) == len(group_decay) + len(group_no_decay)
groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
return groups
def create_optimizers(nets, cfg):
(net_encoder, net_decoder, crit) = nets
optimizer_encoder = torch.optim.SGD(
group_weight(net_encoder),
lr=cfg.TRAIN.lr_encoder,
momentum=cfg.TRAIN.beta1,
weight_decay=cfg.TRAIN.weight_decay)
optimizer_decoder = torch.optim.SGD(
group_weight(net_decoder),
lr=cfg.TRAIN.lr_decoder,
momentum=cfg.TRAIN.beta1,
weight_decay=cfg.TRAIN.weight_decay)
return (optimizer_encoder, optimizer_decoder)
def adjust_learning_rate(optimizers, cur_iter, cfg):
scale_running_lr = ((1. - float(cur_iter) / cfg.TRAIN.max_iters) ** cfg.TRAIN.lr_pow)
cfg.TRAIN.running_lr_encoder = cfg.TRAIN.lr_encoder * scale_running_lr
cfg.TRAIN.running_lr_decoder = cfg.TRAIN.lr_decoder * scale_running_lr
(optimizer_encoder, optimizer_decoder) = optimizers
for param_group in optimizer_encoder.param_groups:
param_group['lr'] = cfg.TRAIN.running_lr_encoder
for param_group in optimizer_decoder.param_groups:
param_group['lr'] = cfg.TRAIN.running_lr_decoder
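def _example_poly_lr(base_lr=0.02, cur_iter=50000, max_iters=100000, lr_pow=0.9):
    # Worked example of the "poly" decay used above (editor illustration with
    # hypothetical hyper-parameters): halfway through training this returns
    # 0.02 * (1 - 0.5) ** 0.9, i.e. roughly 0.0107.
    return base_lr * ((1. - float(cur_iter) / max_iters) ** lr_pow)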
def main(cfg, gpus):
# Network Builders
net_encoder = ModelBuilder.build_encoder(
arch=cfg.MODEL.arch_encoder.lower(),
input_c=cfg.MODEL.input_c,
fc_dim=cfg.MODEL.fc_dim,
weights=cfg.MODEL.weights_encoder)
net_decoder = ModelBuilder.build_decoder(
arch=cfg.MODEL.arch_decoder.lower(),
fc_dim=cfg.MODEL.fc_dim,
num_class=cfg.DATASET.num_class,
weights=cfg.MODEL.weights_decoder)
crit = nn.NLLLoss(ignore_index=-1)
if cfg.MODEL.arch_decoder.endswith('deepsup'):
segmentation_module = SegmentationModule(
net_encoder, net_decoder, crit, cfg.TRAIN.deep_sup_scale)
else:
segmentation_module = SegmentationModule(
net_encoder, net_decoder, crit)
tensorboard = SummaryWriter(log_dir=cfg.DIR)
tensorboard.add_graph(
segmentation_module,
{'img_data': torch.zeros(1, cfg.MODEL.input_c, cfg.DATASET.imgMaxSize, cfg.DATASET.imgMaxSize),
'seg_label': torch.zeros(
1,
cfg.DATASET.imgMaxSize // cfg.DATASET.segm_downsampling_rate,
cfg.DATASET.imgMaxSize // cfg.DATASET.segm_downsampling_rate,
dtype=torch.long)})
# Dataset and Loader
dataset_train = TrainDataset(
cfg.DATASET.root_dataset,
cfg.DATASET.list_train,
cfg.DATASET,
batch_per_gpu=cfg.TRAIN.batch_size_per_gpu)
loader_train = torch.utils.data.DataLoader(
dataset_train,
batch_size=len(gpus), # we have modified data_parallel
shuffle=False, # we do not use this param
collate_fn=user_scattered_collate,
num_workers=cfg.TRAIN.workers,
drop_last=True,
pin_memory=True)
print('1 Epoch = {} iters'.format(cfg.TRAIN.epoch_iters))
# create loader iterator
iterator_train = iter(loader_train)
# load nets into gpu
if len(gpus) > 1:
segmentation_module = UserScatteredDataParallel(
segmentation_module,
device_ids=gpus)
# For sync bn
patch_replication_callback(segmentation_module)
segmentation_module.cuda()
# Set up optimizers
nets = (net_encoder, net_decoder, crit)
optimizers = create_optimizers(nets, cfg)
# Main loop
history = {'train': {'epoch': [], 'loss': [], 'acc': []}}
for epoch in range(cfg.TRAIN.start_epoch, cfg.TRAIN.num_epoch):
train(segmentation_module, iterator_train, optimizers, history, epoch+1, cfg)
# checkpointing
checkpoint(nets, history, cfg, epoch+1)
tensorboard.close()
print('Training Done!')
if __name__ == '__main__':
assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
'PyTorch>=0.4.0 is required'
parser = argparse.ArgumentParser(
description="PyTorch Semantic Segmentation Training"
)
parser.add_argument(
"--cfg",
default="config/bizcard-resnet18dilated-ppm_deepsup.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"--gpus",
default="0",
help="gpus to use, e.g. 0-3 or 0,1,2,3"
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
# cfg.freeze()
logger = setup_logger(distributed_rank=0) # TODO
logger.info("Loaded configuration file {}".format(args.cfg))
logger.info("Running with config:\n{}".format(cfg))
# Output directory
if not os.path.isdir(cfg.DIR):
os.makedirs(cfg.DIR)
logger.info("Outputing checkpoints to: {}".format(cfg.DIR))
with open(os.path.join(cfg.DIR, 'config.yaml'), 'w') as f:
f.write("{}".format(cfg))
# Start from checkpoint
if cfg.TRAIN.start_epoch > 0:
cfg.MODEL.weights_encoder = os.path.join(
cfg.DIR, 'encoder_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
cfg.MODEL.weights_decoder = os.path.join(
cfg.DIR, 'decoder_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
assert os.path.exists(cfg.MODEL.weights_encoder) and \
            os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"
# Parse gpu ids
gpus = parse_devices(args.gpus)
gpus = [x.replace('gpu', '') for x in gpus]
gpus = [int(x) for x in gpus]
num_gpus = len(gpus)
cfg.TRAIN.batch_size = num_gpus * cfg.TRAIN.batch_size_per_gpu
cfg.TRAIN.max_iters = cfg.TRAIN.epoch_iters * cfg.TRAIN.num_epoch
cfg.TRAIN.running_lr_encoder = cfg.TRAIN.lr_encoder
cfg.TRAIN.running_lr_decoder = cfg.TRAIN.lr_decoder
random.seed(cfg.TRAIN.seed)
torch.manual_seed(cfg.TRAIN.seed)
main(cfg, gpus)
|
the-stack_106_22918 | # Simple script to calculate halo/subhalo mass functions from hdf5
#
# Below run gives mass functions of the Micro-Uchuu simulation at z=0
# python uchuu_h5_mfunc.py MicroUchuu_halolist_z0p00.h5 mfunc.pdf
import numpy as np
import matplotlib.pyplot as plt
import h5py
import sys
args = sys.argv
inputfile = args[1]
outputfile = args[2]
hf = h5py.File( inputfile, 'r')
mvir = np.array( hf['Mvir'])
pid = np.array(hf['pid'])
hf.close()
mvir_halo = mvir[pid==-1]
mvir_subhalo = mvir[pid!=-1]
bins0 = np.logspace( 8, 16, 33)
n_halo, bins = np.histogram( mvir_halo, bins=(bins0))
n_subhalo, bins = np.histogram( mvir_subhalo, bins=(bins0))
mbins = np.zeros_like(n_halo, dtype=float)  # float array so the geometric bin centres are not truncated to ints
for i in range( len(bins)-1):
mbins[i] = np.sqrt( bins[i] * bins[i+1])
plt.xscale("log")
plt.yscale("log")
plt.plot( mbins, n_halo, "o-", label="halo")
plt.plot( mbins, n_subhalo, "s-", label="subhalo")
plt.legend()
plt.savefig( outputfile)
|
the-stack_106_22921 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import alphabets as al
from .polybius_square import PolybiusSquare
from itertools import cycle
class Bazeries:
"""
The Bazeries Cipher
"""
def __crypt(self, alphabet, text, key, is_encrypt=True):
# prepare digit key
temp = key[0]
digitkey = []
while temp:
temp, rmd = divmod(temp, 10)
digitkey.append(rmd)
digitkey = digitkey[::-1]
# prepare text: reversion
i = 0
revtext = []
digitkey = cycle(digitkey)
while i < len(text):
num = next(digitkey)
s = text[i:i + num]
revtext.append(s[::-1])
i += num
revtext = u"".join(revtext)
sq1 = PolybiusSquare(alphabet)
        # the second square is keyed with the keyword part of the key tuple (key[1])
sq2 = PolybiusSquare(alphabet, key[1])
if is_encrypt:
sq1, sq2 = sq2, sq1
# prepare substitution from alphabet
subst = {c: sq1.get_char(*reversed(sq2.get_coordinates(c))) for letters in alphabet for c in letters}
# cryption
return u"".join(subst[t] for t in revtext)
def encrypt(self, text, key, alphabet=al.ENGLISH_SQUARE_IJ):
"""
Encryption method
:param text: Text to encrypt
:param key: Encryption key
:param alphabet: Alphabet which will be used,
if there is no a value, English is used
:type text: string
:type key: integer
:type alphabet: string
:return: text
:rtype: string
"""
return self.__crypt(alphabet, text, key, True)
def decrypt(self, text, key, alphabet=al.ENGLISH_SQUARE_IJ):
"""
Decryption method
:param text: Text to decrypt
:param key: Decryption key
:param alphabet: Alphabet which will be used,
if there is no a value, English is used
:type text: string
:type key: integer
:type alphabet: string
:return: text
:rtype: string
"""
return self.__crypt(alphabet, text, key, False)
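# A minimal usage sketch (the key values below are illustrative only): the key
# is a (number, keyword) pair, matching how key[0] and key[1] are used in
# __crypt above.
if __name__ == "__main__":
    cipher = Bazeries()
    key = (81257, u"somekeyword")
    encrypted = cipher.encrypt(u"attackatdawn", key)
    print(encrypted)
    print(cipher.decrypt(encrypted, key))  # should print the original plaintext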
|
the-stack_106_22923 | # -*- coding: utf-8 -*-
import asyncio
import json
import logging
import os
import re
import warnings
from typing import Optional, List, Text, Any, Dict, TYPE_CHECKING, Iterable
import rasa.utils.io as io_utils
from rasa.constants import DOCS_BASE_URL
from rasa.core import utils
from rasa.core.constants import INTENT_MESSAGE_PREFIX
from rasa.core.events import ActionExecuted, UserUttered, Event, SlotSet
from rasa.core.exceptions import StoryParseError
from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter
from rasa.core.training.structures import (
Checkpoint,
STORY_START,
StoryStep,
GENERATED_CHECKPOINT_PREFIX,
GENERATED_HASH_LENGTH,
FORM_PREFIX,
)
from rasa.nlu.training_data.formats import MarkdownReader
from rasa.core.domain import Domain
if TYPE_CHECKING:
from rasa.nlu.training_data import Message
logger = logging.getLogger(__name__)
class EndToEndReader(MarkdownReader):
def _parse_item(self, line: Text) -> Optional["Message"]:
"""Parses an md list item line based on the current section type.
Matches expressions of the form `<intent>:<example>. For the
syntax of <example> see the Rasa docs on NLU training data:
{}/nlu/training-data-format/#markdown-format""".format(
DOCS_BASE_URL
)
item_regex = re.compile(r"\s*(.+?):\s*(.*)")
match = re.match(item_regex, line)
if match:
intent = match.group(1)
self.current_title = intent
message = match.group(2)
example = self._parse_training_example(message)
example.data["true_intent"] = intent
return example
raise ValueError(
"Encountered invalid end-to-end format for message "
"`{}`. Please visit the documentation page on "
"end-to-end evaluation at {}/user-guide/evaluating-models/"
"end-to-end-evaluation/".format(line, DOCS_BASE_URL)
)
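# Illustrative input for the reader above (format assumed from the regex in
# _parse_item): a line such as "greet: hello there" yields a Message whose text
# is "hello there" and whose data["true_intent"] is "greet".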
class StoryStepBuilder(object):
def __init__(self, name):
self.name = name
self.story_steps = []
self.current_steps = []
self.start_checkpoints = []
def add_checkpoint(self, name: Text, conditions: Optional[Dict[Text, Any]]) -> None:
# Depending on the state of the story part this
# is either a start or an end check point
if not self.current_steps:
self.start_checkpoints.append(Checkpoint(name, conditions))
else:
if conditions:
logger.warning(
"End or intermediate checkpoints "
"do not support conditions! "
"(checkpoint: {})".format(name)
)
additional_steps = []
for t in self.current_steps:
if t.end_checkpoints:
tcp = t.create_copy(use_new_id=True)
tcp.end_checkpoints = [Checkpoint(name)]
additional_steps.append(tcp)
else:
t.end_checkpoints = [Checkpoint(name)]
self.current_steps.extend(additional_steps)
def _prev_end_checkpoints(self):
if not self.current_steps:
return self.start_checkpoints
else:
# makes sure we got each end name only once
end_names = {e.name for s in self.current_steps for e in s.end_checkpoints}
return [Checkpoint(name) for name in end_names]
def add_user_messages(self, messages: List[UserUttered]):
self.ensure_current_steps()
if len(messages) == 1:
# If there is only one possible intent, we'll keep things simple
for t in self.current_steps:
t.add_user_message(messages[0])
else:
            # If there are multiple different intents the user can use to
            # express the same thing, we need to copy the blocks and create
            # one copy for each possible message.
prefix = GENERATED_CHECKPOINT_PREFIX + "OR_"
generated_checkpoint = utils.generate_id(prefix, GENERATED_HASH_LENGTH)
updated_steps = []
for t in self.current_steps:
for m in messages:
copied = t.create_copy(use_new_id=True)
copied.add_user_message(m)
copied.end_checkpoints = [Checkpoint(generated_checkpoint)]
updated_steps.append(copied)
self.current_steps = updated_steps
def add_event(self, event):
self.ensure_current_steps()
for t in self.current_steps:
t.add_event(event)
def ensure_current_steps(self):
completed = [step for step in self.current_steps if step.end_checkpoints]
unfinished = [step for step in self.current_steps if not step.end_checkpoints]
self.story_steps.extend(completed)
if unfinished:
self.current_steps = unfinished
else:
self.current_steps = self._next_story_steps()
def flush(self):
if self.current_steps:
self.story_steps.extend(self.current_steps)
self.current_steps = []
def _next_story_steps(self):
start_checkpoints = self._prev_end_checkpoints()
if not start_checkpoints:
start_checkpoints = [Checkpoint(STORY_START)]
current_turns = [
StoryStep(block_name=self.name, start_checkpoints=start_checkpoints)
]
return current_turns
class StoryFileReader(object):
"""Helper class to read a story file."""
def __init__(
self,
domain: Domain,
interpreter: NaturalLanguageInterpreter,
template_vars: Optional[Dict] = None,
use_e2e: bool = False,
):
self.story_steps = []
self.current_step_builder = None # type: Optional[StoryStepBuilder]
self.domain = domain
self.interpreter = interpreter
self.template_variables = template_vars if template_vars else {}
self.use_e2e = use_e2e
@staticmethod
async def read_from_folder(
resource_name: Text,
domain: Domain,
interpreter: NaturalLanguageInterpreter = RegexInterpreter(),
template_variables: Optional[Dict] = None,
use_e2e: bool = False,
exclusion_percentage: Optional[int] = None,
) -> List[StoryStep]:
"""Given a path reads all contained story files."""
if not os.path.exists(resource_name):
raise ValueError(
"Story file or folder could not be found. Make "
"sure '{}' exists and points to a story folder "
"or file.".format(os.path.abspath(resource_name))
)
files = io_utils.list_files(resource_name)
return await StoryFileReader.read_from_files(
files,
domain,
interpreter,
template_variables,
use_e2e,
exclusion_percentage,
)
@staticmethod
async def read_from_files(
files: Iterable[Text],
domain: Domain,
interpreter: NaturalLanguageInterpreter = RegexInterpreter(),
template_variables: Optional[Dict] = None,
use_e2e: bool = False,
exclusion_percentage: Optional[int] = None,
) -> List[StoryStep]:
story_steps = []
for f in files:
steps = await StoryFileReader.read_from_file(
f, domain, interpreter, template_variables, use_e2e
)
story_steps.extend(steps)
# if exclusion percentage is not 100
if exclusion_percentage and exclusion_percentage != 100:
import random
idx = int(round(exclusion_percentage / 100.0 * len(story_steps)))
random.shuffle(story_steps)
story_steps = story_steps[:-idx]
return story_steps
@staticmethod
async def read_from_file(
filename: Text,
domain: Domain,
interpreter: NaturalLanguageInterpreter = RegexInterpreter(),
template_variables: Optional[Dict] = None,
use_e2e: bool = False,
) -> List[StoryStep]:
"""Given a md file reads the contained stories."""
try:
with open(filename, "r", encoding=io_utils.DEFAULT_ENCODING) as f:
lines = f.readlines()
reader = StoryFileReader(domain, interpreter, template_variables, use_e2e)
return await reader.process_lines(lines)
except ValueError as err:
file_info = "Invalid story file format. Failed to parse '{}'".format(
os.path.abspath(filename)
)
logger.exception(file_info)
if not err.args:
err.args = ("",)
err.args = err.args + (file_info,)
raise
@staticmethod
def _parameters_from_json_string(s: Text, line: Text) -> Dict[Text, Any]:
"""Parse the passed string as json and create a parameter dict."""
if s is None or not s.strip():
# if there is no strings there are not going to be any parameters
return {}
try:
parsed_slots = json.loads(s)
if isinstance(parsed_slots, dict):
return parsed_slots
else:
raise Exception(
"Parsed value isn't a json object "
"(instead parser found '{}')"
".".format(type(parsed_slots))
)
except Exception as e:
            raise ValueError(
                "Failed to parse arguments in line "
                "'{}'. Could not decode the parameters "
                "as a json object. Make sure the event "
                "name is followed by a proper json "
                "object. Error: {}".format(line, e)
            )
@staticmethod
def _parse_event_line(line):
"""Tries to parse a single line as an event with arguments."""
# the regex matches "slot{"a": 1}"
m = re.search("^([^{]+)([{].+)?", line)
if m is not None:
event_name = m.group(1).strip()
slots_str = m.group(2)
parameters = StoryFileReader._parameters_from_json_string(slots_str, line)
return event_name, parameters
else:
warnings.warn(
"Failed to parse action line '{}'. Ignoring this line.".format(line)
)
return "", {}
async def process_lines(self, lines: List[Text]) -> List[StoryStep]:
multiline_comment = False
for idx, line in enumerate(lines):
line_num = idx + 1
try:
line = self._replace_template_variables(self._clean_up_line(line))
if line.strip() == "":
continue
elif line.startswith("<!--"):
multiline_comment = True
continue
elif multiline_comment and line.endswith("-->"):
multiline_comment = False
continue
elif multiline_comment:
continue
elif line.startswith("#"):
# reached a new story block
name = line[1:].strip("# ")
self.new_story_part(name)
elif line.startswith(">"):
# reached a checkpoint
name, conditions = self._parse_event_line(line[1:].strip())
self.add_checkpoint(name, conditions)
elif re.match(r"^[*\-]\s+{}".format(FORM_PREFIX), line):
logger.debug(
"Skipping line {}, "
"because it was generated by "
"form action".format(line)
)
elif line.startswith("-"):
# reached a slot, event, or executed action
event_name, parameters = self._parse_event_line(line[1:])
self.add_event(event_name, parameters)
elif line.startswith("*"):
# reached a user message
user_messages = [el.strip() for el in line[1:].split(" OR ")]
if self.use_e2e:
await self.add_e2e_messages(user_messages, line_num)
else:
await self.add_user_messages(user_messages, line_num)
else:
# reached an unknown type of line
logger.warning(
"Skipping line {}. "
"No valid command found. "
"Line Content: '{}'"
"".format(line_num, line)
)
except Exception as e:
msg = "Error in line {}: {}".format(line_num, e)
logger.error(msg, exc_info=1) # pytype: disable=wrong-arg-types
raise ValueError(msg)
self._add_current_stories_to_result()
return self.story_steps
def _replace_template_variables(self, line: Text) -> Text:
def process_match(matchobject):
varname = matchobject.group(1)
if varname in self.template_variables:
return self.template_variables[varname]
else:
raise ValueError(
"Unknown variable `{var}` "
"in template line '{line}'"
"".format(var=varname, line=line)
)
template_rx = re.compile(r"`([^`]+)`")
return template_rx.sub(process_match, line)
@staticmethod
def _clean_up_line(line: Text) -> Text:
"""Removes comments and trailing spaces"""
return re.sub(r"<!--.*?-->", "", line).strip()
def _add_current_stories_to_result(self):
if self.current_step_builder:
self.current_step_builder.flush()
self.story_steps.extend(self.current_step_builder.story_steps)
def new_story_part(self, name):
self._add_current_stories_to_result()
self.current_step_builder = StoryStepBuilder(name)
def add_checkpoint(self, name: Text, conditions: Optional[Dict[Text, Any]]) -> None:
# Ensure story part already has a name
if not self.current_step_builder:
raise StoryParseError(
"Checkpoint '{}' is at an invalid location. "
"Expected a story start.".format(name)
)
self.current_step_builder.add_checkpoint(name, conditions)
async def _parse_message(self, message: Text, line_num: int):
if message.startswith(INTENT_MESSAGE_PREFIX):
parse_data = await RegexInterpreter().parse(message)
else:
parse_data = await self.interpreter.parse(message)
utterance = UserUttered(
message, parse_data.get("intent"), parse_data.get("entities"), parse_data
)
intent_name = utterance.intent.get("name")
if intent_name not in self.domain.intents:
logger.warning(
"Found unknown intent '{}' on line {}. "
"Please, make sure that all intents are "
"listed in your domain yaml."
"".format(intent_name, line_num)
)
return utterance
async def add_user_messages(self, messages, line_num):
if not self.current_step_builder:
raise StoryParseError(
"User message '{}' at invalid location. "
"Expected story start.".format(messages)
)
parsed_messages = await asyncio.gather(
*[self._parse_message(m, line_num) for m in messages]
)
self.current_step_builder.add_user_messages(parsed_messages)
async def add_e2e_messages(self, e2e_messages, line_num):
if not self.current_step_builder:
raise StoryParseError(
"End-to-end message '{}' at invalid "
"location. Expected story start."
"".format(e2e_messages)
)
e2e_reader = EndToEndReader()
parsed_messages = []
for m in e2e_messages:
message = e2e_reader._parse_item(m)
parsed = await self._parse_message(message.text, line_num)
parsed.parse_data["true_intent"] = message.data["true_intent"]
parsed.parse_data["true_entities"] = message.data.get("entities") or []
parsed_messages.append(parsed)
self.current_step_builder.add_user_messages(parsed_messages)
def add_event(self, event_name, parameters):
# add 'name' only if event is not a SlotSet,
# because there might be a slot with slot_key='name'
if "name" not in parameters and event_name != SlotSet.type_name:
parameters["name"] = event_name
parsed_events = Event.from_story_string(
event_name, parameters, default=ActionExecuted
)
if parsed_events is None:
            raise StoryParseError(
                "Unknown event '{}'. It is neither an event "
                "nor an action.".format(event_name)
            )
if self.current_step_builder is None:
raise StoryParseError(
"Failed to handle event '{}'. There is no "
"started story block available. "
"".format(event_name)
)
for p in parsed_events:
self.current_step_builder.add_event(p)
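# Minimal usage sketch (the domain and story paths are placeholders; a real
# project would point these at its own files):
if __name__ == "__main__":
    example_domain = Domain.load("domain.yml")
    example_steps = asyncio.get_event_loop().run_until_complete(
        StoryFileReader.read_from_folder("data/stories", example_domain)
    )
    logger.info("Parsed %s story steps", len(example_steps))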
|
the-stack_106_22924 | class ListNode():
def __init__(self, x):
self.val = x
self.next = None
def make_list(A):
head = ListNode(A[0])
ptr = head
for i in A[1:]:
ptr.next = ListNode(i)
ptr = ptr.next
return head
class SubtractList():
# @param A : head node of linked list
# @return the head node in the linked list
    def subtract(self, A):
        # Copy the values first so reversing does not destroy the input list.
        vals = []
        ptr = A
        while ptr:
            vals.append(ptr.val)
            ptr = ptr.next
        revA = make_list(vals[::-1])
        # Build a new list whose i-th value is reversed[i] - original[i].
        head = tail = None
        while A and revA:
            node = ListNode(revA.val - A.val)
            if head is None:
                head = node
            else:
                tail.next = node
            tail = node
            A = A.next
            revA = revA.next
        return head
def rev(A):
B = A
prev = None
C = B
while B:
temp = B
B = B.next
temp.next = prev
prev = temp
return prev
def display(head):
L = []
while head.next:
L.append(head.val)
head = head.next
L.append(head.val)
print(L)
A = [1,2,3,4,5]
a = make_list(A)
display(a)
s = SubtractList()
# r = s.rev(a)
# display(r)
# display(a)
f = s.subtract(a)
display(f)
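# For A = [1, 2, 3, 4, 5] the reversed list is [5, 4, 3, 2, 1], so the
# element-wise difference printed above should be [4, 2, 0, -2, -4].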
|
the-stack_106_22925 | """
Parses OpenAPI spec files and other related classes
MIT License
(C) Copyright [2020] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from copy import copy
from cray.nesteddict import NestedDict
from cray.constants import IGNORE_TAG, CONVERSTION_FLAG
class Schema(object):
""" OpenAPI Schema Parser """
_NESTED_ARRAY = 'nested_array'
_NESTED_OBJECT = 'nested_object'
_NESTINGS = [_NESTED_ARRAY, _NESTED_OBJECT]
def __init__(self, schema, prefix=None, required=None, **kwargs):
self.schema = schema
self.prefix = prefix
schema_req = self.schema.get('required')
# Remove any children required if the parent is not required
if required is False and schema_req is not None:
del self.schema['required']
self.required = required or schema_req
self.parsed = self.parse(**kwargs)
def _make_name(self, name=None, default=None):
keys = []
if name is None and self.prefix is None and default is None:
raise ValueError("Either name or prefix is required")
if self.prefix is not None:
keys = keys + self.prefix.split('-')
if name is not None:
keys = keys + name.split('-')
if not keys and default is not None:
keys.append(default)
return '-'.join([k for k in keys if k != ''])
@classmethod
def _set_nesting(cls, param, default=None):
nested = len(param['name'].split('-')) > 1
if param.get('nesting') in cls._NESTINGS:
# Nothing to change.
return None
if not nested:
return False
default = default or cls._NESTED_OBJECT
param['nesting'] = default
return True
def get_parser(self):
""" Get the parsed data """
return self.parsed
def parse(self, **kwargs):
""" Parse schema """
raise NotImplementedError
class SchemaObject(Schema):
""" OpenAPI Object Schema Parser """
def parse(self, **kwargs):
params = []
required = [i.lower() for i in self.schema.get('required', [])]
for name, param in self.schema.get('properties', {}).items():
fullname = self._make_name(name=name)
param_type = _find_type(param, 'type', 'object')
required_param = (name.lower() in required)
kwargs.update({
'prefix': fullname,
'required': required_param
})
parsed = parse_schema_type(param_type, param, **kwargs).parsed
parsed_params = parsed['params']
for parsed_param in parsed_params:
parsed_param.update(parsed['options'])
self._set_nesting(parsed_param)
params = params + parsed_params
return {'params': params, 'options': {}}
class SchemaArray(Schema):
""" OpenAPI Array Schema Parser """
def _get_opts(self):
""" Get array options to pass back in kwargs """
opts = self.schema.copy()
to_remove = ('items', 'type')
for key in to_remove:
opts.pop(key, None)
return opts
def parse(self, **kwargs):
params = []
kwargs.update(self._get_opts())
items = self.schema['items']
item_type = _find_type(items, 'type', 'object')
item_name = self._make_name(name=self.schema.get('name'), default='')
kwargs.update({
'prefix': item_name,
'required': self.required or kwargs.get('required', False)
})
parsed = parse_schema_type(item_type, items, **kwargs).parsed
params = parsed['params']
for param in params:
self._set_nesting(param, default=self._NESTED_ARRAY)
options = parsed['options']
options.update({
'nesting': self._NESTED_ARRAY,
'array_item_type': item_type
})
return {'params': params, 'options': options}
class SchemaString(Schema):
""" OpenAPI String/Catchall Schema Parser """
@staticmethod
def _get_type(param):
ptype = param.get('type')
pformat = param.get('format')
pname = param.get('name')
if param.get('enum'):
return 'choice'
if pformat == 'binary':
return 'filepath'
if 'password' in pname:
return 'password'
return ptype
@classmethod
def _format_body_param(cls, name, param):
param['required'] = param.get('required', False)
remove = ['xml', 'properties']
to_remove = [r for r in param.keys() if r in remove]
for remove in to_remove:
del param[remove]
param.update({
'name': name,
})
param['type'] = cls._get_type(param)
return param
def parse(self, **kwargs):
name = self.schema.get('name')
kwargs['required'] = self.required or kwargs.get('required', False)
name_opts = {}
# required = self.required or kwargs.get('required', False)
if self.schema.get('format') == 'binary':
if name is None:
name_opts['default'] = 'file'
kwargs['required'] = True
self.schema.update(**kwargs)
fullname = self._make_name(name=name, **name_opts)
params = [self._format_body_param(fullname, self.schema)]
return {'params': params, 'options': {}}
def parse_schema_type(stype, schema, **kwargs):
""" Return the proper schema class based on schema type """
schemas = {
'object': SchemaObject,
'array': SchemaArray,
'string': SchemaString,
'allOf': handle_complex,
'oneOf': handle_complex,
'anyOf': handle_complex
}
return schemas.get(stype, SchemaString)(schema, **kwargs)
def _find_type(param, default_type, default_value=None):
if 'allOf' in param:
param_type = 'allOf'
elif 'oneOf' in param:
param_type = 'oneOf'
elif 'anyOf' in param:
param_type = 'anyOf'
else:
param_type = param.get(default_type, default_value)
return param_type
def handle_complex(schema, **kwargs):
""" Return the nested *Of: schemas """
out = _merge_complex(schema)
return parse_schema_type(out['type'], out, **kwargs)
def _merge_complex(schema):
schemas = []
    for k in schema.keys():
        # allOf/oneOf/anyOf carry a list of sub-schemas to merge
        if isinstance(schema[k], list):
            schemas.extend(schema[k])
out = {
"type": "object",
"required": [],
"properties": {}
}
for schema_entry in schemas:
if bool(set(schema_entry.keys()) & set(['allOf', 'anyOf', 'oneOf'])):
schema_entry = _merge_complex(schema_entry)
out['properties'].update(schema_entry['properties'])
return out
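# Illustrative input/output for the merge helper above (assuming the list-valued
# allOf/oneOf/anyOf form used in OpenAPI documents):
#   {'allOf': [{'type': 'object', 'properties': {'a': {'type': 'string'}}},
#              {'type': 'object', 'properties': {'b': {'type': 'integer'}}}]}
# merges into a single object schema whose 'properties' contains both 'a' and 'b'.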
class Swagger(object):
""" Parses a swagger file and schemas as generates a dict consumbale by the
Cray CLI
"""
_MIME_JSON = 'application/json'
_MIME_FORM = 'application/x-www-form-urlencoded'
_MIME_OCTET = 'application/octet-stream'
_MIME_MULTIPART = 'multipart/form-data'
_MIME_CATCHALL = '*/*'
# _SUPPORTED_MIMES should be preferred order or choice.
# Make sure _CATCHALL is always last in the list.
_SUPPORTED_MIMES = [_MIME_JSON, _MIME_FORM, _MIME_MULTIPART, _MIME_OCTET]
_SUPPORTED_MIMES.append(_MIME_CATCHALL)
_VOCABULARY = {
'getall': 'list',
'get': 'describe',
'post': 'create',
'put': 'update',
'patch': 'update',
'delete': 'delete',
'deleteall': 'delete'
}
def __init__(self, data, ignore_endpoints=None, **kwargs):
ignore_endpoints = ignore_endpoints or []
# Make sure to copy the class vocab to prevent changes affecting
# other class instances.
vocab = copy(self._VOCABULARY)
vocab.update(kwargs.get('vocabulary', {}))
self.data = data
self.ignore_endpoints = ignore_endpoints or []
self.vocab = dict((k.lower(), v.lower())
for k, v in vocab.items())
self.ignore = ignore_endpoints
self.parsed = NestedDict()
self.mime = None
self.parse()
def _parse_body(self, body):
data = NestedDict(**body['content'])
mime = self._get_prefered_mime(data.keys())
self.mime = mime
schema = data[mime]['schema']
# Assume top level schemas are object type if not provided.
schema_type = _find_type(schema, 'type', 'object')
results = parse_schema_type(schema_type, schema).parsed
optional = results['options']
if optional.get('nesting') is not None:
del optional['nesting']
params = results['params']
optional.update({'params': params, 'payload_type': schema_type})
return optional
@classmethod
def _get_prefered_mime(cls, mimes):
supported = [m for m in mimes if m in cls._SUPPORTED_MIMES]
msg = 'Provided mime(s) not supported: {}'.format(mimes)
if not supported:
raise NotImplementedError(msg)
found = supported[0]
for mime in cls._SUPPORTED_MIMES:
if mime in supported:
found = mime
break
return found
@staticmethod
def _parse_route(route):
end_in_arg = False
keys = [i for i in route.split('/') if i != '']
commands = []
args = []
for k in keys:
# Ignore parameters since we'll get those later in the spec.
            if k and '{' not in k and '}' not in k:
commands.append(k)
else:
args.append(k)
if args and keys and keys[-1] == args[-1]:
end_in_arg = True
return (commands, args, end_in_arg)
@staticmethod
def _format_param(param):
schema = param
schema.update(**param['schema'])
del param['schema']
parsed = parse_schema_type(schema['type'], schema).parsed
return parsed['params']
@classmethod
def _parse_params(cls, params):
resp = {}
for param in params:
resp.setdefault(param['in'], [])
resp[param['in']] = resp[param['in']] + cls._format_param(param)
return resp
def _get_command(self, key, route, method):
existing = self.parsed.get(key)
if existing is not None: # pragma: NO COVER
conflict = existing
template = '{m}:{r} conflicts with {cm}:{cr}'
msg = template.format(m=method, r=route,
cm=conflict['method'],
cr=conflict['route'])
raise ValueError(msg)
return key
def _get_key(self, commands, verb):
return '.'.join(commands + [self.vocab[verb.lower()]])
def get_parsed(self):
""" Get parsed data """
return self.parsed
@staticmethod
def _parse_servers(current_servers):
servers = []
for server in current_servers:
url = server.get('url')
if url:
if url[-1] == '/':
url = url[:-1]
server['url'] = url
servers.append(server)
return servers
def parse(self):
""" Parse data and return groups, commands, and parameters """
# pylint: disable=too-many-locals
endpoint_key = 'endpoints'
# Remove any trailing / from servers to prevent urllib errors
self.data['servers'] = self._parse_servers(self.data['servers'])
for key in ['info', 'servers']:
if key in self.data:
self.parsed[key] = self.data[key]
self.parsed.setdefault(endpoint_key, NestedDict())
for route, data in self.data['paths'].items():
if route not in self.ignore_endpoints:
commands, _, end_in_arg = self._parse_route(route)
parameters = self._parse_params(data.get('parameters', []))
if 'parameters' in data:
del data['parameters']
if 'servers' in data:
del data['servers']
for verb, details in data.items():
if IGNORE_TAG in details.get('tags', []):
continue
method = verb.upper()
if verb.lower() == 'get' and not end_in_arg:
verb = 'getall'
if verb.lower() == 'delete' and not end_in_arg:
verb = 'deleteall'
keep_keys = ['tags']
command_data = {
key: value for key, value in details.items()
if key in keep_keys}
command_data.update({
'route': route,
'method': method,
})
command_data.update(parameters)
command_data.update(self._parse_params(details.get(
'parameters', [])))
if details.get('requestBody') is not None:
body = self._parse_body(details['requestBody'])
body['mime'] = self.mime
command_data.update(body)
command = self._get_command(self._get_key(commands, verb),
route, method)
self.parsed[endpoint_key].set_deep(command, command_data)
self.parsed[CONVERSTION_FLAG] = True
def parse(path, **kwargs):
""" Parse a Swagger/OpenAPI file and return an object that can be consumed
by the Cray CLI Framework """
# pylint: disable=invalid-name
with open(path, encoding='utf-8') as filep:
data = json.load(filep)
s = Swagger(data, **kwargs)
return s
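# Minimal usage sketch (the spec path and vocabulary override are hypothetical):
if __name__ == '__main__':
    swagger = parse('openapi.json', vocabulary={'post': 'add'})
    print(list(swagger.get_parsed()['endpoints'].keys()))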
|
the-stack_106_22930 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class StorageManagementConfiguration(Configuration):
"""Configuration for StorageManagement.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(StorageManagementConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2016-12-01"
self.credential_scopes = ['https://management.azure.com/.default']
self.credential_scopes.extend(kwargs.pop('credential_scopes', []))
kwargs.setdefault('sdk_moniker', 'mgmt-storage/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
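# Minimal usage sketch (the subscription id below is a placeholder, the
# azure-identity package is assumed to be installed, and DefaultAzureCredential
# is just one possible AsyncTokenCredential):
if __name__ == "__main__":
    from azure.identity.aio import DefaultAzureCredential
    config = StorageManagementConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    print(config.api_version)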
|
the-stack_106_22931 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if (args.cmark_build_variant is not None):
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = 'Debug'
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '0'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
if args.llbuild_assertions is None:
args.llbuild_assertions = args.assertions
if args.lldb_assertions is None:
args.lldb_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --test-paths implies --test and/or --validation-test
# depending on what directories/files have been specified.
if args.test_paths:
for path in args.test_paths:
if path.startswith('test'):
args.test = True
elif path.startswith('validation-test'):
args.test = True
args.validation_test = True
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# --test-optimize-none-with-implicit-dynamic implies --test.
if args.test_optimize_none_with_implicit_dynamic:
args.test = True
# If none of tests specified skip swift stdlib test on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
args.test_android = False
args.test_swiftpm = False
args.test_swiftsyntax = False
args.test_indexstoredb = False
args.test_sourcekitlsp = False
args.test_skstresstester = False
args.test_swiftevolve = False
args.test_toolchainbenchmarks = False
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
# --tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
# If building natively on an Android host, allow running the test suite
# without the NDK config.
if not StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget
.host_target().name):
args.test_android = False
args.test_android_host = False
if not args.test_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
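# Illustrative effect of the defaulting above (flag names come from the options
# registered in create_argument_parser below; this is a sketch, not a test):
#   args = create_argument_parser().parse_args(['--release'])
#   # _apply_default_arguments then propagates 'Release' into
#   # args.llvm_build_variant, args.swift_build_variant, etc.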
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
help='also build for watchOS, but disallow tests that require an '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option('--install-all', toggle_true,
help='Assume all built products should be installed')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
option('--llvm-install-components', store,
default=defaults.llvm_install_components(),
help='A semi-colon split list of llvm components to install')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Comma separated list: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option('--infer', store_true('infer_dependencies'),
help='Infer any downstream dependencies from enabled projects')
option(['-l', '--lldb'], store_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], store_true('build_llbuild'),
help='build llbuild')
option(['--libcxx'], store_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], store_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], store_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftevolve'], store_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option('--test-indexstore-db-sanitize-all',
toggle_true('test_indexstoredb_sanitize_all'),
help='run indexstore-db tests under all sanitizers')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--test-sourcekit-lsp-sanitize-all',
toggle_true('test_sourcekitlsp_sanitize_all'),
help='run sourcekit-lsp tests under all sanitizers')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current master')
option(['--install-pythonkit'], toggle_true('install_pythonkit'),
help='install PythonKit')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option(['--swift-inspect'],
toggle_true('build_swift_inspect'),
help='build SwiftInspect using swiftpm against the just built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
store_true('install_playgroundsupport'),
help='install playground support')
option('--pythonkit', store_true('build_pythonkit'),
help='build PythonKit')
option('--tensorflow-swift-apis', store_true('build_tensorflow_swift_apis'),
help='build TensorFlow Swift APIs')
option('--install-tensorflow-swift-apis',
store_true('install_tensorflow_swift_apis'),
help='install TensorFlow Swift APIs')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], store_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
'SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Does nothing if host-test is not '
'allowed')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
option('--test-pythonkit', toggle_true('test_pythonkit'),
help='test PythonKit')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='build external benchmarks in addition to the Swift Benchmark '
'Suite')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
option('--skip-test-swift-inspect',
toggle_false('test_swift_inspect'),
help='skip testing swift_inspect')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers for libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64'],
default='armv7',
help='The Android target architecture when building for Android. '
'Currently only armv7 and aarch64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to represent these options so that we can skip installing them if
# the user is running in install-all mode.
option('--skip-build-cmark', toggle_false('build_cmark'),
help='skip building cmark')
option('--skip-build-llvm', toggle_false('build_llvm'),
help='skip building llvm')
option('--skip-build-swift', toggle_false('build_swift'),
help='skip building swift')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects.
If you intend to use the -l, -L, --lldb, or --debug-lldb options, make sure
the LLDB sources are also checked out.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
|
the-stack_106_22932 | from LucidDynamodb import DynamoDb
from LucidDynamodb.exceptions import (
TableNotFound
)
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
try:
db = DynamoDb()
db.delete_table(table_name='dev_jobs')
logging.info("Table deleted successfully")
table_names = db.read_all_table_names()
logging.info(f"Table names: {table_names}")
except TableNotFound as e:
logging.error(f"Table delete operation failed {e}")
"""
dineshsonachalam@macbook examples % python 14-delete-a-table.py
INFO:botocore.credentials:Found credentials in environment variables.
INFO:root:Table deleted successfully
INFO:root:Table names: ['CertMagic', 'dev_test', 'kp-config-v1', 'test-1']
""" |
the-stack_106_22934 | # @author Avtandil Kikabidze
# @copyright Copyright (c) 2008-2014, Avtandil Kikabidze aka LONGMAN ([email protected])
# @link http://long.ge
# @license GNU General Public License version 2 or later;
import os
import sys
import re
import sublime
import subprocess
import cssbeautifier
class CssFormatter:
def __init__(self, formatter):
self.formatter = formatter
def format(self, text):
text = text.decode("utf-8")
opts = self.formatter.settings.get('codeformatter_css_options')
stderr = ""
stdout = ""
options = cssbeautifier.default_options()
if ("indent_size" in opts and opts["indent_size"]):
options.indent_size = opts["indent_size"]
else:
options.indent_size = 4
if ("indent_char" in opts and opts["indent_char"]):
options.indent_char = opts["indent_char"]
else:
options.indent_char = ' '
if ("indent_with_tabs" in opts and opts["indent_with_tabs"]):
options.indent_with_tabs = True
else:
options.indent_with_tabs = False
if ("selector_separator_newline" in opts and opts["selector_separator_newline"]):
options.selector_separator_newline = True
else:
options.selector_separator_newline = False
if ("end_with_newline" in opts and opts["end_with_newline"]):
options.end_with_newline = True
else:
options.end_with_newline = False
try:
stdout = cssbeautifier.beautify(text, options)
except Exception as e:
stderr = str(e)
if (not stderr and not stdout):
stderr = "Formatting error!"
return stdout, stderr
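# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption: outside of the CodeFormatter plugin the
# surrounding "formatter" object only needs to expose settings.get()). Note
# that importing this module requires the `sublime` and `cssbeautifier`
# packages to be available; the stub below is illustrative only.
if __name__ == "__main__":
    class _StubFormatter:
        settings = {"codeformatter_css_options": {"indent_size": 2,
                                                  "end_with_newline": True}}

    formatted, error = CssFormatter(_StubFormatter()).format(b"a{color:red;font-size:12px}")
    print(error if error else formatted)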
|
the-stack_106_22935 | # coding: UTF-8
import torch
import torch.nn as nn
import numpy as np
class Config(object):
"""配置参数"""
def __init__(self, dataset, embedding):
self.model_name = 'TextRNN'
self.train_path = dataset + '/data/train.csv' # training set
self.dev_path = dataset + '/data/dev.csv' # validation set
self.test_path = dataset + '/data/test.csv' # test set
self.class_list = [x.strip() for x in open(
dataset + '/data/class.txt', encoding='utf-8').readlines()] # list of class names
self.material_path = '/home/chiyao/projects/HANpytorch/material/' # data material directory (stop words, dictionaries)
self.vocab_path = dataset + '/data/vocab.pkl' # vocabulary
self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt' # saved model checkpoint
self.log_path = dataset + '/log/' + self.model_name
self.embedding_pretrained = torch.tensor(
np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
if embedding != 'random' else None # pre-trained word vectors
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # device
self.dropout = 0.5 # dropout rate
self.require_improvement = 1000 # stop training early if no improvement after 1000 batches
self.num_classes = len(self.class_list) # number of classes
self.n_vocab = 0 # vocabulary size, assigned at runtime
self.num_epochs = 100 # number of epochs
self.batch_size = 128 # mini-batch size
self.pad_size = 32 # sequence length per sentence (pad short, truncate long)
self.learning_rate = 1e-3 # learning rate
self.embed = self.embedding_pretrained.size(1)\
if self.embedding_pretrained is not None else 300 # embedding dimension; matches the pre-trained vectors if provided
self.hidden_size = 128 # LSTM hidden size
self.num_layers = 2 # number of LSTM layers
'''Recurrent Neural Network for Text Classification with Multi-Task Learning'''
class Model(nn.Module):
def __init__(self, config):
super(Model, self).__init__()
if config.embedding_pretrained is not None:
self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
else:
self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
bidirectional=True, batch_first=True, dropout=config.dropout)
self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)
def forward(self, x):
x, _ = x
out = self.embedding(x) # [batch_size, seq_len, embedding] = [128, 32, 300]
out, _ = self.lstm(out)
out = self.fc(out[:, -1, :]) # hidden state at the last time step of the sentence
return out
'''Variable-length RNN: accuracy is about the same, even slightly lower...'''
# def forward(self, x):
# x, seq_len = x
# out = self.embedding(x)
# _, idx_sort = torch.sort(seq_len, dim=0, descending=True) # sort lengths from longest to shortest (indices)
# _, idx_unsort = torch.sort(idx_sort) # indices that restore the original order
# out = torch.index_select(out, 0, idx_sort)
# seq_len = list(seq_len[idx_sort])
# out = nn.utils.rnn.pack_padded_sequence(out, seq_len, batch_first=True)
# # [batche_size, seq_len, num_directions * hidden_size]
# out, (hn, _) = self.lstm(out)
# out = torch.cat((hn[2], hn[3]), -1)
# # out, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
# out = out.index_select(0, idx_unsort)
# out = self.fc(out)
# return out
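# ---------------------------------------------------------------------------
# Shape sketch (illustrative, not part of the training pipeline): builds the
# Model from a minimal stand-in config to show that token ids of shape
# [batch, pad_size] are mapped to logits of shape [batch, num_classes].
if __name__ == '__main__':
    from types import SimpleNamespace

    cfg = SimpleNamespace(embedding_pretrained=None, n_vocab=5000, embed=300,
                          hidden_size=128, num_layers=2, dropout=0.5,
                          num_classes=10, pad_size=32)
    model = Model(cfg)
    x = torch.zeros(2, cfg.pad_size, dtype=torch.long)         # dummy token ids
    seq_len = torch.full((2,), cfg.pad_size, dtype=torch.long)
    print(model((x, seq_len)).shape)                           # torch.Size([2, 10])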
|
the-stack_106_22936 | ovr = [0.115, -0.2921, 0.9834]
hr = [0.1, 0.188, 2.33]
knobs = {
1: [-0.148, 0.22, 1.243], # botright
2: [-0.271, 0.22, 1.243], # botleft
3: [-0.148, 0.22, 1.357], # topright
4: [-0.271, 0.22, 1.357], # topleft
}
for n in range(1, 4 + 1):
p = [ovr[i] - hr[i] + knobs[n][i] for i in range(3)]
print('knob', n, p)
#
# # order matters, flip b/c original had knob body before burner body
# 'bottomknob': np.array([14, 10]), # bottom burner, left
# 'topknob': np.array([16, 12]), # top burner, right
# 'bottomknobr': np.array([13, 9]), # bottom burner, right
# 'topknobr': np.array([15, 11]), # top burner, right
ol = [0, 0.2, 2.25]
p = [hr[i] - ol[i] for i in range(3)]
print('ol', p)
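# Equivalent vectorised form (illustrative sketch only; assumes numpy is
# available, which the script above does not import):
#
# import numpy as np
# offset = np.array(ovr) - np.array(hr)        # shared frame offset
# for n, knob in sorted(knobs.items()):
#     print('knob', n, offset + np.array(knob))
# print('ol', np.array(hr) - np.array(ol))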
|
the-stack_106_22944 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='SageMaker Ground Truth Job')
parser.add_argument('--region', type=str.strip, required=True, help='The region where the resources are.')
parser.add_argument('--role', type=str.strip, required=True, help='The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.')
parser.add_argument('--job_name', type=str.strip, required=True, help='The name of the labeling job.')
parser.add_argument('--label_attribute_name', type=str.strip, required=False, help='The attribute name to use for the label in the output manifest file. Default is the job name.', default='')
parser.add_argument('--manifest_location', type=str.strip, required=True, help='The Amazon S3 location of the manifest file that describes the input data objects.')
parser.add_argument('--output_location', type=str.strip, required=True, help='The Amazon S3 location to write output data.')
parser.add_argument('--output_encryption_key', type=str.strip, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.', default='')
parser.add_argument('--task_type', type=str.strip, required=True, help='Built in image classification, bounding box, text classification, or semantic segmentation, or custom. If custom, please provide pre- and post-labeling task lambda functions.')
parser.add_argument('--worker_type', type=str.strip, required=True, help='The workteam for data labeling, either public, private, or vendor.')
parser.add_argument('--workteam_arn', type=str.strip, required=False, help='The ARN of the work team assigned to complete the tasks.')
parser.add_argument('--no_adult_content', type=_utils.str_to_bool, required=False, help='If true, your data is free of adult content.', default='False')
parser.add_argument('--no_ppi', type=_utils.str_to_bool, required=False, help='If true, your data is free of personally identifiable information.', default='False')
parser.add_argument('--label_category_config', type=str.strip, required=False, help='The S3 URL of the JSON structured file that defines the categories used to label the data objects.', default='')
parser.add_argument('--max_human_labeled_objects', type=_utils.str_to_int, required=False, help='The maximum number of objects that can be labeled by human workers.', default=0)
parser.add_argument('--max_percent_objects', type=_utils.str_to_int, required=False, help='The maximum percentage of input data objects that should be labeled.', default=0)
parser.add_argument('--enable_auto_labeling', type=_utils.str_to_bool, required=False, help='Enables auto-labeling, only for bounding box, text classification, and image classification.', default=False)
parser.add_argument('--initial_model_arn', type=str.strip, required=False, help='The ARN of the final model used for a previous auto-labeling job.', default='')
parser.add_argument('--resource_encryption_key', type=str.strip, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--ui_template', type=str.strip, required=True, help='The Amazon S3 bucket location of the UI template.')
parser.add_argument('--pre_human_task_function', type=str.strip, required=False, help='The ARN of a Lambda function that is run before a data object is sent to a human worker.', default='')
parser.add_argument('--post_human_task_function', type=str.strip, required=False, help='The ARN of a Lambda function implements the logic for annotation consolidation.', default='')
parser.add_argument('--task_keywords', type=str.strip, required=False, help='Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task.', default='')
parser.add_argument('--title', type=str.strip, required=True, help='A title for the task for your human workers.')
parser.add_argument('--description', type=str.strip, required=True, help='A description of the task for your human workers.')
parser.add_argument('--num_workers_per_object', type=_utils.str_to_int, required=True, help='The number of human workers that will label an object.')
parser.add_argument('--time_limit', type=_utils.str_to_int, required=True, help='The amount of time that a worker has to complete a task in seconds')
parser.add_argument('--task_availibility', type=_utils.str_to_int, required=False, help='The length of time that a task remains available for labelling by human workers.', default=0)
parser.add_argument('--max_concurrent_tasks', type=_utils.str_to_int, required=False, help='The maximum number of data objects that can be labeled by human workers at the same time.', default=0)
parser.add_argument('--workforce_task_price', type=_utils.str_to_float, required=False, help='The price that you pay for each task performed by a public worker in USD. Specify to the tenth fractions of a cent. Format as "0.000".', default=0.000)
parser.add_argument('--tags', type=_utils.str_to_json_dict, required=False, help='An array of key-value pairs, to categorize AWS resources.', default='{}')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_client(args.region)
logging.info('Submitting Ground Truth Job request to SageMaker...')
_utils.create_labeling_job(client, vars(args))
logging.info('Ground Truth labeling job request submitted. Waiting for completion...')
_utils.wait_for_labeling_job(client, args.job_name)
output_manifest, active_learning_model_arn = _utils.get_labeling_job_outputs(client, args.job_name, args.enable_auto_labeling)
logging.info('Ground Truth Labeling Job completed.')
with open('/tmp/output_manifest_location.txt', 'w') as f:
f.write(output_manifest)
with open('/tmp/active_learning_model_arn.txt', 'w') as f:
f.write(active_learning_model_arn)
if __name__== "__main__":
main()
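# ---------------------------------------------------------------------------
# Example invocation (illustrative sketch only: the script name, ARNs, bucket
# paths and task values below are placeholders, not real resources, and only
# the required arguments are shown):
#
# python ground_truth_component.py \
#   --region us-west-2 \
#   --role arn:aws:iam::123456789012:role/SageMakerExecutionRole \
#   --job_name example-labeling-job \
#   --manifest_location s3://example-bucket/input/manifest.json \
#   --output_location s3://example-bucket/output \
#   --task_type "image classification" \
#   --worker_type private \
#   --workteam_arn arn:aws:sagemaker:us-west-2:123456789012:workteam/private-crowd/example \
#   --ui_template s3://example-bucket/templates/template.liquid \
#   --title "Label images" \
#   --description "Classify each image into one of the provided categories" \
#   --num_workers_per_object 1 \
#   --time_limit 300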
|
the-stack_106_22947 | """
Univariate Kernel Density Estimators
References
----------
Racine, Jeff. (2008) "Nonparametric Econometrics: A Primer," Foundation and
Trends in Econometrics: Vol 3: No 1, pp1-88.
http://dx.doi.org/10.1561/0800000009
http://en.wikipedia.org/wiki/Kernel_%28statistics%29
Silverman, B.W. Density Estimation for Statistics and Data Analysis.
"""
from __future__ import absolute_import, print_function, division
from statsmodels.compat.python import range
# for 2to3 with extensions
import warnings
import numpy as np
from scipy import integrate, stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
from . import bandwidths
from .kdetools import (forrt, revrt, silverman_transform, counts)
from .linbin import fast_linbin
#### Kernels Switch for estimators ####
kernel_switch = dict(gau=kernels.Gaussian, epa=kernels.Epanechnikov,
uni=kernels.Uniform, tri=kernels.Triangular,
biw=kernels.Biweight, triw=kernels.Triweight,
cos=kernels.Cosine, cos2=kernels.Cosine2)
def _checkisfit(self):
try:
self.density
except:
raise ValueError("Call fit to fit the density first")
#### Kernel Density Estimator Class ###
class KDEUnivariate(object):
"""
Univariate Kernel Density Estimator.
Parameters
----------
endog : array-like
The variable for which the density estimate is desired.
Notes
-----
If cdf, sf, cumhazard, or entropy are computed, they are computed based on
the definition of the kernel rather than the FFT approximation, even if
the density is fit with FFT = True.
`KDEUnivariate` is much faster than `KDEMultivariate`, due to its FFT-based
implementation. It should be preferred for univariate, continuous data.
`KDEMultivariate` also supports mixed data.
See Also
--------
KDEMultivariate
kdensity, kdensityfft
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> nobs = 300
>>> np.random.seed(1234) # Seed random generator
>>> dens = sm.nonparametric.KDEUnivariate(np.random.normal(size=nobs))
>>> dens.fit()
>>> plt.plot(dens.cdf)
>>> plt.show()
"""
def __init__(self, endog):
self.endog = np.asarray(endog)
def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None,
gridsize=None, adjust=1, cut=3, clip=(-np.inf, np.inf)):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, it is the bandwidth.
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of X so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
"""
try:
bw = float(bw)
self.bw_method = "user-given"
except:
self.bw_method = bw
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
else:
density, grid, bw = kdensity(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
# should this passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = resettable_cache()
@cache_readonly
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
density = self.density
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a,b = -np.inf,np.inf
else:
a,b = kern.domain
func = lambda x,s: kern.density(s,x)
support = self.support
support = np.r_[a,support]
gridsize = len(support)
endog = self.endog
probs = [integrate.quad(func, support[i-1], support[i],
args=endog)[0] for i in range(1,gridsize)]
return np.cumsum(probs)
@cache_readonly
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf)
@cache_readonly
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf
@cache_readonly
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
"""
_checkisfit(self)
def entr(x,s):
pdf = kern.density(s,x)
return pdf*np.log(pdf+1e-12)
pdf = self.density
kern = self.kernel
if kern.domain is not None:
a,b = self.domain
else:
a,b = -np.inf,np.inf
endog = self.endog
#TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a,b, args=(endog,))[0]
@cache_readonly
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0,1,
gridsize))
def evaluate(self, point):
"""
Evaluate density at a single point.
Parameters
----------
point : float
Point at which to evaluate the density.
"""
_checkisfit(self)
return self.kernel.density(self.endog, point)
#### Kernel Density Estimator Functions ####
def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version.
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:,None]
clip_x = np.logical_and(X>clip[0], X<clip[1])
X = X[clip_x]
nobs = len(X) # after trim
if gridsize is None:
gridsize = max(nobs,50) # don't need to resize if no FFT
# handle weights
if weights is None:
weights = np.ones(nobs)
q = nobs
else:
# ensure weights is a numpy array
weights = np.asarray(weights)
if len(weights) != len(clip_x):
msg = "The length of the weights must be the same as the given X."
raise ValueError(msg)
weights = weights[clip_x.squeeze()]
q = weights.sum()
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
# if bw is None, select optimal bandwidth for kernel
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern)
bw *= adjust
a = np.min(X,axis=0) - cut*bw
b = np.max(X,axis=0) + cut*bw
grid = np.linspace(a, b, gridsize)
k = (X.T - grid[:,None])/bw # uses broadcasting to make a gridsize x nobs
# set kernel bandwidth
kern.seth(bw)
# truncate to domain
if kern.domain is not None: # won't work for piecewise kernels like parzen
z_lo, z_high = kern.domain
domain_mask = (k < z_lo) | (k > z_high)
k = kern(k) # estimate density
k[domain_mask] = 0
else:
k = kern(k) # estimate density
k[k<0] = 0 # get rid of any negative values, do we need this?
dens = np.dot(k,weights)/(q*bw)
if retgrid:
return dens, grid, bw
else:
return dens, bw
def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, min(len(X), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{X.min() or X.max()}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights aren't implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
---------- ::
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
the Fast Fourier Transform. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9.
"""
X = np.asarray(X)
X = X[np.logical_and(X>clip[0], X<clip[1])] # won't work for two columns.
# will affect underlying data?
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern?
bw *= adjust
nobs = len(X) # after trim
# 1 Make grid and discretize the data
if gridsize is None:
gridsize = np.max((nobs,512.))
gridsize = 2**np.ceil(np.log2(gridsize)) # round to next power of 2
a = np.min(X)-cut*bw
b = np.max(X)+cut*bw
grid,delta = np.linspace(a,b,gridsize,retstep=True)
RANGE = b-a
#TODO: Fix this?
# This is the Silverman binning function, but I believe it's buggy (SS)
# weighting according to Silverman
# count = counts(X,grid)
# binned = np.zeros_like(grid) #xi_{k} in Silverman
# j = 0
# for k in range(int(gridsize-1)):
# if count[k]>0: # there are points of X in the grid here
# Xingrid = X[j:j+count[k]] # get all these points
# # get weights at grid[k],grid[k+1]
# binned[k] += np.sum(grid[k+1]-Xingrid)
# binned[k+1] += np.sum(Xingrid-grid[k])
# j += count[k]
# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta
#NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
binned = fast_linbin(X,a,b,gridsize)/(delta*nobs)
# step 2 compute FFT of the weights, using Munro (1976) FFT convention
y = forrt(binned)
# step 3 and 4 for optimal bw compute zstar and the density estimate f
# don't have to redo the above if just changing bw, ie., for cross val
#NOTE: silverman_transform is the closed form solution of the FFT of the
#gaussian kernel. Not yet sure how to generalize it.
zstar = silverman_transform(bw, gridsize, RANGE)*y # 3.49 in Silverman
# 3.50 w Gaussian kernel
f = revrt(zstar)
if retgrid:
return f, grid, bw
else:
return f, bw
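# NOTE (sketch, added for clarity): for the Gaussian kernel the damping applied
# to the Fourier coefficients above has the closed form
#     zstar_j = exp(-0.5 * (2*pi*j*bw/RANGE)**2) * y_j        (Silverman 1982, eq. 3.49)
# together with a small correction for the linear binning (cf. Jones and
# Lotwick, 1984). This is what silverman_transform evaluates, which is why
# only the Gaussian kernel is supported by kdensityfft.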
if __name__ == "__main__":
import numpy as np
np.random.seed(12345)
xi = np.random.randn(100)
f,grid, bw1 = kdensity(xi, kernel="gau", bw=.372735, retgrid=True)
f2, bw2 = kdensityfft(xi, kernel="gau", bw="silverman",retgrid=False)
# do some checking vs. silverman algo.
# you need denes.f, http://lib.stat.cmu.edu/apstat/176
#NOTE: I (SS) made some changes to the Fortran
# and the FFT stuff from Munro http://lib.stat.cmu.edu/apstat/97o
# then compile everything and link to denest with f2py
#Make pyf file as usual, then compile shared object
#f2py denest.f -m denest2 -h denest.pyf
#edit pyf
#-c flag makes it available to other programs, fPIC builds a shared library
#/usr/bin/gfortran -Wall -c -fPIC fft.f
#f2py -c denest.pyf ./fft.o denest.f
try:
from denest2 import denest # @UnresolvedImport
a = -3.4884382032045504
b = 4.3671504686785605
RANGE = b - a
bw = bandwidths.bw_silverman(xi)
ft,smooth,ifault,weights,smooth1 = denest(xi,a,b,bw,np.zeros(512),np.zeros(512),0,
np.zeros(512), np.zeros(512))
# We use a different binning algo, so only accurate up to 3 decimal places
np.testing.assert_almost_equal(f2, smooth, 3)
#NOTE: for debugging
# y2 = forrt(weights)
# RJ = np.arange(512/2+1)
# FAC1 = 2*(np.pi*bw/RANGE)**2
# RJFAC = RJ**2*FAC1
# BC = 1 - RJFAC/(6*(bw/((b-a)/M))**2)
# FAC = np.exp(-RJFAC)/BC
# SMOOTH = np.r_[FAC,FAC[1:-1]] * y2
# dens = revrt(SMOOTH)
except:
# ft = np.loadtxt('./ft_silver.csv')
# smooth = np.loadtxt('./smooth_silver.csv')
print("Didn't get the estimates from the Silverman algorithm")
|
the-stack_106_22948 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
def lazy_import():
from datadog_api_client.v1.model.creator import Creator
from datadog_api_client.v1.model.slo_correction_category import SLOCorrectionCategory
globals()["Creator"] = Creator
globals()["SLOCorrectionCategory"] = SLOCorrectionCategory
class SLOCorrectionResponseAttributes(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"category": (SLOCorrectionCategory,), # noqa: E501
"creator": (Creator,), # noqa: E501
"description": (str,), # noqa: E501
"end": (int,), # noqa: E501
"slo_id": (str,), # noqa: E501
"start": (int,), # noqa: E501
"timezone": (str,), # noqa: E501
}
discriminator = None
attribute_map = {
"category": "category", # noqa: E501
"creator": "creator", # noqa: E501
"description": "description", # noqa: E501
"end": "end", # noqa: E501
"slo_id": "slo_id", # noqa: E501
"start": "start", # noqa: E501
"timezone": "timezone", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SLOCorrectionResponseAttributes - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
category (SLOCorrectionCategory): [optional] # noqa: E501
creator (Creator): [optional] # noqa: E501
description (str): Description of the correction being made.. [optional] # noqa: E501
end (int): Ending time of the correction in epoch seconds.. [optional] # noqa: E501
slo_id (str): ID of the SLO that this correction will be applied to.. [optional] # noqa: E501
start (int): Starting time of the correction in epoch seconds.. [optional] # noqa: E501
timezone (str): The timezone to display in the UI for the correction times (defaults to \"UTC\").. [optional] # noqa: E501
"""
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Helper creating a new instance from a response."""
self = super(SLOCorrectionResponseAttributes, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
|
the-stack_106_22951 | import sys
import os
import torch
import pandas as pd
import datetime
from argparse import ArgumentParser
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
from network.ecgresnet_mcdropout import ECGResNet_MCDropout
from utils.helpers import create_results_directory
from utils.focalloss_weights import FocalLoss
class ECGResNetMCDropoutSystem(pl.LightningModule):
"""
This class implements the ECGResNet with Monte Carlo dropout PyTorch Lightning.
It can estimate the epistemic uncertainty of its predictions.
"""
def __init__(self, in_channels, n_grps, N,
num_classes, dropout, first_width, stride,
dilation, learning_rate, n_dropout_samples, sampling_dropout_rate, loss_weights=None,
**kwargs):
"""
Initializes the ECGResNetMCDropoutSystem
Args:
in_channels: number of channels of input
n_grps: number of ResNet groups
N: number of blocks per groups
num_classes: number of classes of the classification problem
dropout: probability of an argument to get zeroed in the dropout layer
first_width: width of the first input
stride: tuple with stride value per block per group
dilation: spacing between the kernel points of the convolutional layers
learning_rate: the learning rate of the model
n_dropout_samples: number of Monte Carlo dropout samples to take
sampling_dropout_rate: the ratio of dropped-out neurons during Monte Carlo sampling
loss_weights: array of weights for the loss term
"""
super().__init__()
self.save_hyperparameters()
self.learning_rate = learning_rate
self.IDs = torch.empty(0).type(torch.LongTensor)
self.predicted_labels = torch.empty(0).type(torch.LongTensor)
self.correct_predictions = torch.empty(0).type(torch.BoolTensor)
self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.model = ECGResNet_MCDropout(in_channels,
n_grps, N, num_classes,
dropout, first_width,
stride, dilation, n_dropout_samples, sampling_dropout_rate)
if loss_weights is not None:
weights = torch.tensor(loss_weights, dtype = torch.float)
else:
weights = loss_weights
self.loss = FocalLoss(gamma=1, weights = weights)
def forward(self, x):
"""
Performs a forward through the model.
Args:
x (tensor): Input data.
Returns:
output1: output at the auxiliary point of the ECGResNet
output2: output at the end of the model
"""
output1, output2 = self.model(x)
return output1, output2
def training_step(self, batch, batch_idx):
"""Performs a training step.
Args:
batch (dict): Output of the dataloader.
batch_idx (int): Index no. of this batch.
Returns:
tensor: Total loss for this step.
"""
data, target = batch['waveform'], batch['label']
output1, output2 = self(data)
train_loss1 = self.loss(output1.squeeze(), target)
train_loss2 = self.loss(output2.squeeze(), target)
total_train_loss = (0.3 * train_loss1) + train_loss2
self.log('train_loss', total_train_loss)
return {'loss': total_train_loss}
def validation_step(self, batch, batch_idx):
data, target = batch['waveform'], batch['label']
output1, output2 = self(data)
val_loss = self.loss(output2.squeeze(), target)
acc = FM.accuracy(output2.squeeze(), target)
# loss is tensor. The Checkpoint Callback is monitoring 'checkpoint_on'
metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
self.log('val_acc', acc.item())
self.log('val_loss', val_loss.item())
return metrics
def test_step(self, batch, batch_idx, save_to_csv=False):
# Enable dropout at test time.
self.model.enable_dropout()
data, target = batch['waveform'], batch['label']
# Make prediction with dropout turned on, sampling multiple times.
samples, sample_mean, sample_var = self.model.mc_sample(data)
# Get predicted labels by choosing the labels with the highest average Softmax value
predicted_labels = sample_mean.argmax(dim=1).cpu()
# Get the variance of the predicted labels by selecting the variance of
# the labels with highest average Softmax value
predicted_labels_var = torch.gather(sample_var, 1, sample_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
# Get metrics
test_loss = self.loss(sample_mean, target)
acc = FM.accuracy(sample_mean, target)
self.log('test_acc', acc.item())
self.log('test_loss', test_loss.item())
self.IDs = torch.cat((self.IDs, batch['id']), 0)
self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)
self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)
self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)
        return {'test_loss': test_loss.item(), 'test_acc': acc.item()}
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
    @staticmethod
    def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--model_name', type=str, default='mcdropout_none')
parser.add_argument('--n_dropout_samples', type=int, default=20)
parser.add_argument('--sampling_dropout_rate', type=float, default=0.1)
parser.add_argument('--ensembling_method', type=bool, default=False)
return parser
# Combine results into single dataframe and save to disk
def save_results(self):
"""
Combine results into single dataframe and save to disk as .csv file
"""
results = pd.concat([
pd.DataFrame(self.IDs.numpy(), columns= ['ID']),
pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']),
pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']),
pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']),
], axis=1)
create_results_directory()
results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
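# --- Usage sketch (illustrative addition, not part of the original training pipeline) ---
# Shows how the CLI arguments defined in add_model_specific_args can be collected and how
# this LightningModule would typically be driven by a pytorch_lightning Trainer. The network
# hyperparameters (in_channels, n_grps, N, ...) are dataset-specific, so the Trainer calls are
# only sketched in comments; the dataloaders are assumed to yield dicts with the keys
# 'waveform', 'label' and 'id', as expected by the *_step methods above.
if __name__ == '__main__':
    base_parser = ArgumentParser(add_help=False)
    parser = ECGResNetMCDropoutSystem.add_model_specific_args(base_parser)
    args = parser.parse_args([])  # use defaults; pass sys.argv[1:] in a real run
    print('MC-dropout settings:', vars(args))
    # system = ECGResNetMCDropoutSystem(in_channels=..., n_grps=..., N=..., num_classes=...,
    #                                   dropout=..., first_width=..., stride=..., dilation=...,
    #                                   learning_rate=1e-3,
    #                                   n_dropout_samples=args.n_dropout_samples,
    #                                   sampling_dropout_rate=args.sampling_dropout_rate)
    # trainer = pl.Trainer(max_epochs=1)
    # trainer.fit(system, train_dataloader, val_dataloader)
    # trainer.test(system, test_dataloader)
    # system.save_results()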
|
the-stack_106_22953 | import logging, multiprocessing, signal, time, timeit
from multiprocessing import Pool, Manager
from classes.game import Game
from classes.reader import Grabber
from classes.gui import Screen
from classes.environment import Environment
from classes.agent import Agent
from consts import EMULATOR_PATH, ROM_PATH, ROM_NAME,\
GAME_WINDOW_XY
class App():
def __init__(self):
self.__init_logging()
def __init_logging(self):
self.log = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
def init_process(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def __init_mp_pool(self, pool_size=4):
        if not isinstance(pool_size, int):
            raise ValueError('num of child processes must be an integer')
        elif pool_size > 16:
            raise ValueError('num of child processes must not exceed 16')
        elif pool_size <= 0:
            raise ValueError('num of child processes must be greater than 0')
pool = Pool(processes=pool_size, initializer=self.init_process)
return pool
def init_environment(self, pool, end, q_source=None, q_target=None):
return [pool.apply_async(Environment().loop, args=(end, q_source, q_target,))]
def init_agent(self, pool, end, q_source=None, q_target=None):
return [pool.apply_async(Agent().loop, args=(end, q_source, q_target,))]
def init_grabber(self, pool, end, q_source=None, q_target=None):
return [pool.apply_async(Grabber(window_xy=GAME_WINDOW_XY).capture, args=(end, q_source, q_target,))]
def init_gui(self, pool, end, q_source=None, q_target=None):
return [pool.apply_async(Screen().show, args=(end, q_source, q_target,))]
def main(self):
t0 = timeit.default_timer()
# shared resources manager
m = Manager()
# original screenshots of the game window
raw_frames = m.Queue(maxsize=3)
# environment data
env_data = m.Queue(maxsize=2)
# output images
output_frames = m.Queue(maxsize=3)
# end event
end = m.Event()
# proc's pool
pool = self.__init_mp_pool(pool_size=16)
        # need the sub-process (external game) pid for a graceful shutdown
game_pid = m.Value('pid', None)
game = Game(rom_path=ROM_PATH,
emulator_path=EMULATOR_PATH,
rom_name=ROM_NAME,
pid=game_pid)
processes = [pool.apply_async(game.run, args=(end,))] + \
self.init_environment(pool, end, q_source=raw_frames, q_target=env_data) + \
self.init_agent(pool, end, q_source=env_data, q_target=output_frames) + \
self.init_grabber(pool, end, q_target=raw_frames) + \
self.init_gui(pool, end, q_source=output_frames)
fin_processes = []
try:
while True:
for proc in processes:
if (proc.ready() and proc not in fin_processes):
fin_processes.append(proc)
if len(fin_processes) == len(processes):
break
except KeyboardInterrupt:
self.log.info('\nCaught Ctrl+C, terminating workers.')
game.stop(game_pid.value)
pool.terminate()
pool.join()
except Exception as err:
self.log.error('\nMain process err: %s ' % err)
pool.terminate()
pool.join()
else:
pool.close()
pool.join()
finally:
m.shutdown()
self.log.info('Finished processing.\n'+
'Main process worked for %.2f seconds'
% (timeit.default_timer() - t0))
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
app = App()
app.main() |
the-stack_106_22955 | # -*- encoding: utf-8 -*-
import math
# note: ascii_table is defined later in this file, so no external import is required
class Data:
def __init__(self, *numbers):
self.numbers = list(numbers)
self.numbers.sort()
@property
def median(self):
length = len(self.numbers)
        if length % 2 == 0:
            # median of an even-length sorted list: mean of the two middle values
            return Data(*self.numbers[length // 2 - 1:length // 2 + 1]).mean
else:
return self.numbers[length // 2]
@property
def mean(self):
return self.average
@property
def average(self):
return sum(self.numbers) / len(self.numbers)
@property
def standard_deviation(self):
average = self.average
squared_differences = []
for number in self.numbers:
squared_differences.append((number - average) ** 2)
return math.sqrt(Data(*squared_differences).average)
@property
def range(self):
return max(self.numbers) - min(self.numbers)
@property
def length(self):
return len(self.numbers)
def frequency_of(self, x):
return self.numbers.count(x)
def __str__(self):
return '<Data [{}]>'.format(len(self.numbers))
@property
def frequency_table(self):
rows = [
['x', 'f', 'fx', 'Cumu. f'],
]
cumulative_fr = 0
for number in sorted(set(self.numbers)):
fr = self.frequency_of(number)
cumulative_fr += fr
rows.append([number, fr, number * fr, cumulative_fr])
return ascii_table(rows, min_width=4)
#
# functions
#
def mini(x, y):
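    # despite its name, this returns the larger of the two values; ascii_table uses it
    # below to enforce a minimum column width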
return x if x > y else y
def isdigit(x):
try:
int(x)
except:
return False
return True
def ascii_table(rows, title=True, min_width=0):
length = len(rows[0])
for i, row in enumerate(rows):
for j, cell in enumerate(row):
rows[i][j] = str(cell)
if len(row) != length:
raise ValueError("The rows do not have the same length")
sizes = []
for i in range(len(rows[0])):
sizes.append(max(mini(len(cell), min_width) for cell in (row[i] for row in rows)))
table_rows = []
for i, row in enumerate(rows):
if title and i == 1:
table_rows.append('|' + '|'.join('-' * (sizes[j] + 2) for j, _ in enumerate(row)) + '|')
table_row = []
for j, cell in enumerate(row):
if title and i == 0:
cell = cell.center(sizes[j])
elif isdigit(cell):
cell = cell.rjust(sizes[j])
else:
cell = cell.ljust(sizes[j])
table_row.append(cell)
table_rows.append('| ' + ' | '.join(table_row) + ' |')
return '\n'.join(table_rows)
def numbers(string, func=int):
if '.' in string:
func = float
if string[0] == '.':
string = list(string)
string[0:1] = ['0', '.']
string = ''.join(string)
return map(func, string.replace(' .', ' 0.').split(' '))
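# --- Usage sketch (illustrative addition) ---
# A small worked example of the statistics helpers defined above.
if __name__ == '__main__':
    data = Data(*numbers('1 2 2 3 4 4 4 5'))
    print('mean               =', data.mean)
    print('median             =', data.median)
    print('standard deviation =', round(data.standard_deviation, 3))
    print('range              =', data.range)
    print(data.frequency_table)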
|
the-stack_106_22956 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.7 Python SDK
Pure Storage FlashBlade REST 1.7 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.7
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ObjectStoreAccount(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'created': 'int',
'space': 'Space',
'object_count': 'int'
}
attribute_map = {
'name': 'name',
'created': 'created',
'space': 'space',
'object_count': 'object_count'
}
def __init__(self, name=None, created=None, space=None, object_count=None): # noqa: E501
"""ObjectStoreAccount - a model defined in Swagger""" # noqa: E501
self._name = None
self._created = None
self._space = None
self._object_count = None
self.discriminator = None
if name is not None:
self.name = name
if created is not None:
self.created = created
if space is not None:
self.space = space
if object_count is not None:
self.object_count = object_count
@property
def name(self):
"""Gets the name of this ObjectStoreAccount. # noqa: E501
The name of the object (e.g., a file system or snapshot) # noqa: E501
:return: The name of this ObjectStoreAccount. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ObjectStoreAccount.
The name of the object (e.g., a file system or snapshot) # noqa: E501
:param name: The name of this ObjectStoreAccount. # noqa: E501
:type: str
"""
self._name = name
@property
def created(self):
"""Gets the created of this ObjectStoreAccount. # noqa: E501
Creation timestamp of the object # noqa: E501
:return: The created of this ObjectStoreAccount. # noqa: E501
:rtype: int
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this ObjectStoreAccount.
Creation timestamp of the object # noqa: E501
:param created: The created of this ObjectStoreAccount. # noqa: E501
:type: int
"""
self._created = created
@property
def space(self):
"""Gets the space of this ObjectStoreAccount. # noqa: E501
the space specification of the object store account # noqa: E501
:return: The space of this ObjectStoreAccount. # noqa: E501
:rtype: Space
"""
return self._space
@space.setter
def space(self, space):
"""Sets the space of this ObjectStoreAccount.
the space specification of the object store account # noqa: E501
:param space: The space of this ObjectStoreAccount. # noqa: E501
:type: Space
"""
self._space = space
@property
def object_count(self):
"""Gets the object_count of this ObjectStoreAccount. # noqa: E501
        The number of objects within the account. # noqa: E501
:return: The object_count of this ObjectStoreAccount. # noqa: E501
:rtype: int
"""
return self._object_count
@object_count.setter
def object_count(self, object_count):
"""Sets the object_count of this ObjectStoreAccount.
        The number of objects within the account. # noqa: E501
:param object_count: The object_count of this ObjectStoreAccount. # noqa: E501
:type: int
"""
self._object_count = object_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ObjectStoreAccount, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObjectStoreAccount):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
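# --- Usage sketch (illustrative addition, not produced by the code generator) ---
# Shows how the generated model can be populated, serialised and compared. The field
# values below are made up for the example.
if __name__ == '__main__':
    account = ObjectStoreAccount(name='my-account', created=1577836800000, object_count=42)
    print(account.to_dict())
    print(account == ObjectStoreAccount(name='my-account', created=1577836800000, object_count=42))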
|
the-stack_106_22957 | import os
import re
import subprocess
import botocore
import boto3
import time
from packaging.version import Version
import pytest
import requests
from urllib3.util.retry import Retry
from invoke.context import Context
from botocore.exceptions import ClientError
from src.buildspec import Buildspec
from test.test_utils import (
LOGGER,
CONTAINER_TESTS_PREFIX,
ec2,
get_container_name,
get_framework_and_version_from_tag,
get_neuron_framework_and_version_from_tag,
is_canary_context,
is_tf_version,
is_dlc_cicd_context,
is_pr_context,
run_cmd_on_container,
start_container,
stop_and_remove_container,
is_time_for_canary_safety_scan,
is_mainline_context,
is_nightly_context,
get_repository_local_path,
get_repository_and_tag_from_image_uri,
get_python_version_from_image_uri,
is_tf_version,
get_processor_from_image_uri,
)
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run stray file test regularly on production images")
def test_stray_files(image):
"""
Test to ensure that unnecessary build artifacts are not present in any easily visible or tmp directories
:param image: ECR image URI
"""
ctx = Context()
container_name = get_container_name("test_tmp_dirs", image)
start_container(container_name, image, ctx)
# Running list of artifacts/artifact regular expressions we do not want in any of the directories
stray_artifacts = [r"\.py"]
# Running list of allowed files in the /tmp directory
allowed_tmp_files = ["hsperfdata_root"]
# Ensure stray artifacts are not in the tmp directory
tmp = run_cmd_on_container(container_name, ctx, "ls -A /tmp")
_assert_artifact_free(tmp, stray_artifacts)
# Ensure tmp dir is empty except for whitelisted files
tmp_files = tmp.stdout.split()
for tmp_file in tmp_files:
assert (
tmp_file in allowed_tmp_files
), f"Found unexpected file in tmp dir: {tmp_file}. Allowed tmp files: {allowed_tmp_files}"
# We always expect /var/tmp to be empty
var_tmp = run_cmd_on_container(container_name, ctx, "ls -A /var/tmp")
_assert_artifact_free(var_tmp, stray_artifacts)
assert var_tmp.stdout.strip() == ""
# Additional check of home and root directories to ensure that stray artifacts are not present
home = run_cmd_on_container(container_name, ctx, "ls -A ~")
_assert_artifact_free(home, stray_artifacts)
root = run_cmd_on_container(container_name, ctx, "ls -A /")
_assert_artifact_free(root, stray_artifacts)
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run python version test regularly on production images")
def test_python_version(image):
"""
Check that the python version in the image tag is the same as the one on a running container.
:param image: ECR image URI
"""
ctx = Context()
container_name = get_container_name("py-version", image)
py_version = ""
for tag_split in image.split("-"):
if tag_split.startswith("py"):
if len(tag_split) > 3:
py_version = f"Python {tag_split[2]}.{tag_split[3]}"
else:
py_version = f"Python {tag_split[2]}"
start_container(container_name, image, ctx)
output = run_cmd_on_container(container_name, ctx, "python --version")
# Due to py2 deprecation, Python2 version gets streamed to stderr. Python installed via Conda also appears to
# stream to stderr (in some cases).
container_py_version = output.stdout + output.stderr
assert py_version in container_py_version, f"Cannot find {py_version} in {container_py_version}"
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
def test_ubuntu_version(image):
"""
Check that the ubuntu version in the image tag is the same as the one on a running container.
:param image: ECR image URI
"""
ctx = Context()
container_name = get_container_name("ubuntu-version", image)
ubuntu_version = ""
for tag_split in image.split("-"):
if tag_split.startswith("ubuntu"):
ubuntu_version = tag_split.split("ubuntu")[-1]
start_container(container_name, image, ctx)
output = run_cmd_on_container(container_name, ctx, "cat /etc/os-release")
container_ubuntu_version = output.stdout
assert "Ubuntu" in container_ubuntu_version
assert ubuntu_version in container_ubuntu_version
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run non-gpu framework version test regularly on production images")
def test_framework_version_cpu(image):
"""
Check that the framework version in the image tag is the same as the one on a running container.
This function tests CPU, EIA, and Neuron images.
:param image: ECR image URI
"""
if "gpu" in image:
pytest.skip(
"GPU images will have their framework version tested in test_framework_and_cuda_version_gpu")
image_repo_name, _ = get_repository_and_tag_from_image_uri(image)
if re.fullmatch(r"(pr-|beta-|nightly-)?tensorflow-inference(-eia)?", image_repo_name):
pytest.skip(
msg="TF inference for CPU/GPU/EIA does not have core tensorflow installed")
tested_framework, tag_framework_version = get_framework_and_version_from_tag(
image)
# Framework name may include huggingface
if tested_framework.startswith('huggingface_'):
tested_framework = tested_framework[len("huggingface_"):]
# Module name is torch
if tested_framework == "pytorch":
tested_framework = "torch"
elif tested_framework == "autogluon":
tested_framework = "autogluon.core"
ctx = Context()
container_name = get_container_name("framework-version", image)
start_container(container_name, image, ctx)
output = run_cmd_on_container(
container_name, ctx, f"import {tested_framework}; print({tested_framework}.__version__)", executable="python"
)
if is_canary_context():
assert tag_framework_version in output.stdout.strip()
else:
if tested_framework == "autogluon.core":
assert output.stdout.strip().startswith(tag_framework_version)
elif tested_framework == "torch" and Version(tag_framework_version) >= Version("1.10.0"):
torch_version_pattern = r"{torch_version}(\+cpu)".format(torch_version=tag_framework_version)
assert re.fullmatch(torch_version_pattern, output.stdout.strip()), (
f"torch.__version__ = {output.stdout.strip()} does not match {torch_version_pattern}\n"
f"Please specify framework version as X.Y.Z+cpu"
)
else:
if "neuron" in image:
assert tag_framework_version in output.stdout.strip()
else:
assert tag_framework_version == output.stdout.strip()
stop_and_remove_container(container_name, ctx)
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
def test_framework_and_neuron_sdk_version(neuron):
"""
    Gets the Neuron SDK tag from the image. Using that Neuron SDK tag and the framework
    version from the image, it derives the expected framework version, then checks that
    this expected framework version is the same as the one on a running container.
    This function tests only Neuron images.
:param image: ECR image URI
"""
image = neuron
tested_framework, neuron_tag_framework_version = get_neuron_framework_and_version_from_tag(image)
# neuron tag is there in pytorch images for now. Once all frameworks have it, then this will
# be removed
if neuron_tag_framework_version is None:
        if tested_framework == "pytorch":
            assert neuron_tag_framework_version is not None
else:
pytest.skip(msg="Neuron SDK tag is not there as part of image")
if tested_framework == "pytorch":
tested_framework = "torch_neuron"
elif tested_framework == "tensorflow":
tested_framework = "tensorflow_neuron"
elif tested_framework == "mxnet":
tested_framework = "mxnet"
ctx = Context()
container_name = get_container_name("framework-version-neuron", image)
start_container(container_name, image, ctx)
output = run_cmd_on_container(
container_name, ctx, f"import {tested_framework}; print({tested_framework}.__version__)", executable="python"
)
if tested_framework == "mxnet":
        # TODO - For Neuron, the mx_neuron module does not support __version__ yet, so we can
        # only get the version of the base mxnet package. The base mxnet package carries the
        # framework version but not the Neuron semantic version. Until mx_neuron supports
        # __version__, do a minimal containment check rather than an exact match.
_ , tag_framework_version = get_framework_and_version_from_tag(image)
assert tag_framework_version == output.stdout.strip()
else:
assert neuron_tag_framework_version == output.stdout.strip()
stop_and_remove_container(container_name, ctx)
# TODO: Enable as canary once resource cleaning lambda is added
@pytest.mark.usefixtures("sagemaker", "huggingface")
@pytest.mark.model("N/A")
@pytest.mark.parametrize("ec2_instance_type", ["p3.2xlarge"], indirect=True)
def test_framework_and_cuda_version_gpu(gpu, ec2_connection):
"""
Check that the framework and cuda version in the image tag is the same as the one on a running container.
:param gpu: ECR image URI with "gpu" in the name
:param ec2_connection: fixture to establish connection with an ec2 instance
"""
image = gpu
tested_framework, tag_framework_version = get_framework_and_version_from_tag(
image)
# Framework Version Check #
# Skip framework version test for tensorflow-inference, since it doesn't have core TF installed
if "tensorflow-inference" not in image:
# Framework name may include huggingface
if tested_framework.startswith('huggingface_'):
tested_framework = tested_framework[len("huggingface_"):]
# Module name is "torch"
if tested_framework == "pytorch":
tested_framework = "torch"
elif tested_framework == "autogluon":
tested_framework = "autogluon.core"
cmd = f"import {tested_framework}; print({tested_framework}.__version__)"
output = ec2.execute_ec2_training_test(ec2_connection, image, cmd, executable="python")
if is_canary_context():
assert tag_framework_version in output.stdout.strip()
else:
if tested_framework == "autogluon.core":
assert output.stdout.strip().startswith(tag_framework_version)
elif tested_framework == "torch" and Version(tag_framework_version) >= Version("1.10.0"):
torch_version_pattern = r"{torch_version}(\+cu\d+)".format(torch_version=tag_framework_version)
assert re.fullmatch(torch_version_pattern, output.stdout.strip()), (
f"torch.__version__ = {output.stdout.strip()} does not match {torch_version_pattern}\n"
f"Please specify framework version as X.Y.Z+cuXXX"
)
else:
assert tag_framework_version == output.stdout.strip()
# CUDA Version Check #
cuda_version = re.search(r"-cu(\d+)-", image).group(1)
# MXNet inference/HF tensorflow inference and Autogluon containers do not currently have nvcc in /usr/local/cuda/bin, so check symlink
if "mxnet-inference" in image or "autogluon" in image or "huggingface-tensorflow-inference" in image:
cuda_cmd = "readlink /usr/local/cuda"
else:
cuda_cmd = "nvcc --version"
cuda_output = ec2.execute_ec2_training_test(
ec2_connection, image, cuda_cmd, container_name="cuda_version_test")
# Ensure that cuda version in tag is in the container
assert cuda_version in cuda_output.stdout.replace(".", "")
class DependencyCheckFailure(Exception):
pass
def _run_dependency_check_test(image, ec2_connection):
# Record any whitelisted medium/low severity CVEs; I.E. allowed_vulnerabilities = {CVE-1000-5555, CVE-9999-9999}
allowed_vulnerabilities = {
# Those vulnerabilities are fixed. Current openssl version is 1.1.1g. These are false positive
"CVE-2016-2109",
"CVE-2016-2177",
"CVE-2016-6303",
"CVE-2016-2182",
# CVE-2020-13936: vulnerability found in apache velocity package which is a dependency for dependency-check package. Hence, ignoring.
"CVE-2020-13936",
}
processor = get_processor_from_image_uri(image)
# Whitelist CVE #CVE-2021-3711 for DLCs where openssl is installed using apt-get
framework, _ = get_framework_and_version_from_tag(image)
short_fw_version = re.search(r"(\d+\.\d+)", image).group(1)
# Check that these versions have been matched on https://ubuntu.com/security/CVE-2021-3711 before adding
allow_openssl_cve_fw_versions = {
"tensorflow": {
"1.15": ["cpu", "gpu", "neuron"],
"2.3": ["cpu", "gpu"],
"2.4": ["cpu", "gpu"],
"2.5": ["cpu", "gpu", "neuron"],
"2.6": ["cpu", "gpu"],
},
"mxnet": {"1.8": ["neuron"], "1.9": ["cpu", "gpu", "graviton"]},
"pytorch": {"1.10": ["graviton"]},
"huggingface_pytorch": {"1.8": ["cpu", "gpu"], "1.9": ["cpu", "gpu"]},
"huggingface_tensorflow": {"2.4": ["cpu", "gpu"], "2.5": ["cpu", "gpu"]},
"autogluon": {"0.3": ["graviton"]},
}
if processor in allow_openssl_cve_fw_versions.get(framework, {}).get(short_fw_version, []):
allowed_vulnerabilities.add("CVE-2021-3711")
container_name = f"dep_check_{processor}"
report_addon = get_container_name("depcheck-report", image)
dependency_check_report = f"{report_addon}.html"
html_file = f"{container_name}:/build/dependency-check-report.html"
test_script = os.path.join(CONTAINER_TESTS_PREFIX, "testDependencyCheck")
# Execute test, copy results to s3
ec2.execute_ec2_training_test(
ec2_connection, image, test_script, container_name=container_name, bin_bash_entrypoint=True
)
ec2_connection.run(f"docker cp {html_file} ~/{dependency_check_report}")
ec2_connection.run(
f"aws s3 cp ~/{dependency_check_report} s3://dlc-dependency-check")
# Check for any vulnerabilities not mentioned in allowed_vulnerabilities
html_output = ec2_connection.run(
f"cat ~/{dependency_check_report}", hide=True).stdout
cves = re.findall(r">(CVE-\d+-\d+)</a>", html_output)
vulnerabilities = set(cves) - allowed_vulnerabilities
if vulnerabilities:
vulnerability_severity = {}
# Check NVD for vulnerability severity to provide this useful info in error message.
for vulnerability in vulnerabilities:
try:
cve_url = f"https://services.nvd.nist.gov/rest/json/cve/1.0/{vulnerability}"
session = requests.Session()
session.mount(
"https://",
requests.adapters.HTTPAdapter(max_retries=Retry(
total=5, status_forcelist=[404, 504, 502])),
)
response = session.get(cve_url)
if response.status_code == 200:
severity = (
response.json()
.get("result", {})
.get("CVE_Items", [{}])[0]
.get("impact", {})
.get("baseMetricV2", {})
.get("severity", "UNKNOWN")
)
if vulnerability_severity.get(severity):
vulnerability_severity[severity].append(vulnerability)
else:
vulnerability_severity[severity] = [vulnerability]
except ConnectionError:
LOGGER.exception(
f"Failed to load NIST data for CVE {vulnerability}")
# TODO: Remove this once we have whitelisted appropriate LOW/MEDIUM vulnerabilities
if not (vulnerability_severity.get("CRITICAL") or vulnerability_severity.get("HIGH")):
return
raise DependencyCheckFailure(
f"Unrecognized CVEs have been reported : {vulnerability_severity}. "
f"Allowed vulnerabilities are {allowed_vulnerabilities or None}. Please see "
f"{dependency_check_report} for more details."
)
@pytest.mark.usefixtures("sagemaker", "huggingface")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run dependency tests regularly on production images")
@pytest.mark.parametrize("ec2_instance_type", ["c5.4xlarge"], indirect=True)
@pytest.mark.skipif(
(is_canary_context() and not is_time_for_canary_safety_scan()),
reason="Executing test in canaries pipeline during only a limited period of time.",
)
def test_dependency_check_cpu(cpu, ec2_connection, cpu_only):
_run_dependency_check_test(cpu, ec2_connection)
@pytest.mark.usefixtures("sagemaker", "huggingface")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run dependency tests regularly on production images")
@pytest.mark.parametrize("ec2_instance_type", ["p3.2xlarge"], indirect=True)
@pytest.mark.skipif(
(is_canary_context() and not is_time_for_canary_safety_scan()),
reason="Executing test in canaries pipeline during only a limited period of time.",
)
def test_dependency_check_gpu(gpu, ec2_connection, gpu_only):
_run_dependency_check_test(gpu, ec2_connection)
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run dependency tests regularly on production images")
@pytest.mark.parametrize("ec2_instance_type", ["c5.4xlarge"], indirect=True)
@pytest.mark.skipif(
(is_canary_context() and not is_time_for_canary_safety_scan()),
reason="Executing test in canaries pipeline during only a limited period of time.",
)
def test_dependency_check_eia(eia, ec2_connection, eia_only):
_run_dependency_check_test(eia, ec2_connection)
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run dependency tests regularly on production images")
@pytest.mark.parametrize("ec2_instance_type", ["inf1.xlarge"], indirect=True)
@pytest.mark.skipif(
(is_canary_context() and not is_time_for_canary_safety_scan()),
reason="Executing test in canaries pipeline during only a limited period of time.",
)
def test_dependency_check_neuron(neuron, ec2_connection, neuron_only):
_run_dependency_check_test(neuron, ec2_connection)
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run dependency tests regularly on production images")
@pytest.mark.parametrize("ec2_instance_type", ["c6g.4xlarge"], indirect=True)
@pytest.mark.skipif(
(is_canary_context() and not is_time_for_canary_safety_scan()),
reason="Executing test in canaries pipeline during only a limited period of time.",
)
def test_dependency_check_graviton(graviton, ec2_connection, graviton_only):
_run_dependency_check_test(graviton, ec2_connection)
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
def test_dataclasses_check(image):
"""
    Ensure the dataclasses pip package is not installed for Python 3.7 and above.
    The Python version retrieved from the ECR image URI is expected in the format `py<major_version><minor_version>`
:param image: ECR image URI
"""
ctx = Context()
pip_package = "dataclasses"
container_name = get_container_name("dataclasses-check", image)
python_version = get_python_version_from_image_uri(image).replace("py","")
python_version = int(python_version)
if python_version >= 37:
start_container(container_name, image, ctx)
output = run_cmd_on_container(
container_name, ctx, f"pip show {pip_package}", warn=True)
if output.return_code == 0:
pytest.fail(
f"{pip_package} package exists in the DLC image {image} that has py{python_version} version which is greater than py36 version")
else:
LOGGER.info(
f"{pip_package} package does not exists in the DLC image {image}")
else:
pytest.skip(f"Skipping test for DLC image {image} that has py36 version as {pip_package} is not included in the python framework")
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.model("N/A")
@pytest.mark.canary("Run pip check test regularly on production images")
def test_pip_check(image):
"""
Ensure there are no broken requirements on the containers by running "pip check"
:param image: ECR image URI
"""
ctx = Context()
gpu_suffix = "-gpu" if "gpu" in image else ""
# TF inference containers do not have core tensorflow installed by design. Allowing for this pip check error
# to occur in order to catch other pip check issues that may be associated with TF inference
# smclarify binaries have s3fs->aiobotocore dependency which uses older version of botocore. temporarily
# allowing this to catch other issues
allowed_tf_exception = re.compile(
rf"^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires "
rf"tensorflow{gpu_suffix}, which is not installed.$"
)
allowed_smclarify_exception = re.compile(
r"^aiobotocore \d+(\.\d+)* has requirement botocore<\d+(\.\d+)*,>=\d+(\.\d+)*, "
r"but you have botocore \d+(\.\d+)*\.$"
)
# Add null entrypoint to ensure command exits immediately
output = ctx.run(
f"docker run --entrypoint='' {image} pip check", hide=True, warn=True)
if output.return_code != 0:
if not (allowed_tf_exception.match(output.stdout) or allowed_smclarify_exception.match(output.stdout)):
# Rerun pip check test if this is an unexpected failure
ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True)
@pytest.mark.usefixtures("sagemaker", "huggingface")
@pytest.mark.model("N/A")
def test_cuda_paths(gpu):
"""
Test to ensure that:
a. buildspec contains an entry to create the same image as the image URI
b. directory structure for GPU Dockerfiles has framework version, python version, and cuda version in it
:param gpu: gpu image uris
"""
image = gpu
if "example" in image:
pytest.skip(
"Skipping Example Dockerfiles which are not explicitly tied to a cuda version")
dlc_path = os.getcwd().split("/test/")[0]
job_type = "training" if "training" in image else "inference"
# Ensure that image has a supported framework
framework, framework_version = get_framework_and_version_from_tag(image)
# Get cuda, framework version, python version through regex
cuda_version = re.search(r"-(cu\d+)-", image).group(1)
framework_short_version = None
python_version = re.search(r"(py\d+)", image).group(1)
short_python_version = None
image_tag = re.search(
r":(\d+(\.\d+){2}(-transformers\d+(\.\d+){2})?-(gpu)-(py\d+)(-cu\d+)-(ubuntu\d+\.\d+)((-e3)?-example|-e3|-sagemaker)?)",
image,
).group(1)
# replacing '_' by '/' to handle huggingface_<framework> case
framework_path = framework.replace("_", "/")
framework_version_path = os.path.join(
dlc_path, framework_path, job_type, "docker", framework_version)
if not os.path.exists(framework_version_path):
framework_short_version = re.match(
r"(\d+.\d+)", framework_version).group(1)
framework_version_path = os.path.join(
dlc_path, framework_path, job_type, "docker", framework_short_version)
if not os.path.exists(os.path.join(framework_version_path, python_version)):
# Use the pyX version as opposed to the pyXY version if pyXY path does not exist
short_python_version = python_version[:3]
# Check buildspec for cuda version
buildspec = "buildspec.yml"
if is_tf_version("1", image):
buildspec = "buildspec-tf1.yml"
image_tag_in_buildspec = False
dockerfile_spec_abs_path = None
buildspec_path = os.path.join(dlc_path, framework_path, buildspec)
buildspec_def = Buildspec()
buildspec_def.load(buildspec_path)
for name, image_spec in buildspec_def["images"].items():
if image_spec["device_type"] == "gpu" and image_spec["tag"] == image_tag:
image_tag_in_buildspec = True
dockerfile_spec_abs_path = os.path.join(
os.path.dirname(
framework_version_path), image_spec["docker_file"].lstrip("docker/")
)
break
try:
assert image_tag_in_buildspec, f"Image tag {image_tag} not found in {buildspec_path}"
except AssertionError as e:
if not is_dlc_cicd_context():
LOGGER.warn(
f"{e} - not failing, as this is a(n) {os.getenv('BUILD_CONTEXT', 'empty')} build context.")
else:
raise
image_properties_expected_in_dockerfile_path = [
framework_short_version or framework_version,
short_python_version or python_version,
cuda_version,
]
assert all(prop in dockerfile_spec_abs_path for prop in image_properties_expected_in_dockerfile_path), (
f"Dockerfile location {dockerfile_spec_abs_path} does not contain all the image properties in "
f"{image_properties_expected_in_dockerfile_path}"
)
assert os.path.exists(
dockerfile_spec_abs_path), f"Cannot find dockerfile for {image} in {dockerfile_spec_abs_path}"
def _assert_artifact_free(output, stray_artifacts):
"""
Manage looping through assertions to determine that directories don't have known stray files.
:param output: Invoke result object
:param stray_artifacts: List of things that should not be present in these directories
"""
for artifact in stray_artifacts:
assert not re.search(
artifact, output.stdout
), f"Matched {artifact} in {output.stdout} while running {output.command}"
@pytest.mark.usefixtures("sagemaker")
@pytest.mark.integration("oss_compliance")
@pytest.mark.model("N/A")
@pytest.mark.skipif(not is_dlc_cicd_context(), reason="We need to test OSS compliance only on PRs and pipelines")
def test_oss_compliance(image):
"""
Run oss compliance check on a container to check if license attribution files exist.
And upload source of third party packages to S3 bucket.
"""
THIRD_PARTY_SOURCE_CODE_BUCKET = "aws-dlinfra-licenses"
THIRD_PARTY_SOURCE_CODE_BUCKET_PATH = "third_party_source_code"
file = "THIRD_PARTY_SOURCE_CODE_URLS"
container_name = get_container_name("oss_compliance", image)
context = Context()
local_repo_path = get_repository_local_path()
start_container(container_name, image, context)
# run compliance test to make sure license attribution files exists. testOSSCompliance is copied as part of Dockerfile
run_cmd_on_container(container_name, context,
"/usr/local/bin/testOSSCompliance /root")
try:
context.run(
f"docker cp {container_name}:/root/{file} {os.path.join(local_repo_path, file)}")
finally:
context.run(f"docker rm -f {container_name}", hide=True)
s3_resource = boto3.resource("s3")
with open(os.path.join(local_repo_path, file)) as source_code_file:
for line in source_code_file:
name, version, url = line.split(" ")
file_name = f"{name}_v{version}_source_code"
s3_object_path = f"{THIRD_PARTY_SOURCE_CODE_BUCKET_PATH}/{file_name}.tar.gz"
local_file_path = os.path.join(local_repo_path, file_name)
for i in range(3):
try:
if not os.path.isdir(local_file_path):
context.run(
f"git clone {url.rstrip()} {local_file_path}")
context.run(
f"tar -czvf {local_file_path}.tar.gz {local_file_path}")
except Exception as e:
time.sleep(1)
if i == 2:
LOGGER.error(f"Unable to clone git repo. Error: {e}")
raise
continue
try:
if os.path.exists(f"{local_file_path}.tar.gz"):
LOGGER.info(f"Uploading package to s3 bucket: {line}")
s3_resource.Object(
THIRD_PARTY_SOURCE_CODE_BUCKET, s3_object_path).load()
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "404":
try:
# using aws cli as using boto3 expects to upload folder by iterating through each file instead of entire folder.
context.run(
f"aws s3 cp {local_file_path}.tar.gz s3://{THIRD_PARTY_SOURCE_CODE_BUCKET}/{s3_object_path}"
)
object = s3_resource.Bucket(
THIRD_PARTY_SOURCE_CODE_BUCKET).Object(s3_object_path)
object.Acl().put(ACL="public-read")
except ClientError as e:
LOGGER.error(
f"Unable to upload source code to bucket {THIRD_PARTY_SOURCE_CODE_BUCKET}. Error: {e}"
)
raise
else:
LOGGER.error(
f"Unable to check if source code is present on bucket {THIRD_PARTY_SOURCE_CODE_BUCKET}. Error: {e}"
)
raise
|
the-stack_106_22958 | import matplotlib.pyplot as plt
import Variables as prep
import pandas as pd
import calendar
import shutil
import os
d = pd.read_csv('data.csv', encoding = "utf-8")
text_counts = d.groupby(['month', 'person'])['text'].count().reset_index(name='count')
days_count = d['dateTime'].dropna()
yearStart = (pd.to_datetime(days_count.reset_index(name='count').iloc[:,1].head(1)))[0].year
left2 = text_counts.loc[text_counts['person'] == prep.leftName].drop(['person'], axis = 1).reset_index(drop=True)
right2 = text_counts.loc[text_counts['person'] == prep.rightName].drop(['person'], axis = 1).reset_index(drop=True)
left2.index = left2['month']
right2.index = right2['month']
left = pd.DataFrame({'month': range(1, 13), 'count': 0})
left.index = left['month']
right = pd.DataFrame({'month': range(1, 13), 'count': 0})
right.index = right['month']
left['count'] = left2['count']
right['count'] = right2['count']
left = left.fillna(0)
right = right.fillna(0)
left = left.drop('month', 1).reset_index()
right = right.drop('month', 1).reset_index()
x = right.iloc[:,0]
x = x.drop_duplicates()
x = x.reset_index(drop=True)
t = []
for i in x:
if (i != 12):
l = int(i / 12)
i = int(i % 12)
t.append((str(calendar.month_abbr[int(i)]) + "\n" + str(l + yearStart)))
ax = plt.subplot(111)
ax.bar(x, right['count'], color='lightskyblue', bottom=left['count'])
ax.bar(x, left['count'], color='lightcoral')
ax.legend(labels=['Me', 'You'], loc="upper right", ncol=1, bbox_to_anchor=(1.3, 1))
plt.xticks(x, labels=t, rotation='0')
plt.subplots_adjust(left=0.12, right=0.79, top=0.95, bottom=0.22)
plt.margins(x=0)
plt.xlabel('Month')
plt.ylabel('Text Count')
plt.title('Messages Sent Per Month')
dir_path = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(dir_path, "Data")
try:
    os.mkdir(data_dir)
except OSError:
    print ("Directory Already Exists")
else:
    print ("Successfully created the directory")
shutil.rmtree('__pycache__', ignore_errors=True)
plt.savefig(os.path.join(data_dir, 'PerMonthMessagesBAR.png'))
|
the-stack_106_22959 | import os
import json
import numpy as np
import tensorflow as tf
from collections import OrderedDict
# these weights have been trained over thousands of images. They are designed to be multiplied by the loss
# of each layer to normalize the loss differential from layer to layer
layer_weights = {
"conv1_1": {
"content": 0.0003927100042346865,
"style": 0.27844879031181335
},
"conv1_2": {
"content": 2.99037346849218e-05,
"style": 0.0004943962558172643
},
"conv2_1": {
"content": 2.0568952095345594e-05,
"style": 0.0009304438135586679
},
"conv2_2": {
"content": 1.073586827260442e-05,
"style": 0.00040253016049973667
},
"conv3_1": {
"content": 1.0999920050380751e-05,
"style": 0.0001156232028733939
},
"conv3_2": {
"content": 1.0808796105266083e-05,
"style": 7.009495311649516e-05
},
"conv3_3": {
"content": 4.947870365867857e-06,
"style": 7.687774996156804e-06
},
"conv3_4": {
"content": 1.2470403589759371e-06,
"style": 8.033587732825254e-07
},
"conv4_1": {
"content": 1.4441507119045127e-06,
"style": 5.199814836487349e-07
},
"conv4_2": {
"content": 2.3558966404380044e-06,
"style": 2.2772749161958927e-06
},
"conv4_3": {
"content": 5.842243808729108e-06,
"style": 2.7995649361400865e-05
},
"conv4_4": {
"content": 3.0219671316444874e-05,
"style": 0.001985269133001566
},
"conv5_1": {
"content": 6.438765558414161e-05,
"style": 0.000784530770033598
},
"conv5_2": {
"content": 0.00033032899955287576,
"style": 0.018374426290392876
},
"conv5_3": {
"content": 0.0016348531935364008,
"style": 0.42564332485198975
},
"conv5_4": {
"content": 0.02764303795993328,
"style": 95.27446746826172
}
}
_weights_vgg_style = None
def _dequantize_weights(quantized_data, scale, min_val, original_dtype=np.float32):
return quantized_data.astype(original_dtype) * scale + min_val
def _get_dtype(dtype_string, is_tf=False):
if dtype_string == 'uint8':
return tf.uint8 if is_tf else np.uint8
elif dtype_string == 'uint16':
return tf.uint16 if is_tf else np.uint16
elif dtype_string == 'uint32':
return tf.uint32 if is_tf else np.uint32
elif dtype_string == 'uint64':
return tf.uint64 if is_tf else np.uint64
elif dtype_string == 'int16':
return tf.int16 if is_tf else np.int16
elif dtype_string == 'int32':
return tf.int32 if is_tf else np.int32
elif dtype_string == 'int64':
return tf.int64 if is_tf else np.int64
elif dtype_string == 'float16':
return tf.float16 if is_tf else np.float16
elif dtype_string == 'float32':
return tf.float32 if is_tf else np.float32
elif dtype_string == 'float64':
return tf.float64 if is_tf else np.float64
else:
raise ValueError('Unknown dtype {}'.format(dtype_string))
def _load_weights(dtype='float32'):
weights_folder = os.path.join(
os.path.dirname(__file__), 'weights', 'vgg19'
)
weight_dict = {}
manifest_path = os.path.join(weights_folder, 'manifest.json')
with open(manifest_path) as f:
manifest = json.load(f)
for weight_name, weight_obj in manifest['weights'].items():
weight_file_path = os.path.join(weights_folder, weight_obj['filename'])
with open(weight_file_path, "rb") as binary_file:
# Read the whole file at once
data = binary_file.read()
if 'quantization' in weight_obj:
target_dtype = _get_dtype(weight_obj['dtype'])
quant = weight_obj['quantization']
weight_np_quant = np.frombuffer(data, dtype=_get_dtype(quant['dtype']))
weight_np = _dequantize_weights(
quantized_data=weight_np_quant,
scale=quant['scale'],
min_val=quant['min_value'],
original_dtype=target_dtype
)
else:
weight_np = np.frombuffer(data, dtype=np.float32)
weights_reshaped = np.reshape(weight_np, tuple(weight_obj['shape']))
weights = tf.constant(
weights_reshaped, dtype=tf.float32, shape=weight_obj['shape'],
name='{}/{}'.format('vgg_style', weight_name)
)
if dtype == 'float16':
weights = tf.cast(weights, tf.float16)
weight_dict[weight_name] = weights
return weight_dict
def process(input_tensor, network=None, reuse_weights=True, dtype='float32'):
layers = [
'conv1_1', 'conv1_2', 'pool1',
'conv2_1', 'conv2_2', 'pool2',
'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4', 'pool3',
'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'pool4',
'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4', 'pool5'
]
if network is None:
network = OrderedDict()
def _conv_layer(inputs, kernel_weights, bias_weights):
conv_out = tf.nn.conv2d(
input=inputs,
filters=kernel_weights,
strides=(1, 1, 1, 1),
padding='SAME'
)
bias_added = tf.nn.bias_add(conv_out, bias_weights)
return tf.nn.relu(bias_added)
def _pool_layer(inputs):
return tf.nn.max_pool2d(
input=inputs,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='SAME'
)
def get_weights():
global _weights_vgg_style
nonlocal reuse_weights
nonlocal dtype
if not reuse_weights or _weights_vgg_style is None:
weights_vgg_style = _load_weights(dtype)
if reuse_weights:
_weights_vgg_style = weights_vgg_style
else:
weights_vgg_style = _weights_vgg_style
return weights_vgg_style
r, g, b = tf.split(axis=-1, num_or_size_splits=3, value=input_tensor)
mean_pixel = [103.939, 116.779, 123.68]
inputs = tf.concat(values=[b - mean_pixel[0], g - mean_pixel[1], r - mean_pixel[2]], axis=-1)
network['input'] = inputs
weights = get_weights()
current = network['input']
for name in layers:
kind = name[:4]
if kind == 'conv':
kernels = weights['{}/kernel'.format(name)]
bias = weights['{}/bias'.format(name)]
current = _conv_layer(current, kernels, bias)
network['{}'.format(name)] = current
elif kind == 'pool':
current = _pool_layer(current)
network['{}'.format(name)] = current
# return the network
return network
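# --- Usage sketch (illustrative addition) ---
# Builds the VGG-19 feature network on a dummy image batch and shows how the per-layer
# `layer_weights` defined at the top of this module would typically scale content/style
# loss terms. Running this requires the pretrained weight files under weights/vgg19
# (manifest.json plus the binary shards); the 224x224 input size is just an example.
if __name__ == '__main__':
    dummy_batch = tf.random.uniform((1, 224, 224, 3), minval=0.0, maxval=255.0)
    net = process(dummy_batch)
    for layer_name, w in layer_weights.items():
        activation = net[layer_name]
        # e.g. a normalised content term would be w['content'] * content_loss(activation, target)
        print(layer_name, activation.shape, 'content weight:', w['content'], 'style weight:', w['style'])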
|
the-stack_106_22963 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dd4hep(CMakePackage):
"""DD4hep is a software framework for providing a complete solution for
full detector description (geometry, materials, visualization, readout,
alignment, calibration, etc.) for the full experiment life cycle
(detector concept development, detector optimization, construction,
operation). It offers a consistent description through a single source
of detector information for simulation, reconstruction, analysis, etc.
It distributed under the LGPLv3 License."""
homepage = "https://dd4hep.web.cern.ch/dd4hep/"
url = "https://github.com/AIDASoft/DD4hep/archive/v01-12-01.tar.gz"
git = "https://github.com/AIDASoft/DD4hep.git"
maintainers = ['vvolkl', 'drbenmorgan']
version('master', branch='master')
version('1.13.1', sha256='83fa70cd74ce93b2f52f098388dff58d179f05ace5b50aea3f408bb8abf7cb73')
version('1.13.0', sha256='0b1f9d902ebe21a9178c1e41204c066b29f68c8836fd1d03a9ce979811ddb295')
version('1.12.1', sha256='85e8c775ec03c499ce10911e228342e757c81ce9ef2a9195cb253b85175a2e93')
version('1.12.0', sha256='133a1fb8ce0466d2482f3ebb03e60b3bebb9b2d3e33d14ba15c8fbb91706b398')
version('1.11.2', sha256='96a53dd26cb8df11c6dae54669fbc9cc3c90dd47c67e07b24be9a1341c95abc4')
version('1.11.1', sha256='d7902dd7f6744bbda92f6e303ad5a3410eec4a0d2195cdc86f6c1167e72893f0')
version('1.11.0', sha256='25643296f15f9d11ad4ad550b7c3b92e8974fc56f1ee8e4455501010789ae7b6')
version('1.10.0', sha256='1d6b5d1c368dc8bcedd9c61b7c7e1a44bad427f8bd34932516aff47c88a31d95')
# Workarounds for various TBB issues in DD4hep v1.11
# See https://github.com/AIDASoft/DD4hep/pull/613 .
patch('tbb-workarounds.patch', when='@1.11.0')
patch('tbb2.patch', when='@1.12.1')
variant('xercesc', default=False, description="Enable 'Detector Builders' based on XercesC")
variant('geant4', default=False, description="Enable the simulation part based on Geant4")
variant('assimp', default=False, description="Enable CAD interface based on Assimp")
variant('hepmc3', default=False, description="Enable build with hepmc3")
variant('lcio', default=False, description="Enable build with lcio")
depends_on('cmake @3.12:', type='build')
depends_on('boost @1.49:')
depends_on('root @6.08: +gdml +math +opengl +python +x')
extends('python')
depends_on('xerces-c', when='+xercesc')
depends_on('[email protected]:', when='+geant4')
depends_on('assimp', when='+assimp')
depends_on('hepmc3', when="+hepmc3")
depends_on('lcio', when="+lcio")
def cmake_args(self):
spec = self.spec
cxxstd = spec['root'].variants['cxxstd'].value
# root can be built with cxxstd=11, but dd4hep requires 14
if cxxstd == "11":
cxxstd = "14"
args = [
"-DCMAKE_CXX_STANDARD={0}".format(cxxstd),
"-DDD4HEP_USE_XERCESC={0}".format(spec.satisfies('+xercesc')),
"-DDD4HEP_USE_GEANT4={0}".format(spec.satisfies('+geant4')),
"-DDD4HEP_USE_LCIO={0}".format(spec.satisfies('+lcio')),
"-DDD4HEP_LOAD_ASSIMP={0}".format(spec.satisfies('+assimp')),
"-DDD4HEP_USE_HEPMC3={0}".format(spec.satisfies('+hepmc3')),
"-DBUILD_TESTING={0}".format(self.run_tests),
"-DBOOST_ROOT={0}".format(spec['boost'].prefix),
"-DBoost_NO_BOOST_CMAKE=ON",
"-DPYTHON_EXECUTABLE={0}".format(spec['python'].command.path),
]
return args
def setup_run_environment(self, env):
# used p.ex. in ddsim to find DDDetectors dir
env.set("DD4hepINSTALL", self.prefix)
env.set("DD4hep_DIR", self.prefix)
env.set("DD4hep_ROOT", self.prefix)
def url_for_version(self, version):
        # dd4hep release tags use dashes and zero-padded numbers
# the patch version is omitted when 0
# so for example v01-12-01, v01-12 ...
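        # e.g. version 1.12.1 -> https://github.com/AIDASoft/DD4hep/archive/v01-12-01.tar.gz
        #      version 1.12.0 -> https://github.com/AIDASoft/DD4hep/archive/v01-12.tar.gz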
major = (str(version[0]).zfill(2))
minor = (str(version[1]).zfill(2))
patch = (str(version[2]).zfill(2))
if version[2] == 0:
url = "https://github.com/AIDASoft/DD4hep/archive/v%s-%s.tar.gz" % (major, minor)
else:
url = "https://github.com/AIDASoft/DD4hep/archive/v%s-%s-%s.tar.gz" % (major, minor, patch)
return url
|
the-stack_106_22966 | import numpy as np
import cv2
import timeit
import hdf5storage
import math
import TrainNetwork.TN_BaseFunctions as basefunctions
from copy import copy
from copy import deepcopy as deepcopy
from MovingObjectDetector.BackgroundModel import BackgroundModel
from MovingObjectDetector.DetectionRefinement import DetectionRefinement
from SimpleTracker.KalmanFilter import KalmanFilter
from MovingObjectDetector.MOD_BaseFunctions import TimePropagate, TimePropagate_, draw_error_ellipse2d
from MovingObjectDetector.Init_Track_From_Groundtruth import init_Track_From_Groundtruth
class location:
def __init__(self, x, y, delta=None, points=None):
self.x = x
self.y = y
self.delta = delta
self.points = points
def distance(loc1, loc2):
x_diff = loc1.x - loc2.x
y_diff = loc1.y - loc2.y
return math.sqrt(x_diff * x_diff + y_diff * y_diff)
## to measure the difference between two tracks
## each track is a vector of locations
def diff_mean(track1, track2):
n = len(track1)
res = 0
for i in range(0, n):
res += distance(track1[i], track2[i])
return res * 1.0 / n
def diff_max(track1, track2):
n = len(track1)
res = 0
for i in range(0, n):
tmp = distance(track1[i], track2[i])
if res < tmp:
res = tmp
return res
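# Illustrative example (added for clarity, not part of the original script):
# >>> a = [location(0, 0), location(1, 1)]
# >>> b = [location(0, 1), location(1, 3)]
# >>> diff_mean(a, b)   # (1 + 2) / 2 = 1.5
# >>> diff_max(a, b)    # max(1, 2)   = 2.0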
####
def run_detection_main(attack, model_folder, imagefolder, input_image_idx, ROI_centre,
writeimagefolder, ROI_window,num_of_template):
    ## runs the WAMI moving-object detector/tracker twice (with and without the attack)
    ## writeimagefolder : output directory for the rendered ROI frames
ref_track = None
image_idx_offset = 0
model_binary, aveImg_binary, model_regression, aveImg_regression = basefunctions.ReadModels(model_folder)
# load transformation matrices
matlabfile = hdf5storage.loadmat(model_folder+'Data/TransformationMatrices_train.mat')
TransformationMatrices = matlabfile.get("TransMatrix")
# Load background
images = []
for i in range(num_of_template):
frame_idx = input_image_idx + image_idx_offset + i - num_of_template
ReadImage = cv2.imread(imagefolder + "frame%06d.png" % frame_idx, cv2.IMREAD_GRAYSCALE)
ReadImage = ReadImage[ROI_centre[1] - ROI_window:ROI_centre[1] + ROI_window + 1,
ROI_centre[0] - ROI_window:ROI_centre[0] + ROI_window + 1]
ROI_centre = TimePropagate_(ROI_centre, TransformationMatrices[frame_idx - 1][0])
ROI_centre = [int(i) for i in ROI_centre]
images.append(ReadImage)
bgt = BackgroundModel(num_of_template=num_of_template, templates=images)
# Work out initialisation of a track with groundtruth
frame_idx = input_image_idx + image_idx_offset
min_r = ROI_centre[1] - ROI_window
max_r = ROI_centre[1] + ROI_window
min_c = ROI_centre[0] - ROI_window
max_c = ROI_centre[0] + ROI_window
show_available_tracks = True
if show_available_tracks:
ImageForInitTrack = cv2.imread(imagefolder + "frame%06d.png" % frame_idx, cv2.IMREAD_GRAYSCALE)
Init_Candidate_tracks = init_Track_From_Groundtruth(TransformationMatrices, frame_idx, (min_r, max_r, min_c, max_c), Image=ImageForInitTrack)
print(Init_Candidate_tracks)
else:
Init_Candidate_tracks = init_Track_From_Groundtruth(TransformationMatrices, frame_idx, (min_r, max_r, min_c, max_c))
# initialise Kalman filter
init_idx = 11
if init_idx >= len(Init_Candidate_tracks):
init_idx = 0
print("warning: the init track index is unavailable.")
x = Init_Candidate_tracks[init_idx][0]
y = Init_Candidate_tracks[init_idx][1]
vx = Init_Candidate_tracks[init_idx][2]
vy = Init_Candidate_tracks[init_idx][3]
kf = KalmanFilter(np.array([[x], [y], [vx], [vy]]), np.diag([900, 900, 400, 400]), 5, 6)
kf_attack = deepcopy(kf)
track_attack_store = []
track_store = []
for i in range(20):
print("Starting the step %s:"%i)
starttime = timeit.default_timer()
# Read input image
frame_idx = input_image_idx + image_idx_offset + i
ReadImage = cv2.imread(imagefolder + "frame%06d.png" % frame_idx, cv2.IMREAD_GRAYSCALE)
input_image = ReadImage[ROI_centre[1] - ROI_window:ROI_centre[1] + ROI_window + 1,
ROI_centre[0] - ROI_window:ROI_centre[0] + ROI_window + 1]
ROI_centre = TimePropagate_(ROI_centre, TransformationMatrices[frame_idx - 1][0])
ROI_centre = [int(i) for i in ROI_centre]
Hs = bgt.doCalculateHomography(input_image)
bgt.doMotionCompensation(Hs, input_image.shape)
BackgroundSubtractionCentres, BackgroundSubtractionProperties = bgt.doBackgroundSubtraction(input_image, thres=8)
dr = DetectionRefinement(input_image, bgt.getCompensatedImages(), BackgroundSubtractionCentres,
BackgroundSubtractionProperties, model_binary, aveImg_binary, model_regression,
aveImg_regression,attack)
# dr.refinementID=refinementID
refinedDetections, refinedProperties = dr.doMovingVehicleRefinement()
regressedDetections = dr.doMovingVehiclePositionRegression()
regressedDetections = np.asarray(regressedDetections)
# Kalman filter update
if i > 0:
# tracking without attack
kf.TimePropagate(Hs[num_of_template - 1])
kf.predict()
kf.NearestNeighbourAssociator(regressedDetections)
kf.update()
track_x = kf.mu_t[0, 0]
track_y = kf.mu_t[1, 0]
# propagate all detections
track_store = TimePropagate(track_store, Hs[num_of_template - 1])
track_store.append(np.array([track_x, track_y]).reshape(2, 1))
# tracking with attack
kf_attack.TimePropagate(Hs[num_of_template - 1])
kf_attack.predict()
# the id in the regressed detections
regressionID = kf_attack.NearestNeighbourAssociator(regressedDetections)
# the id in the refinement detections (input to the CNN)
old_kfz = kf.z
if isinstance(regressionID, np.int64):
regression2refinedID = dr.regressedDetectionID[regressionID]
refinementID = dr.refinedDetectionsID[regression2refinedID]
print("Background subtraction id:" + str(refinementID))
print("Background subtraction id type:" + str(type(refinementID)))
else:
refinementID = None
print("Data Association failed (No detection is assigned to this track)...")
# here to play 'attack': to call the dr again with refinementID
if isinstance(refinementID, np.int64) and (i > 5):
dr.refinementID = refinementID
refinedDetections, refinedProperties = dr.doMovingVehicleRefinement()
regressedDetections = dr.doMovingVehiclePositionRegression()
regressedDetections = np.asarray(regressedDetections)
# the id in the regressed detections
regressionID = kf_attack.NearestNeighbourAssociator(regressedDetections)
new_kfz = kf_attack.z
print('*********************')
print(old_kfz)
print('####')
print(new_kfz)
print('*********************')
# the id in the refinement detections (input to the CNN)
print('#### old refinementID', refinementID)
if regressionID is None:
print('#### new refinementID does not exist, because there is no associated detection')
else:
regression2refinedID = dr.regressedDetectionID[regressionID]
refinementID = dr.refinedDetectionsID[regression2refinedID]
print('#### new refinementID', refinementID)
kf_attack.update()
track_attack_x = kf_attack.mu_t[0, 0]
track_attack_y = kf_attack.mu_t[1, 0]
# propagate all detections
track_attack_store = TimePropagate(track_attack_store, Hs[num_of_template - 1])
track_attack_store.append(np.array([track_attack_x, track_attack_y]).reshape(2, 1))
        print('Estimated State (Attacked): ' + str(kf_attack.mu_t.transpose()))
else:
track_attack_x = kf_attack.mu_t[0, 0]
track_attack_y = kf_attack.mu_t[1, 0]
track_attack_store.append(np.array([track_attack_x, track_attack_y]).reshape(2, 1))
track_x = kf.mu_t[0, 0]
track_y = kf.mu_t[1, 0]
track_store.append(np.array([track_x, track_y]).reshape(2, 1))
# update background
bgt.updateTemplate(input_image)
# plt.figure()
minx = np.int32(track_attack_x - 300)
if minx <= 0:
minx = 1
miny = np.int32(track_attack_y - 300)
if miny <= 0:
miny = 1
maxx = np.int32(track_attack_x + 301)
if maxx >= input_image.shape[1]:
maxx = input_image.shape[1]
maxy = np.int32(track_attack_y + 301)
if maxy >= input_image.shape[0]:
maxy = input_image.shape[0]
print("write roi image windows: " + str(miny) + "," + str(maxy) + "," + str(minx) + "," + str(maxx))
roi_image = np.repeat(np.expand_dims(input_image[miny:maxy, minx:maxx], -1), 3, axis=2)
# Not necessary: cv2.circle(roi_image, (301, 301), 10, (255, 0, 0), 1)
validRegressedDetections = np.int32(copy(regressedDetections))
validRegressedDetections[:, 0] = validRegressedDetections[:, 0] - minx
validRegressedDetections[:, 1] = validRegressedDetections[:, 1] - miny
for thisDetection in validRegressedDetections:
if (thisDetection[0] > 0) and (thisDetection[0] < 600) and (thisDetection[1] > 0) and (thisDetection[1] < 600):
cv2.circle(roi_image, (thisDetection[0], thisDetection[1]), 3, (100, 100, 0), -1)
for idx in range(1, len(track_attack_store)):
point1x = np.int32(track_attack_store[idx - 1][0, 0]) - minx
point1y = np.int32(track_attack_store[idx - 1][1, 0]) - miny
point2x = np.int32(track_attack_store[idx][0, 0]) - minx
point2y = np.int32(track_attack_store[idx][1, 0]) - miny
cv2.line(roi_image, (point1x, point1y), (point2x, point2y), (0, 0, 255), 1)
for idx in range(1, len(track_store)):
point1x = np.int32(track_store[idx - 1][0, 0]) - minx
point1y = np.int32(track_store[idx - 1][1, 0]) - miny
point2x = np.int32(track_store[idx][0, 0]) - minx
point2y = np.int32(track_store[idx][1, 0]) - miny
cv2.line(roi_image, (point1x, point1y), (point2x, point2y), (0, 255, 0), 1)
# draw_error_ellipse2d(roi_image, (kf1.mu_t[0]-minx, kf1.mu_t[1]-miny), kf1.sigma_t)
# cv2.circle(input_image, (np.int32(trackx), np.int32(tracky)), 15, (255, 0, 0), 3)
#print("writing into %s"%(writeimagefolder + "%05d.png" % i))
cv2.imwrite(writeimagefolder + "%05d.png" % i, roi_image)
"""
plt.figure()
plt.imshow(np.repeat(np.expand_dims(input_image, -1), 3, axis=2))
#plt.plot(BackgroundSubtractionCentres[:,0], BackgroundSubtractionCentres[:,1], 'g.')
#plt.plot(refinedDetections[:,0], refinedDetections[:,1], 'y.')
plt.plot(np.int32(regressedDetections[:,0]), np.int32(regressedDetections[:,1]), 'r.', markersize=3)
plt.plot(np.int32(trackx), np.int32(tracky), 'yo', markersize=5)
plt.show()
"""
endtime = timeit.default_timer()
print("Processing Time (Total): " + str(endtime - starttime) + " s... ")
print('-------------------------------------------------------------------------------------------------------')
the-stack_106_22967
import os
import numpy as np
import pandas as pd
from torchvision import transforms as tf
from utils.face_processing import RandomLowQuality, RandomHorizontalFlip
import config as cfg
from utils.io import makedirs
import cv2
from skimage import io
from utils import face_processing as fp, face_processing
# To avoid exceptions when loading truncated image files
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def save_to_dataframe(preds, annots):
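    """Wrap the prediction array in a DataFrame whose columns match the number of
    outputs (AffectNet/EmotiW classes, valence/arousal, or both) and attach
    filename and ground-truth columns from `annots` when they are available."""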
from datasets.affectnet import CLASS_NAMES as affectnet_classes
from datasets.emotiw import EmotiW
def load_rotations_into_dataframe(df, rotation_dir):
rots = []
for fname in df['filename']:
imname = fname.split('/')[1].split('.')[0]
rots.append(np.loadtxt(os.path.join(rotation_dir, imname + '_rot.txt')).transpose())
rots = np.vstack(rots)
        rots = np.rad2deg(rots)  # radians -> degrees
df['pitch'] = rots[:,0]
df['yaw'] = rots[:,1]
df['roll'] = rots[:,2]
return df
if preds.shape[1] == 8:
columns=affectnet_classes
elif preds.shape[1] == 7:
columns=EmotiW.classes[:7]
elif preds.shape[1] == 2:
columns=['valence', 'arousal']
elif preds.shape[1] == 8+2:
columns=['valence', 'arousal']+affectnet_classes
elif preds.shape[1] > 8+2:
columns=None
else:
raise ValueError
if preds.shape[1] == 1:
preds = np.repeat(preds, repeats=2, axis=1)
df = pd.DataFrame(preds, columns=columns)
try:
df.insert(0, 'filename', annots['filename'].tolist())
except KeyError:
pass
if preds.shape[1] > 2:
df['class'] = np.argmax(preds[:, -8:], axis=1)
try:
df['gt_class'] = annots['class'].tolist()
except KeyError:
df['gt_class'] = annots['emotion_plus'].tolist()
try:
df['gt_valence'] = annots['valence'].tolist()
df['gt_arousal'] = annots['arousal'].tolist()
except KeyError:
pass
# rotation_dir = os.path.join(dataset.feature_root, '3D')
# df = load_rotations_into_dataframe(df, rotation_dir)
return df
def save_results(filepath, preds, dataset):
df = save_to_dataframe(preds, dataset)
df.to_csv(filepath, index=False)
def denormalize(tensor):
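    """Add the VGGFace channel means back onto a normalised tensor in place,
    handling both channels-first and channels-last layouts."""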
# assert(len(tensor.shape[1] == 3)
if tensor.shape[1] == 3:
tensor[:, 0] += 0.518
tensor[:, 1] += 0.418
tensor[:, 2] += 0.361
elif tensor.shape[-1] == 3:
tensor[..., 0] += 0.518
tensor[..., 1] += 0.418
tensor[..., 2] += 0.361
def denormalized(tensor):
# assert(len(tensor.shape[1] == 3)
if isinstance(tensor, np.ndarray):
t = tensor.copy()
else:
t = tensor.clone()
denormalize(t)
return t
def read_openface_detection(lmFilepath, numpy_lmFilepath=None, from_sequence=False, use_cache=True,
return_num_faces=False, expected_face_center=None):
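    """Load the OpenFace detection for one frame: use the cached .npz when present,
    otherwise parse the OpenFace CSV and, if several faces were detected, keep the
    one closest to `expected_face_center`. Returns [confidence, 68x2 landmarks,
    (pitch, yaw, roll)] (plus the face count if requested); zeros on failure."""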
num_faces_in_image = 0
try:
if numpy_lmFilepath is not None:
npfile = numpy_lmFilepath + '.npz'
else:
npfile = lmFilepath + '.npz'
if os.path.isfile(npfile) and use_cache:
try:
data = np.load(npfile)
of_conf, landmarks, pose = [data[arr] for arr in data.files]
if of_conf > 0:
num_faces_in_image = 1
except:
print('Could not open file {}'.format(npfile))
raise
else:
if from_sequence:
lmFilepath = lmFilepath.replace('features', 'features_sequence')
lmDir, fname = os.path.split(lmFilepath)
clip_name = os.path.split(lmDir)[1]
lmFilepath = os.path.join(lmDir, clip_name)
features = pd.read_csv(lmFilepath + '.csv', skipinitialspace=True)
frame_num = int(os.path.splitext(fname)[0])
features = features[features.frame == frame_num]
else:
features = pd.read_csv(lmFilepath + '.csv', skipinitialspace=True)
features.sort_values('confidence', ascending=False, inplace=True)
selected_face_id = 0
num_faces_in_image = len(features)
if num_faces_in_image > 1 and expected_face_center is not None:
max_face_size = 0
min_distance = 1000
for fid in range(len(features)):
face = features.iloc[fid]
# if face.confidence < 0.2:
# continue
landmarks_x = face.as_matrix(columns=['x_{}'.format(i) for i in range(68)])
landmarks_y = face.as_matrix(columns=['y_{}'.format(i) for i in range(68)])
landmarks = np.vstack((landmarks_x, landmarks_y)).T
face_center = landmarks.mean(axis=0)
distance = ((face_center - expected_face_center)**2).sum()**0.5
if distance < min_distance:
min_distance = distance
selected_face_id = fid
# print("Warning: {} faces in image {}!".format(len(features), lmFilepath))
# cv2.imshow('read_openface_detection', cv2.imread(lmFilepath.replace('features', 'crops/tight')+'.jpg'))
# cv2.waitKey()
# width = landmarks_x.max() - landmarks_x.min()
# height = landmarks_y.max() - landmarks_y.min()
# face_size = np.sqrt(height**2 + width**2)
# if face_size > max_face_size:
# max_face_size = face_size
# selected_face_id = fid
# if num_faces_in_image > 1:
# min_dist = 125
# for fid in range(len(features)):
# face = features.iloc[fid]
# landmarks_x = face.as_matrix(columns=['x_{}'.format(i) for i in range(68)])
# landmarks_y = face.as_matrix(columns=['y_{}'.format(i) for i in range(68)])
# landmarks = np.vstack((landmarks_x, landmarks_y)).T
# face_center = landmarks.mean(axis=0)
# image_center = [125,125]
# dist_image_center = ((face_center - image_center)**2).sum()**0.5
# if dist_image_center < min_dist:
# min_dist = dist_image_center
# selected_face_id = fid
try:
face = features.iloc[selected_face_id]
except KeyError:
face = features
of_conf = face.confidence
landmarks_x = face.as_matrix(columns=['x_{}'.format(i) for i in range(68)])
landmarks_y = face.as_matrix(columns=['y_{}'.format(i) for i in range(68)])
landmarks = np.vstack((landmarks_x, landmarks_y)).T
pitch = face.pose_Rx
yaw = face.pose_Ry
roll = face.pose_Rz
pose = np.array((pitch, yaw, roll), dtype=np.float32)
if numpy_lmFilepath is not None:
makedirs(npfile)
np.savez(npfile, of_conf, landmarks, pose)
except IOError as e:
# raise IOError("\tError: Could not load landmarks from file {}!".format(lmFilepath))
# pass
# print(e)
of_conf = 0
landmarks = np.zeros((68,2), dtype=np.float32)
pose = np.zeros(3, dtype=np.float32)
result = [of_conf, landmarks.astype(np.float32), pose]
if return_num_faces:
result += [num_faces_in_image]
return result
def read_300W_detection(lmFilepath):
lms = []
with open(lmFilepath) as f:
for line in f:
try:
x,y = [float(e) for e in line.split()]
lms.append((x, y))
except:
pass
assert(len(lms) == 68)
landmarks = np.vstack(lms)
return landmarks
def build_transform(deterministic, color, daug=0):
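    """Return the training-time face augmentation transform; `daug` selects the
    strength of the random affine augmentation applied after the horizontal flip."""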
transforms = []
if not deterministic:
transforms = [
RandomLowQuality(),
RandomHorizontalFlip(),
tf.ToPILImage(),
tf.ColorJitter(brightness=0.2, contrast=0.2),
# tf.RandomRotation(10, resample=PIL.Image.BICUBIC),
tf.RandomResizedCrop(cfg.CROP_SIZE, scale=(0.95, 1.0)),
tf.RandomCrop(cfg.INPUT_SIZE)
]
if color:
transforms += [tf.RandomGrayscale(0.1)]
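    # NOTE: `transforms` is re-initialised on the next line, so only the flip and
    # the optional affine augmentation selected by `daug` are actually applied;
    # the torchvision pipeline built above is currently unused.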
transforms = [fp.RandomHorizontalFlip(0.5)]
if daug == 1:
transforms += [fp.RandomAffine(3, translate=[0.025,0.025], scale=[0.975, 1.025], shear=0, keep_aspect=False)]
elif daug == 2:
transforms += [fp.RandomAffine(3, translate=[0.035,0.035], scale=[0.970, 1.030], shear=2, keep_aspect=False)]
elif daug == 3:
transforms += [fp.RandomAffine(20, translate=[0.035,0.035], scale=[0.970, 1.030], shear=5, keep_aspect=False)]
elif daug == 4: # for roation invariance
# transforms += [fp.RandomAffine(degrees=45, translate=[0.030,0.030], scale=[0.97, 1.03], shear=0, keep_aspect=False)]
# transforms += [fp.RandomRotation(degrees=30)]
transforms += [fp.RandomAffine(45, translate=[0.035,0.035], scale=[0.940, 1.030], shear=5, keep_aspect=False)]
elif daug == 5: # for AFLW
transforms += [fp.RandomAffine(60, translate=[0.035,0.035], scale=[0.940, 1.030], shear=5, keep_aspect=False)]
elif daug == 6: # for LM CNN
transforms += [fp.RandomAffine(0, translate=[0.035,0.035], scale=[0.940, 1.030], shear=0, keep_aspect=False)]
elif daug == 7: # for CFPW profiles (shift left/right)
transforms += [fp.RandomAffine(10, translate=[0.05,0.035], scale=[0.940, 1.000], shear=0, keep_aspect=False)]
# transforms = [fp.CenterCrop(cfg.INPUT_SIZE)]
# transforms += [fp.ToTensor() ]
# transforms += [ fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1]) # VGGFace(2) ]
return tf.Compose(transforms)
def build_coarse_lmdet_transform(deterministic):
if deterministic:
transforms = [
# fp.Rescale(cfg.INPUT_SIZE*1.2),
# fp.RandomAffine(35, translate=[0.2,0.2]),
# fp.RandomAffine(shear=20),
# fp.RandomResizedCrop(cfg.INPUT_SIZE, p=1.0, scale=(0.4,1.0), keep_aspect=False),
# fp.RandomAffine(0, shear=0.5),
# fp.RandomAffine(40, translate=[0.15,0.15], scale=[0.70, 2.25], shear=15, keep_aspect=False),
# fp.RandomAffine(0, translate=[0.,0.], scale=[1.20, 1.20], shear=0, keep_aspect=True),
fp.CenterCrop(cfg.INPUT_SIZE)
]
else:
transforms = [
fp.RandomHorizontalFlip(0.5),
fp.RandomAffine(40, translate=[0.15,0.15], scale=[0.70, 2.25], shear=15, keep_aspect=False),
# # fp.Rescale(cfg.INPUT_SIZE*1.1),
# fp.RandomRotation(35),
# fp.RandomResizedCrop(cfg.INPUT_SIZE, p=1.0, scale=(0.65,1.0)),
fp.CenterCrop(cfg.INPUT_SIZE)
]
transforms += [fp.ToTensor(),
fp.Normalize([0.518, 0.418, 0.361], [1, 1, 1]), # VGGFace(2)
]
return tf.Compose(transforms)
def get_face(filename, fullsize_img_dir, cropped_img_dir, landmarks, pose=None, bb=None, size=(cfg.CROP_SIZE, cfg.CROP_SIZE),
use_cache=True, cropper=None):
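    """Return a cropped face image together with crop-adjusted landmarks and pose.
    A cached crop from `cropped_img_dir` is reused when available; otherwise the
    full-size image is loaded and cropped around the landmarks, falling back to a
    scaled bounding box when no landmarks are given."""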
filename_noext = os.path.splitext(filename)[0]
crop_filepath = os.path.join(cropped_img_dir, filename_noext + '.jpg')
is_cached_crop = False
if use_cache and os.path.isfile(crop_filepath):
try:
img = io.imread(crop_filepath)
except:
            raise IOError("\tError: Could not load cropped image {}!".format(crop_filepath))
if img.shape[:2] != size:
img = cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
is_cached_crop = True
else:
# Load image from dataset
img_path = os.path.join(fullsize_img_dir, filename)
try:
img = io.imread(img_path)
except:
raise IOError("\tError: Could not load image {}!".format(img_path))
if len(img.shape) == 2 or img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
assert(img.shape[2] == 3)
if (landmarks is None or not landmarks.any()) and not 'crops_celeba' in cropped_img_dir:
# if 'crops_celeba' in cropped_img_dir and not is_cached_crop:
# crop = face_processing.crop_celeba(img, size)
# else:
assert(bb is not None)
# Fall back to bounding box if no landmarks found
# print('falling back to bounding box')
crop = face_processing.crop_by_bb(img, face_processing.scale_bb(bb, f=1.075), size=size)
else:
if 'crops_celeba' in cropped_img_dir:
if is_cached_crop:
crop = img
else:
crop = face_processing.crop_celeba(img, size)
else:
# try:
# cropper.calculate_crop_parameters(img, landmarks, img_already_cropped=is_cached_crop)
# crop = cropper.apply_crop_to_image(img)
# landmarks, pose = cropper.apply_crop_to_landmarks(landmarks, pose)
crop, landmarks, pose = face_processing.crop_face(img,
landmarks,
img_already_cropped=is_cached_crop,
pose=pose,
output_size=size,
crop_by_eye_mouth_dist=cfg.CROP_BY_EYE_MOUTH_DIST,
align_face_orientation=cfg.CROP_ALIGN_ROTATION,
crop_square=cfg.CROP_SQUARE)
# except:
# print(filename)
# print(landmarks)
# crop = img
if use_cache and not is_cached_crop:
makedirs(crop_filepath)
io.imsave(crop_filepath, crop)
    return crop, landmarks, pose
the-stack_106_22968
import argparse
import os
import random
import shutil
import time
import warnings
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
import models.mnist as models
import datetime
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='[Derek]PyTorch All Classification Training')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=150, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
# Checkpoints
parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
#derek add
parser.add_argument('--dataset', type = str, help = 'mnist, cifar10, cifar100 or imagenet', default = 'mnist')
parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--imagenet_data', default='../../../data/imagenet', type=str, metavar='DIR',
help='path to imagenet dataset (default: none)')
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs.')
parser.add_argument('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--results', type = str, help = 'dir to save result txt files', default = 'results/')
args = parser.parse_args()
best_acc1 = 0
save_dir = args.results +'/' +args.dataset
if not os.path.exists(save_dir):
os.system('mkdir -p %s' % save_dir)
model_str=str(args.arch)+'_'+args.dataset
txtfile=save_dir+"/"+model_str+".txt"
nowTime=datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
if os.path.exists(txtfile):
os.system('mv %s %s' % (txtfile, txtfile+".bak-%s" % nowTime))
def main():
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
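    """Build the model, optimiser and MNIST loaders for one (optionally distributed)
    worker, then run the train/validate loop, logging and checkpointing the best
    top-1 accuracy."""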
global best_acc1
num_classes=10
args.gpu = gpu
with open(txtfile, "a") as myfile:
myfile.write('epoch: best_acc test_acc\n')
state = {k: v for k, v in args._get_kwargs()}
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
if args.arch.startswith('resnext'):
model = models.__dict__[args.arch](
cardinality=args.cardinality,
num_classes=num_classes,
depth=args.depth,
widen_factor=args.widen_factor,
dropRate=args.drop,
)
elif args.arch.startswith('densenet'):
model = models.__dict__[args.arch](
num_classes=num_classes,
depth=args.depth,
growthRate=args.growthRate,
compressionRate=args.compressionRate,
dropRate=args.drop,
)
elif args.arch.startswith('wrn'):
model = models.__dict__[args.arch](
num_classes=num_classes,
depth=args.depth,
widen_factor=args.widen_factor,
dropRate=args.drop,
)
elif args.arch.endswith('resnet'):
model = models.__dict__[args.arch](
num_classes=num_classes,
depth=args.depth,
block_name=args.block_name,
)
elif args.arch.endswith('pnasnet'):
model = models.__dict__[args.arch](
num_classes=num_classes,
num_cells=6,
num_planes=44,
cell_type=args.cell_type,
)
elif args.arch.endswith('shufflnetv2'):
model = models.__dict__[args.arch](
num_classes=num_classes,
net_size=args.net_size,
)
else:
model = models.__dict__[args.arch](num_classes=num_classes)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data
print('==> Preparing dataset %s' % args.dataset)
if args.dataset=='mnist':
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data/MNIST', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data/MNIST', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
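            # NOTE: the MNIST loaders above are built without a DistributedSampler,
            # so `train_sampler` is not defined in this script; one would need to be
            # added for distributed training to work.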
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
with open(txtfile, "a") as myfile:
myfile.write(str(int(epoch)) + ': '+' ' + str(best_acc1.item()) +' '+ str(acc1.item()) +"\n")
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
#with open(txtfile, "a") as myfile:
# myfile.write(str(int(epoch)) + ': ' +'top1 =' + str(top1.avg.item()) +' '+'top5 ='+ str(top5.avg.item())+"\n")
return top1.avg
'''
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
'''
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, args):
    """Step-decay the learning rate: multiply args.lr by gamma once for every
    milestone in args.schedule that has already passed."""
    lr = args.lr * (args.gamma ** sum(1 for milestone in args.schedule if epoch >= milestone))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
the-stack_106_22970
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 15:22:07 2017
@author: jkcm
"""
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
import numpy as np
import os
import glob
import xarray as xr
from importlib import reload
import pickle
import netCDF4 as nc
from scipy.stats import linregress
import warnings
import sys
sys.path.insert(0, '/home/disk/p/jkcm/Code')
from Lagrangian_CSET import utils
from Lagrangian_CSET import met_utils as mu
from Lagrangian_CSET.LoopTimer import LoopTimer
CSET_dir = r'/home/disk/eos4/jkcm/Data/CSET'
flight_dir = os.path.join(CSET_dir, 'flight_data')
"""
get all the profiles from CSET (upsoundings, downsoundings)
for each profile, estimate the inversion height using:
RH 50%
Chris' fancy one
at least 80% of the time, one could
identify a 'RH inversion base' as the altitude of max RH for which RH(zi
+ 300 m) - RH(zi) < -0.3. If such a layer does not exist below 4 km or
the top of the sounding, we say an inversion is not present.
heffter
Richardson
"""
def get_data_from_dropsonde(file):
# file = os.path.join(dropsonde_dir, 'D20150712_201424_PQC.nc')
data = xr.open_dataset(file)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
index = data.GPSAlt.values < 4000
ret = {}
ret['TIME']=utils.as_datetime(data.time_offset.values[index])
ret['GGLAT']=data.Lat.values[index]
ret['GGLON']=data.Lon.values[index]
ret['GGALT']=data.GPSAlt.values[index]
ret['RHUM']=data.RH.values[index]
ret['ATX']=data.Temp.values[index]+273.15
ret['PSX']=data.Press.values[index]
ret['DPXC']= data.Dewpt.values[index]+273.15
ret['QV'] = mu.qv_from_p_T_RH(ret['PSX']*100, ret['ATX'], ret['RHUM'])*1000
ret['MR'] = ret['QV']/(1-ret['QV']/1000)
ret['TVIR'] = mu.tvir_from_T_w(ret['ATX'], ret['MR']/1000)
ret['DENS'] = mu.density_from_p_Tv(ret['PSX']*100, ret['TVIR'])
ret['THETA']= mu.theta_from_p_T(ret['PSX'], ret['ATX'])
ret['THETAE']= mu.thetae_from_t_tdew_mr_p(ret['ATX'], ret['DPXC'], ret['MR']/1000, ret['PSX']*100) #equiv pot temp, K we can get this if we really want
ret['QL'] = np.full_like(ret['PSX'], fill_value=np.nan)
ret['THETAL'] = np.full_like(ret['PSX'], fill_value=np.nan)
ret['PLWCC']= np.full_like(ret['PSX'], fill_value=np.nan)
return ret
def get_GOES_cloud_top_height(lat, lon, time, percentile, degrees=2, remove_highcloud=True):
"""
Get the GOES cloud top height value from to the space/time, filtering for high cloud
"""
variable_list = ['reflectance_vis', 'cloud_phase', 'cloud_top_height', 'cloud_top_temperature']
data = utils.get_GOES_data(variable_list, lat=lat, lon=lon, time=time, degree=degrees)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
warm_low_cloud = (
(data['cloud_phase'] == 1) &
(data['cloud_top_height'] < 4.) &
(data['cloud_top_temperature'] > 273.15))
cloud_top_heights = data['cloud_top_height'][warm_low_cloud].flatten()
if percentile == "mean":
res = np.nanmean(cloud_top_heights)
elif type(percentile) in (float, int) and percentile < 100 and percentile > 0:
res = np.nanpercentile(cloud_top_heights, percentile)
else:
raise TypeError("percentile should be an int, float, or 'mean'")
return res
def get_data_from_flight(flight_num, start=None, end=None, var_list=[]):
flight_file = glob.glob(os.path.join(flight_dir, 'RF{:02d}*.nc'.format(flight_num)))[0]
# data = xr.open_dataset(flight_file, decode_times=False)
# data['time'] = nc.num2date(data.Time[:],units=data.Time.units)
data = xr.open_dataset(flight_file, decode_times=True)
dates = utils.as_datetime(data.Time.values)
alt = data['GGALT'].values
if start is None:
start = dates[0]
if end is None:
end = dates[-1]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
index = np.logical_and(np.logical_and(dates >= start, dates <= end), alt < 3600)
    # [(k, v.long_name) for k, v in data.data_vars.items() if 'Pres' in v.long_name]  # exploratory variable lookup, no effect
ret = {}
ret['TIME'] = dates[index]
for i in var_list:
if i == 'ATX':
ret[i] = data[i].values[index]+273.15
else:
ret[i] = data[i].values[index]
ret['DENS'] = mu.density_from_p_Tv(data['PSX'].values[index]*100, data['TVIR'].values[index]+273.15)
ret['QL'] = data['PLWCC'].values[index]/ret['DENS']
ret['THETAL'] = mu.get_liquid_water_theta(ret['ATX'], ret['THETA'], ret['QL'])
ret['QV'] = data['MR'].values[index]/(1+data['MR'].values[index]/1000)
return ret
def calc_decoupling_and_zi_from_flight_data(flight_data, usetheta=False):
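    """Assemble a sounding dict from one flight profile (adding density, liquid
    water and theta_l) and return the merged decoupling and inversion-height
    estimates from met_utils."""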
var_list = ['GGLAT', 'GGLON', 'GGALT', 'RHUM', 'ATX', 'MR', 'THETAE', 'THETA', 'PSX', 'DPXC', 'PLWCC']
sounding_dict = {}
sounding_dict['TIME'] = flight_data.time.values
for i in var_list:
sounding_dict[i] = flight_data[i].values
if 'ATX' in var_list:
sounding_dict['ATX'] = sounding_dict['ATX'] + 273.15
sounding_dict['DENS'] = mu.density_from_p_Tv(flight_data['PSX'].values*100, flight_data['TVIR'].values+273.15)
sounding_dict['QL'] = flight_data['PLWCC'].values/sounding_dict['DENS']
sounding_dict['THETAL'] = mu.get_liquid_water_theta(
sounding_dict['ATX'], sounding_dict['THETA'], sounding_dict['QL'])
sounding_dict['QV'] = flight_data['MR'].values/(1+flight_data['MR'].values/1000)
decoupling_dict = mu.calc_decoupling_from_sounding(sounding_dict, usetheta=usetheta)
zi_dict = mu.calc_zi_from_sounding(sounding_dict)
return {**decoupling_dict, **zi_dict}
def label_points(x, y, labs, ax):
for label, x, y, in zip(labs, x, y):
ax.annotate(
label,
xy=(x, y), xytext=(-20, 20),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', alpha=0.5),
arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
# %% Main execution
if __name__ == "__main__":
path = r'/home/disk/eos4/jkcm/Data/CSET/LookupTable_all_flights.xls'
flight = utils.read_CSET_Lookup_Table(path,
rf_num='all',
sequences=['d', 'k'],
variables=['Date', 'ST', 'ET'])
start_times = utils.as_datetime([utils.CSET_date_from_table(d, t) for d, t in
zip(flight['Date']['values'], flight['ST']['values'])])
end_times = utils.as_datetime([utils.CSET_date_from_table(d, t) for d, t in
zip(flight['Date']['values'], flight['ET']['values'])])
sounding_times = list(zip(flight['rf'], start_times, end_times))
# %% read in data
# get flight info for each sounding
var_list = ['GGLAT', 'GGLON', 'GGALT', 'RHUM', 'ATX', 'MR', 'THETAE', 'THETA', 'PSX', 'DPXC', 'PLWCC']
soundings = []
lt = LoopTimer(len(sounding_times))
for i in sounding_times:
lt.update()
soundings.append(get_data_from_flight(i[0], i[1], i[2], var_list))
add_dropsondes = True
if add_dropsondes:
sondes = []
sonde_files = glob.glob(os.path.join(utils.dropsonde_dir, "*.nc"))
for f in sonde_files:
sondes.append(get_data_from_dropsonde(f))
# %% calc inv and dec
# get inversion height estimates for each sounding
heights = []
for i, snd in enumerate(soundings):
heights.append(mu.calc_zi_from_sounding(snd))
snd_heights = []
for sonde in sondes:
snd_heights.append(mu.calc_zi_from_sounding(sonde))
# get decoupling ests for each sounding
decouplings = []
lt = LoopTimer(len(soundings))
for i, snd in enumerate(soundings):
lt.update(i)
decouplings.append(mu.calc_decoupling_from_sounding(snd))
snd_decouplings = []
for sonde in sondes:
snd_decouplings.append(mu.calc_decoupling_from_sounding(sonde, usetheta=True))
# %% get goes data
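    # For each sounding, sample GOES cloud-top heights of warm, liquid-phase low
    # cloud in a 1-degree box around the sounding location/time and record several
    # percentiles of that distribution (plus a cloud-fraction estimate below).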
percentiles = [50, 75, 90, 95]
all_GOES_percentiles = {}
lt = LoopTimer(len(heights + snd_heights)*len(percentiles))
for percentile in percentiles:
GOES_cth = []
for i, hgt in enumerate((heights + snd_heights)):
lt.update()
goes_hgt = get_GOES_cloud_top_height(hgt['lat'], hgt['lon'], hgt['time'], percentile=percentile, degrees=1)
GOES_cth.append(goes_hgt)
all_GOES_percentiles[str(percentile)] = np.array(GOES_cth)
lt = LoopTimer(len(heights + snd_heights))
GOES_CF = []
for i, hgt in enumerate((heights + snd_heights)):
cf = get_GOES_cloud_fraction(hgt['lat'], hgt['lon'], hgt['time'], degrees=1)
GOES_CF.append(cf)
# %% Plots start here
# %%
# fig, ax = plt.subplots()
# for i,(snd,hgt) in enumerate(zip(soundings, heights)):
## for i in range(5):
# snd, hgt = soundings[i], heights[i]
## marker = '.' if hgt['Heff']['inversion'] else 'o'
# inv = hgt['Heff']['inversion']
# if inv:
# p = ax.plot(snd['QV'], snd['GGALT'])
# c = p[0].get_color()
# mfc = c if inv else 'w'
## ax.plot(snd['THETA'][hgt['Heff']['i_bot']], hgt['Heff']['z_bot'], '.', ms=20, c = c, mfc=mfc, mew=2)
# ax.set_ylim(0, 4000)
# fig, ax = plt.subplots()
# rhs = np.ones(len(soundings)) * 100
# for i,(snd,hgt) in enumerate(zip(soundings, heights)):
## for i in range(5):
## for i in lows:
# snd, hgt = soundings[i], heights[i]
## marker = '.' if hgt['Heff']['inversion'] else 'o'
# inv = hgt['RH50']['inversion']
# if inv:
# rhs[i] = snd['RHUM'][hgt['RH50']['i']]
# print('inv')
# p = ax.plot(snd['RHUM'], snd['GGALT'])
# c = p[0].get_color()
## mfc = c if inv else 'w'
## ax.plot(i, hgt['RH50']['z'], '.')
# ax.plot(snd['RHUM'][hgt['RH50']['i']], hgt['RH50']['z'], '.', ms=20, c=c)
## ax.plot(snd['THETA'][hgt['Heff']['i_bot']], hgt['Heff']['z_bot'], '.', ms=20, c = c, mfc=mfc, mew=2)
#
# ax.set_ylim(0, 3000)
# lows = np.argwhere(rhs < 40).flatten()
source = np.concatenate((np.full_like(heights, fill_value='gv'), np.full_like(snd_heights, fill_value='sonde')))
heights = heights + snd_heights
decouplings = decouplings + snd_decouplings
all_soundings = soundings + sondes
zi_RHCB = np.empty_like(heights, dtype=float)
zi_Heff_bot = np.empty_like(zi_RHCB)
zi_Heff_top = np.empty_like(zi_RHCB)
zi_RH50 = np.empty_like(zi_RHCB)
lon_p = np.empty_like(zi_RHCB)
d_theta_e = np.empty_like(zi_RHCB)
d_theta_l = np.empty_like(zi_RHCB)
d_qt = np.empty_like(zi_RHCB)
alpha_thetae = np.empty_like(zi_RHCB)
alpha_thetal = np.empty_like(zi_RHCB)
alpha_qt = np.empty_like(zi_RHCB)
goes_cf = np.empty_like(zi_RHCB)
lats = np.empty_like(zi_RHCB)
lons = np.empty_like(zi_RHCB)
Heff_inv_flag = np.empty_like(zi_RHCB)
RHCB_inv_flag = np.empty_like(zi_RHCB)
time = np.empty_like(zi_RHCB, dtype='object')
# zi_RHCB = np.empty_like(len(heights))
for i, (hgt, dec) in enumerate(zip(heights, decouplings)):
zi_RHCB[i] = hgt['RHCB']['z']
time[i] = hgt['time']
RHCB_inv_flag[i] = hgt['RHCB']['inversion']
zi_RH50[i] = hgt['RH50']['z']
zi_Heff_bot[i] = hgt['Heff']['z_bot']
zi_Heff_top[i] = hgt['Heff']['z_top']
Heff_inv_flag[i] = hgt['Heff']['inversion']
lon_p[i] = hgt['lon_p']
d_theta_e[i] = dec['d_theta_e']
d_theta_l[i] = dec['d_theta_l']
d_qt[i] = dec['d_qt']
alpha_thetae[i] = dec['alpha_thetae']
alpha_thetal[i] = dec['alpha_thetal']
alpha_qt[i] = dec['alpha_qt']
x = np.argsort(lon_p)
to_exclude = [153]
x = [i for i in x if i not in to_exclude]
zi_RHCB = zi_RHCB[x]
zi_Heff_bot = zi_Heff_bot[x]
zi_Heff_top = zi_Heff_top[x]
zi_RH50 = zi_RH50[x]
lon_p = lon_p[x]
d_theta_e = d_theta_e[x]
d_theta_l = d_theta_l[x]
d_qt = d_qt[x]
source = source[x]
time = time[x]
RHCB_inv_flag = RHCB_inv_flag[x]
Heff_inv_flag = Heff_inv_flag[x]
gv_i = source == 'gv'
labs = np.argsort(x)
alpha_thetae = alpha_thetae[x]
alpha_thetal = alpha_thetal[x]
alpha_qt = alpha_qt[x]
GOES_sorted = {}
for k, v in all_GOES_percentiles.items():
GOES_sorted[k] = v[x]
save_dict = {"lon_prime": lon_p,
"date": time,
"Heffter_inversion_base": zi_Heff_bot,
"Heffter_inversion_top": zi_Heff_top,
"Heffter_inversion_flag": Heff_inv_flag,
"RelHum_inversion_base": zi_RHCB,
"RHCV_inversion_flag": RHCB_inv_flag,
"d_theta": d_theta_l,
"d_q": d_qt,
"source": source}
savefile = r"/home/disk/eos4/jkcm/Data/CSET/Python/inversion_and_decoupling.pickle"
# with open(savefile, 'wb') as f:
# pickle.dump(save_dict, f)
# %% GOES_CTH vs zi
fig, ax = plt.subplots()
cols = list(reversed(['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']))
for k, v in GOES_sorted.items():
c = cols.pop()
mask = ~np.isnan(v)
p = ax.plot(zi_Heff_bot, v*1000, '.', c=c, label='{}th %ile'.format(k))
slp, icept, rval, _, _ = linregress(zi_Heff_bot[mask], v[mask]*1000)
x = np.arange(0,3500, 100)
ax.plot(x, icept + slp*x, c=p[0].get_color())
# p1 = ax.plot(zi_Heff_bot, GOES_sorted['50']*1000, '.', c='b', label='50th %ile')
# ax.plot(zi_Heff_bot, GOES_sorted['75']*1000, '.', c='r', label='75th %ile')
# ax.plot(zi_Heff_bot, GOES_sorted['90']*1000, '.', c='g', label='90th %ile')
# ax.plot(zi_Heff_bot, GOES_sorted['95']*1000, '.', c='y', label='95th %ile')
ax.plot([0,3500],[0,3500], c='k')
ax.set_ylabel('GOES CTH percentile')
ax.set_xlabel('Heffter inversion base')
ax.legend()
# %%
all_flight_dict = {}
for flt in np.arange(1,17):
flight_dict = {}
(s, e) = utils.get_flight_start_end_times(flt, path)
s, e = utils.as_datetime(s), utils.as_datetime(e)
mask = np.logical_and(save_dict['date'] >= s, save_dict['date'] <= e)
print(sum(mask))
for k,v in save_dict.items():
flight_dict[k] = v[mask]
all_flight_dict['rf{:02}'.format(flt)] = flight_dict
savefile = r"/home/disk/eos4/jkcm/Data/CSET/Python/inversion_and_decoupling_by_flight.pickle"
with open(savefile, 'wb') as f:
pickle.dump(all_flight_dict, f)
# %%
fig, ax = plt.subplots()
ax.set_title("RH50 vs Heffter Top")
for i,(snd,hgt) in enumerate(zip(all_soundings, heights)):
ax.plot(hgt['RH50']['z'], hgt['Heff']['z_top'], '.')
ax.plot([0,3000], [0, 3000], 'k')
ax.set_xlabel('z_i using RH 50% (m)')
ax.set_ylabel('z_i using Heffter (top) (m)')
ax.set_ylim(0,3000)
ax.set_xlim(0,3000)
# %%
# fig, ax = plt.subplots()
# ax.set_title("RH50 vs Chris' RH")
# for i,(snd,hgt) in enumerate(zip(all_soundings, heights)):
# ax.plot(hgt['RH50']['z'], hgt['RHCB']['z'], '.')
# ax.plot([0,3000], [0, 3000], 'k')
# ax.set_xlabel('z_i using RH 50% (m)')
# ax.set_ylabel('z_i using Chris\' fancy RH (m)')
# ax.set_ylim(0,3000)
# ax.set_xlim(0,3000)
# %%
fig, ax = plt.subplots()
ax.set_title("RH Chris vs Heffter bottom")
for i,(snd,hgt) in enumerate(zip(all_soundings, heights)):
ax.plot(hgt['RHCB']['z'], hgt['Heff']['z_bot'], '.')
ax.plot([0,3000], [0, 3000], 'k')
ax.set_xlabel('z_i using Chris\' fancy RH (m)')
ax.set_ylabel('z_i using Heffter (bottom) (m)')
ax.set_ylim(0,3000)
ax.set_xlim(0,3000)
# %%
fig, ax = plt.subplots()
ax.set_title("all measures along lon")
ax.plot(lon_p, zi_RHCB, '-', marker='o', ms=5, label='Chris \' fancy RH, ({})'.format(sum(~np.isnan(zi_RHCB))))
ax.plot(lon_p, zi_RH50, '-', marker='o', ms=5, label='RH below 50%, ({})'.format(sum(~np.isnan(zi_RH50))))
ax.plot(lon_p, zi_Heff_bot, '-', marker='o', ms=5, label='Heffter (bottom), ({})'.format(sum(~np.isnan(zi_Heff_bot))))
ax.plot(lon_p, zi_Heff_top, '-', marker='o', ms=5, label='Heffer (top), ({})'.format(sum(~np.isnan(zi_Heff_top))))
ax.set_xlabel('lon-prime coordinate (deg E)')
ax.set_ylabel('inversion height estimate (m)')
ax.legend()
# %%
fig, (ax, ax2) = plt.subplots(ncols=2, figsize=(8,4))
ax.set_title("PBL depth vs decoupling")
ax.plot(alpha_thetal[gv_i], zi_Heff_bot[gv_i], '.', c='b', label='GV soundings (q_t, theta_l)')
ax.plot(alpha_thetal[~gv_i], zi_Heff_bot[~gv_i], '.', c='r', label='dropsondes (q_v , theta, only)')
ax.legend()
ax.set_xlabel('α$_ϴ$')
ax.set_ylabel('Heffter inversion base (m)')
ax.grid('on')
ax2.set_title('decoupling vs longitude')
ax2.plot(lon_p[gv_i], alpha_thetal[gv_i], '.', c='b', label='GV soundings')
ax2.plot(lon_p[~gv_i], alpha_thetal[~gv_i], '.', c='r', label='dropsondes (q_v only)')
ax2.set_xlabel('lon-prime (deg)')
ax2.set_ylabel('α$_ϴ$')
ax2.legend()
ax2.grid('on')
#ϴα
fig.tight_layout()
fig.savefig('/home/disk/p/jkcm/plots/cset_lagrangian/dec_Betts_vs_zi.png')
# %%
fig, (ax, ax2) = plt.subplots(ncols=2, figsize=(8,4))
ax.set_title("decoupling vs PBL depth")
ax.plot(d_qt[gv_i], zi_Heff_bot[gv_i], '.', c='b', label='GV soundings (q_t, theta_l)')
ax.plot(d_qt[~gv_i], zi_Heff_bot[~gv_i], '.', c='r', label='dropsondes (q_v , theta, only)')
ax.legend()
ax.set_xlabel('d_q (g/kg)')
ax.set_ylabel('Heffter inversion base (m)')
ax.grid('on')
ax2.set_title('decoupling vs longitude')
ax2.plot(lon_p[gv_i], d_qt[gv_i], '.', c='b', label='GV soundings')
ax2.plot(lon_p[~gv_i], d_qt[~gv_i], '.', c='r', label='dropsondes (q_v only)')
ax2.set_xlabel('lon-prime (deg)')
ax2.set_ylabel('d_q (g/kg)')
ax2.axhline(0.5, ls='--', label='Jones et al decoupling threshold')
ax2.legend()
ax2.grid('on')
fig.tight_layout()
fig.savefig('/home/disk/p/jkcm/plots/cset_lagrangian/dec_vs_zi.png')
# %%
fig, ax = plt.subplots()
ax.plot(d_qt[gv_i], d_theta_l[gv_i], '.', c='b', label='GV soundings (q_t, theta_l)')
ax.plot(d_qt[~gv_i], d_theta_l[~gv_i], '.', c='r', label='dropsondes (q_v , theta, only)')
lons = [int(i) for i in lon_p]
ax.set_title('theta_l decoupling vs qt decoupling')
ax.set_xlabel('d_q')
ax.set_ylabel('d_theta')
ax.axvline(0.5, ls='--', label='Jones et al decoupling threshold')
ax.axhline(0.5, ls='--')
ax.legend()
# %%
# fig, ax = plt.subplots()
# ax.set_title("decoupling along lon")
# ax.plot(lon_p, d_theta_e, '-', marker='o', ms=5, label='Theta_e')
# ax.plot(lon_p, d_theta_l, '-', marker='o', ms=5, label='Theta_l')
#
## ax.plot(lon_p, zi_RH50, '-', marker='o', ms=5, label='RH below 50%, ({})'.format(sum(~np.isnan(zi_RH50))))
#
## ax.plot(lon_p, zi_Heff_bot, '-', marker='o', ms=5, label='Heffter (bottom), ({})'.format(sum(~np.isnan(zi_Heff_bot))))
## ax.plot(lon_p, zi_Heff_top, '-', marker='o', ms=5, label='Heffer (top), ({})'.format(sum(~np.isnan(zi_Heff_top))))
# ax.set_xlabel('lon-prime coordinate (deg E)')
# ax.set_ylabel('decoupling estimate (C)')
# ax.legend()
# %%
fig, (ax, ax2, ax3) = plt.subplots(ncols=3)
ax.plot(d_qt[gv_i], alpha_qt[gv_i], '.', c='b', label='GV soundings (q_t, theta_l)')
ax.plot(d_qt[~gv_i], alpha_qt[~gv_i], '.', c='r', label='dropsondes (q_v , theta, only)')
ax.set_title('alpha_qt vs qt decoupling')
ax.set_xlabel('d_q')
ax.set_ylabel('alpha_qt')
ax.axvline(0.5, ls='--', label='Jones et al decoupling threshold')
ax.grid('on')
ax.legend()
ax2.plot(d_theta_l[gv_i], alpha_thetal[gv_i], '.', c='b', label='GV soundings (q_t, theta_l)')
ax2.plot(d_theta_l[~gv_i], alpha_thetal[~gv_i], '.', c='r', label='dropsondes (q_v , theta, only)')
ax2.set_title('alpha_thetal vs theta_l decoupling')
ax2.set_xlabel('d_thetal')
ax2.set_ylabel('alpha_theta_l')
ax2.axvline(0.5, ls='--', label='Jones et al decoupling threshold')
ax2.grid('on')
ax2.legend()
ax3.plot(alpha_qt[gv_i], alpha_thetal[gv_i], '.', c='b', label='GV soundings (q_t, theta_l)')
ax3.plot(alpha_qt[~gv_i], alpha_thetal[~gv_i], '.', c='r', label='dropsondes (q_v , theta, only)')
ax3.set_title('alpha_thetal vs alpha_qt')
ax3.set_xlabel('alpha_qt')
ax3.set_ylabel('alpha_theta_l')
ax3.plot([0,1],[0,1], c='k')
ax3.grid('on')
ax3.set_xlim([0,1])
ax3.set_ylim([0,1])
ax3.legend()
# label_points(d_qt, d_theta_l, x, ax)
# %%
fig, ax = plt.subplots()
ax.set_title('theta decoupling vs depth')
ax.plot(zi_Heff_bot[gv_i], alpha_thetal[gv_i], '.', c='b', label='GV soundings')
ax.plot(zi_Heff_bot[~gv_i], alpha_thetal[~gv_i], '.', c='r', label='dropsondes (q_v only)')
ax.set_xlabel('Heffter inversion height (m)')
ax.set_ylabel('alpha_thetal')
ax.grid('on')
ax.legend()
# %%
fig, ax = plt.subplots()
ax.set_title('decoupling vs longitude')
ax.plot(lon_p[gv_i], d_qt[gv_i], '.', c='b', label='GV soundings')
ax.plot(lon_p[~gv_i], d_qt[~gv_i], '.', c='r', label='dropsondes (q_v only)')
ax.set_xlabel('lon-prime (deg)')
ax.set_ylabel('d_q (g/kg)')
ax.axhline(0.5, ls='--', label='Jones et al decoupling threshold')
ax.legend()
# label_points(lon_p, d_qt, x, ax)
# %%
i = 154
# trouble cases: 100/154
# 77: deep sounding, true inversion, but some cu in BL
# 12: is heff getting the inversion wrong?
snd = all_soundings[i]
dec = decouplings[i]
hgt = heights[i]
fig, [ax, ax4, ax2, ax5, ax3] = plt.subplots(ncols=5)
ax.plot(snd['RHUM'], snd['GGALT'])
ax.axhline(hgt['Heff']['z_bot'], label='Heff bot')
ax.axhline(hgt['Heff']['z_top'], ls='--', label='Heff top')
ax.axhline(hgt['RHCB']['z'], c='r', label='RHCB')
ax.legend()
ax.set_xlabel('rel hum')
ax2.plot(snd['THETAL'], snd['GGALT'], label='liq_theta')
ax2.plot(snd['THETA'], snd['GGALT'], label='theta')
ax2.legend()
ax2.set_xlabel('temp')
ax3.plot(snd['QL']+snd['QV'], snd['GGALT'], label='qt')
ax3.plot(snd['QL']*10, snd['GGALT'], label='ql x 10')
ax3.plot(snd['QV'], snd['GGALT'], label='qv')
ax3.legend()
ax3.set_xlabel('q')
dt = snd['GGALT'][1:] - snd['GGALT'][:-1]
drhdt = (snd['RHUM'][1:] - snd['RHUM'][:-1])/dt
ax4.plot(drhdt, snd['GGALT'][1:])
dthetadt = (snd['THETA'][1:] - snd['THETA'][:-1])/dt
ax5.plot(dthetadt, snd['GGALT'][1:])
ax5.axvline(0.005, c='r')
ax
    # ax3.plot(snd[])
the-stack_106_22971
import os
import uuid
import dj_database_url
DEBUG = bool(os.environ.get('DEBUG', False))
TEST = bool(os.environ.get('TEST', False))
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
PROJECT_DIR = os.path.dirname(__file__)
DEFAULT_DATABASE_URL = "sqlite:///%s" % os.path.join(PROJECT_DIR, 'db.sqlite3')
if TEST:
# Need to disable rate limiting for test purposes
if not bool(os.environ.get('TRAVIS', False)):
DEFAULT_DATABASE_URL = 'sqlite://:memory:'
RATELIMIT_ENABLE = False
# Change default address if env-var is set
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', 'webmaster@localhost')
SERVER_EMAIL = DEFAULT_FROM_EMAIL
DATABASES = {'default': dj_database_url.config(default=DEFAULT_DATABASE_URL)}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, '../media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, '../static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
# STATICFILES_DIRS = (os.path.join(PROJECT_DIR, 'static'),)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', str(uuid.uuid4()))
# Logins URLs
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/logout/'
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'cellcounter.statistics.middleware.StatsSessionMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'ratelimit.middleware.RatelimitMiddleware',
)
# HTTPS_SUPPORT = True
HTTPS_SUPPORT = False
SECURE_REQUIRED_PATHS = (
# '/admin/',
# '/count/',
# '/login/',
# '/accounts/',
)
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS', '').split(',')
ROOT_URLCONF = 'cellcounter.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'cellcounter.wsgi.application'
# Template settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'colorful',
'rest_framework',
'compressor',
'cellcounter.main',
'cellcounter.cc_kapi',
'cellcounter.accounts',
'cellcounter.statistics'
)
CACHES = {'default': {}}
if DEBUG or TEST:
CACHES['default']['BACKEND'] = 'django.core.cache.backends.locmem.LocMemCache'
else:
CACHES['default']['BACKEND'] = 'django.core.cache.backends.memcached.PyLibMCCache'
CACHES['default']['LOCATION'] = os.environ.get('MEMCACHED_LOCATION')
RATELIMIT_VIEW = 'cellcounter.accounts.views.rate_limited'
# Logging config
if 'ENABLE_DJANGO_LOGGING' in os.environ:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(funcName)s %(lineno)d %(message)s'
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
},
'logfile': {
'class': 'logging.handlers.WatchedFileHandler',
'filename': os.environ.get('DJANGO_LOG_PATH'),
'formatter': 'verbose'
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'propagate': True,
},
'cellcounter': {
'handlers': ['mail_admins', 'console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'propagate': False
},
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
)
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Store session cookies for 1 week only
SESSION_COOKIE_AGE = 604800
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
|
the-stack_106_22974 | from collections import Counter
import numpy as np
from scipy.signal import find_peaks, peak_widths
from sklearn.cluster import dbscan
from scipy.spatial.distance import euclidean
def find_anchors(pos, min_count=3, min_dis=20000, wlen=800000, res=10000):
    """Call peaks on the occurrence counts of binned positions and return a
    set of merged (summit, left_bound, right_bound) anchor intervals."""
min_dis = max(min_dis//res, 1)
wlen = min(wlen//res, 20)
count = Counter(pos)
refidx = range(min(count), max(count)+1)
signal = np.r_[[count[i] for i in refidx]]
summits = find_peaks(signal, height=min_count, distance=min_dis)[0]
sorted_summits = [(signal[i],i) for i in summits]
sorted_summits.sort(reverse=True) # sort by peak count
peaks = set()
records = {}
for _, i in sorted_summits:
tmp = peak_widths(signal, [i], rel_height=1, wlen=wlen)[2:4]
li, ri = int(np.round(tmp[0][0])), int(np.round(tmp[1][0]))
lb = refidx[li]
rb = refidx[ri]
if not len(peaks):
peaks.add((refidx[i], lb, rb))
for b in range(lb, rb+1):
records[b] = (refidx[i], lb, rb)
else:
for b in range(lb, rb+1):
if b in records:
# merge anchors
m_lb = min(lb, records[b][1])
m_rb = max(rb, records[b][2])
summit = records[b][0] # always the highest summit
peaks.remove(records[b])
break
else: # loop terminates normally
m_lb, m_rb, summit = lb, rb, refidx[i]
peaks.add((summit, m_lb, m_rb))
for b in range(m_lb, m_rb+1):
records[b] = (summit, m_lb, m_rb)
return peaks
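# Illustrative usage sketch (not part of the original module): the positions
# below are invented purely to show how find_anchors can be called. The result
# is a set of (summit, left_bound, right_bound) bin tuples.
def _demo_find_anchors():
    toy_positions = [150, 150, 150, 151, 400, 400, 400, 401, 401]
    anchors = find_anchors(toy_positions, min_count=3, min_dis=20000,
                           wlen=800000, res=10000)
    for summit, left, right in anchors:
        print('anchor summit=%d span=[%d, %d]' % (summit, left, right))
    return anchors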
def _cluster_core(sort_list, r, visited, final_list):
    """Group scored pixels with DBSCAN, grow each cluster greedily around its
    highest-scoring member, and append (pixel, centroid, radius) records."""
pos = np.r_[[i[1] for i in sort_list]]
if len(pos) >= 2:
_, labels = dbscan(pos, eps=r, min_samples=2)
pool = set()
for i, p in enumerate(sort_list):
if p[1] in pool:
continue
c = labels[i]
if c==-1:
continue
sub = pos[labels==c]
cen = p[1]
rad = r
Local = [p[1]]
ini = -1
while len(sub):
out = []
for q in sub:
if tuple(q) in pool:
continue
tmp = euclidean(q, cen)
if tmp<=rad:
Local.append(tuple(q))
else:
out.append(tuple(q))
if len(out)==ini:
break
ini = len(out)
tmp = np.r_[Local]
# assign centroid to a certain pixel
cen = tuple(tmp.mean(axis=0).round().astype(int))
                rad = int(np.round(max([euclidean(cen, q) for q in Local]))) + r
sub = np.r_[out]
for q in Local:
pool.add(q)
final_list.append((p[1], cen, rad))
visited.update(pool)
def local_clustering(Donuts, res, min_count=3, r=20000, sumq=1):
    """Cluster scored pixels (Donuts maps (i, j) bins to scores), first inside
    anchor pairs and then outside them; return (pixel, centroid, radius) records."""
final_list = []
x = np.r_[[i[0] for i in Donuts]]
y = np.r_[[i[1] for i in Donuts]]
if x.size == 0:
return final_list
x_anchors = find_anchors(x, min_count=min_count, min_dis=r, res=res)
y_anchors = find_anchors(y, min_count=min_count, min_dis=r, res=res)
r = max(r//res, 1)
visited = set()
lookup = set(zip(x, y))
for x_a in x_anchors:
for y_a in y_anchors:
sort_list = []
for i in range(x_a[1], x_a[2]+1):
for j in range(y_a[1], y_a[2]+1):
if (i, j) in lookup:
sort_list.append((Donuts[(i,j)], (i,j)))
sort_list.sort(reverse=True)
_cluster_core(sort_list, r, visited, final_list)
sort_list = [] # out of anchor
for i, j in zip(x, y):
if (i,j) in visited:
continue
sort_list.append((Donuts[(i,j)], (i,j)))
sort_list.sort(reverse=True)
_cluster_core(sort_list, r, visited, final_list)
x_summits = set([i[0] for i in x_anchors])
y_summits = set([i[0] for i in y_anchors])
for i, j in zip(x, y):
if (i,j) in visited:
continue
if (i in x_summits) or (j in y_summits):
final_list.append(((i,j), (i,j), 0))
    return final_list
|
the-stack_106_22976 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Solve the 15 puzzle.
"""
def main():
"""
    Quick functional check.
"""
pattern = [[1, 2, 6, 3], [4, 5, 0, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
puzzle = Puzzle15(pattern)
print(puzzle.start)
print(puzzle.goal)
result = puzzle.solve()
if result == -2:
print("can't solve")
elif result >= 0:
print("resolved", puzzle.step)
else:
print("not resolved(limit over)", puzzle.step)
class Puzzle15:
"""
    Solver for the 15 puzzle.
"""
length = 4
width = 4
goal = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
step = -1
max_step = 15
def __init__(self, start):
self.start = [[0 for i in range(self.length)] for j in range(self.width)]
for i in range(self.length):
for j in range(self.width):
self.start[j][i] = start[j][i]
self.start_patterns = [self.start]
self.goal_patterns = [self.goal]
def solve(self):
"""
        Solve the puzzle by searching alternately from the start and goal sides.
"""
tmp_step = 0
        # A solution exists
if self.can_solve(self.start):
if self.is_solved():
self.step = tmp_step
else:
while tmp_step < self.max_step:
                    # Expand the next candidates from the start side
self.start_patterns = self.search_next_patterns(self.start_patterns)
tmp_step += 1
if self.is_solved():
self.step = tmp_step
break
                    # Expand the next candidates from the goal side
self.goal_patterns = self.search_next_patterns(self.goal_patterns)
tmp_step += 1
if self.is_solved():
self.step = tmp_step
break
        # No solution exists
else:
self.step = -2
return self.step
def can_solve(self, in_pattern):
"""
        Check whether a solution exists.
"""
pattern = [flatten for inner in in_pattern for flatten in inner]
        # Impossible pattern (not a permutation of 0..15)
if set(pattern) != set(range(self.length * self.width)):
return False
        # Manhattan distance of the blank from its goal position
index = pattern.index(0)
length = index % self.length
width = index // self.width
blank_distance = length + width
        # Parity of the permutation
        parity = 0
        for i in range(1, self.length * self.width):
index = pattern.index(i)
if i != index:
pattern[i], pattern[index] = pattern[index], pattern[i]
parity += 1
        # Solvable when the permutation parity equals the parity of the blank's distance
if parity % 2 == blank_distance % 2:
return True
return False
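    # Worked example (illustrative, not part of the original code): for the
    # start pattern used in main(), [[1, 2, 6, 3], [4, 5, 0, 7], ...], the
    # blank sits at flat index 6, i.e. column 2 and row 1, so its distance
    # from the goal corner is 2 + 1 = 3 (odd). The swap loop above needs three
    # transpositions (for tiles 1, 2 and 6) to sort the permutation, so the
    # parity is also odd, both parities match, and can_solve() returns True.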
def is_solved(self):
"""
        Check whether a solution has been found (the two search frontiers meet).
"""
for i in self.start_patterns:
if i in self.goal_patterns:
return True
return False
def search_blank(self, pattern):
"""
        Find the position of the blank panel.
"""
ret_i, ret_j = -1, -1
for i in range(self.length):
for j in range(self.width):
if pattern[j][i] == 0:
ret_i, ret_j = i, j
break
return ret_i, ret_j
def search_next_patterns(self, patterns):
"""
        Generate the next candidate patterns by moving the blank.
"""
next_patterns = []
for i in patterns:
p_x, p_y = self.search_blank(i)
            # Move up
if p_y > 0:
top = [j[:] for j in i]
top[p_y][p_x], top[p_y - 1][p_x] = top[p_y - 1][p_x], top[p_y][p_x]
next_patterns += [top]
            # Move down
if p_y < self.width - 1:
bottom = [j[:] for j in i]
bottom[p_y][p_x], bottom[p_y + 1][p_x] = bottom[p_y + 1][p_x], bottom[p_y][p_x]
next_patterns += [bottom]
            # Move left
if p_x > 0:
left = [j[:] for j in i]
left[p_y][p_x], left[p_y][p_x - 1] = left[p_y][p_x - 1], left[p_y][p_x]
next_patterns += [left]
            # Move right
if p_x < self.length - 1:
right = [j[:] for j in i]
right[p_y][p_x], right[p_y][p_x + 1] = right[p_y][p_x + 1], right[p_y][p_x]
next_patterns += [right]
return [[i[:] for i in j] for j in next_patterns]
if __name__ == '__main__':
main()
|
the-stack_106_22978 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_kwargs,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
qualities,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
url_or_none,
urlencode_postdata,
SubException
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)[0-9A-Za-z-_]{10,}'
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _download_webpage_handle(self, *args, **kwargs):
query = kwargs.get('query', {}).copy()
query['disable_polymer'] = 'true'
kwargs['query'] = query
return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
*args, **compat_kwargs(kwargs))
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for entry in self._process_page(content_html):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for video_id, video_title in self.extract_videos_from_page(content):
yield self.url_result(video_id, 'Youtube', video_id, video_title)
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
if video_title:
video_title = video_title.strip()
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for playlist_id in orderedSet(re.findall(
r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
content)):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage, fatal=False)
return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
(?:www\.)?invidio\.us/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
_SUBTITLE_FORMATS = ('ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'duration': 180,
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
'creator': 'Icona Pop',
'track': 'I Love It (feat. Charli XCX)',
'artist': 'Icona Pop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)',
'alt_title': 'Tunnel Vision',
'description': 'md5:07dab3356cde4199048e4c7cd93471e1',
'duration': 419,
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
'creator': 'Justin Timberlake',
'track': 'Tunnel Vision',
'artist': 'Justin Timberlake',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:bec2185232c05479482cb5a9b82719bf',
'duration': 242,
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
'creator': 'Taylor Swift',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'duration': 246,
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
'upload_date': '20110629',
'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/rg3/youtube-dl/issues/1892,
# https://github.com/rg3/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*c\s*&&\s*d\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*d\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*d\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that may affect
# JSON extraction with regex, e.g. when '};' is contained the second
            # regex won't capture the whole JSON. For now, work around this by
            # trying the more specific regex first; proper quoted string handling
            # should eventually replace this workaround (see
# https://github.com/rg3/youtube-dl/issues/7468,
# https://github.com/rg3/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
def make_captions(sub_url, sub_langs):
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
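    # Illustrative note (not part of the original source): for a short URL such
    # as 'https://youtu.be/BaW_jenozKc', the pattern above matches and group 2
    # yields the 11-character video ID 'BaW_jenozKc'.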
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
@staticmethod
def _extract_chapters(description, duration):
if not description:
return None
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
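        # Collect every distinct DASH manifest URL encountered (webpage config,
        # player_response, get_video_info) so the itags of all manifests can be merged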
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
player_response = {}
# Get video info
embed_webpage = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
            # this page can be viewed without logging in to YouTube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
sts = None
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/rg3/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
sts = ytplayer_config.get('sts')
if not player_response:
pl_response = str_or_none(args.get('player_response'))
if pl_response:
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
player_response = pl_response
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
# We also try looking in get_video_info since it may contain different dashmpd
# URL that points to a DASH manifest with possibly different itag set (some itags
# are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
# manifest pointed by get_video_info's dashmpd).
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el in ('info', 'embedded', 'detailpage', 'vevo', ''):
query = {
'video_id': video_id,
'ps': 'default',
'eurl': '',
'gl': 'US',
'hl': 'en',
}
if el:
query['el'] = el
if sts:
query['sts'] = sts
video_info_webpage = self._download_webpage(
'%s://www.youtube.com/get_video_info' % proto,
video_id, note=False,
errnote='unable to download video info webpage',
fatal=False, query=query)
if not video_info_webpage:
continue
get_video_info = compat_parse_qs(video_info_webpage)
if not player_response:
pl_response = get_video_info.get('player_response', [None])[0]
if isinstance(pl_response, dict):
player_response = pl_response
add_dash_mpd_pr(player_response)
add_dash_mpd(get_video_info)
if view_count is None:
view_count = extract_view_count(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
                # Work around this by preferring the first successful video_info that
                # contains the token, if no such video_info has been found yet.
if 'token' not in video_info:
video_info = get_video_info
break
def extract_unavailable_message():
return self._html_search_regex(
r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
video_webpage, 'unavailable message', default=None)
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
if video_info.get('license_info'):
raise ExtractorError('This video is DRM protected.', expected=True)
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
# title
if 'title' in video_info:
video_title = video_info['title'][0]
        elif video_details.get('title'):
video_title = video_details['title']
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/rg3/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/rg3/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
q = qualities(['small', 'medium', 'hd720'])
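            # Map YouTube's textual quality names onto an ordered preference scale
            # used later when sorting formats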
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list)
if streaming_formats:
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
'quality': q(quality),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
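                    # Encrypted signature: it has to be decrypted with the routine
                    # extracted from the JS/SWF player before it is appended to the URL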
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
[r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
r'(?:www|player(?:_ias)?)-([^/]+)(?:/[a-z]{2,3}_[A-Z]{2})?/base\.js'],
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0]
more_fields = {
'filesize': filesize,
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': url_data.get('quality_label', [None])[0] or quality,
'quality': q(quality),
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str)) or
url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = clean_html(video_info.get('reason', [None])[0])
if not error_message:
error_message = extract_unavailable_message()
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
self._downloader.report_warning('unable to extract uploader nickname')
channel_id = self._html_search_meta(
'channelId', video_webpage, 'channel id')
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
        if not video_subtitles or not automatic_captions:
            # Bail out early when either manual subtitles or automatic captions are missing
            self.to_screen('%s: skipping, no captions found' % video_id)
            raise SubException('No captions')
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
chapters = self._extract_chapters(description_original, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
}
class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube\.com|
invidio\.us
)
/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
'skip': 'This playlist is private',
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 485,
'info_dict': {
'title': '2017 華語最新單曲 (2/24更新)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
},
'playlist_mincount': 21,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'license': 'Standard YouTube License',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}, {
'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
ids = []
last_id = playlist_id[-11:]
for n in itertools.count(1):
url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
new_ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
            # Fetch new pages until all the videos are repeated; it seems that
            # there are always 51 unique videos.
new_ids = [_id for _id in new_ids if _id not in ids]
if not new_ids:
break
ids.extend(new_ids)
last_id = ids[-1]
url_results = self._ids_to_results(ids)
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
# the yt-alert-message now has tabindex attribute (see https://github.com/rg3/youtube-dl/issues/11604)
for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
if mobj:
reason = mobj.group('reason')
message = 'This playlist %s' % reason
if 'private' in reason:
message += ', use --username or --netrc to access it'
message += '.'
raise ExtractorError(message, expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title', default=None)
_UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
uploader = self._search_regex(
r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
page, 'uploader', default=None)
mobj = re.search(
r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
page)
if mobj:
uploader_id = mobj.group('uploader_id')
uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
else:
uploader_id = uploader_url = None
has_videos = True
if not playlist_title:
try:
# Some playlist URLs don't actually serve a playlist (e.g.
# https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
next(self._entries(page, playlist_id))
except StopIteration:
has_videos = False
playlist = self.playlist_result(
self._entries(page, playlist_id), playlist_id, playlist_title)
playlist.update({
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
})
return has_videos, playlist
def _check_download_just_video(self, url, playlist_id):
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = query_dict.get('v', [None])[0] or self._search_regex(
r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
'video id', default=None)
if video_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
return video_id, None
return None, None
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
video_id, video = self._check_download_just_video(url, playlist_id)
if video:
return video
if playlist_id.startswith(('RD', 'UL', 'PU')):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
has_videos, playlist = self._extract_playlist(playlist_id)
if has_videos or not video_id:
return playlist
# Some playlist URLs don't actually serve a playlist (see
# https://github.com/rg3/youtube-dl/issues/10537).
# Fallback to plain video extraction if there is a video id
# along with playlist id.
return self.url_result(video_id, 'Youtube', video_id=video_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
},
}, {
'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
else super(YoutubeChannelIE, cls).suitable(url))
def _build_template_url(self, url, channel_id):
return self._TEMPLATE_URL % channel_id
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._build_template_url(url, channel_id)
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Workaround by extracting as a playlist if managed to obtain channel playlist URL
# otherwise fallback on channel by page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_url = self._html_search_meta(
('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
channel_page, 'channel url', default=None)
if channel_url:
channel_playlist_id = self._search_regex(
r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
channel_url, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
try:
next(self._entries(channel_page, channel_id))
except StopIteration:
alert_message = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
channel_page, 'alert', default=None, group='alert')
if alert_message:
raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
'title': 'Uploads from The Linux Foundation',
}
}, {
# Only available via https://www.youtube.com/c/12minuteathlete/videos
# but not https://www.youtube.com/user/12minuteathlete/videos
'url': 'https://www.youtube.com/c/12minuteathlete/videos',
'playlist_mincount': 249,
'info_dict': {
'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
'title': 'Uploads from 12 Minute Athlete',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/gametrailers',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/gametrailers',
'only_matching': True,
}, {
# This channel is not available, geo restricted to JP
'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
# Don't return True if the url can be extracted with other youtube
        # extractor; the regex is too permissive and it would match.
other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_yt_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _build_template_url(self, url, channel_id):
mobj = re.match(self._VALID_URL, url)
return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com user/channel playlists'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
IE_NAME = 'youtube:playlists'
_TESTS = [{
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'Thirst for Science',
},
}, {
# with "Load more" button
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 70,
'info_dict': {
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
}, {
'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
'playlist_mincount': 17,
'info_dict': {
'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
'title': 'Chem Player',
},
}]
class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
url_query = {
'search_query': query.encode('utf-8'),
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
for pagenum in itertools.count(1):
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page',
query={'spf': 'navigate'})
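            # With spf=navigate the results endpoint answers with JSON; the rendered
            # HTML used for scraping sits in the second element's body.content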
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = list(self._process_page(html_content))
videos += new_videos
if not new_videos or len(videos) > limit:
break
next_link = self._html_search_regex(
r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
html_content, 'next link', default=None)
if next_link is None:
break
result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
return super(YoutubeShowIE, self)._real_extract(
'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# 'recommended' feed has infinite 'load more' and each new portion spins
            # the same videos in (sometimes) slightly different order, so we'll check
            # for uniqueness and break when a portion has no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}]
def _real_extract(self, url):
_, video = self._check_download_just_video(url, 'WL')
if video:
return video
_, playlist = self._extract_playlist('WL')
return playlist
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
the-stack_106_22979 | from flask import session
from flask_login import current_user
from sqlalchemy import or_
from app.models import Answer, Sentence
def _get_user():
return current_user if current_user.is_authenticated else None
def correct_answers(id):
attempt = session.get('attempt')
# Collection of correct answers previously given, returning just the `text` column
correct = Answer.query.join(Sentence).with_entities(Answer.text).filter(
        # Build the OR in SQL; a plain Python `or` would be evaluated eagerly here
        or_(Answer.is_correct, Answer.is_correct.is_(None)),
Sentence.quiz_id == id,
Answer.user == _get_user(),
Answer.attempt == attempt,
).all()
# Convert it to a list, and the list to a set
correct = [r for r, in correct]
return correct
def template_setup(question, id):
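    # Returns the percentage of sentences answered correctly so far and whether the
    # current question has not yet been answered correctly (quiz_base uses this for the hint)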
correct = correct_answers(id)
# The percentage of questions that have been answered correctly
progress = float(len(correct)) / Sentence.query.filter_by(quiz_id=id).count() * 100
# True if the set (list of unique) latin translations is not in the set of correct answers
# Used in quiz_base for hint
unknown = not(Answer.query.join(Sentence).filter(
Sentence.quiz_id == id,
Answer.sentence == question,
Answer.user == _get_user(),
Answer.is_correct
).count())
return progress, unknown
def calculate_score(quiz_id, user):
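    # Maximum score is two points per sentence; every incorrect answer costs one
    # point and the final score never drops below 3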
score = Sentence.query.filter_by(quiz_id=quiz_id).count() * 2
    neg_score = Answer.query.join(Sentence) \
        .filter(Answer.is_correct.is_(False), Answer.user == user, Sentence.quiz_id == quiz_id).count()
final_score = score - neg_score
if final_score < 3:
final_score = 3
return final_score
|
the-stack_106_22982 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement Learning based Locally Interpretabel Modeling (RL-LIM) tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
from sklearn import linear_model
import tensorflow.compat.v1 as tf
from rllim import rllim
class RllimTest(tf.test.TestCase):
"""RL-LIM test class."""
def setUp(self):
"""Sets parameters and datasets."""
super(RllimTest, self).setUp()
self.temp_dir = tempfile.mkdtemp()
# Network parameters
self.parameters = dict()
self.parameters['hidden_dim'] = 5
self.parameters['iterations'] = 10
self.parameters['num_layers'] = 3
self.parameters['batch_size'] = 10
self.parameters['batch_size_inner'] = 2
self.parameters['lambda'] = 1.0
# Train / Valid / Test set
self.x_train = np.random.rand(100, 10)
self.y_train_hat = np.random.rand(100, 1)
self.x_probe = np.random.rand(40, 10)
self.y_probe_hat = np.random.rand(40, 1)
self.x_test = np.random.rand(200, 10)
# Others
self.checkpoint_file_name = self.temp_dir+'/model1.ckpt'
self.interp_model = linear_model.Ridge(alpha=1)
self.baseline_model = linear_model.Ridge(alpha=1)
self.baseline_model.fit(self.x_train, self.y_train_hat)
def tearDown(self):
super(RllimTest, self).tearDown()
shutil.rmtree(self.temp_dir)
def testRllimLocalExplanation(self):
"""Tests local explanation of RL-LIM."""
tf.reset_default_graph()
rllim_class = rllim.Rllim(
x_train=self.x_train, y_train=self.y_train_hat,
x_probe=self.x_probe, y_probe=self.y_probe_hat,
parameters=self.parameters,
interp_model=self.interp_model,
baseline_model=self.baseline_model,
checkpoint_file_name=self.checkpoint_file_name)
rllim_class.rllim_train()
_, test_coef = \
rllim_class.rllim_interpreter(
x_train=self.x_train, y_train=self.y_train_hat,
x_test=self.x_test, interp_model=self.interp_model)
self.assertAllEqual([200, 11], test_coef.shape)
def testRllimPrediction(self):
"""Tests local predictions of RL-LIM."""
tf.reset_default_graph()
rllim_class = rllim.Rllim(
x_train=self.x_train, y_train=self.y_train_hat,
x_probe=self.x_probe, y_probe=self.y_probe_hat,
parameters=self.parameters,
interp_model=self.interp_model,
baseline_model=self.baseline_model,
checkpoint_file_name=self.checkpoint_file_name)
rllim_class.rllim_train()
test_y_fit, _ = \
rllim_class.rllim_interpreter(
x_train=self.x_train, y_train=self.y_train_hat,
x_test=self.x_test, interp_model=self.interp_model)
self.assertAllEqual([200,], test_y_fit.shape)
if __name__ == '__main__':
tf.test.main()
|
the-stack_106_22983 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "learnergy"
copyright = "2020, Mateus Roder and Gustavo de Rosa"
author = "Mateus Roder and Gustavo de Rosa"
# The short X.Y version
version = "1.1.3"
# The full version, including alpha/beta/rc tags
release = "1.1.3"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "autoapi.extension"]
autoapi_dirs = ["../learnergy"]
autoapi_generate_api_docs = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"collapse_navigation": False,
"display_version": True,
"logo_only": True,
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "learnergy_doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"learnergy.tex",
"Learnergy Documentation",
"Mateus Roder and Gustavo de Rosa",
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "learnergy", "Learnergy Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"learnergy",
"Learnergy Documentation",
author,
"learnergy",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
autodoc_default_options = {"exclude-members": "__weakref__"}
autodoc_member_order = "bysource"
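# With this configuration the HTML docs would typically be built from the docs
# directory with something like the following command (an assumption about the
# repository layout, not a documented project command):
#
#   sphinx-build -b html . _build/html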
|
the-stack_106_22984 | """A module for consuming the Penn Libraries API"""
import requests
BASE_URL = "http://dla.library.upenn.edu/2.0.0/search"
def search(query):
"""Search Penn Libraries Franklin for documents
The maximum pagesize currently is 50.
"""
params = {
's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
}
return requests.get(BASE_URL, params=params, timeout=10).json()
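# Illustrative usage sketch (not part of the original module): it performs a live
# request against the Franklin API, and the exact shape of the returned JSON is an
# assumption that may need checking against the real service.
if __name__ == '__main__':
    results = search('python programming')
    print(type(results))
    print(results)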
|
the-stack_106_22985 | """
We use this validator to filter transparent ips, and give the ip resources an
initial score.
"""
import json
import requests
from json.decoder import JSONDecodeError
from scrapy.http import Request
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import (DNSLookupError, ConnectionRefusedError,
TimeoutError, TCPTimedOutError)
from ..redis_spiders import RedisSpider
from ..items import ProxyStatInc
class HttpbinValidator(RedisSpider):
name = 'vhttpbin'
custom_settings = {
'CONCURRENT_REQUESTS': 100,
'CONCURRENT_REQUESTS_PER_DOMAIN': 100,
'RETRY_ENABLED': False,
'ITEM_PIPELINES': {
'haipproxy.crawler.pipelines.ProxyStatPipeline': 200,
}
}
success_key = ''
def __init__(self):
super().__init__()
self.origin_ip = requests.get('http://httpbin.org/ip').json().get(
'origin')
def start_requests(self):
for proxy in self.redis_conn.scan_iter(match='*://*'):
proxy = proxy.decode()
if proxy.startswith('https'):
url = 'https://httpbin.org/ip'
elif proxy.startswith('http'):
url = 'http://httpbin.org/ip'
else:
self.logger.warning(f'Unknown proxy: {proxy}')
continue
req = Request(url,
meta={'proxy': proxy},
callback=self.parse,
errback=self.parse_error)
yield req
def parse(self, response):
proxy = response.meta.get('proxy')
seconds = int(response.meta.get('download_latency'))
success = 1
fail = ''
if self.is_transparent(response):
success = 0
fail = 'transparent'
else:
self.logger.info(f'good ip {proxy}')
yield ProxyStatInc(proxy=proxy,
success=success,
seconds=seconds,
fail=fail)
def parse_error(self, failure):
request = failure.request
proxy = request.meta.get('proxy')
self.logger.warning(f'proxy {proxy} has failed with:\n{repr(failure)}')
fail = 'unknown'
if failure.check(HttpError):
fail = 'HttpError'
# these exceptions come from HttpError spider middleware
# you can get the non-200 response
elif failure.check(DNSLookupError):
fail = 'DNSLookupError'
# this is the original request
elif failure.check(TimeoutError):
fail = 'TimeoutError'
elif failure.check(TCPTimedOutError):
fail = 'TCPTimedOutError'
elif failure.check(ConnectionRefusedError):
fail = 'ConnectionRefusedError'
yield ProxyStatInc(proxy=proxy, success=0, seconds=0, fail=fail)
def is_ok(self, response):
return self.success_key in response.text
def is_transparent(self, response):
"""filter transparent ip resources"""
if not response.body_as_unicode():
self.logger.error('no body')
return True
try:
ip = json.loads(response.body_as_unicode()).get('origin')
if self.origin_ip in ip:
self.logger.error('is transparent ip')
return True
except (AttributeError, JSONDecodeError):
self.logger.error('transparent ip AttributeError, JSONDecodeError')
return True
return False
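# A standalone sketch (an assumption, not part of the original project) of the same
# transparency check the spider performs: ask httpbin for the IP it sees through a
# proxy and compare it with the real origin IP. `proxy_url` is a hypothetical value
# such as 'http://1.2.3.4:8080'.
def check_proxy_is_transparent(proxy_url, timeout=10):
    origin_ip = requests.get('http://httpbin.org/ip', timeout=timeout).json().get('origin')
    seen_ip = requests.get('http://httpbin.org/ip',
                           proxies={'http': proxy_url, 'https': proxy_url},
                           timeout=timeout).json().get('origin', '')
    # A transparent proxy leaks the client's real IP to the target server.
    return origin_ip in seen_ip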
|
the-stack_106_22986 | '''
Created on 17 nov. 2019
@author: Juan Carlos Ruiloba
'''
from odoo import models, fields, api
class taller(models.Model):
_name = 'upocar.taller'
_rec_name = "nombre"
cif = fields.Char("CIF del taller", size=9, required=True)
nombre = fields.Char("Nombre del taller", size=64, required=True)
state_id = fields.Many2one("res.country.state", string='Provincia', help='Seleccionar una provincia/estado', ondelete='restrict')
country_id = fields.Many2one('res.country', string='Pais', help='Seleccionar un pais', ondelete='restrict')
city = fields.Char('Ciudad', help='Introducir la ciudad')
direccion = fields.Char("Direccion del taller", size=64, required=True)
hide = fields.Boolean(string='Hide', compute="_compute_hide")
cliente_ids = fields.Many2many("upocar.cliente", string="Cliente del taller")
reparacion_ids = fields.One2many("upocar.reparacion", "taller_id", string="Reparaciones del taller")
mecanico_ids = fields.One2many("upocar.mecanico", "taller_id", string="Mecanicos del taller")
linea_taller_ids = fields.One2many("upocar.linea_taller", "taller_id", string="Repuestos del taller")
pedido_ids = fields.One2many("upocar.pedido", "taller_id", string="Pedidos del taller")
@api.onchange('country_id')
def _onchange_country_id(self):
if self.country_id:
return {'domain': {'state_id': [('country_id', '=', self.country_id.id)]}}
else:
return {'domain': {'state_id': []}}
# Show Hide State selection based on Country
@api.depends('country_id')
def _compute_hide(self):
if self.country_id:
self.hide = False
else:
self.hide = True
|
the-stack_106_22987 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tests.pcluster.config.utils as utils
from pcluster.config.mappings import EFS
from tests.pcluster.config.defaults import DefaultCfnParams, DefaultDict
@pytest.mark.parametrize(
"cfn_params_dict, expected_section_dict",
[
({"EFSOptions": "NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE"}, DefaultDict["efs"].value),
({"EFSOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE"}, DefaultDict["efs"].value),
(
{"EFSOptions": "test,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE"},
{
"shared_dir": "test",
"efs_fs_id": None,
"performance_mode": "generalPurpose",
"efs_kms_key_id": None,
"provisioned_throughput": None,
"encrypted": False,
"throughput_mode": "bursting",
},
),
(
{"EFSOptions": "test,test,maxIO,test,1024,true,provisioned,NONE,NONE"},
{
"shared_dir": "test",
"efs_fs_id": "test",
"performance_mode": "maxIO",
"efs_kms_key_id": "test",
"provisioned_throughput": 1024,
"encrypted": True,
"throughput_mode": "provisioned",
},
),
],
)
def test_efs_section_from_cfn(mocker, cfn_params_dict, expected_section_dict):
utils.assert_section_from_cfn(mocker, EFS, cfn_params_dict, expected_section_dict)
@pytest.mark.parametrize(
"section_dict, expected_config_parser_dict, expected_message",
[
# default
({}, {"efs default": {}}, None),
# default values
(
{"performance_mode": "generalPurpose"},
{"efs default": {"performance_mode": "generalPurpose"}},
"No section.*",
),
# other values
({"performance_mode": "maxIO"}, {"efs default": {"performance_mode": "maxIO"}}, None),
({"encrypted": True}, {"efs default": {"encrypted": "true"}}, None),
],
)
def test_cluster_section_to_file(mocker, section_dict, expected_config_parser_dict, expected_message):
utils.assert_section_to_file(mocker, EFS, section_dict, expected_config_parser_dict, expected_message)
@pytest.mark.parametrize(
"param_key, param_value, expected_value, expected_message",
[
("shared_dir", None, None, None),
("shared_dir", "", None, None),
("shared_dir", "fake_value", "fake_value", None),
("shared_dir", "/test", "/test", None),
("shared_dir", "/test/test2", "/test/test2", None),
("shared_dir", "/t_ 1-2( ):&;<>t?*+|", "/t_ 1-2( ):&;<>t?*+|", None),
("shared_dir", "//test", None, "has an invalid value"),
("shared_dir", "./test", None, "has an invalid value"),
("shared_dir", ".\\test", None, "has an invalid value"),
("shared_dir", ".test", None, "has an invalid value"),
("shared_dir", "NONE", "NONE", None), # Note: NONE is considered as a valid path
("efs_fs_id", None, None, None),
("efs_fs_id", "", None, None),
("efs_fs_id", "wrong_value", None, "has an invalid value"),
("efs_fs_id", "fs-12345", None, "has an invalid value"),
("efs_fs_id", "fs-123456789", None, "has an invalid value"),
("efs_fs_id", "NONE", "NONE", None), # Note: NONE is considered valid
("efs_fs_id", "fs-12345678", "fs-12345678", None),
("efs_fs_id", "fs-12345678901234567", "fs-12345678901234567", None),
("performance_mode", None, "generalPurpose", None),
("performance_mode", "", "generalPurpose", None),
("performance_mode", "maxIO", "maxIO", None),
("performance_mode", "wrong_value", None, "Allowed values are"),
("performance_mode", "NONE", None, "Allowed values are"),
("efs_kms_key_id", None, None, None),
("efs_kms_key_id", "", None, None),
("efs_kms_key_id", "fake_value", "fake_value", None),
("efs_kms_key_id", "test", "test", None),
("efs_kms_key_id", "NONE", "NONE", None), # NONE is evaluated as a valid kms id
("provisioned_throughput", "0.1", 0.1, None),
("provisioned_throughput", "3", 3, None),
("provisioned_throughput", "1024.9", 1024.9, None),
("provisioned_throughput", "102000", None, "has an invalid value"),
("provisioned_throughput", "0.01", None, "has an invalid value"),
("provisioned_throughput", "1025", None, "has an invalid value"),
("provisioned_throughput", "wrong_value", None, "must be a Float"),
("encrypted", None, False, None),
("encrypted", "", False, None),
("encrypted", "NONE", None, "must be a Boolean"),
("encrypted", "true", True, None),
("encrypted", "false", False, None),
("throughput_mode", None, "bursting", None),
("throughput_mode", "", "bursting", None),
("throughput_mode", "provisioned", "provisioned", None),
("throughput_mode", "wrong_value", None, "Allowed values are"),
("throughput_mode", "NONE", None, "Allowed values are"),
],
)
def test_efs_param_from_file(mocker, param_key, param_value, expected_value, expected_message):
utils.assert_param_from_file(mocker, EFS, param_key, param_value, expected_value, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_cfn_params",
[
(DefaultDict["efs"].value, DefaultCfnParams["efs"].value),
({"shared_dir": "NONE"}, DefaultCfnParams["efs"].value),
({"shared_dir": "test"}, {"EFSOptions": "test,NONE,generalPurpose,NONE,NONE,false,bursting,Valid,Valid"}),
(
{
"shared_dir": "test",
"efs_fs_id": "test2",
"performance_mode": "test3",
"efs_kms_key_id": "test4",
"provisioned_throughput": 10,
"encrypted": True,
"throughput_mode": "test5",
},
{"EFSOptions": "test,test2,test3,test4,10,true,test5,Valid,Valid"},
),
(
{
"shared_dir": "test",
"efs_fs_id": None,
"performance_mode": "test1",
"efs_kms_key_id": "test2",
"provisioned_throughput": 1024,
"encrypted": False,
"throughput_mode": "test3",
},
{"EFSOptions": "test,NONE,test1,test2,1024,false,test3,Valid,Valid"},
),
],
)
def test_efs_section_to_cfn(mocker, section_dict, expected_cfn_params):
mocker.patch("pcluster.config.param_types.get_efs_mount_target_id", return_value="valid_mount_target_id")
mocker.patch(
"pcluster.config.pcluster_config.PclusterConfig.get_master_availability_zone", return_value="mocked_avail_zone"
)
utils.assert_section_to_cfn(mocker, EFS, section_dict, expected_cfn_params)
@pytest.mark.parametrize(
"settings_label, expected_cfn_params",
[
(
"test1",
utils.merge_dicts(
DefaultCfnParams["cluster"].value,
DefaultCfnParams["efs"].value,
{
"MasterSubnetId": "subnet-12345678",
"AvailabilityZone": "mocked_avail_zone",
"ComputeSubnetId": "subnet-23456789",
},
),
),
(
"test2",
utils.merge_dicts(
DefaultCfnParams["cluster"].value,
{
"MasterSubnetId": "subnet-12345678",
"AvailabilityZone": "mocked_avail_zone",
"ComputeSubnetId": "subnet-23456789",
"EFSOptions": "efs,NONE,generalPurpose,NONE,NONE,false,bursting,Valid,Valid",
},
),
),
(
"test3",
utils.merge_dicts(
DefaultCfnParams["cluster"].value,
{
"MasterSubnetId": "subnet-12345678",
"AvailabilityZone": "mocked_avail_zone",
"ComputeSubnetId": "subnet-23456789",
"EFSOptions": "efs,fs-12345678,maxIO,key1,1020.0,false,provisioned,Valid,Valid",
},
),
),
(
"test4",
utils.merge_dicts(
DefaultCfnParams["cluster"].value,
{
"MasterSubnetId": "subnet-12345678",
"AvailabilityZone": "mocked_avail_zone",
"ComputeSubnetId": "subnet-23456789",
"EFSOptions": "/efs,NONE,generalPurpose,NONE,NONE,true,bursting,Valid,Valid",
},
),
),
("test1,test2", SystemExit()),
],
)
def test_efs_from_file_to_cfn(mocker, pcluster_config_reader, settings_label, expected_cfn_params):
"""Unit tests for parsing EFS related options."""
mocker.patch(
"pcluster.config.param_types.get_efs_mount_target_id",
side_effect=lambda efs_fs_id, avail_zone: "master_mt" if avail_zone == "mocked_avail_zone" else None,
)
mocker.patch("pcluster.config.param_types.get_avail_zone", return_value="mocked_avail_zone")
utils.assert_section_params(mocker, pcluster_config_reader, settings_label, expected_cfn_params)
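# For reference, a config file section exercised by these tests would look roughly
# like the sketch below (an assumption reconstructed from the parameter names used
# above, not an excerpt from the project documentation):
#
#   [efs customfs]
#   shared_dir = efs
#   efs_fs_id = fs-12345678
#   performance_mode = maxIO
#   efs_kms_key_id = key1
#   provisioned_throughput = 1020
#   encrypted = false
#   throughput_mode = provisioned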
|
the-stack_106_22989 | import random
from . import signals
from django import forms
from .models import DeferredAction
from .main import LONG
class DeferredFormMixIn(object):
"""
This is a MixIn class, so that you can also build deferred forms
from already existing modified ModelForm classes.
If you build your form from scratch you want to use the ``DeferredForm``
class.
"""
token_format = LONG
def _gen_token(self, format=None, step=0):
"""
generates a unique (in terms of DeferredAction objects) token based
on the format tuple in the form of (alphabet, length).
"""
if format is None:
format = self.token_format
chars, length = format
token = ''.join([random.choice(chars) for i in range(length)])
try:
DeferredAction.objects.get(token=token)
except DeferredAction.DoesNotExist:
return token
if step > 9:
raise Exception("10 attempts to generate a unique token failed.")
return self._gen_token(format=format, step=step+1)
def save(self, user=None, **kwargs):
"""
replaces the ModelForm save method with our own to defer the action
by storing the data in the db.
Returns a unique token which is needed to confirm the action and
resume it.
"""
if not self.is_valid():
raise Exception("only call save() on a form after calling is_valid().")
form_class_name = "%s.%s" % (self.__class__.__module__,
self.__class__.__name__)
# we save the uncleaned data here, because form.full_clean() will
# alter the data in cleaned_data and a second run with cleaned_data as
# form input will fail for foreignkeys and manytomany fields.
# additionally, storing the original input is a bit safer, because
        # this is only data which was transferred over http, so we won't
# get any pickle errors here
data = {'form_class':form_class_name, 'form_input':self.data,
'token':self._gen_token(), 'form_prefix': self.prefix}
valid_until = kwargs.pop('valid_until', None)
if valid_until is not None:
data.update({'valid_until': valid_until})
defer = DeferredAction.objects.create(**data)
if user is not None:
defer.requested_by = user
if self.instance is not None:
# this extra step makes sure that ModelForms for editing and for
# creating objects both work.
defer.instance_object = self.instance
defer.save()
# inform anyone else that confirmation is requested
signals.confirmation_required.send(sender=self._meta.model,
instance=defer, user=user)
if hasattr(self, 'send_notification') and callable(self.send_notification):
self.send_notification(user, instance=defer)
return defer.token
def save_original(self, *args, **kwargs):
"""
        trigger the original ModelForm save
"""
return forms.ModelForm.save(self, *args, **kwargs)
class DeferredForm(DeferredFormMixIn, forms.ModelForm):
"""
Inherit from this form to get a ModelForm which will
    automatically defer its save method until the action
is confirmed.
"""
class ConfirmationForm(forms.Form):
"""
Form to use in views to confirm an action with a token.
Makes sure the token exists and on calling ``save()``
    will resume the deferred action.
"""
token = forms.CharField(required=True)
def clean_token(self):
try:
obj = DeferredAction.objects.get(token=self.cleaned_data['token'])
return self.cleaned_data['token']
except DeferredAction.DoesNotExist:
raise forms.ValidationError("wrong token") #FIXME: i18n
def save(self):
return DeferredAction.objects.confirm(token=self.cleaned_data['token'])
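# Usage sketch (an assumption, not part of this module): a project would subclass
# DeferredForm for its own model and wire ConfirmationForm into the confirmation
# view. `EmailChangeForm`, `UserProfile` and the mailing step are hypothetical.
#
#   class EmailChangeForm(DeferredForm):
#       def send_notification(self, user=None, instance=None):
#           # e.g. email the user a link containing instance.token
#           pass
#       class Meta:
#           model = UserProfile
#           fields = ('email',)
#
#   # in the view handling the confirmation link:
#   form = ConfirmationForm({'token': request.GET.get('token')})
#   if form.is_valid():
#       form.save()  # resumes the deferred save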
|
the-stack_106_22991 | #!/usr/bin/env python3
# Copyright 2019 Tetrate
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__),
"python")) # noqa: E402
from getenvoy import version, workspace
import argparse
import atexit
import base64
import collections
import logging
import platform
import shutil
import subprocess
import tempfile
def runBazel(command, targets, startup_options={}, options={}):
argv = ["bazel"]
for k, v in startup_options.items():
if v:
argv.append("--{}={}".format(k, v))
argv.append(command)
for k, v in options.items():
if v:
argv.extend(["--{}={}".format(k, i) for i in v])
argv.extend(targets)
logging.debug(" ".join(argv))
subprocess.check_call(argv)
def bazelOptions(args):
options = collections.defaultdict(list)
if args.local_resources:
options["local_resources"].append(args.local_resources)
if args.config:
options["config"].append(args.config)
options["config"].append(args.variant)
options["config"].append(args.dist)
if os.path.isdir("envoy-override"):
# Split up this str + append, otherwise the formatter formats
# one way, but lints another way.
override_str = "envoy=" + os.getcwd() + "/envoy-override"
options["override_repository"].append(override_str)
if platform.system() == "Darwin":
options["action_env"].append(
"PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin:/usr/sbin:/sbin")
return options
def buildPackages(args):
targets = [
"//packages/{}:tar-package-symbol.tar.xz".format(args.variant),
"//packages/{}:tar-package-stripped.tar.xz".format(args.variant),
]
if args.build_deb_package:
targets.append("//packages/{}:deb-package.deb".format(args.variant))
if args.build_rpm_package:
targets.append("//packages/{}:rpm-package.rpm".format(args.variant))
if args.build_distroless_docker:
targets.append("//packages/{}:distroless-package.tar".format(
args.variant))
if args.build_istio_compat:
targets.append("//packages/{}:istio-tar-package-symbol.tar.gz".format(
args.variant))
targets.append(
"//packages/{}:istio-tar-package-stripped.tar.gz".format(
args.variant))
runBazel("build", targets, options=bazelOptions(args))
if args.build_rpm_package and args.gpg_secret_key and args.gpg_name:
signRpmPackage(
"bazel-bin/packages/{}/rpm-package.rpm".format(args.variant),
args.gpg_secret_key,
args.gpg_name,
)
def signRpmPackage(package_path, gpg_secret_key, gpg_name):
# b64decode may raise TypeError but its error message doesn't contain our secret value.
decoded_secret = base64.b64decode(gpg_secret_key)
p = subprocess.Popen(["gpg", "--import", "-"], stdin=subprocess.PIPE)
p.stdin.write(decoded_secret)
p.stdin.close()
p.wait()
if p.returncode != 0:
raise Exception("Failed to import gpg key")
# yapf: disable
    # run in a newly created process group so that rpmsign cannot connect to the
    # current tty for the GPG passphrase.
# https://github.com/rpm-software-management/rpm/blob/rpm-4.11.3-release/rpmsign.c#L123
p2 = subprocess.Popen(['rpmsign', '-D', '_gpg_name {}'.format(gpg_name), '--addsign', package_path],
stdin=subprocess.PIPE,
preexec_fn=os.setsid)
# yapf: enable
p2.stdin.close()
p2.wait()
if p2.returncode != 0:
raise Exception("rpmsign failed")
return
def storeArtifacts(args, workspace_info):
directory = args.artifacts_directory
if not os.path.exists(directory):
os.makedirs(directory)
shutil.copy(
"bazel-bin/packages/{}/tar-package-symbol.tar.xz".format(args.variant),
os.path.join(directory, version.tarFileName(workspace_info,
symbol=True)),
)
shutil.copy(
"bazel-bin/packages/{}/tar-package-stripped.tar.xz".format(
args.variant),
os.path.join(directory, version.tarFileName(workspace_info)),
)
if args.build_deb_package:
shutil.copy(
"bazel-bin/packages/{}/deb-package.deb".format(args.variant),
os.path.join(directory, version.debFileName(workspace_info)),
)
if args.build_rpm_package:
shutil.copy(
"bazel-bin/packages/{}/rpm-package.rpm".format(args.variant),
os.path.join(directory, version.rpmFileName(workspace_info)),
)
if args.build_distroless_docker:
docker_image_tar = os.path.join(
directory, version.distrolessFileName(workspace_info))
shutil.copy(
"bazel-bin/packages/{}/distroless-package.tar".format(
args.variant),
docker_image_tar,
)
subprocess.check_call(["xz", "-f", docker_image_tar])
if args.build_istio_compat:
shutil.copy(
"bazel-bin/packages/{}/istio-tar-package-symbol.tar.gz".format(
args.variant),
os.path.join(directory,
version.istioTarFileName(workspace_info,
symbol=True)),
)
shutil.copy(
"bazel-bin/packages/{}/istio-tar-package-stripped.tar.gz".format(
args.variant),
os.path.join(directory, version.istioTarFileName(workspace_info)),
)
def bailIfPackagesExist(args, workspace_info):
rc = subprocess.call([
"./cloudsmith_uploader.py",
"--version",
version.debVersion(workspace_info),
"--check_nonexisting",
os.path.join(args.artifacts_directory,
version.tarFileName(workspace_info)),
])
if rc != 0:
sys.exit(0)
rc = subprocess.call([
"./cloudsmith_uploader.py",
"--version",
version.debVersion(workspace_info),
"--check_nonexisting",
os.path.join(
args.artifacts_directory,
version.tarFileName(workspace_info, symbol=True),
),
])
if rc != 0:
sys.exit(0)
def uploadArtifacts(args, workspace_info):
directory = args.artifacts_directory
override_args = []
if args.override:
override_args = ["--override"]
for filename in [
os.path.join(directory, version.tarFileName(workspace_info)),
os.path.join(directory,
version.tarFileName(workspace_info, symbol=True)),
]:
exists = subprocess.call([
"./cloudsmith_uploader.py",
"--raw",
"--version",
version.debVersion(workspace_info),
filename,
] + override_args)
if exists != 0:
return
if args.build_deb_package:
subprocess.check_call([
"./cloudsmith_uploader.py",
"--deb",
"--variant",
workspace_info["variant"],
"--version",
version.debVersion(workspace_info),
"--release_level",
args.release_level,
os.path.join(directory, version.debFileName(workspace_info)),
])
if args.build_rpm_package:
subprocess.check_call([
"./cloudsmith_uploader_rpm.py",
"--rpm",
"--variant",
workspace_info["variant"],
"--version",
workspace_info["source_version"],
"--release_level",
args.release_level,
os.path.join(directory, version.rpmFileName(workspace_info)),
])
if args.build_distroless_docker:
docker_image_tar = os.path.join(
directory, version.distrolessFileName(workspace_info))
load_cmd = 'xzcat "{}.xz" | docker load'.format(docker_image_tar)
subprocess.check_call(load_cmd, shell=True)
subprocess.check_call([
"./docker_upload.py",
"--docker_version",
version.dockerVersion(workspace_info),
"--variant",
workspace_info["variant"],
version.dockerTag(workspace_info),
])
if args.build_istio_compat:
subprocess.call([
"./cloudsmith_uploader.py",
"--raw",
"--version",
version.debVersion(workspace_info),
os.path.join(directory,
version.istioTarFileName(workspace_info,
symbol=True)),
] + override_args)
# Istio doesn't have a concept of debug stripped builds.
if workspace_info["release_level"] == "stable":
subprocess.call([
"./cloudsmith_uploader.py",
"--raw",
"--version",
version.debVersion(workspace_info),
os.path.join(directory, version.istioTarFileName(
workspace_info)),
] + override_args)
def testPackage(args):
runBazel("test", ["//test/..."], options=bazelOptions(args))
if args.test_distroless:
runBazel("build", ["//test:distroless-package.tar"],
options=bazelOptions(args))
def testEnvoy(args):
options = bazelOptions(args)
options["run_under"].append("//bazel:envoy_test_wrapper")
runBazel("test", ["@envoy//test/integration/..."], options=options)
def checkArguments(args):
if args.build_rpm_package and args.upload:
if args.gpg_secret_key is None or args.gpg_name is None:
raise Exception(
"gpg_secret_key and gpg_name args are required to build RPM package"
)
def main():
parser = argparse.ArgumentParser(description="Envoy packaging script")
parser.add_argument("--variant",
default="envoy",
choices=["envoy", "istio-proxy"])
parser.add_argument("--envoy_commit",
default=os.environ.get("ENVOY_COMMIT", "main"))
parser.add_argument("--envoy_repo")
parser.add_argument("--local_resources",
default=os.environ.get("LOCAL_RESOURCES"))
parser.add_argument(
"--gpg_secret_key",
default=os.environ.get("GPG_SECRET_KEY"),
help="Base64 encoded ASCII armored secret key value",
)
parser.add_argument("--gpg_name", default=os.environ.get("GPG_NAME"))
parser.add_argument("--nosetup", action="store_true")
parser.add_argument("--nocleanup", action="store_true")
parser.add_argument("--upload", action="store_true")
parser.add_argument("--override", action="store_true", default=False)
parser.add_argument(
"--override_envoy_repository",
default=os.environ.get("OVERRIDE_ENVOY_REPOSITORY", ""),
)
parser.add_argument("--override_envoy_commit",
default=os.environ.get("OVERRIDE_ENVOY_COMMIT", ""))
parser.add_argument("--test_distroless", action="store_true")
parser.add_argument("--test_package", action="store_true")
parser.add_argument(
"--test_envoy",
action="store_true",
default=os.environ.get("ENVOY_BUILD_TESTS", False),
)
parser.add_argument("--dist",
default=os.environ.get("ENVOY_DIST", "unknown"))
parser.add_argument("--config",
default=os.environ.get("ENVOY_BUILD_CONFIG",
"release"))
parser.add_argument("--target")
parser.add_argument("--binary_path")
parser.add_argument(
"--build_deb_package",
action="store_true",
default=(os.environ.get("BUILD_DEB_PACKAGE", "0") == "1"),
)
parser.add_argument(
"--build_rpm_package",
action="store_true",
default=(os.environ.get("BUILD_RPM_PACKAGE", "0") == "1"),
)
parser.add_argument(
"--build_distroless_docker",
action="store_true",
default=(os.environ.get("BUILD_DISTROLESS_DOCKER", "0") == "1"),
)
parser.add_argument(
"--build_istio_compat",
action="store_true",
default=(os.environ.get("BUILD_ISTIO_COMPAT", "0") == "1"),
)
parser.add_argument("--artifacts_directory")
parser.add_argument(
"--release_level",
default=os.environ.get("ENVOY_RELEASE_LEVEL", "nightly"),
choices=["nightly", "stable"],
)
args = parser.parse_args()
checkArguments(args)
if not args.nocleanup:
atexit.register(workspace.cleanup)
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
workspace.cleanup()
args.tar_suffix = "-".join([args.dist, args.config])
workspace_info = workspace.setup(args)
if platform.system() == "Darwin" and not args.nosetup:
subprocess.check_call(["mac/setup.sh"])
if args.test_package:
testPackage(args)
else:
if not args.artifacts_directory:
tempdir = tempfile.TemporaryDirectory()
args.artifacts_directory = tempdir.name
atexit.register(tempdir.cleanup)
if args.upload and not args.override:
bailIfPackagesExist(args, workspace_info)
if args.test_envoy:
testEnvoy(args)
buildPackages(args)
storeArtifacts(args, workspace_info)
if args.upload:
uploadArtifacts(args, workspace_info)
if __name__ == "__main__":
main()
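# Example invocations (illustrative only: the flags exist in main()'s argument
# parser above, but the script name and concrete values here are assumptions):
#
#   ./package.py --variant envoy --envoy_commit main --dist ubuntu --config release \
#       --build_deb_package --artifacts_directory /tmp/getenvoy-artifacts
#
#   ./package.py --variant istio-proxy --test_package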
|
the-stack_106_22992 | #import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
# This program demonstrates a regression ML method
# using airfoil data from: https://archive.ics.uci.edu/ml/machine-learning-databases/00291/ (Airfoil Self-Noise dataset, https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise)
# load the data
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat', sep='\t', header=None)
# split the data from the target values
data = df[[0,1,2,3,4]].values
target = df[5].values
# split up the testing and training data
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.33)
# train the model
model = RandomForestRegressor()
model.fit(X_train, y_train)
# score the model
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print("R^2 score for training:", train_score)
print("R^2 score for testing:", test_score)
# Use the model to do a prediction
prediction = model.predict(X_test[[5]])
print('\n' + "Predcition of the first element - ")
print("Expected result:", prediction[0])
print("Model prediction:", y_test[5]) |
the-stack_106_22994 | # -*- coding: utf-8 -*-
import os
import json
from datetime import datetime, timedelta
from django.core.urlresolvers import reverse
from django.forms import ValidationError
from django.test.utils import override_settings
from django.utils import translation
import mock
from rest_framework.response import Response
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser
from olympia.api.tests.utils import APIKeyAuthTestCase
from olympia.applications.models import AppVersion
from olympia.devhub import tasks
from olympia.files.models import File, FileUpload
from olympia.signing.views import VersionView
from olympia.users.models import UserProfile
from olympia.versions.models import Version
class SigningAPITestCase(APIKeyAuthTestCase):
fixtures = ['base/addon_3615', 'base/user_4043307']
def setUp(self):
self.user = UserProfile.objects.get(email='[email protected]')
self.api_key = self.create_api_key(self.user, str(self.user.pk) + ':f')
class BaseUploadVersionCase(SigningAPITestCase):
def setUp(self):
super(BaseUploadVersionCase, self).setUp()
self.guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
self.view = VersionView.as_view()
create_version_patcher = mock.patch(
'olympia.devhub.tasks.create_version_for_upload',
tasks.create_version_for_upload.non_atomic)
self.create_version_for_upload = create_version_patcher.start()
self.addCleanup(create_version_patcher.stop)
auto_sign_version_patcher = mock.patch(
'olympia.devhub.views.auto_sign_version')
self.auto_sign_version = auto_sign_version_patcher.start()
self.addCleanup(auto_sign_version_patcher.stop)
def url(self, guid, version, pk=None):
if guid is None:
args = [version]
else:
args = [guid, version]
if pk is not None:
args.append(pk)
return reverse('signing.version', args=args)
def create_version(self, version):
response = self.request('PUT', self.url(self.guid, version), version)
assert response.status_code in [201, 202]
def xpi_filepath(self, addon, version):
return os.path.join(
'src', 'olympia', 'signing', 'fixtures',
'{addon}-{version}.xpi'.format(addon=addon, version=version))
def request(self, method='PUT', url=None, version='3.0',
addon='@upload-version', filename=None):
if filename is None:
filename = self.xpi_filepath(addon, version)
if url is None:
url = self.url(addon, version)
with open(filename) as upload:
data = {'upload': upload}
if method == 'POST' and version:
data['version'] = version
return getattr(self.client, method.lower())(
url, data,
HTTP_AUTHORIZATION=self.authorization(),
format='multipart')
def make_admin(self, user):
admin_group = Group.objects.create(name='Admin', rules='*:*')
GroupUser.objects.create(group=admin_group, user=user)
class TestUploadVersion(BaseUploadVersionCase):
def test_not_authenticated(self):
# Use self.client.put so that we don't add the authorization header.
response = self.client.put(self.url(self.guid, '12.5'))
assert response.status_code == 401
@override_settings(READ_ONLY=True)
def test_read_only_mode(self):
response = self.request('PUT', self.url(self.guid, '12.5'))
assert response.status_code == 503
assert 'website maintenance' in response.data['error']
def test_addon_does_not_exist(self):
guid = '@create-version'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request('PUT', addon=guid, version='1.0')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert not addon.is_listed
assert addon.status == amo.STATUS_NOMINATED
assert addon.latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(
addon.latest_version, is_beta=False)
def test_user_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_does_not_own_addon(self):
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
self.make_admin(self.user)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_version_does_not_match_manifest_file(self):
response = self.request('PUT', self.url(self.guid, '2.5'))
assert response.status_code == 400
assert response.data['error'] == (
'Version does not match the manifest file.')
def test_version_already_exists(self):
response = self.request(
'PUT', self.url(self.guid, '2.1.072'), version='2.1.072')
assert response.status_code == 409
assert response.data['error'] == 'Version already exists.'
@mock.patch('olympia.devhub.views.Version.from_upload')
def test_no_version_yet(self, from_upload):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added(self):
assert Addon.objects.get(guid=self.guid).status == amo.STATUS_PUBLIC
qs = Version.objects.filter(addon__guid=self.guid, version='3.0')
assert not qs.exists()
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
version = qs.get()
assert version.addon.guid == self.guid
assert version.version == '3.0'
assert version.statuses[0][1] == amo.STATUS_AWAITING_REVIEW
assert version.addon.status == amo.STATUS_PUBLIC
assert version.channel == amo.RELEASE_CHANNEL_LISTED
self.auto_sign_version.assert_called_with(version, is_beta=False)
def test_version_already_uploaded(self):
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 202
assert 'processed' in response.data
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == 'Version already exists.'
def test_version_failed_review(self):
self.create_version('3.0')
version = Version.objects.get(addon__guid=self.guid, version='3.0')
version.update(reviewed=datetime.today())
version.files.get().update(reviewed=datetime.today(),
status=amo.STATUS_DISABLED)
response = self.request('PUT', self.url(self.guid, '3.0'))
assert response.status_code == 409
assert response.data['error'] == 'Version already exists.'
# Verify that you can check the status after upload (#953).
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_added_is_experiment(self):
self.grant_permission(self.user, 'Experiments:submit')
guid = 'experiment@xpi'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.1',
filename='src/olympia/files/fixtures/files/'
'telemetry_experiment.xpi')
assert response.status_code == 201
assert qs.exists()
addon = qs.get()
assert addon.has_author(self.user)
assert not addon.is_listed
assert addon.status == amo.STATUS_NOMINATED
assert addon.latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(
addon.latest_version, is_beta=False)
def test_version_added_is_experiment_reject_no_perm(self):
guid = 'experiment@xpi'
qs = Addon.unfiltered.filter(guid=guid)
assert not qs.exists()
response = self.request(
'PUT',
addon=guid, version='0.1',
filename='src/olympia/files/fixtures/files/'
'telemetry_experiment.xpi')
assert response.status_code == 400
assert response.data['error'] == (
'You cannot submit this type of add-on')
def test_version_is_beta_unlisted(self):
Addon.objects.get(guid=self.guid).update(
status=amo.STATUS_PUBLIC, is_listed=False)
version_string = '4.0-beta1'
qs = Version.objects.filter(
addon__guid=self.guid, version=version_string)
assert not qs.exists()
response = self.request(
'PUT',
self.url(self.guid, version_string), version=version_string)
assert response.status_code == 202
assert 'processed' in response.data
version = qs.get()
assert version.addon.guid == self.guid
assert version.version == version_string
assert version.statuses[0][1] == amo.STATUS_AWAITING_REVIEW
assert version.addon.status == amo.STATUS_PUBLIC
assert version.channel == amo.RELEASE_CHANNEL_UNLISTED
assert not version.is_beta
self.auto_sign_version.assert_called_with(version, is_beta=False)
def test_version_is_beta(self):
assert Addon.objects.get(guid=self.guid).status == amo.STATUS_PUBLIC
version_string = '4.0-beta1'
qs = Version.objects.filter(
addon__guid=self.guid, version=version_string)
assert not qs.exists()
response = self.request(
'PUT',
self.url(self.guid, version_string), version=version_string)
assert response.status_code == 202
assert 'processed' in response.data
version = qs.get()
assert version.addon.guid == self.guid
assert version.version == version_string
assert version.statuses[0][1] == amo.STATUS_BETA
assert version.addon.status == amo.STATUS_PUBLIC
assert version.channel == amo.RELEASE_CHANNEL_LISTED
assert version.is_beta
self.auto_sign_version.assert_called_with(version, is_beta=True)
def test_invalid_version_response_code(self):
# This raises an error in parse_addon which is not covered by
# an exception handler.
response = self.request(
'PUT',
self.url(self.guid, '1.0'),
addon='@create-webextension-invalid-version',
version='1.0')
assert response.status_code == 400
def test_raises_response_code(self):
# A check that any bare error in handle_upload will return a 400.
with mock.patch('olympia.signing.views.handle_upload') as patch:
patch.side_effect = ValidationError(message='some error')
response = self.request('PUT', self.url(self.guid, '1.0'))
assert response.status_code == 400
class TestUploadVersionWebextension(BaseUploadVersionCase):
def setUp(self):
super(TestUploadVersionWebextension, self).setUp()
AppVersion.objects.create(application=amo.FIREFOX.id, version='42.0')
AppVersion.objects.create(application=amo.FIREFOX.id, version='*')
validate_patcher = mock.patch('validator.validate.validate')
run_validator = validate_patcher.start()
run_validator.return_value = json.dumps(amo.VALIDATOR_SKELETON_RESULTS)
self.addCleanup(validate_patcher.stop)
def test_addon_does_not_exist_webextension(self):
response = self.request(
'POST',
url=reverse('signing.version'),
addon='@create-webextension',
version='1.0')
assert response.status_code == 201
guid = response.data['guid']
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid is not None
assert addon.guid != self.guid
version = Version.objects.get(addon__guid=guid, version='1.0')
assert version.files.all()[0].is_webextension is True
assert addon.has_author(self.user)
assert not addon.is_listed
assert addon.status == amo.STATUS_NOMINATED
assert addon.latest_version.channel == amo.RELEASE_CHANNEL_UNLISTED
self.auto_sign_version.assert_called_with(
addon.latest_version, is_beta=False)
def test_optional_id_not_allowed_for_regular_addon(self):
response = self.request(
'POST',
url=reverse('signing.version'),
addon='@create-version-no-id',
version='1.0')
assert response.status_code == 400
def test_webextension_reuse_guid(self):
response = self.request(
'POST',
url=reverse('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
guid = response.data['guid']
assert guid == '@webextension-with-guid'
addon = Addon.unfiltered.get(guid=guid)
assert addon.guid == '@webextension-with-guid'
def test_webextension_reuse_guid_but_only_create(self):
# Uploading the same version with the same id fails. People
# have to use the regular `PUT` endpoint for that.
response = self.request(
'POST',
url=reverse('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
assert response.status_code == 201
response = self.request(
'POST',
url=reverse('signing.version'),
addon='@create-webextension-with-guid',
version='1.0')
assert response.status_code == 400
assert response.data['error'] == 'Duplicate add-on ID found.'
def test_webextension_optional_version(self):
# Uploading the same version with the same id fails. People
# have to use the regular `PUT` endpoint for that.
response = self.request(
'POST',
url=reverse('signing.version'),
addon='@create-webextension-with-guid-and-version',
version='99.0')
assert response.status_code == 201
assert (
response.data['guid'] ==
'@create-webextension-with-guid-and-version')
assert response.data['version'] == '99.0'
def test_webextension_resolve_translations(self):
fname = (
'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')
response = self.request(
'POST',
url=reverse('signing.version'),
addon='@notify-link-clicks-i18n',
version='1.0',
filename=fname)
assert response.status_code == 201
addon = Addon.unfiltered.get(guid=response.data['guid'])
# Normalized from `en` to `en-US`
assert addon.default_locale == 'en-US'
assert addon.name == 'Notify link clicks i18n'
assert addon.summary == (
'Shows a notification when the user clicks on links.')
translation.activate('de')
addon.reload()
assert addon.name == 'Meine Beispielerweiterung'
assert addon.summary == u'Benachrichtigt den Benutzer über Linkklicks'
class TestCheckVersion(BaseUploadVersionCase):
def test_not_authenticated(self):
# Use self.client.get so that we don't add the authorization header.
response = self.client.get(self.url(self.guid, '12.5'))
assert response.status_code == 401
def test_addon_does_not_exist(self):
response = self.get(self.url('foo', '12.5'))
assert response.status_code == 404
assert response.data['error'] == 'Could not find add-on with id "foo".'
def test_user_does_not_own_addon(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 403
assert response.data['error'] == 'You do not own this addon.'
def test_admin_can_view(self):
self.create_version('3.0')
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.make_admin(self.user)
self.api_key = self.create_api_key(self.user, 'bar')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_does_not_exist(self):
response = self.get(self.url(self.guid, '2.5'))
assert response.status_code == 404
assert (response.data['error'] ==
'No uploaded file for that addon and version.')
def test_version_exists(self):
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
def test_version_exists_with_pk(self):
# Mock Version.from_upload so the Version won't be created.
with mock.patch('olympia.devhub.tasks.Version.from_upload'):
self.create_version('3.0')
upload = FileUpload.objects.latest()
upload.update(created=datetime.today() - timedelta(hours=1))
self.create_version('3.0')
newer_upload = FileUpload.objects.latest()
assert newer_upload != upload
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 200
# For backwards-compatibility reasons, we return the uuid as "pk".
assert response.data['pk'] == upload.uuid.hex
assert 'processed' in response.data
def test_version_exists_with_pk_not_owner(self):
orig_user, orig_api_key = self.user, self.api_key
# This will create a version for the add-on with guid @create-version
# using a new user.
self.user = UserProfile.objects.create(
read_dev_agreement=datetime.now())
self.api_key = self.create_api_key(self.user, 'bar')
response = self.request('PUT', addon='@create-version', version='1.0')
assert response.status_code == 201
upload = FileUpload.objects.latest()
# Check that the user that created the upload can access it properly.
response = self.get(
self.url('@create-version', '1.0', upload.uuid.hex))
assert response.status_code == 200
assert 'processed' in response.data
# This will create a version for the add-on from the fixture with the
# regular fixture user.
self.user, self.api_key = orig_user, orig_api_key
self.create_version('3.0')
# Check that we can't access the FileUpload by uuid even if we pass in
# an add-on and version that we own if we don't own the FileUpload.
response = self.get(self.url(self.guid, '3.0', upload.uuid.hex))
assert response.status_code == 404
assert 'error' in response.data
def test_version_download_url(self):
version_string = '3.0'
qs = File.objects.filter(version__addon__guid=self.guid,
version__version=version_string)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
assert response.data['files'][0]['download_url'] == \
file_.get_signed_url('api')
def test_file_hash(self):
version_string = '3.0'
qs = File.objects.filter(version__addon__guid=self.guid,
version__version=version_string)
assert not qs.exists()
self.create_version(version_string)
response = self.get(self.url(self.guid, version_string))
assert response.status_code == 200
file_ = qs.get()
filename = self.xpi_filepath('@upload-version', version_string)
assert response.data['files'][0]['hash'] == \
file_.generate_hash(filename=filename)
def test_has_failed_upload(self):
addon = Addon.objects.get(guid=self.guid)
FileUpload.objects.create(addon=addon, version='3.0')
self.create_version('3.0')
response = self.get(self.url(self.guid, '3.0'))
assert response.status_code == 200
assert 'processed' in response.data
class TestSignedFile(SigningAPITestCase):
def setUp(self):
super(TestSignedFile, self).setUp()
self.file_ = self.create_file()
def url(self):
return reverse('signing.file', args=[self.file_.pk])
def create_file(self):
addon = Addon.objects.create(name='thing', is_listed=False)
addon.save()
AddonUser.objects.create(user=self.user, addon=addon)
version = Version.objects.create(addon=addon)
return File.objects.create(version=version)
def test_can_download_once_authenticated(self):
response = self.get(self.url())
assert response.status_code == 302
assert response['X-Target-Digest'] == self.file_.hash
def test_cannot_download_without_authentication(self):
response = self.client.get(self.url()) # no auth
assert response.status_code == 401
def test_api_relies_on_version_downloader(self):
with mock.patch('olympia.versions.views.download_file') as df:
df.return_value = Response({})
self.get(self.url())
assert df.called is True
assert df.call_args[0][0].user == self.user
assert df.call_args[0][1] == str(self.file_.pk)
|
the-stack_106_22995 | # -*- coding: utf-8 -*-
"""
@date: 2021/3/15 8:05 PM
@file: test_cifar.py
@author: zj
@description:
"""
import numpy as np
from rotnet.data.datasets.cifar import CIFAR
def test_cifar10():
root_data = './data/cifar'
data_set = CIFAR(root_data, is_cifar100=False)
print(data_set.classes)
print(len(data_set))
img, target = data_set.__getitem__(100)
print(np.array(img).shape, target)
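# A parallel check for CIFAR-100 could look like this (a sketch assuming the same
# ./data/cifar layout and that the CIFAR wrapper loads CIFAR-100 when
# is_cifar100=True); call it from the __main__ guard below if needed.
def test_cifar100():
    data_set = CIFAR('./data/cifar', is_cifar100=True)
    print(data_set.classes)
    print(len(data_set))
    img, target = data_set.__getitem__(100)
    print(np.array(img).shape, target)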
if __name__ == '__main__':
test_cifar10()
|
the-stack_106_22997 | import viewflow
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from viewflow.create_dag import ParseContext
from unittest.mock import MagicMock, patch, ANY
def test_parse_external_dependencies():
parsed = viewflow.parse_dag_dir(
"./tests/projects/external_deps/dag_2", ParseContext(dag_id="dag_2")
)
assert "tasks" in parsed
tasks = parsed["tasks"]
for task in tasks:
if task["task_id"] == "task_2":
assert len(tasks) == 2
task_2_deps = task.get("depends_on", [])
assert len(task_2_deps) == 0
elif task["task_id"] == "task_3":
assert len(tasks) == 2
task_3_deps = task.get("depends_on", [])
assert len(task_3_deps) == 1
assert task_3_deps[0].get("dag") == "dag_1"
assert task_3_deps[0].get("task") == "task_1"
def test_create_external_dependencies():
mocked_operator = MagicMock()
dag = viewflow.create_dag(
"./tests/projects/external_deps/dag_2", {"PostgresOperator": mocked_operator}
)
assert "wait_for_dag_1_task_1" in dag.task_dict
external_sensor = dag.task_dict.get("wait_for_dag_1_task_1")
assert isinstance(external_sensor, ExternalTaskSensor)
assert "task_3" in dag.task_dict
task_3 = dag.task_dict.get("task_3")
assert task_3._upstream_task_ids == {external_sensor.task_id}
# assert dag.task_dict.get("task_2")._upstream_task_ids == task2._upstream_task_ids
|
the-stack_106_22998 | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For details on how to write such tests, please refer to
# https://github.com/oppia/oppia/wiki/Writing-Tests-For-Pylint
import os
import sys
import tempfile
import unittest
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.8.4')
sys.path.insert(0, _PYLINT_PATH)
# Since these module needs to be imported after adding Pylint path,
# we need to disable isort for the below lines to prevent import
# order errors.
# pylint: disable=wrong-import-position
# pylint: disable=relative-import
import astroid # isort:skip
import custom_lint_checks # isort:skip
from pylint import testutils # isort:skip
# pylint: enable=wrong-import-position
# pylint: enable=relative-import
class ExplicitKeywordArgsCheckerTest(unittest.TestCase):
def test_finds_non_explicit_keyword_args(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
custom_lint_checks.ExplicitKeywordArgsChecker)
checker_test_object.setup_method()
func_call_node_one, func_call_node_two, func_call_node_three = (
astroid.extract_node("""
def test(test_var_one, test_var_two=4, test_var_three=5, test_var_four="test_checker"):
test_var_five = test_var_two + test_var_three
return test_var_five
test(2, 5, test_var_three=6) #@
test(2) #@
test(2, 6, test_var_two=5, test_var_four="test_checker") #@
"""))
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='non-explicit-keyword-args',
node=func_call_node_one,
args=(
"'test_var_two'",
'function',
'test'
)
),
):
checker_test_object.checker.visit_call(
func_call_node_one)
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_call(
func_call_node_two)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='non-explicit-keyword-args',
node=func_call_node_three,
args=(
"'test_var_three'",
'function',
'test'
)
)
):
checker_test_object.checker.visit_call(
func_call_node_three)
class HangingIndentCheckerTest(unittest.TestCase):
def test_finds_hanging_indent(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
custom_lint_checks.HangingIndentChecker)
checker_test_object.setup_method()
node1 = astroid.scoped_nodes.Module(name='test', doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with open(filename, 'w') as tmp:
tmp.write(
"""self.post_json('/ml/trainedclassifierhandler',
self.payload, expect_errors=True, expected_status_int=401)
""")
node1.file = filename
node1.path = filename
checker_test_object.checker.process_module(node1)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='no-break-after-hanging-indent',
line=1
),
):
temp_file.close()
node2 = astroid.scoped_nodes.Module(name='test', doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with open(filename, 'w') as tmp:
tmp.write(
"""master_translation_dict = json.loads(
utils.get_file_contents(os.path.join(
os.getcwd(), 'assets', 'i18n', 'en.json')))
""")
node2.file = filename
node2.path = filename
checker_test_object.checker.process_module(node2)
with checker_test_object.assertNoMessages():
temp_file.close()
class DocstringParameterCheckerTest(unittest.TestCase):
def test_finds_docstring_parameter(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
custom_lint_checks.DocstringParameterChecker)
checker_test_object.setup_method()
func_node = astroid.extract_node("""
def test(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_functiondef(func_node)
class ImportOnlyModulesCheckerTest(unittest.TestCase):
def test_finds_import_from(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
custom_lint_checks.ImportOnlyModulesChecker)
checker_test_object.setup_method()
importfrom_node1 = astroid.extract_node("""
from os import path #@
import sys
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node1)
importfrom_node2 = astroid.extract_node("""
from os import error #@
import sys
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='import-only-modules',
node=importfrom_node2,
args=('error', 'os')
),
):
checker_test_object.checker.visit_importfrom(
importfrom_node2)
|
the-stack_106_23001 | # Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
        # Sort by start so that overlapping intervals become adjacent.
        intervals.sort(key=lambda x: x.start)
        result = []
        p1 = 0
        p2 = 0
        num_intervals = len(intervals)
        while p1 < num_intervals:
            start_index = intervals[p1].start
            end_index = intervals[p1].end
            # Grow the current merged interval while the next one overlaps it.
            while p2 < num_intervals and intervals[p2].start <= end_index:
                end_index = max(intervals[p2].end, end_index)
                p2 += 1
            result.append(Interval(start_index, end_index))
            p1 = p2
        return result
if __name__ == "__main__":
s = Solution()
t1 = [Interval(x[0], x[1]) for x in [[1,3],[2,6],[15,18],[8,10]]]
for interval in s.merge(t1):
print(interval.start, interval.end)
t2 = [Interval(x[0],x[1]) for x in [[1,4],[4,5]]]
for interval in s.merge(t2):
print(interval.start, interval.end) |
the-stack_106_23002 | import sys
import argparse
import numpy as np
import tensorflow as tf
from load_mnist import MNIST
import data_utils
import model
tf.logging.set_verbosity(tf.logging.INFO)
MODELS = ['simpleNN']
def get_simple_nn_experiment(args):
"""
Function for creating an experiment using the SimpleNN model on MNIST
"""
train_input_fn = data_utils.get_input_fn(
data_dir=args.data_dir,
is_training=True,
num_epochs=args.num_epochs,
batch_size=args.batch_size,
shuffle=True,
normalize=args.normalize)
val_input_fn = data_utils.get_input_fn(
data_dir=args.data_dir,
is_training=False,
num_epochs=1,
batch_size=2*args.batch_size,
shuffle=False,
normalize=args.normalize)
simplecnn = model.SimpleMnistModel(
num_classes=args.num_classes,
scope='SimpleMnist')
config = tf.estimator.RunConfig(
keep_checkpoint_max=10000,
tf_random_seed=1234,
save_summary_steps=50,
save_checkpoints_secs=120)
estimator = tf.estimator.Estimator(
model_fn=simplecnn.get_model_fn(),
model_dir=args.model_dir,
config=config,
params={'learning_rate': args.lr}
)
experiment = tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=val_input_fn,
eval_metrics=None,
train_steps=None,
eval_steps=None,
train_monitors=[],
min_eval_frequency=1,
)
return experiment
def main(args):
parser = argparse.ArgumentParser(
description='Train a model on the MNIST dataset.')
parser.add_argument('-m', '--model', required=True, choices=MODELS,
help='Select which model to train')
parser.add_argument('-md', '--model-dir', required=True,
help='the directory where the model and related'
'files are saved')
parser.add_argument('-dd', '--data-dir', default='./',
help='directory which contains the data files')
parser.add_argument('-nc', '--num-classes', default=10, type=int,
help='number of classes')
parser.add_argument('-b', '--batch-size', default=60,
type=int, help='the batch size')
parser.add_argument('-e', '--num-epochs', default=20,
type=int, help='number of steps (minibatches)')
parser.add_argument('--lr', default=1e-4, type=float,
help='the learning rate of the model')
    # argparse's type=bool treats any non-empty string as True, so parse explicitly
    parser.add_argument('-n', '--normalize', default=True,
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        help='normalize images')
args = parser.parse_args(args)
if args.model == 'simpleNN':
experiment = get_simple_nn_experiment(args)
else:
raise NotImplementedError()
experiment.train_and_evaluate()
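# Example invocation (illustrative only; the script name and paths are assumptions):
#   python train_mnist.py -m simpleNN -md ./runs/simple_nn -dd ./mnist_data -e 5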
if __name__ == '__main__':
main(sys.argv[1:]) |
the-stack_106_23003 | def bits(i):
I = 0
ret = []
while i:
if i & 1:
ret.append(I)
I += 1
i >>= 1
return ret
def all_keys(sol, inds, num_keys):
keys = [False for i in range(num_keys)]
for i in inds:
for j in sol[i]:
keys[j] |= True
return all(keys)
def next_combination(a, n):
k = len(a)
for i in range(k - 1, -1, -1):
if a[i] < n - k + i:
a[i] += 1
for j in range(i + 1, k):
a[j] = a[j - 1] + 1
return True
return False
def is_valid(sol, num_required):
"""
O((n choose num_required) * num_required * num_keys)
"""
num_keys = max([max(i) for i in sol]) + 1
num_buns = len(sol)
f_all_keys = lambda b : all_keys(sol, b, num_keys)
#bunnies = list(range(num_required))
#while next_combination(bunnies, num_buns)
for i in range(1 << num_buns):
b = bits(i)
if len(b) < num_required:
if f_all_keys(b):
print('not valid with num_required: {}'.format(num_required))
print('found countercase: {}, {}'.format(len(b), b))
return False
if len(b) == num_required:
if not f_all_keys(b):
return False
print('is valid with num_required: {}'.format(num_required))
return True
def print_sol(ans):
num_keys = max([max(i) for i in ans]) + 1
print(' \t', end='')
for i in range(0, num_keys):
print(str(i) + '\t', end='')
print()
for i, bun in enumerate(ans):
keys = [False] * num_keys
print(str(i) + '\t', end='')
for i in bun:
keys[i] = True
for i in range(num_keys):
if keys[i]:
print('X\t', end='')
else:
print(' \t', end='')
print()
def solution(num_buns, num_required):
if num_required == 0:
return [[] for i in range(num_buns)]
if num_required == 1:
return [[0] for i in range(num_buns)]
if num_buns == num_required:
return [[i] for i in range(num_buns)]
if num_required == 2:
keys = list(range(num_buns))
sol = []
for i in range(num_buns):
j = num_buns - 1 - i
sol.append(keys[:j] + keys[j + 1:])
return sol
left = solution(num_buns - 1, num_required)
right = solution(num_buns - 1, num_required - 1)
num_keys_left = max([max(i) for i in left]) + 1
keys_per_bun = len(left[0]) + len(right[0])
sol = []
sol.append(list(range(keys_per_bun)))
for i in range(num_buns - 1):
left_i = left[i]
right_i = right[i]
right_i = [ j + num_keys_left for j in right_i]
sol.append(left_i + right_i)
return sol
found = False
tried = 0
def brute_aux(buns, i, num_buns, num_required, num_keys):
global found, tried
if found:
return
buns[i] = buns[i - 1].copy()
if i == num_buns - 1:
while next_combination(buns[i], num_keys):
tried += 1
if tried % 100000 == 0:
print('tried: ', tried)
print_sol(buns)
if is_valid(buns, num_required):
found = True
break
return
while next_combination(buns[i], num_keys):
brute_aux(buns, i + 1, num_buns, num_required, num_keys)
if found:
return
def brute_try(num_buns, num_required, num_keys, keys_per_bun):
global found
buns = [[]] * num_buns
buns[0] = list(range(keys_per_bun))
brute_aux(buns, 1, num_buns, num_required, num_keys)
if found:
return buns
return False
#ans = brute_try(num_buns=6, num_required=3, num_keys=15, keys_per_bun=10)
#if found:
#print_sol(ans)
#else:
#print('sol not found')
"""
for i in range(1, 10):
for j in range(i + 1):
sol = solution(i, j)
if i < 9 and j > 0:
print('num_buns: {}, num_required: {}'.format(i, j))
print_sol(sol)
assert(
solution(3, 1) == [[0], [0], [0]]
)
assert(
solution(2, 2) == [[0], [1]]
)
assert(
solution(5, 4) == [
[0, 1, 2, 3],
[0, 4, 5, 6],
[1, 4, 7, 8],
[2, 5, 7, 9],
[3, 6, 8, 9]
]
)
assert(
solution(3, 2) == [
[0, 1],
[0, 2],
[1, 2]
]
)
assert (
solution(6, 3) == [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 10, 11, 12, 13],
[0, 1, 2, 6, 7, 8, 10, 11, 12, 14],
[0, 3, 4, 6, 7, 9, 10, 11, 13, 14],
[1, 3, 5, 6, 8, 9, 10, 12, 13, 14],
[2, 4, 5, 7, 8, 9, 11, 12, 13, 14]
]
)
"""
|
the-stack_106_23004 | import numpy as np
import pandas as pd
from scipy.stats import mode
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
def change_labels(sample, n_sleep_stages=1):
"""
Returns:
        sample - contains only labels 1 (awake) and 0 (sleep) for polysomnography if n_sleep_stages == 1;
        if n_sleep_stages == 4, then labels (5 classes): 0 - REM, 1,2,3 - non-REM sleep stages 3-1, 4 - awake;
        if n_sleep_stages == 2, then labels (3 classes): 0 - REM, 1 - non-REM sleep stage, 2 - awake.
"""
if n_sleep_stages == 4:
# for 5 classes
sample.gt[sample.gt==0] = 8
sample.gt[sample.gt==5] = 0
sample.gt[np.logical_or.reduce((sample.gt==6, sample.gt==7, sample.gt==8))] = 4
elif n_sleep_stages == 2:
# for 3 classes
sample.gt[sample.gt==0] = 8
sample.gt[sample.gt==5] = 0
sample.gt[np.logical_or.reduce((sample.gt==1, sample.gt==2, sample.gt==3))] = 1
sample.gt[np.logical_or.reduce((sample.gt==6, sample.gt==7, sample.gt==8))] = 2
elif n_sleep_stages == 1:
# for 2 classes
sample.gt[sample.gt==0] = 8
sample.gt[np.logical_or.reduce((sample.gt==1, sample.gt==2, sample.gt==3, sample.gt==5))] = 0
sample.gt[np.logical_or.reduce((sample.gt==6, sample.gt==7, sample.gt==8))] = 1
else:
print("Error! Wrong number of classes! Possible values: 1, 2 and 4")
return sample
#-------------------------------------------------------------------------
def decoder(sample):
'''
Returns:
decoded_sample - contains accelerometer and ps data for each sensor record, ndarray of shape (n_records, 4)
'''
sample = np.repeat(sample, sample.d, axis=0)
n_records = sample.shape[0]
decoded_sample = np.zeros((n_records, 4))
decoded_sample[:, 0] = sample.x
decoded_sample[:, 1] = sample.y
decoded_sample[:, 2] = sample.z
decoded_sample[:, 3] = sample.gt
return decoded_sample
#-------------------------------------------------------------------------
def divide_by_windows(decoded_sample, window_len=60, scaler=False):
"""
Parameters:
window_len - length of each window in seconds, int
Returns:
X - accelerometer data, ndarray of shape (n_windows, window_len, 3)
y - polisomnography data, ndarray of shape (n_windows, )
"""
window_len *= 100
n_windows = decoded_sample.shape[0] // window_len
if scaler:
scaler = StandardScaler()
scaler.fit(decoded_sample[:, 0:3])
decoded_sample[:, 0:3] = scaler.transform(decoded_sample[:, 0:3])
X = np.zeros((n_windows, window_len, 3))
y = np.zeros(n_windows)
for i in range(n_windows):
X[i] = decoded_sample[window_len * i: window_len * i + window_len, 0: 3]
y[i], _ = mode(decoded_sample[window_len * i: window_len * i + window_len, 3], axis=0)
return X, y
#-------------------------------------------------------------------------
def get_one_patient_data(data_path, patient, window_len=60, n_sleep_stages=1, divide_by_win=True, scaler=False):
"""
Returns:
X, y - for one patient
"""
    sample = np.load("%s/p%s.npy" % (data_path, patient)).view(np.recarray)
sample = change_labels(sample, n_sleep_stages=n_sleep_stages)
sample = decoder(sample)
if divide_by_win:
X, y = divide_by_windows(sample, window_len, scaler=scaler)
elif scaler:
scaler = StandardScaler()
scaler.fit(sample[:, 0:3])
sample[:, 0:3] = scaler.transform(sample[:, 0:3])
X = sample[:, 0: 3]
y = sample[:, 3]
else:
X = sample[:, 0: 3]
y = sample[:, 3]
return X, y
#-------------------------------------------------------------------------
def get_data_for_model(data_path, patient_list, window_len=60, divide_by_win=True, scaler=False):
"""
Returns:
        X, y - for all patients in the list, ndarray of shape (n_records, n_features, n_channels=3)
"""
X_all_data = []
y_all_data = []
for patient in patient_list:
X, y = get_one_patient_data(data_path, patient, window_len, divide_by_win=divide_by_win, scaler=scaler)
X_all_data.append(X)
y_all_data.append(y)
X_all_data = np.concatenate(X_all_data, axis=0)
y_all_data = np.concatenate(y_all_data, axis=0)
return X_all_data, y_all_data
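#-------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): one possible way to
# build train/test arrays with get_data_for_model. The patient ids below are
# assumptions; the data path matches the default used by save_statistic_features.
def _example_build_dataset():
    train_patients = ["002", "003", "005"]   # hypothetical patient ids
    test_patients = ["007"]                  # hypothetical patient id
    X_train, y_train = get_data_for_model("ICHI14_dataset/data", train_patients,
                                           window_len=60, scaler=True)
    X_test, y_test = get_data_for_model("ICHI14_dataset/data", test_patients,
                                         window_len=60, scaler=True)
    return (X_train, y_train), (X_test, y_test)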
#----------------------------------------------------------------------------
def save_statistic_features(patient_list, sorce_path="ICHI14_dataset/data", save_path="features.csv",
window_len=60, n_sleep_stages=1, scaler=False):
"""
Save .csv file with extracted statistic features for each windows and axis.
List of all features: ["id", "sleep_stage", "gender", "age", "std_x", "std_y", "std_z", "ptp_x", "ptp_y", "ptp_z", "mean_x", "mean_y", "mean_z", "rms_x", "rms_y", "rms_z", "crest_factor_x", "crest_factor_y", "crest_factor_z", "max_val_x", "max_val_y", "max_val_z", "min_val_x", "min_val_y", "min_val_z"]
"""
columns = ["id", "sleep_stage", "gender", "age", "std_x", "std_y", "std_z", "ptp_x", "ptp_y", "ptp_z",
"mean_x", "mean_y", "mean_z", "rms_x", "rms_y", "rms_z", "crest_factor_x", "crest_factor_y",
"crest_factor_z", "max_val_x", "max_val_y", "max_val_z", "min_val_x", "min_val_y", "min_val_z"]
statistics_df = pd.DataFrame(columns=columns)
patient_data = np.load(sorce_path + '/pat_inf.npy')
for patient in patient_list:
X, y = get_one_patient_data(data_path=sorce_path, patient=patient,
window_len=window_len, n_sleep_stages=n_sleep_stages, scaler=scaler)
patient_id = np.array([patient] * y.shape[0]).reshape(y.shape[0], 1)
std = np.std(X, axis=1)
ptp = np.ptp(X, axis=1)
mean = np.mean(X, axis=1)
rms = np.sqrt(np.mean(np.square(X), axis=1))
crest_factor = np.max(X, axis=1) / rms
max_val = np.amax(X, axis=1)
min_val = np.amin(X, axis=1)
gender = 0
age = 0
for i, p in enumerate(patient_data[1:, 0]):
if patient == p.decode('utf-8'):
age = int(patient_data[i+1, 2].decode('utf-8'))
if "m" == patient_data[i+1, 1].decode('utf-8'):
gender = 1
age = age * np.ones((y.shape[0], 1))
gender = gender * np.ones((y.shape[0], 1))
y = y.reshape(y.shape[0], 1)
X_new = np.concatenate((patient_id, y, gender, age, std, ptp, mean, rms, crest_factor, max_val, min_val), axis=1)
X_new_df = pd.DataFrame(X_new, columns=columns)
statistics_df = statistics_df.append(X_new_df, ignore_index=True)
statistics_df.to_csv(save_path, sep=',', header=True, index=None)
#-----------------------------------------------------------------
def load_statistic_features(patient_list, data_path="statistic_features.csv",
statistics_list=["std_x", "std_y", "std_z"]):
statistics_df = pd.read_csv(data_path)
indexes = np.logical_or.reduce([statistics_df.id == i for i in patient_list])
X = statistics_df.loc[indexes, statistics_list]
y = statistics_df.loc[indexes, "sleep_stage"]
X = np.array(X)
y = np.array(y)
return X, y
#--------------------------------------------------------------
def load_stat_features_others_windows(patient_list, data_path="statistic_features.csv",
statistics_list=["std_x", "std_y", "std_z"], n_others_windows=40):
"""
Returns:
        X_all_data - ndarray of shape (n_records, n_new_features); the feature vector consists of the features of the current window and several others (n_others_windows // 2 before the current window and n_others_windows // 2 after it)
y_all_data - ndarray of shape(n_records,)
"""
statistics_df = pd.read_csv(data_path)
X_all_data = []
y_all_data = []
for patient in patient_list:
X = np.array(statistics_df.loc[statistics_df.id == patient, statistics_list])
y = np.array(statistics_df.loc[statistics_df.id == patient, "sleep_stage"])
X_new = np.zeros((X.shape[0]-n_others_windows, X.shape[1]*(n_others_windows+1)))
for i in range(0, X.shape[0]-n_others_windows):
X_buff = X[i]
for j in range(1, n_others_windows+1):
X_buff = np.concatenate((X_buff, X[i+j]))
X_new[i] = X_buff
y = y[(n_others_windows//2): -(n_others_windows//2)]
#y_test_new = y_test[previous:]
X_all_data.append(X_new)
y_all_data.append(y)
X_all_data = np.concatenate(X_all_data, axis=0)
y_all_data = np.concatenate(y_all_data, axis=0)
return X_all_data, y_all_data
#--------------------------------------------------------------------
def load_stat_features_others_windows_rnn(patient_list, data_path="statistic_features_60s.csv",
statistics_list=["std_x", "std_y", "std_z"], n_others_windows=40):
"""
Returns:
        X_all_data - ndarray of shape (n_records, n_others_windows + 1, n_statistic_features); the feature vector consists of the features of the current window and several others (n_others_windows // 2 before the current window and n_others_windows // 2 after it)
y_all_data - ndarray of shape(n_records,)
"""
statistics_df = pd.read_csv(data_path)
X_all_data = []
y_all_data = []
for patient in patient_list:
X = np.array(statistics_df.loc[statistics_df.id == patient, statistics_list])
y = np.array(statistics_df.loc[statistics_df.id == patient, "sleep_stage"])
X_new = np.zeros((X.shape[0]-n_others_windows, (n_others_windows + 1), len(statistics_list)))
for i in range(0, X.shape[0]-n_others_windows):
X_buff = np.zeros((n_others_windows + 1, len(statistics_list)))
for j in range(0, n_others_windows + 1):
X_buff[j] = X[i+j]
X_new[i] = X_buff
y = y[(n_others_windows//2): -(n_others_windows//2)]
#y_test_new = y_test[previous:]
X_all_data.append(X_new)
y_all_data.append(y)
X_all_data = np.concatenate(X_all_data, axis=0)
y_all_data = np.concatenate(y_all_data, axis=0)
return X_all_data, y_all_data |
the-stack_106_23005 | import torch
import numpy as np
class NME:
def __init__(self, nme_left_index, nme_right_index):
self.nme_left_index = nme_left_index
self.nme_right_index = nme_right_index
def __repr__(self):
return "NME()"
def test(self, label_pd, label_gt):
sum_nme = 0
total_cnt = 0
label_pd = label_pd.data.cpu().numpy()
label_gt = label_gt.data.cpu().numpy()
for i in range(label_gt.shape[0]):
landmarks_gt = label_gt[i]
landmarks_pv = label_pd[i]
pupil_distance = np.linalg.norm(landmarks_gt[self.nme_left_index] - landmarks_gt[self.nme_right_index])
landmarks_delta = landmarks_pv - landmarks_gt
nme = (np.linalg.norm(landmarks_delta, axis=1) / pupil_distance).mean()
sum_nme += nme
total_cnt += 1
return sum_nme, total_cnt
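# Illustrative sketch (not part of the original module): shows the expected input
# shapes for NME.test. The 68-landmark layout and the eye-corner indices (36, 45)
# are assumptions borrowed from the common 68-point annotation convention.
def _example_nme():
    metric = NME(nme_left_index=36, nme_right_index=45)
    label_gt = torch.rand(8, 68, 2)                        # ground-truth landmarks
    label_pd = label_gt + 0.01 * torch.randn(8, 68, 2)     # perturbed predictions
    sum_nme, total_cnt = metric.test(label_pd, label_gt)
    return sum_nme / total_cnt                             # mean NME over the batch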
|
the-stack_106_23006 | import pendulum
from dagster_graphql.test.utils import (
execute_dagster_graphql,
infer_repository_selector,
infer_sensor_selector,
main_repo_location_name,
main_repo_name,
)
from dagster.core.definitions.run_request import InstigatorType
from dagster.core.scheduler.instigation import InstigatorState, InstigatorStatus
from dagster.core.test_utils import create_test_daemon_workspace
from dagster.daemon import get_default_daemon_logger
from dagster.daemon.sensor import execute_sensor_iteration
from dagster.utils import Counter, traced_counter
from .graphql_context_test_suite import (
ExecutingGraphQLContextTestMatrix,
NonLaunchableGraphQLContextTestMatrix,
)
GET_SENSORS_QUERY = """
query SensorsQuery($repositorySelector: RepositorySelector!) {
sensorsOrError(repositorySelector: $repositorySelector) {
__typename
... on PythonError {
message
stack
}
... on Sensors {
results {
name
targets {
pipelineName
solidSelection
mode
}
description
minIntervalSeconds
sensorState {
status
runs {
id
runId
}
runsCount
ticks {
id
status
timestamp
runIds
error {
message
stack
}
skipReason
}
}
}
}
}
}
"""
GET_SENSOR_QUERY = """
query SensorQuery($sensorSelector: SensorSelector!) {
sensorOrError(sensorSelector: $sensorSelector) {
__typename
... on PythonError {
message
stack
}
... on Sensor {
name
targets {
pipelineName
solidSelection
mode
}
minIntervalSeconds
nextTick {
timestamp
}
sensorState {
status
runs {
id
runId
}
runsCount
ticks {
id
status
timestamp
runIds
error {
message
stack
}
}
}
}
}
}
"""
GET_SENSOR_STATUS_QUERY = """
query SensorStateQuery($sensorSelector: SensorSelector!) {
sensorOrError(sensorSelector: $sensorSelector) {
__typename
... on Sensor {
sensorState {
id
status
}
}
}
}
"""
GET_SENSOR_TICK_RANGE_QUERY = """
query SensorQuery($sensorSelector: SensorSelector!, $dayRange: Int, $dayOffset: Int) {
sensorOrError(sensorSelector: $sensorSelector) {
__typename
... on PythonError {
message
stack
}
... on Sensor {
id
sensorState {
id
ticks(dayRange: $dayRange, dayOffset: $dayOffset) {
id
timestamp
}
}
}
}
}
"""
START_SENSORS_QUERY = """
mutation($sensorSelector: SensorSelector!) {
startSensor(sensorSelector: $sensorSelector) {
... on PythonError {
message
className
stack
}
... on Sensor {
id
jobOriginId
sensorState {
status
}
}
}
}
"""
STOP_SENSORS_QUERY = """
mutation($jobOriginId: String!) {
stopSensor(jobOriginId: $jobOriginId) {
... on PythonError {
message
className
stack
}
... on StopSensorMutationResult {
instigationState {
status
}
}
}
}
"""
REPOSITORY_SENSORS_QUERY = """
query RepositorySensorsQuery($repositorySelector: RepositorySelector!) {
repositoryOrError(repositorySelector: $repositorySelector) {
... on Repository {
id
sensors {
id
name
sensorState {
id
runs(limit: 1) {
id
runId
}
}
}
}
}
}
"""
class TestSensors(NonLaunchableGraphQLContextTestMatrix):
def test_get_sensors(self, graphql_context, snapshot):
selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
GET_SENSORS_QUERY,
variables={"repositorySelector": selector},
)
assert result.data
assert result.data["sensorsOrError"]
assert result.data["sensorsOrError"]["__typename"] == "Sensors"
results = result.data["sensorsOrError"]["results"]
snapshot.assert_match(results)
def test_get_sensor(self, graphql_context, snapshot):
sensor_selector = infer_sensor_selector(graphql_context, "always_no_config_sensor")
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["sensorOrError"]
assert result.data["sensorOrError"]["__typename"] == "Sensor"
sensor = result.data["sensorOrError"]
snapshot.assert_match(sensor)
class TestSensorMutations(ExecutingGraphQLContextTestMatrix):
def test_start_sensor(self, graphql_context):
sensor_selector = infer_sensor_selector(graphql_context, "always_no_config_sensor")
result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["startSensor"]["sensorState"]["status"] == InstigatorStatus.RUNNING.value
def test_stop_sensor(self, graphql_context):
sensor_selector = infer_sensor_selector(graphql_context, "always_no_config_sensor")
# start sensor
start_result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert (
start_result.data["startSensor"]["sensorState"]["status"]
== InstigatorStatus.RUNNING.value
)
job_origin_id = start_result.data["startSensor"]["jobOriginId"]
result = execute_dagster_graphql(
graphql_context,
STOP_SENSORS_QUERY,
variables={"jobOriginId": job_origin_id},
)
assert result.data
assert (
result.data["stopSensor"]["instigationState"]["status"]
== InstigatorStatus.STOPPED.value
)
def test_start_sensor_with_default_status(self, graphql_context):
sensor_selector = infer_sensor_selector(graphql_context, "running_in_code_sensor")
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_STATUS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data["sensorOrError"]["sensorState"]["status"] == "RUNNING"
sensor_origin_id = result.data["sensorOrError"]["sensorState"]["id"]
start_result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert (
"You have attempted to start sensor running_in_code_sensor, but it is already running"
in start_result.data["startSensor"]["message"]
)
stop_result = execute_dagster_graphql(
graphql_context,
STOP_SENSORS_QUERY,
variables={"jobOriginId": sensor_origin_id},
)
assert stop_result.data["stopSensor"]["instigationState"]["status"] == "STOPPED"
# Now can be restarted
start_result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert start_result.data["startSensor"]["sensorState"]["status"] == "RUNNING"
def test_sensor_next_ticks(graphql_context):
external_repository = graphql_context.get_repository_location(
main_repo_location_name()
).get_repository(main_repo_name())
sensor_name = "always_no_config_sensor"
external_sensor = external_repository.get_external_sensor(sensor_name)
sensor_selector = infer_sensor_selector(graphql_context, sensor_name)
result = execute_dagster_graphql(
graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
)
# test default sensor off
assert result.data
assert result.data["sensorOrError"]["__typename"] == "Sensor"
next_tick = result.data["sensorOrError"]["nextTick"]
assert not next_tick
# test default sensor with no tick
graphql_context.instance.add_instigator_state(
InstigatorState(
external_sensor.get_external_origin(), InstigatorType.SENSOR, InstigatorStatus.RUNNING
)
)
result = execute_dagster_graphql(
graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
)
assert result.data
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0
assert result.data["sensorOrError"]["__typename"] == "Sensor"
next_tick = result.data["sensorOrError"]["nextTick"]
assert not next_tick
# test default sensor with last tick
_create_tick(graphql_context)
result = execute_dagster_graphql(
graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
assert result.data
assert result.data["sensorOrError"]["__typename"] == "Sensor"
next_tick = result.data["sensorOrError"]["nextTick"]
assert next_tick
def _create_tick(graphql_context):
with create_test_daemon_workspace(
graphql_context.process_context.workspace_load_target
) as workspace:
list(
execute_sensor_iteration(
graphql_context.instance, get_default_daemon_logger("SensorDaemon"), workspace
)
)
def test_sensor_tick_range(graphql_context):
external_repository = graphql_context.get_repository_location(
main_repo_location_name()
).get_repository(main_repo_name())
sensor_name = "always_no_config_sensor"
external_sensor = external_repository.get_external_sensor(sensor_name)
sensor_selector = infer_sensor_selector(graphql_context, sensor_name)
# test with no job state
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": None, "dayOffset": None},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0
# turn the sensor on
graphql_context.instance.add_instigator_state(
InstigatorState(
external_sensor.get_external_origin(), InstigatorType.SENSOR, InstigatorStatus.RUNNING
)
)
now = pendulum.now("US/Central")
one = now.subtract(days=2).subtract(hours=1)
with pendulum.test(one):
_create_tick(graphql_context)
two = now.subtract(days=1).subtract(hours=1)
with pendulum.test(two):
_create_tick(graphql_context)
three = now.subtract(hours=1)
with pendulum.test(three):
_create_tick(graphql_context)
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": None, "dayOffset": None},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 3
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": 1, "dayOffset": None},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["timestamp"] == three.timestamp()
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={"sensorSelector": sensor_selector, "dayRange": 1, "dayOffset": 1},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["timestamp"] == two.timestamp()
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_TICK_RANGE_QUERY,
variables={
"sensorSelector": sensor_selector,
"dayRange": 2,
"dayOffset": None,
},
)
assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 2
def test_repository_batching(graphql_context):
traced_counter.set(Counter())
selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
REPOSITORY_SENSORS_QUERY,
variables={"repositorySelector": selector},
)
assert result.data
assert "repositoryOrError" in result.data
assert "sensors" in result.data["repositoryOrError"]
counter = traced_counter.get()
counts = counter.counts()
assert counts
assert len(counts) == 2
# We should have a single batch call to fetch run records (to fetch sensor runs) and a single
# batch call to fetch instigator state, instead of separate calls for each sensor (~5 distinct
# sensors in the repo)
# 1) `get_run_records` is fetched to instantiate GrapheneRun
# 2) `all_instigator_state` is fetched to instantiate GrapheneSensor
assert counts.get("DagsterInstance.get_run_records") == 1
assert counts.get("DagsterInstance.all_instigator_state") == 1
|
the-stack_106_23007 | import inspect
from trac.core import *
from trac.web import IRequestHandler
from trac.web.chrome import ITemplateProvider
from trac.ticket import TicketSystem
import json
import pkg_resources
def custom_json(obj):
return json.dumps(obj, cls=MagicEncoder)
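# Illustrative sketch (not part of the original plugin): demonstrates what
# custom_json produces for a plain Python object. The Field class here is an
# assumption, standing in for the ticket field objects that process_request
# passes to the template.
def _example_custom_json():
    class Field(object):
        def __init__(self, name, label):
            self.name = name
            self.label = label
    return custom_json([Field('summary', 'Summary'), Field('owner', 'Owner')])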
class TracMobilePlugin(Component):
implements(ITemplateProvider, IRequestHandler)
# IRequestHandler
def match_request(self, req):
result = False
try:
from mobiledetect import MobileDetect
if req.path_info == '/' and MobileDetect(useragent=req.get_header('user-agent')).is_mobile():
result = True
except ImportError:
self.log.info('For enabling mobile detection, you need to install pymobiledetect package.')
if req.path_info == '/mobile':
result = True
return result
def process_request(self, req):
if req.path_info == '/':
try:
from mobiledetect import MobileDetect
if MobileDetect(useragent=req.get_header('user-agent')).is_mobile():
req.redirect('/mobile')
except ImportError:
pass
else:
ts = TicketSystem(self.env)
fields = ts.get_ticket_fields()
return 'test.html', {"JSON": custom_json, "fields": fields}, None
#ITemplateProvider methods
def get_htdocs_dirs(self):
return [('tracmobile', pkg_resources.resource_filename('tracmobile', 'htdocs'))]
def get_templates_dirs(self):
return [pkg_resources.resource_filename('tracmobile', 'htdocs')]
class MagicEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, "to_json"):
return self.default(obj.to_json())
elif hasattr(obj, "__dict__"):
d = dict(
(key, value)
for key, value in inspect.getmembers(obj)
if not key.startswith("__")
and not inspect.isabstract(value)
and not inspect.isbuiltin(value)
and not inspect.isfunction(value)
and not inspect.isgenerator(value)
and not inspect.isgeneratorfunction(value)
and not inspect.ismethod(value)
and not inspect.ismethoddescriptor(value)
and not inspect.isroutine(value)
)
return self.default(d)
return obj |
the-stack_106_23009 | import time
import warnings
from typing import Optional, Tuple
import numpy as np
from stable_baselines3.common.vec_env.base_vec_env import (
VecEnv,
VecEnvObs,
VecEnvStepReturn,
VecEnvWrapper,
)
class VecMonitor(VecEnvWrapper):
"""
A vectorized monitor wrapper for *vectorized* Gym environments,
it is used to record the episode reward, length, time and other data.
Some environments like `openai/procgen <https://github.com/openai/procgen>`_
or `gym3 <https://github.com/openai/gym3>`_ directly initialize the
vectorized environments, without giving us a chance to use the ``Monitor``
wrapper. So this class simply does the job of the ``Monitor`` wrapper on
a vectorized level.
:param venv: The vectorized environment
:param filename: the location to save a log file, can be None for no log
:param info_keywords: extra information to log, from the information return of env.step()
"""
def __init__(
self,
venv: VecEnv,
filename: Optional[str] = None,
info_keywords: Tuple[str, ...] = (),
):
# Avoid circular import
from stable_baselines3.common.monitor import Monitor, ResultsWriter
# This check is not valid for special `VecEnv`
# like the ones created by Procgen, that does follow completely
# the `VecEnv` interface
try:
is_wrapped_with_monitor = venv.env_is_wrapped(Monitor)[0]
except AttributeError:
is_wrapped_with_monitor = False
if is_wrapped_with_monitor:
warnings.warn(
"The environment is already wrapped with a `Monitor` wrapper"
"but you are wrapping it with a `VecMonitor` wrapper, the `Monitor` statistics will be"
"overwritten by the `VecMonitor` ones.",
UserWarning,
)
VecEnvWrapper.__init__(self, venv)
self.episode_returns = None
self.episode_lengths = None
self.episode_count = 0
self.t_start = time.time()
env_id = None
if hasattr(venv, "spec") and venv.spec is not None:
env_id = venv.spec.id
if filename:
self.results_writer = ResultsWriter(
filename,
header={"t_start": self.t_start, "env_id": env_id},
extra_keys=info_keywords,
)
else:
self.results_writer = None
self.info_keywords = info_keywords
def reset(self) -> VecEnvObs:
obs = self.venv.reset()
self.episode_returns = np.zeros(self.num_envs, dtype=np.float32)
self.episode_lengths = np.zeros(self.num_envs, dtype=np.int32)
return obs
def step_wait(self) -> VecEnvStepReturn:
obs, rewards, dones, infos = self.venv.step_wait()
self.episode_returns += rewards
self.episode_lengths += 1
new_infos = list(infos[:])
for i in range(len(dones)):
if dones[i]:
info = infos[i].copy()
episode_return = self.episode_returns[i]
episode_length = self.episode_lengths[i]
episode_info = {
"r": episode_return,
"l": episode_length,
"t": round(time.time() - self.t_start, 6),
}
info["episode"] = episode_info
self.episode_count += 1
self.episode_returns[i] = 0
self.episode_lengths[i] = 0
if self.results_writer:
self.results_writer.write_row(episode_info)
new_infos[i] = info
return obs, rewards, dones, new_infos
def close(self) -> None:
if self.results_writer:
self.results_writer.close()
return self.venv.close()
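# Illustrative usage sketch (not part of the original module). It assumes the
# classic gym package and the CartPole env are available; the env id, number of
# envs and log filename are arbitrary choices.
def _example_vec_monitor():
    import gym
    from stable_baselines3.common.vec_env import DummyVecEnv
    venv = DummyVecEnv([lambda: gym.make("CartPole-v1") for _ in range(4)])
    venv = VecMonitor(venv, filename="vec_monitor_log")  # logs episode stats to CSV
    venv.reset()
    for _ in range(100):
        actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
        obs, rewards, dones, infos = venv.step(actions)
    venv.close()
    return infos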
|
the-stack_106_23012 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from cryptography import fernet
from oslo_config import cfg
from octavia.certificates.common import local
from octavia.common import utils
from octavia.controller.worker.v1.tasks import cert_task
import octavia.tests.unit.base as base
CONF = cfg.CONF
class TestCertTasks(base.TestCase):
@mock.patch('stevedore.driver.DriverManager.driver')
def test_execute(self, mock_driver):
key = utils.get_compatible_server_certs_key_passphrase()
fer = fernet.Fernet(key)
dummy_cert = local.LocalCert(
utils.get_compatible_value('test_cert'),
utils.get_compatible_value('test_key'))
mock_driver.generate_cert_key_pair.side_effect = [dummy_cert]
c = cert_task.GenerateServerPEMTask()
pem = c.execute('123')
self.assertEqual(
fer.decrypt(pem),
dummy_cert.get_certificate() +
dummy_cert.get_private_key()
)
mock_driver.generate_cert_key_pair.assert_called_once_with(
cn='123', validity=CONF.certificates.cert_validity_time)
|
the-stack_106_23014 | # Copyright (c) 2007-2013 Cyrus Daboo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar.icalendar.calendar import Calendar
import cStringIO as StringIO
import unittest
class TestCalendar(unittest.TestCase):
def testDuplicateWithRecurrenceChange(self):
data = (
"""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//mulberrymail.com//Mulberry v4.0//EN
BEGIN:VEVENT
UID:C3184A66-1ED0-11D9-A5E0-000A958A3252
DTSTART;VALUE=DATE:20020101
DTEND;VALUE=DATE:20020102
DTSTAMP:20020101T000000Z
RRULE:FREQ=YEARLY
SUMMARY:New Year's Day
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n"),
"""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//mulberrymail.com//Mulberry v4.0//EN
BEGIN:VEVENT
UID:C3184A66-1ED0-11D9-A5E0-000A958A3252
DTSTART;VALUE=DATE:20020101
DTEND;VALUE=DATE:20020102
DTSTAMP:20020101T000000Z
RRULE:FREQ=YEARLY;COUNT=400
SUMMARY:New Year's Day
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n"),
)
cal1 = Calendar()
cal1.parse(StringIO.StringIO(data[0]))
cal2 = cal1.duplicate()
vevent = cal2.getComponents()[0]
rrules = vevent.getRecurrenceSet()
for rrule in rrules.getRules():
rrule.setUseCount(True)
rrule.setCount(400)
rrules.changed()
self.assertEqual(data[0], str(cal1))
self.assertEqual(data[1], str(cal2))
|
the-stack_106_23016 | # Copyright [2019] [Christopher Syben, Markus Michen]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def circular_trajectory_2d(geometry):
"""
Generates the central ray vectors defining a circular trajectory for use with the 2d projection layers.
Args:
geometry: 2d Geometry class including angular_range and number_of_projections
Returns:
Central ray vectors as np.array.
"""
rays = np.zeros([geometry.number_of_projections, 2])
angular_increment = geometry.angular_range / geometry.number_of_projections
for i in range(geometry.number_of_projections):
rays[i] = [np.cos(i * angular_increment), np.sin(i * angular_increment)]
return rays
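# Illustrative sketch (not part of the original module): a minimal stand-in
# geometry object showing which attributes circular_trajectory_2d reads. The
# numeric values are arbitrary assumptions.
def _example_2d_trajectory():
    from types import SimpleNamespace
    geometry = SimpleNamespace(number_of_projections=360,
                               angular_range=2 * np.pi)
    rays = circular_trajectory_2d(geometry)
    assert rays.shape == (360, 2)   # one unit central ray vector per projection
    return rays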
def circular_trajectory_3d(geometry):
"""
Generates the projection matrices defining a circular trajectory around the z-axis
for use with the 3d projection layers.
Adapted from CONRAD Source code https://github.com/akmaier/CONRAD.
Args:
geometry: 3d Geometry class including angular_range, number_of_projections, source_detector_distance,
detector_shape, detector_spacing, volume_origin, volume_shape and volume_spacing.
Returns:
Projection matrices with shape (num_projections, 3, 4) as np.array.
"""
# init empty
projection_matrices = np.zeros((geometry.number_of_projections, 3, 4))
# axes for later use
x_axis = np.array([1.0, 0.0, 0.0])
y_axis = np.array([0.0, 1.0, 0.0])
z_axis = np.array([0.0, 0.0, 1.0])
# defining u and v directions by: main coord axes
u_dir = y_axis
v_dir = -x_axis
# configure intrinsic camera parameters
intrinsic_params_mat = np.eye(3, 3)
for i in range(2):
intrinsic_params_mat[i, i] = geometry.source_detector_distance / geometry.detector_spacing[1 - i]
# calc and set detector origin
intrinsic_params_mat[0:2, 2] = (geometry.detector_shape * 0.5)[::-1]
    # configure extrinsic params and create projection_matrices
current_angle = 0.0
angular_increment = geometry.angular_range / geometry.number_of_projections
for p in range(geometry.number_of_projections):
# calculate extrinsic params
extrinsic_params_mat = np.eye(4, 4)
# rotation of axes from world system to plane of rotation system
R_to_plane = np.eye(4, 4)
R_to_plane[0:3, 0:3] = np.array([z_axis, np.cross(z_axis, x_axis), -x_axis])
# rotation for u and v direction
axis_align_R = np.eye(4, 4)
axis_align_R[0:3, 0] = u_dir
axis_align_R[0:3, 1] = v_dir
axis_align_R[0:3, 2] = np.cross(u_dir, v_dir)
axis_align_R = axis_align_R.T
# rotation about x axis
R_x_axis = np.eye(4, 4)
R_x_axis[0:3, 0:3] = np.array([1, 0, 0,
0, np.cos(-current_angle), -np.sin(-current_angle),
0, np.sin(-current_angle), np.cos(-current_angle)]).reshape((3, 3))
# translation of camera
translation = np.eye(4, 4)
translation[0:4, 3] = np.array([0, 0, geometry.source_isocenter_distance, 1])
# combine the above into 4x4 extrinsic params matrix
extrinsic_params_mat = np.dot(np.dot(np.dot(translation, axis_align_R), R_x_axis), R_to_plane)
extrinsic_params_mat = extrinsic_params_mat / extrinsic_params_mat[3, 3]
# calculate projection matrix
projection_matrices[p][0:3, 0:3] = np.dot(intrinsic_params_mat, extrinsic_params_mat[0:3, 0:3])
projection_matrices[p][0:3, 3] = np.dot(intrinsic_params_mat, extrinsic_params_mat[0:3, 3])
# next angle
current_angle += angular_increment
return projection_matrices
|
the-stack_106_23020 | # Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: Lei Cai, Space Physics and Astronomy, University of Oulu
__author__ = "Lei Cai"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "[email protected]"
__docformat__ = "reStructureText"
from geospacelab.cs._cs_base import SphericalCoordinates, CartesianCoordinates, SpaceCartesianCS, SpaceSphericalCS
from geospacelab.cs._geo import *
from geospacelab.cs._aacgm import AACGM
from geospacelab.cs._apex import APEX
from geopack import geopack
def set_cs(name=None, **kwargs):
kind = kwargs.pop('kind', None)
if name.upper() == 'GEO':
cls = GEO
elif name.upper() == 'AACGM':
cls = AACGM
elif name.upper() == 'APEX':
cls = APEX
elif name.upper() == 'GEOD':
cls = GEOD
elif name.upper() == 'GEOC':
if kind == 'sph':
cls = GEOCSpherical
else:
cls = GEOCCartesian
elif name.upper() == 'LENU':
if kind == 'sph':
cls = LENUSpherical
else:
cls = LENUCartesian
else:
raise NotImplementedError
return cls(**kwargs) |
the-stack_106_23024 | # -*- coding: utf-8 -*-
"""
General description:
---------------------
This script shows how use the custom component `solph.custom.Link` to build
a simple transshipment model.
Installation requirements:
---------------------------
This example requires the latest version of oemof. Install by:
pip install oemof
pip install matplotlib
12.12.2017 - [email protected]
"""
import pandas as pd
import networkx as nx
from matplotlib import pyplot as plt
# solph imports
from oemof.solph import (EnergySystem, Model, Bus, Flow, Source, Sink,
custom, Investment)
from oemof.outputlib import processing, views
from oemof.graph import create_nx_graph
def draw_graph(grph, edge_labels=True, node_color='#AFAFAF',
edge_color='#CFCFCF', plot=True, node_size=2000,
with_labels=True, arrows=True, layout='neato'):
"""
Draw a graph. This function will be removed in future versions.
Parameters
----------
grph : networkxGraph
A graph to draw.
edge_labels : boolean
Use nominal values of flow as edge label
node_color : dict or string
        Hex color code or matplotlib color for each node. If string, all
colors are the same.
edge_color : string
        Hex color code or matplotlib color for edge color.
plot : boolean
Show matplotlib plot.
node_size : integer
Size of nodes.
with_labels : boolean
Draw node labels.
arrows : boolean
Draw arrows on directed edges. Works only if an optimization_model has
been passed.
layout : string
networkx graph layout, one of: neato, dot, twopi, circo, fdp, sfdp.
"""
if type(node_color) is dict:
node_color = [node_color.get(g, '#AFAFAF') for g in grph.nodes()]
# set drawing options
options = {
'prog': 'dot',
'with_labels': with_labels,
'node_color': node_color,
'edge_color': edge_color,
'node_size': node_size,
'arrows': arrows
}
# draw graph
pos = nx.drawing.nx_agraph.graphviz_layout(grph, prog=layout)
nx.draw(grph, pos=pos, **options)
# add edge labels for all edges
if edge_labels is True and plt:
labels = nx.get_edge_attributes(grph, 'weight')
nx.draw_networkx_edge_labels(grph, pos=pos, edge_labels=labels)
# show output
if plot is True:
plt.show()
datetimeindex = pd.date_range('1/1/2017', periods=2, freq='H')
es = EnergySystem(timeindex=datetimeindex)
b_0 = Bus(label='b_0')
b_1 = Bus(label='b_1')
es.add(b_0, b_1)
es.add(custom.Link(label="line_0",
inputs={
b_0: Flow(), b_1: Flow()},
outputs={
b_1: Flow(investment=Investment()),
b_0: Flow(investment=Investment())},
conversion_factors={
(b_0, b_1): 0.95, (b_1, b_0): 0.9}))
es.add(Source(label="gen_0", outputs={
b_0: Flow(nominal_value=100,
variable_costs=50)}))
es.add(Source(label="gen_1", outputs={
b_1: Flow(nominal_value=100,
variable_costs=50)}))
es.add(Sink(label="load_0", inputs={
b_0: Flow(nominal_value=150,
actual_value=[0, 1],
fixed=True)}))
es.add(Sink(label="load_1", inputs={
b_1: Flow(nominal_value=150,
actual_value=[1, 0],
fixed=True)}))
m = Model(energysystem=es)
# m.write('transshipment.lp', io_options={'symbolic_solver_labels': True})
m.solve(solver='cbc',
solve_kwargs={'tee': True, 'keepfiles': False})
m.results()
graph = create_nx_graph(es, m)
draw_graph(graph, plot=True, layout='neato', node_size=3000,
node_color={
'b_0': '#cd3333',
'b_1': '#7EC0EE',
'b_2': '#eeac7e'})
results = processing.results(m)
print(views.node(results, 'gen_0'))
print(views.node(results, 'gen_1'))
views.node(results, 'line_0')['sequences'].plot(kind='bar')
# look at constraints of Links in the pyomo model LinkBlock
m.LinkBlock.pprint()
|
the-stack_106_23026 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Stack and ParallelStack Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class StackOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.bool, np.float32, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Convert [data[0], data[1], ...] separately to tensorflow
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
# Stack back into a single tensorflow tensor
c = array_ops.stack(xs)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testSimpleParallelCPU(self):
np.random.seed(7)
with self.session(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testSimpleParallelGPU(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testConst(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.bool, np.float32, np.int16, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Stack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c.eval(), data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl.eval(), data)
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
@test_util.run_deprecated_v1
def testConstParallelCPU(self):
np.random.seed(7)
with self.session(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl.eval(), data)
data = np.random.randn(*shape).astype(np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testConstParallelGPU(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl.eval(), data)
data = np.random.randn(*shape).astype(np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c.eval(), data)
@test_util.run_deprecated_v1
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
with self.cached_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
self.assertLess(err, 1e-6)
@test_util.run_deprecated_v1
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.cached_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs, axis=1)
err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape)
self.assertLess(err, 1e-6)
@test_util.run_deprecated_v1
def testZeroSizeCPU(self):
# Verify that stack doesn't crash for zero size inputs
with self.session(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
x = np.zeros((2,) + shape).astype(np.int32)
p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
p = array_ops.parallel_stack(list(x)).eval()
self.assertAllEqual(p, x)
@test_util.run_deprecated_v1
def testZeroSizeGPU(self):
# Verify that stack doesn't crash for zero size inputs
with self.session(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
x = np.zeros((2,) + shape).astype(np.int32)
p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
p = array_ops.parallel_stack(list(x)).eval()
self.assertAllEqual(p, x)
@test_util.run_deprecated_v1
def testAxis0DefaultCPU(self):
with self.session(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = array_ops.stack(t).eval()
parallel_stacked = array_ops.parallel_stack(t).eval()
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
@test_util.run_deprecated_v1
def testAxis0DefaultGPU(self):
with self.session(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = array_ops.stack(t).eval()
parallel_stacked = array_ops.parallel_stack(t).eval()
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
expected = np.random.random(np.random.permutation(i) + 1)
# For all the possible axis to split it, including negative indices.
for j in range(-i, i):
test_arrays = np_split_squeeze(expected, j)
with self.cached_session(use_gpu=True):
actual_pack = array_ops.stack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = self.evaluate(actual_pack)
actual_stack = array_ops.stack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = self.evaluate(actual_stack)
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegexp(ValueError, r"axis = 2 not in \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegexp(ValueError, r"axis = -3 not in \[-2, 2\)"):
array_ops.stack(t, axis=-3)
class AutomaticStackingTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
with self.session(use_gpu=True):
self.assertAllEqual(
[1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor(
[[0, 0, 0], [0, constant_op.constant(1), 0],
[0, 0, 0]]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor(
[[0, 0, 0], constant_op.constant([0, 1, 0]),
[0, 0, 0]]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]).eval())
def testWithNDArray(self):
with self.session(use_gpu=True):
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
self.evaluate(result))
@test_util.run_deprecated_v1
def testVariable(self):
with self.session(use_gpu=True):
v = variables.Variable(17)
result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
v.initializer.run()
self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]],
self.evaluate(result))
v.assign(38).op.run()
self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]],
self.evaluate(result))
def testDtype(self):
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
t_3 = ops.convert_to_tensor(
[[0., 0., 0.],
constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_3.dtype)
t_4 = ops.convert_to_tensor(
[constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_4.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
def testDtypeConversionWhenTensorDtypeMismatch(self):
t_0 = ops.convert_to_tensor([0., 0., 0.])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([0, 0, 0])
self.assertEqual(dtypes.int32, t_1.dtype)
t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
@test_util.run_deprecated_v1
def testPlaceholder(self):
with self.session(use_gpu=True):
# Test using placeholder with a defined shape.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
self.assertAllEqual(
[[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 1}))
self.assertAllEqual(
[[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 2}))
# Test using placeholder with an undefined shape.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
self.assertAllEqual(
[[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 1}))
self.assertAllEqual(
[[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 2}))
@test_util.run_deprecated_v1
def testShapeErrors(self):
# Static shape error.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[1])
with self.assertRaises(ValueError):
ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
# Dynamic shape error.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
with self.session(use_gpu=True):
with self.assertRaises(errors_impl.InvalidArgumentError):
result_1.eval(feed_dict={ph_1: [1]})
if __name__ == "__main__":
test.main()
|
the-stack_106_23027 | #!/usr/bin/python
import os
import shutil
import subprocess
import pdb
############################################################
# Build script
# ------------
# This script will build our project into build/ directory.
#
#
# Prerequisite
# ------------
# - brew install cmake
#
############################################################
# Build folder
BUILD_FOLDER = "build"
# Our cmake call
CMAKE_CALL = "cmake -G Xcode ../."
# Xcode proj
XCODE_PROJ = "Playground.xcodeproj"
# Build
def build():
    # Remove any previous build output before recreating the folder
    if os.path.exists(BUILD_FOLDER):
        shutil.rmtree(BUILD_FOLDER)
os.mkdir(BUILD_FOLDER)
os.chdir(BUILD_FOLDER)
subprocess.call(CMAKE_CALL, shell=True)
subprocess.call("open " + XCODE_PROJ, shell=True)
# Main
def main():
build()
if __name__ == "__main__":
main()
|
the-stack_106_23028 | import re
from biothings.utils.web.es_dsl import AsyncSearch
from biothings.web.handlers.exceptions import BadRequest
from biothings.web.pipeline import ESQueryBuilder
class MyGenesetQueryBuilder(ESQueryBuilder):
def default_string_query(self, q, options):
search = super().default_string_query(q, options)
search = self._extra_query_options(search, options)
return search
#def default_match_query(self, q, options):
# search = super().default_match_query(q, options)
#search = self._extra_query_options(search, options)
# return search
def build_string_query(self, q, options):
search = super().build_string_query(q, options)
search = self._extra_query_options(search, options)
return search
#def build_match_query(self, q, options):
# search = super().build_match_query(q, options)
#search = self._extra_query_options(search, options)
# return search
def _extra_query_options(self, search, options):
search = AsyncSearch().query(
"function_score",
query=search.query,
functions=[
{"filter": {"term": {"taxid": 9606}}, "weight": "1.55"}, # human
{"filter": {"term": {"taxid": 10090}}, "weight": "1.3"}, # mouse
{"filter": {"term": {"taxid": 10116}}, "weight": "1.1"}, # rat
], score_mode="first")
if options.species:
if 'all' in options.species:
pass
elif not all(isinstance(string, str) for string in options.species):
raise BadRequest(reason="species must be strings or integer strings.")
elif not all(string.isnumeric() for string in options.species):
raise BadRequest(reason="cannot map some species to taxids.")
else:
search = search.filter('terms', taxid=options.species)
if options.aggs and options.species_facet_filter:
search = search.post_filter('terms', taxid=options.species_facet_filter)
return search
|
the-stack_106_23030 | from hashlib import sha256
from typing import Mapping, NamedTuple, Dict
from common.serializers.serialization import serialize_msg_for_signing
from plenum.common.constants import REQKEY, FORCE, TXN_TYPE, OPERATION_SCHEMA_IS_STRICT
from plenum.common.messages.client_request import ClientMessageValidator
from plenum.common.types import f, OPERATION
from plenum.common.util import getTimeBasedId
from stp_core.types import Identifier
from plenum import PLUGIN_CLIENT_REQUEST_FIELDS
class Request:
idr_delimiter = ','
def __init__(self,
identifier: Identifier = None,
reqId: int = None,
operation: Mapping = None,
signature: str = None,
signatures: Dict[str, str] = None,
protocolVersion: int = None,
taaAcceptance: Dict = None,
endorser: Identifier = None,
# Intentionally omitting *args
**kwargs):
self._identifier = identifier
self.signature = signature
self.signatures = signatures
self.reqId = reqId
self.operation = operation
self.protocolVersion = protocolVersion
self.taaAcceptance = taaAcceptance
self.endorser = endorser
self._digest = None
self._payload_digest = None
for nm in PLUGIN_CLIENT_REQUEST_FIELDS:
if nm in kwargs:
setattr(self, nm, kwargs[nm])
@property
def digest(self):
if self._digest is None:
self._digest = self.getDigest()
return self._digest
@property
def payload_digest(self):
if self._payload_digest is None:
self._payload_digest = self.getPayloadDigest()
return self._payload_digest
@property
def as_dict(self):
rv = {
f.REQ_ID.nm: self.reqId,
OPERATION: self.operation
}
if self._identifier is not None:
rv[f.IDENTIFIER.nm] = self._identifier
if self.signatures is not None:
rv[f.SIGS.nm] = self.signatures
if self.signature is not None:
rv[f.SIG.nm] = self.signature
for nm in PLUGIN_CLIENT_REQUEST_FIELDS:
if hasattr(self, nm):
rv[nm] = getattr(self, nm)
if self.protocolVersion is not None:
rv[f.PROTOCOL_VERSION.nm] = self.protocolVersion
if self.taaAcceptance is not None:
rv[f.TAA_ACCEPTANCE.nm] = self.taaAcceptance
if self.endorser is not None:
rv[f.ENDORSER.nm] = self.endorser
return rv
def __eq__(self, other):
return self.as_dict == other.as_dict
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.as_dict)
@property
def key(self):
return self.digest
def getDigest(self):
return sha256(serialize_msg_for_signing(self.signingState())).hexdigest()
def getPayloadDigest(self):
return sha256(serialize_msg_for_signing(self.signingPayloadState())).hexdigest()
def __getstate__(self):
return self.__dict__
def signingState(self, identifier=None):
state = self.signingPayloadState(identifier)
if self.signatures is not None:
state[f.SIGS.nm] = self.signatures
if self.signature is not None:
state[f.SIG.nm] = self.signature
for nm in PLUGIN_CLIENT_REQUEST_FIELDS:
val = getattr(self, nm, None)
if getattr(self, nm, None):
state[nm] = val
return state
def signingPayloadState(self, identifier=None):
# TODO: separate data, metadata and signature, so that we don't
# need to have this kind of messages
dct = {
f.IDENTIFIER.nm: identifier or self.identifier,
f.REQ_ID.nm: self.reqId,
OPERATION: self.operation
}
if self.protocolVersion is not None:
dct[f.PROTOCOL_VERSION.nm] = self.protocolVersion
if self.taaAcceptance is not None:
dct[f.TAA_ACCEPTANCE.nm] = self.taaAcceptance
if self.endorser is not None:
dct[f.ENDORSER.nm] = self.endorser
return dct
def __setstate__(self, state):
self.__dict__.update(state)
return self
@classmethod
def fromState(cls, state):
obj = cls.__new__(cls)
cls.__setstate__(obj, state)
return obj
def serialized(self):
return serialize_msg_for_signing(self.__getstate__())
def isForced(self):
force = self.operation.get(FORCE)
return str(force) == 'True'
@property
def txn_type(self):
return self.operation.get(TXN_TYPE)
@property
def identifier(self):
return self._identifier or self.gen_idr_from_sigs(self.signatures)
@property
def all_identifiers(self):
if self.signatures is None:
return []
return sorted(self.signatures.keys())
@staticmethod
def gen_req_id():
return getTimeBasedId()
@staticmethod
def gen_idr_from_sigs(signatures: Dict):
return Request.idr_delimiter.join(sorted(signatures.keys())) if signatures else None
def add_signature(self, identifier, signature):
if not isinstance(self.signatures, Dict):
self.signatures = {}
self.signatures[identifier] = signature
def __hash__(self):
return hash(self.serialized())
class ReqKey(NamedTuple(REQKEY, [f.DIGEST])):
pass
class SafeRequest(Request, ClientMessageValidator):
def __init__(self, **kwargs):
ClientMessageValidator.__init__(self,
operation_schema_is_strict=OPERATION_SCHEMA_IS_STRICT)
self.validate(kwargs)
Request.__init__(self, **kwargs)
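# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): builds a minimal request and inspects its digests. The DID-like
# identifier, the operation payload and the signature string are made-up
# placeholders, not real ledger data.
if __name__ == '__main__':
    req = Request(identifier='L5AD5g65TDQr1PPHHRoiGf',
                  reqId=Request.gen_req_id(),
                  operation={TXN_TYPE: '1', 'dest': 'some-target-identifier'},
                  protocolVersion=2)
    # Attach a (placeholder) signature and inspect the derived fields
    req.add_signature('L5AD5g65TDQr1PPHHRoiGf', 'placeholder-signature')
    print(req.digest)           # digest over payload + signatures
    print(req.payload_digest)   # digest over payload only
    print(req.all_identifiers)  # identifiers recovered from the signatures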
|
the-stack_106_23031 | from typing import List, Tuple
import sympy
import torch
import numpy as np
from sklearn.metrics import f1_score
from sympy import to_dnf, lambdify
def test_explanation(formula: str, x: torch.Tensor, y: torch.Tensor, target_class: int):
"""
Tests a logic formula.
:param formula: logic formula
:param x: input data
:param y: input labels (MUST be one-hot encoded)
:param target_class: target class
:return: Accuracy of the explanation and predictions
"""
if formula in ['True', 'False', ''] or formula is None:
return 0.0, None
else:
assert len(y.shape) == 2
y = y[:, target_class]
concept_list = [f"feature{i:010}" for i in range(x.shape[1])]
# get predictions using sympy
explanation = to_dnf(formula)
fun = lambdify(concept_list, explanation, 'numpy')
x = x.cpu().detach().numpy()
predictions = fun(*[x[:, i] > 0.5 for i in range(x.shape[1])])
# get accuracy
accuracy = f1_score(y, predictions, average='macro')
return accuracy, predictions
def complexity(formula: str, to_dnf: bool = False) -> float:
"""
Estimates the complexity of the formula.
:param formula: logic formula.
:param to_dnf: whether to convert the formula in disjunctive normal form.
:return: The complexity of the formula.
"""
if formula != "" and formula is not None:
if to_dnf:
formula = str(sympy.to_dnf(formula))
return np.array([len(f.split(' & ')) for f in formula.split(' | ')]).sum()
return 0
def concept_consistency(formula_list: List[str]) -> dict:
"""
Computes the frequency of concepts in a list of logic formulas.
:param formula_list: list of logic formulas.
:return: Frequency of concepts.
"""
concept_dict = _generate_consistency_dict(formula_list)
return {k: v / len(formula_list) for k, v in concept_dict.items()}
def formula_consistency(formula_list: List[str]) -> float:
"""
Computes the average frequency of concepts in a list of logic formulas.
:param formula_list: list of logic formulas.
:return: Average frequency of concepts.
"""
concept_dict = _generate_consistency_dict(formula_list)
concept_consistency = np.array([c for c in concept_dict.values()]) / len(formula_list)
return concept_consistency.mean()
def _generate_consistency_dict(formula_list: List[str]) -> dict:
concept_dict = {}
for i, formula in enumerate(formula_list):
concept_dict_i = {}
for minterm_list in formula.split(' | '):
for term in minterm_list.split(' & '):
concept = term.replace('(', '').replace(')', '').replace('~', '')
if concept in concept_dict_i:
continue
elif concept in concept_dict:
concept_dict_i[concept] = 1
concept_dict[concept] += 1
else:
concept_dict_i[concept] = 1
concept_dict[concept] = 1
return concept_dict
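# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): exercises test_explanation(), complexity() and concept_consistency()
# on a tiny synthetic dataset. The formula and data below are made up; feature
# names follow the "feature{i:010}" convention used by test_explanation().
if __name__ == '__main__':
    x = torch.tensor([[1., 0.], [0., 1.], [1., 1.], [0., 0.]])
    # one-hot labels: class 1 is active exactly when the first feature is on
    y = torch.tensor([[0, 1], [1, 0], [0, 1], [1, 0]])
    formula = 'feature0000000000'
    accuracy, predictions = test_explanation(formula, x, y, target_class=1)
    print(f'accuracy={accuracy:.2f}, complexity={complexity(formula)}')
    print(concept_consistency([formula, 'feature0000000000 & feature0000000001']))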
|
the-stack_106_23032 | import json
import os
from pathlib import Path
from typing import Callable, List, Optional
import numpy as np
import scipy.sparse as sp
import torch
from torch_geometric.data import Data, InMemoryDataset, download_url
class AmazonProducts(InMemoryDataset):
r"""The Amazon dataset from the `"GraphSAINT: Graph Sampling Based
Inductive Learning Method" <https://arxiv.org/abs/1907.04931>`_ paper,
containing products and its categories.
Args:
root (string): Root directory where the dataset should be saved.
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
Stats:
.. list-table::
:widths: 10 10 10 10
:header-rows: 1
* - #nodes
- #edges
- #features
- #classes
* - 1,569,960
- 264,339,468
- 200
- 107
"""
url = 'https://docs.google.com/uc?export=download&id={}&confirm=t'
adj_full_id = '17qhNA8H1IpbkkR-T2BmPQm8QNW5do-aa'
feats_id = '10SW8lCvAj-kb6ckkfTOC5y0l8XXdtMxj'
class_map_id = '1LIl4kimLfftj4-7NmValuWyCQE8AaE7P'
role_id = '1npK9xlmbnjNkV80hK2Q68wTEVOFjnt4K'
def __init__(self, root: str, transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None):
super().__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> List[str]:
return ['adj_full.npz', 'feats.npy', 'class_map.json', 'role.json']
@property
def processed_file_names(self) -> str:
return 'data.pt'
def download(self):
path = download_url(self.url.format(self.adj_full_id), self.raw_dir)
os.rename(path, Path.joinpath(Path(self.raw_dir), 'adj_full.npz'))
path = download_url(self.url.format(self.feats_id), self.raw_dir)
os.rename(path, Path.joinpath(Path(self.raw_dir), 'feats.npy'))
path = download_url(self.url.format(self.class_map_id), self.raw_dir)
os.rename(path, Path.joinpath(Path(self.raw_dir), 'class_map.json'))
path = download_url(self.url.format(self.role_id), self.raw_dir)
os.rename(path, Path.joinpath(Path(self.raw_dir), 'role.json'))
def process(self):
f = np.load(Path.joinpath(Path(self.raw_dir), 'adj_full.npz'))
adj = sp.csr_matrix((f['data'], f['indices'], f['indptr']), f['shape'])
adj = adj.tocoo()
row = torch.from_numpy(adj.row).to(torch.long)
col = torch.from_numpy(adj.col).to(torch.long)
edge_index = torch.stack([row, col], dim=0)
x = np.load(Path.joinpath(Path(self.raw_dir), 'feats.npy'))
x = torch.from_numpy(x).to(torch.float)
ys = [-1] * x.size(0)
with open(Path.joinpath(Path(self.raw_dir), 'class_map.json')) as f:
class_map = json.load(f)
for key, item in class_map.items():
ys[int(key)] = item
y = torch.tensor(ys)
with open(Path.joinpath(Path(self.raw_dir), 'role.json')) as f:
role = json.load(f)
train_mask = torch.zeros(x.size(0), dtype=torch.bool)
train_mask[torch.tensor(role['tr'])] = True
val_mask = torch.zeros(x.size(0), dtype=torch.bool)
val_mask[torch.tensor(role['va'])] = True
test_mask = torch.zeros(x.size(0), dtype=torch.bool)
test_mask[torch.tensor(role['te'])] = True
data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask,
val_mask=val_mask, test_mask=test_mask)
data = data if self.pre_transform is None else self.pre_transform(data)
torch.save(self.collate([data]), self.processed_paths[0])
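# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): typical instantiation of the dataset. The root path is an
# arbitrary choice, and the first access triggers a large (GB-scale)
# download from the Google Drive ids above.
if __name__ == '__main__':
    dataset = AmazonProducts(root='data/AmazonProducts')
    data = dataset[0]
    print(data)
    print('training nodes:', int(data.train_mask.sum()))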
|
the-stack_106_23033 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from setuptools import setup, find_packages
from docs import getVersion
# Variables ===================================================================
changelog = open('CHANGES.rst').read()
long_description = "\n\n".join([
open('README.rst').read(),
open('CONTRIBUTORS.rst').read(),
changelog
])
# Actual setup definition =====================================================
setup(
name='edeposit.amqp.ltp',
version=getVersion(changelog),
description="E-Deposit's AMQP binding to Long Time Preservation system.",
long_description=long_description,
url='https://github.com/edeposit/edeposit.amqp.ltp/',
author='Edeposit team',
author_email='[email protected]',
classifiers=[
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries :: Python Modules"
],
license='MIT',
packages=find_packages('src'),
package_dir={'': 'src'},
# scripts=[''],
namespace_packages=['edeposit', 'edeposit.amqp'],
include_package_data=True,
zip_safe=False,
install_requires=open("requirements.txt").read().splitlines(),
extras_require={
"test": [
"pytest"
],
"docs": [
"sphinx",
"sphinxcontrib-napoleon",
]
},
)
|
the-stack_106_23034 | from pyramid.httpexceptions import HTTPForbidden
from pyramid.httpexceptions import HTTPUnauthorized
from pyramid.httpexceptions import HTTPUnprocessableEntity
from pyramid.response import Response
from pyramid.view import view_config
from libweasyl.text import markdown, slug_for
from libweasyl import ratings
from weasyl.controllers.decorators import token_checked
from weasyl.error import WeasylError
from weasyl import define as d, macro as m
from weasyl import (
api, character, collection, commishinfo, favorite, folder,
index, journal, media, message, profile, submission)
_ERROR_UNEXPECTED = {
"error": {
"code": 100,
"text": "Unexpected"
}}
_ERROR_UNSIGNED = {
"error": {
"code": 110,
"text": "Session unsigned"
}}
_ERROR_SITE_STATUS = {
"error": {
"code": 115,
"text": "Site feature temporarily unavailable"
}}
_ERROR_PERMISSION = {
"error": {
"code": 120,
"text": "Permission denied"
}}
_CONTENT_IDS = {
'submissions': 'submitid',
'characters': 'charid',
'journals': 'journalid',
}
def api_method(view_callable):
def wrapper(request):
try:
return view_callable(request)
except WeasylError as e:
e.render_as_json = True
raise
except Exception as e:
# double underscore here to try to not conflict with any attributes
# already set on the exception, since we don't know where it's been.
e.__render_as_json = True
raise
return wrapper
_STANDARD_WWW_AUTHENTICATE = 'Bearer realm="Weasyl", Weasyl-API-Key realm="Weasyl"'
# TODO: Additional decorators for things like permissions checks if we ever add moderator/admin endpoints
# that return appropriate json. The common status check should also be refactored to return json.
def api_login_required(view_callable):
"""
Like decorators.login_required, but returning json on an error.
"""
# TODO: If we replace the regular @login_required checks on POSTs with a tween, what do about this?
def inner(request):
if request.userid == 0:
raise HTTPUnauthorized(
json=_ERROR_UNSIGNED,
www_authenticate=_STANDARD_WWW_AUTHENTICATE,
)
return view_callable(request)
return inner
@view_config(route_name='useravatar', renderer='json')
@api_method
def api_useravatar_(request):
form = request.web_input(username="")
userid = profile.resolve_by_login(d.get_sysname(form.username))
if userid:
media_items = media.get_user_media(userid)
return {
"avatar": d.absolutify_url(media_items['avatar'][0]['display_url']),
}
raise WeasylError('userRecordMissing')
@view_config(route_name='whoami', renderer='json')
@api_login_required
def api_whoami_(request):
return {
"login": d.get_display_name(request.userid),
"userid": request.userid,
}
@view_config(route_name='version', renderer='json')
@api_method
def api_version_(request):
format = request.matchdict.get("format", ".json")
if format == '.txt':
return Response(d.CURRENT_SHA, content_type='text/plain')
else:
return {
"short_sha": d.CURRENT_SHA,
}
def tidy_submission(submission):
submission['posted_at'] = d.iso8601(submission.pop('unixtime'))
submission['sub_media'] = api.tidy_all_media(submission['sub_media'])
if 'user_media' in submission:
submission['owner_media'] = api.tidy_all_media(submission.pop('user_media'))
submission.pop('userid', None)
subtype = submission.pop('subtype', None)
if subtype:
submission['subtype'] = m.CATEGORY_PARSABLE_MAP[subtype // 1000 * 1000]
contype = submission.pop('contype', None)
if contype:
submission['type'] = m.CONTYPE_PARSABLE_MAP[contype]
submission['rating'] = ratings.CODE_TO_NAME[submission['rating']]
submission['owner'] = submission.pop('username')
submission['owner_login'] = d.get_sysname(submission['owner'])
submission['media'] = submission.pop('sub_media')
submitid = 0
if 'submitid' in submission:
submitid = submission['submitid']
if 'charid' in submission:
submitid = submission['charid']
if submitid > 0:
if submission['type'] == "usercollect":
linktype = "submission"
else:
linktype = submission['type']
submission['link'] = d.absolutify_url(
"/%s/%d/%s" % (linktype, submitid, slug_for(submission['title'])))
@view_config(route_name='api_frontpage', renderer='json')
@api_method
def api_frontpage_(request):
form = request.web_input(since=None, count=0)
since = None
try:
if form.since:
since = d.parse_iso8601(form.since)
count = int(form.count)
except ValueError:
raise HTTPUnprocessableEntity(json=_ERROR_UNEXPECTED)
else:
count = min(count or 100, 100)
submissions = index.filter_submissions(request.userid, index.recent_submissions())
ret = []
for e, sub in enumerate(submissions, start=1):
if (since is not None and since >= sub['unixtime']) or (count and e > count):
break
tidy_submission(sub)
ret.append(sub)
return ret
@view_config(route_name='api_submission_view', renderer='json')
@api_method
def api_submission_view_(request):
form = request.web_input(anyway='', increment_views='')
return submission.select_view_api(
request.userid, int(request.matchdict['submitid']),
anyway=bool(form.anyway), increment_views=bool(form.increment_views))
@view_config(route_name='api_journal_view', renderer='json')
@api_method
def api_journal_view_(request):
form = request.web_input(anyway='', increment_views='')
return journal.select_view_api(
request.userid, int(request.matchdict['journalid']),
anyway=bool(form.anyway), increment_views=bool(form.increment_views))
@view_config(route_name='api_character_view', renderer='json')
@api_method
def api_character_view_(request):
form = request.web_input(anyway='', increment_views='')
return character.select_view_api(
request.userid, int(request.matchdict['charid']),
anyway=bool(form.anyway), increment_views=bool(form.increment_views))
@view_config(route_name='api_user_view', renderer='json')
@api_method
def api_user_view_(request):
# Helper functions for this view.
def convert_commission_price(value, options):
return d.text_price_symbol(options) + d.text_price_amount(value)
def convert_commission_setting(target):
if target == "o":
return "open"
elif target == "s":
return "sometimes"
elif target == "f":
return "filled"
elif target == "c":
return "closed"
else:
return None
userid = request.userid
otherid = profile.resolve_by_login(d.get_sysname(request.matchdict['login']))
user = profile.select_profile(otherid)
rating = d.get_rating(userid)
o_config = user.pop('config')
o_settings = user.pop('settings')
if not otherid and "h" in o_config:
raise HTTPForbidden(json={
"error": {
"code": 200,
"text": "Profile hidden from unlogged users.",
},
})
del user['userid']
del user['commish_slots']
user['created_at'] = d.iso8601(user.pop('unixtime'))
user['media'] = api.tidy_all_media(user.pop('user_media'))
user['login_name'] = d.get_sysname(user['username'])
user['profile_text'] = markdown(user['profile_text'])
user['folders'] = folder.select_list(otherid)
commissions = {
"details": None,
"price_classes": None,
"commissions": convert_commission_setting(o_settings[0]),
"trades": convert_commission_setting(o_settings[1]),
"requests": convert_commission_setting(o_settings[2])
}
commission_list = commishinfo.select_list(otherid)
commissions['details'] = commission_list['content']
if len(commission_list['class']) > 0:
classes = list()
for cclass in commission_list['class']:
commission_class = {
"title": cclass['title']
}
if len(commission_list['price']) > 0:
prices = list()
for cprice in (i for i in commission_list['price'] if i['classid'] == cclass['classid']):
if 'a' in cprice['settings']:
ptype = 'additional'
else:
ptype = 'base'
price = {
"title": cprice['title'],
"price_min": convert_commission_price(cprice['amount_min'], cprice['settings']),
"price_max": convert_commission_price(cprice['amount_min'], cprice['settings']),
'price_type': ptype
}
prices.append(price)
commission_class['prices'] = prices
classes.append(commission_class)
commissions['price_classes'] = classes
user['commission_info'] = commissions
user['relationship'] = profile.select_relation(userid, otherid) if userid else None
if 'O' in o_config:
submissions = collection.select_list(userid, rating, 11, otherid=otherid)
more_submissions = 'collections'
featured = None
elif 'A' in o_config:
submissions = character.select_list(userid, rating, 11, otherid=otherid)
more_submissions = 'characters'
featured = None
else:
submissions = submission.select_list(userid, rating, 11, otherid=otherid, profile_page_filter=True)
more_submissions = 'submissions'
featured = submission.select_featured(userid, otherid, rating)
for sub in submissions:
tidy_submission(sub)
user['recent_submissions'] = submissions
user['recent_type'] = more_submissions
if featured:
tidy_submission(featured)
user['featured_submission'] = featured
statistics, show_statistics = profile.select_statistics(otherid)
del statistics['staff_notes']
user['statistics'] = statistics if show_statistics else None
user_info = profile.select_userinfo(otherid, config=o_config)
if not user_info['show_age']:
user_info['age'] = None
del user_info['show_age']
del user_info['birthday']
user_info['location'] = user_info.pop('country')
user['user_info'] = user_info
user['link'] = d.absolutify_url("/~" + user['login_name'])
return user
@view_config(route_name='api_user_gallery', renderer='json')
@api_method
def api_user_gallery_(request):
userid = profile.resolve_by_login(d.get_sysname(request.matchdict['login']))
if not userid:
raise WeasylError('userRecordMissing')
form = request.web_input(since=None, count=0, folderid=0, backid=0, nextid=0)
since = None
try:
if form.since:
since = d.parse_iso8601(form.since)
count = int(form.count)
folderid = int(form.folderid)
backid = int(form.backid)
nextid = int(form.nextid)
except ValueError:
raise HTTPUnprocessableEntity(json=_ERROR_UNEXPECTED)
else:
count = min(count or 100, 100)
submissions = submission.select_list(
request.userid, d.get_rating(request.userid), count + 1,
otherid=userid, folderid=folderid, backid=backid, nextid=nextid)
backid, nextid = d.paginate(submissions, backid, nextid, count, 'submitid')
ret = []
for sub in submissions:
if since is not None and since >= sub['unixtime']:
break
tidy_submission(sub)
ret.append(sub)
return {
'backid': backid, 'nextid': nextid,
'submissions': ret,
}
@view_config(route_name='api_messages_submissions', renderer='json')
@api_login_required
@api_method
def api_messages_submissions_(request):
form = request.web_input(count=0, backtime=0, nexttime=0)
try:
count = int(form.count)
backtime = int(form.backtime)
nexttime = int(form.nexttime)
except ValueError:
raise HTTPUnprocessableEntity(json=_ERROR_UNEXPECTED)
else:
count = min(count or 100, 100)
submissions = message.select_submissions(
request.userid, count + 1, include_tags=True, backtime=backtime, nexttime=nexttime)
backtime, nexttime = d.paginate(submissions, backtime, nexttime, count, 'unixtime')
ret = []
for sub in submissions:
tidy_submission(sub)
ret.append(sub)
return {
'backtime': backtime, 'nexttime': nexttime,
'submissions': ret,
}
@view_config(route_name='api_messages_summary', renderer='json')
@api_login_required
@api_method
def api_messages_summary_(request):
counts = d._page_header_info(request.userid)
return {
'unread_notes': counts[0],
'comments': counts[1],
'notifications': counts[2],
'submissions': counts[3],
'journals': counts[4],
}
# TODO(hyena): It's probable that token_checked won't return json from these. Consider writing an api_token_checked.
@view_config(route_name='api_favorite', request_method='POST', renderer='json')
@api_login_required
@api_method
@token_checked
def api_favorite_(request):
favorite.insert(request.userid,
**{_CONTENT_IDS[request.matchdict['content_type']]: int(request.matchdict['content_id'])})
return {
'success': True
}
@view_config(route_name='api_unfavorite', request_method='POST', renderer='json')
@api_login_required
@api_method
@token_checked
def api_unfavorite_(request):
favorite.remove(request.userid,
**{_CONTENT_IDS[request.matchdict['content_type']]: int(request.matchdict['content_id'])})
return {
'success': True
}
|
the-stack_106_23036 | from RedmineAPI.Utilities import FileExtension, create_time_log
import shutil
import os
from RedmineAPI.Access import RedmineAccess
from RedmineAPI.Configuration import Setup
from Utilities import CustomKeys, CustomValues
class Automate(object):
def __init__(self, force):
# create a log, can be written to as the process continues
self.timelog = create_time_log(FileExtension.runner_log)
# Key: used to index the value to the config file for setup
# Value: 3 Item Tuple ("default value", ask user" - i.e. True/False, "type of value" - i.e. str, int....)
# A value of None is the default for all parts except for "Ask" which is True
# custom_terms = {CustomKeys.key_name: (CustomValues.value_name, True, str)} # *** can be more than 1 ***
custom_terms = dict()
# Create a RedmineAPI setup object to create/read/write to the config file and get default arguments
setup = Setup(time_log=self.timelog, custom_terms=custom_terms)
setup.set_api_key(force)
# Custom terms saved to the config after getting user input
# self.custom_values = setup.get_custom_term_values()
# *** can be multiple custom values variable, just use the key from above to reference the inputted value ***
# self.your_custom_value_name = self.custom_values[CustomKeys.key_name]
# Default terms saved to the config after getting user input
self.seconds_between_checks = setup.seconds_between_check
self.nas_mnt = setup.nas_mnt
self.redmine_api_key = setup.api_key
# Initialize Redmine wrapper
self.access_redmine = RedmineAccess(self.timelog, self.redmine_api_key)
self.botmsg = '\n\n_I am a bot. This action was performed automatically._' # sets bot message
# Subject name and Status to be searched on Redmine
self.issue_title = 'genesippr' # must be a lower case string to validate properly
self.issue_status = 'New'
def timed_retrieve(self):
"""
Continuously search Redmine in intervals for the inputted period of time,
Log errors to the log file as they occur
"""
import time
while True:
# Get issues matching the issue status and subject
found_issues = self.access_redmine.retrieve_issues(self.issue_status, self.issue_title)
# Respond to the issues in the list 1 at a time
while len(found_issues) > 0:
self.respond_to_issue(found_issues.pop(len(found_issues) - 1))
self.timelog.time_print("Waiting for the next check.")
time.sleep(self.seconds_between_checks)
def respond_to_issue(self, issue):
"""
Run the desired automation process on the inputted issue, if there is an error update the author
:param issue: Specified Redmine issue information
"""
self.timelog.time_print("Found a request to run. Subject: %s. ID: %s" % (issue.subject, str(issue.id)))
self.timelog.time_print("Adding to the list of responded to requests.")
self.access_redmine.log_new_issue(issue)
try:
issue.redmine_msg = "Beginning the process for: %s" % issue.subject
self.access_redmine.update_status_inprogress(issue, self.botmsg)
##########################################################################################
os.makedirs('/mnt/nas/bio_requests/' + str(issue.id))
# Remember the directory we're in.
work_dir = '/mnt/nas/bio_requests/' + str(issue.id)
current_dir = os.getcwd()
des = issue.description.split('\n')
seqids = list()
for item in des:
item = item.upper()
seqids.append(item.rstrip())
f = open(work_dir + '/seqid.txt', 'w')
for seqid in seqids:
f.write(seqid + '\n')
f.close()
os.chdir('/mnt/nas/MiSeq_Backup')
cmd = 'python2 /mnt/nas/MiSeq_Backup/file_extractor.py {}/seqid.txt {}'.format(work_dir, work_dir)
os.system(cmd)
os.chdir(current_dir)
f = open('Sippr.sh')
lines = f.readlines()
f.close()
f = open(work_dir + '/' + str(issue.id) + '.sh', 'w')
for line in lines:
if 'job_%j' in line:
line = line.replace('job', 'biorequest_' + str(issue.id) + '_job')
f.write(line)
f.write('docker run -i -u $(id -u) -v /mnt/nas/bio_requests/8312/newsixteens/targets/:/targets'
' -v {}:/sequences sipprverse geneSipprV2/sipprverse/method.py -s /sequences -t /targets /sequences\n'.format(work_dir))
f.write('cd /mnt/nas/bio_requests/{}\n'.format(str(issue.id)))
f.write('python upload_file.py {}\n'.format(str(issue.id)))
f.write('rm -rf *.fastq* */*fastq* *.fasta RedmineAPI running_logs *json upload_file.py')
f.close()
shutil.copy('upload_file.py', work_dir + '/upload_file.py')
shutil.copytree('RedmineAPI', work_dir + '/RedmineAPI')
# Submit the batch script to slurm.
cmd = 'sbatch {}'.format(work_dir + '/' + str(issue.id) + '.sh')
os.system(cmd)
##########################################################################################
self.completed_response(issue)
except Exception as e:
import traceback
self.timelog.time_print("[Warning] The automation process had a problem, continuing redmine api anyways.")
self.timelog.time_print("[Automation Error Dump]\n" + traceback.format_exc())
# Send response
issue.redmine_msg = "There was a problem with your request. Please create a new issue on" \
" Redmine to re-run it.\n%s" % traceback.format_exc()
# Set it to feedback and assign it back to the author
self.access_redmine.update_issue_to_author(issue, self.botmsg)
def completed_response(self, issue):
"""
Update the issue back to the author once the process has finished
:param issue: Specified Redmine issue the process has been completed on
"""
# Assign the issue back to the Author
self.timelog.time_print("Assigning the issue: %s back to the author." % str(issue.id))
issue.redmine_msg = "Your GeneSippr request has been sent to the OLC Compute Cluster for processing." \
" This issue will be updated once results are available."
# Update author on Redmine
self.access_redmine.update_issue_to_author(issue, self.botmsg)
# Log the completion of the issue including the message sent to the author
self.timelog.time_print("\nMessage to author - %s\n" % issue.redmine_msg)
self.timelog.time_print("Completed Response to issue %s." % str(issue.id))
self.timelog.time_print("The next request will be processed once available")
|
the-stack_106_23037 | import logging
from dataclasses import dataclass
from itertools import islice
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence
from lhotse.utils import JsonMixin, Seconds, YamlMixin, asdict_nonull, exactly_one_not_null, fastcopy, \
index_by_id_and_check, \
perturb_num_samples, split_sequence
@dataclass(frozen=True, unsafe_hash=True)
class SupervisionSegment:
id: str
recording_id: str
start: Seconds
duration: Seconds
channel: int = 0
text: Optional[str] = None
language: Optional[str] = None
speaker: Optional[str] = None
gender: Optional[str] = None
custom: Optional[Dict[str, Any]] = None
@property
def end(self) -> Seconds:
return round(self.start + self.duration, ndigits=8)
def with_offset(self, offset: Seconds) -> 'SupervisionSegment':
"""Return an identical ``SupervisionSegment``, but with the ``offset`` added to the ``start`` field."""
return fastcopy(self, start=round(self.start + offset, ndigits=8))
def perturb_speed(
self,
factor: float,
sampling_rate: int,
affix_id: bool = True
) -> 'SupervisionSegment':
"""
Return a ``SupervisionSegment`` that has time boundaries matching the
recording/cut perturbed with the same factor.
:param factor: The speed will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
:param sampling_rate: The sampling rate is necessary to accurately perturb the start
and duration (going through the sample counts).
:param affix_id: When true, we will modify the ``id`` and ``recording_id`` fields
by affixing it with "_sp{factor}".
:return: a modified copy of the current ``Recording``.
"""
start_sample = round(self.start * sampling_rate)
num_samples = round(self.duration * sampling_rate)
new_start = perturb_num_samples(start_sample, factor) / sampling_rate
new_duration = perturb_num_samples(num_samples, factor) / sampling_rate
return fastcopy(
self,
id=f'{self.id}_sp{factor}' if affix_id else self.id,
            recording_id=f'{self.recording_id}_sp{factor}' if affix_id else self.recording_id,
start=new_start,
duration=new_duration
)
def trim(self, end: Seconds) -> 'SupervisionSegment':
"""
Return an identical ``SupervisionSegment``, but ensure that ``self.start`` is not negative (in which case
it's set to 0) and ``self.end`` does not exceed the ``end`` parameter.
This method is useful for ensuring that the supervision does not exceed a cut's bounds,
in which case pass ``cut.duration`` as the ``end`` argument, since supervision times are relative to the cut.
"""
start_exceeds_by = abs(min(0, self.start))
end_exceeds_by = max(0, self.end - end)
return fastcopy(self, start=max(0, self.start), duration=self.duration - end_exceeds_by - start_exceeds_by)
def map(self, transform_fn: Callable[['SupervisionSegment'], 'SupervisionSegment']) -> 'SupervisionSegment':
"""
Return a copy of the current segment, transformed with ``transform_fn``.
:param transform_fn: a function that takes a segment as input, transforms it and returns a new segment.
:return: a modified ``SupervisionSegment``.
"""
return transform_fn(self)
def transform_text(self, transform_fn: Callable[[str], str]) -> 'SupervisionSegment':
"""
Return a copy of the current segment with transformed ``text`` field.
Useful for text normalization, phonetic transcription, etc.
:param transform_fn: a function that accepts a string and returns a string.
:return: a ``SupervisionSegment`` with adjusted text.
"""
if self.text is None:
return self
return fastcopy(self, text=transform_fn(self.text))
@staticmethod
def from_dict(data: dict) -> 'SupervisionSegment':
return SupervisionSegment(**data)
@dataclass
class SupervisionSet(JsonMixin, YamlMixin, Sequence[SupervisionSegment]):
"""
SupervisionSet represents a collection of segments containing some supervision information.
The only required fields are the ID of the segment, ID of the corresponding recording,
and the start and duration of the segment in seconds.
All other fields, such as text, language or speaker, are deliberately optional
to support a wide range of tasks, as well as adding more supervision types in the future,
while retaining backwards compatibility.
"""
segments: Dict[str, SupervisionSegment]
@staticmethod
def from_segments(segments: Iterable[SupervisionSegment]) -> 'SupervisionSet':
return SupervisionSet(segments=index_by_id_and_check(segments))
@staticmethod
def from_dicts(data: Iterable[Dict]) -> 'SupervisionSet':
return SupervisionSet.from_segments(SupervisionSegment.from_dict(s) for s in data)
def to_dicts(self) -> List[dict]:
return [asdict_nonull(s) for s in self]
def split(self, num_splits: int, shuffle: bool = False) -> List['SupervisionSet']:
"""
Split the ``SupervisionSet`` into ``num_splits`` pieces of equal size.
:param num_splits: Requested number of splits.
:param shuffle: Optionally shuffle the supervisions order first.
:return: A list of ``SupervisionSet`` pieces.
"""
return [
SupervisionSet.from_segments(subset) for subset in
split_sequence(self, num_splits=num_splits, shuffle=shuffle)
]
def subset(self, first: Optional[int] = None, last: Optional[int] = None) -> 'SupervisionSet':
"""
Return a new ``SupervisionSet`` according to the selected subset criterion.
Only a single argument to ``subset`` is supported at this time.
:param first: int, the number of first supervisions to keep.
:param last: int, the number of last supervisions to keep.
:return: a new ``SupervisionSet`` with the subset results.
"""
assert exactly_one_not_null(first, last), "subset() can handle only one non-None arg."
if first is not None:
assert first > 0
if first > len(self):
logging.warning(f'SupervisionSet has only {len(self)} items but first {first} required; '
f'not doing anything.')
return self
return SupervisionSet.from_segments(islice(self, first))
if last is not None:
assert last > 0
if last > len(self):
logging.warning(f'SupervisionSet has only {len(self)} items but last {last} required; '
f'not doing anything.')
return self
return SupervisionSet.from_segments(islice(self, len(self) - last, len(self)))
def filter(self, predicate: Callable[[SupervisionSegment], bool]) -> 'SupervisionSet':
"""
Return a new SupervisionSet with the SupervisionSegments that satisfy the `predicate`.
:param predicate: a function that takes a supervision as an argument and returns bool.
:return: a filtered SupervisionSet.
"""
return SupervisionSet.from_segments(seg for seg in self if predicate(seg))
def map(self, transform_fn: Callable[[SupervisionSegment], SupervisionSegment]) -> 'SupervisionSet':
"""
Map a ``transform_fn`` to the SupervisionSegments and return a new ``SupervisionSet``.
:param transform_fn: a function that modifies a supervision as an argument.
:return: a new ``SupervisionSet`` with modified segments.
"""
return SupervisionSet.from_segments(s.map(transform_fn) for s in self)
def transform_text(self, transform_fn: Callable[[str], str]) -> 'SupervisionSet':
"""
Return a copy of the current ``SupervisionSet`` with the segments having a transformed ``text`` field.
Useful for text normalization, phonetic transcription, etc.
:param transform_fn: a function that accepts a string and returns a string.
:return: a ``SupervisionSet`` with adjusted text.
"""
return SupervisionSet.from_segments(s.transform_text(transform_fn) for s in self)
def find(
self,
recording_id: str,
channel: Optional[int] = None,
start_after: Seconds = 0,
end_before: Optional[Seconds] = None,
adjust_offset: bool = False
) -> Iterable[SupervisionSegment]:
"""
Return an iterable of segments that match the provided ``recording_id``.
:param recording_id: Desired recording ID.
:param channel: When specified, return supervisions in that channel - otherwise, in all channels.
:param start_after: When specified, return segments that start after the given value.
:param end_before: When specified, return segments that end before the given value.
:param adjust_offset: When true, return segments as if the recordings had started at ``start_after``.
            This is useful for creating Cuts. From a user perspective, when dealing with a Cut, it is no
longer helpful to know when the supervisions starts in a recording - instead, it's useful to
know when the supervision starts relative to the start of the Cut.
In the anticipated use-case, ``start_after`` and ``end_before`` would be
the beginning and end of a cut;
this option converts the times to be relative to the start of the cut.
:return: An iterator over supervision segments satisfying all criteria.
"""
segment_by_recording_id = self._index_by_recording_id_and_cache()
return (
# We only modify the offset - the duration remains the same, as we're only shifting the segment
# relative to the Cut's start, and not truncating anything.
segment.with_offset(-start_after) if adjust_offset else segment
for segment in segment_by_recording_id.get(recording_id, [])
if (channel is None or segment.channel == channel)
and segment.start >= start_after
and (end_before is None or segment.end <= end_before)
)
# This is a cache that significantly speeds up repeated ``find()`` queries.
_segments_by_recording_id: Optional[Dict[str, List[SupervisionSegment]]] = None
def _index_by_recording_id_and_cache(self):
if self._segments_by_recording_id is None:
from cytoolz import groupby
self._segments_by_recording_id = groupby(lambda seg: seg.recording_id, self)
return self._segments_by_recording_id
def __repr__(self) -> str:
return f'SupervisionSet(len={len(self)})'
def __getitem__(self, item: str) -> SupervisionSegment:
return self.segments[item]
def __iter__(self) -> Iterable[SupervisionSegment]:
return iter(self.segments.values())
def __len__(self) -> int:
return len(self.segments)
def __add__(self, other: 'SupervisionSet') -> 'SupervisionSet':
return SupervisionSet(segments={**self.segments, **other.segments})
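# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): builds a tiny SupervisionSet and exercises find() with offset
# adjustment. Recording/segment ids and texts are made-up placeholders.
if __name__ == '__main__':
    supervisions = SupervisionSet.from_segments([
        SupervisionSegment(id='utt-1', recording_id='rec-1', start=0.5,
                           duration=2.0, text='first utterance'),
        SupervisionSegment(id='utt-2', recording_id='rec-1', start=3.0,
                           duration=1.5, text='second utterance'),
    ])
    # Segments that fit inside a hypothetical cut spanning 2.5 s - 5.0 s,
    # with their start times re-expressed relative to the cut start.
    for seg in supervisions.find('rec-1', start_after=2.5, end_before=5.0,
                                 adjust_offset=True):
        print(seg.id, seg.start, seg.end, seg.text)
    print(supervisions.transform_text(str.upper)['utt-1'].text)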
|
the-stack_106_23038 | import traceback
import sys
from NewLifeUtils.FileModule import DataStorage
from NewLifeUtils.LoggerModule import cstm, smart_format
default_lang = {
"type": "Type",
"unknown": "Unknown Error",
"about": "More information",
"attention": "Attention",
"info": "Info",
"warning": "Warn",
"error": "Error",
"fatal": "Fatal error",
"wrong": "Something wrong...",
}
translation = DataStorage("lang.yml", "exceptsettings", default_lang)
def except_print(type="fat", more="", code=-1, tb=True, run=True):
if tb:
a = traceback.extract_tb(sys.exc_info()[2])
exception_text = ""
for f in a:
pattern = '{#fff}"{#under}{#7193c9}{file}{#fff}{#nounder}" {#fff}({#42eb77}{lineno}{#fff}): {#de6a50}{content}\n {#fff}> {#22bf2a}{line}\n'
#'{white}"{ul}{fn}{nul}{white}" {white}({yellow}{ln}{white}): {green}{n}\n{s}{white}> {lime}{lc}\n'
exception_text += smart_format(
pattern,
file= f.filename,
lineno= f.lineno,
content= f.name,
line= f.line,
)
exception_text += smart_format(
"{#169181}{err}", err = traceback.format_exc().splitlines()[-1]
)
else:
exception_text = smart_format("{#757575}Traceback is disabled")
kshortcuts = {
"unk": "unknown",
"att": "attention",
"inf": "info",
"wrn": "warning",
"err": "error",
"fat": "fatal",
"wrg": "wrong",
}
pattern = "{#db8e2a}{d}{#under}{title}{#nounder}{d}\n{text}\n{#e352d5}{a}\n{#db8e2a}{d}{td}{d}"
if run:
cstm(
pattern,
d= "-" * 15,
title= translation[kshortcuts[type]],
text= exception_text,
td= " " * len(translation[kshortcuts[type]]),
a=f"More info: {more}" if more else "",
)
else:
return smart_format(pattern,
d= "-" * 15,
title= translation[kshortcuts[type]],
text= exception_text,
td= " " * len(translation[kshortcuts[type]]),
a= f"More info: {more}" if more != "" else "",
)
if type == "fat":
exit(code)
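# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): typical call from inside an exception handler. The type shortcut
# "err" maps to "error" via the kshortcuts table above; "more" is free text.
if __name__ == '__main__':
    try:
        1 / 0
    except ZeroDivisionError:
        except_print(type='err', more='division demo', tb=True)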
|
the-stack_106_23040 | import importlib
import sys
import logging
import os.path
import threading
import copy
import dexbot.errors as errors
from dexbot.strategies.base import StrategyBase
from bitshares.notify import Notify
from bitshares.instance import shared_bitshares_instance
log = logging.getLogger(__name__)
log_workers = logging.getLogger('dexbot.per_worker')
# NOTE this is the special logger for per-worker events
# it returns LogRecords with extra fields: worker_name, account, market and is_disabled
# is_disabled is a callable returning True if the worker is currently disabled.
# GUIs can add a handler to this logger to get a stream of events of the running workers.
class WorkerInfrastructure(threading.Thread):
def __init__(
self,
config,
bitshares_instance=None,
view=None
):
super().__init__()
# BitShares instance
self.bitshares = bitshares_instance or shared_bitshares_instance()
self.config = copy.deepcopy(config)
self.view = view
self.jobs = set()
self.notify = None
self.config_lock = threading.RLock()
self.workers = {}
self.accounts = set()
self.markets = set()
# Set the module search path
user_worker_path = os.path.expanduser("~/bots")
if os.path.exists(user_worker_path):
sys.path.append(user_worker_path)
def init_workers(self, config):
""" Initialize the workers
"""
self.config_lock.acquire()
for worker_name, worker in config["workers"].items():
if "account" not in worker:
log_workers.critical("Worker has no account", extra={
'worker_name': worker_name, 'account': 'unknown',
'market': 'unknown', 'is_disabled': (lambda: True)
})
continue
if "market" not in worker:
log_workers.critical("Worker has no market", extra={
'worker_name': worker_name, 'account': worker['account'],
'market': 'unknown', 'is_disabled': (lambda: True)
})
continue
try:
strategy_class = getattr(
importlib.import_module(worker["module"]),
'Strategy'
)
self.workers[worker_name] = strategy_class(
config=config,
name=worker_name,
bitshares_instance=self.bitshares,
view=self.view
)
self.markets.add(worker['market'])
self.accounts.add(worker['account'])
except BaseException:
log_workers.exception("Worker initialisation", extra={
'worker_name': worker_name, 'account': worker['account'],
'market': 'unknown', 'is_disabled': (lambda: True)
})
self.config_lock.release()
def update_notify(self):
if not self.config['workers']:
log.critical("No workers configured to launch, exiting")
raise errors.NoWorkersAvailable()
if not self.workers:
log.critical("No workers actually running")
raise errors.NoWorkersAvailable()
if self.notify:
# Update the notification instance
self.notify.reset_subscriptions(list(self.accounts), list(self.markets))
else:
# Initialize the notification instance
self.notify = Notify(
markets=list(self.markets),
accounts=list(self.accounts),
on_market=self.on_market,
on_account=self.on_account,
on_block=self.on_block,
bitshares_instance=self.bitshares
)
# Events
def on_block(self, data):
if self.jobs:
try:
for job in self.jobs:
job()
finally:
self.jobs = set()
self.config_lock.acquire()
for worker_name, worker in self.config["workers"].items():
if worker_name not in self.workers:
continue
elif self.workers[worker_name].disabled:
self.workers[worker_name].log.error('Worker "{}" is disabled'.format(worker_name))
self.workers.pop(worker_name)
continue
try:
self.workers[worker_name].ontick(data)
except Exception as e:
self.workers[worker_name].log.exception("in ontick()")
try:
self.workers[worker_name].error_ontick(e)
except Exception:
self.workers[worker_name].log.exception("in error_ontick()")
self.config_lock.release()
def on_market(self, data):
if data.get("deleted", False): # No info available on deleted orders
return
self.config_lock.acquire()
for worker_name, worker in self.config["workers"].items():
if worker_name not in self.workers:
continue
elif self.workers[worker_name].disabled:
self.workers[worker_name].log.error('Worker "{}" is disabled'.format(worker_name))
self.workers.pop(worker_name)
continue
if worker["market"] == data.market:
try:
self.workers[worker_name].onMarketUpdate(data)
except Exception as e:
self.workers[worker_name].log.exception("in onMarketUpdate()")
try:
self.workers[worker_name].error_onMarketUpdate(e)
except Exception:
self.workers[worker_name].log.exception("in error_onMarketUpdate()")
self.config_lock.release()
def on_account(self, account_update):
self.config_lock.acquire()
account = account_update.account
for worker_name, worker in self.config["workers"].items():
if worker_name not in self.workers:
continue
elif self.workers[worker_name].disabled:
self.workers[worker_name].log.error('Worker "{}" is disabled'.format(worker_name))
self.workers.pop(worker_name)
continue
if worker["account"] == account["name"]:
try:
self.workers[worker_name].onAccount(account_update)
except Exception as e:
self.workers[worker_name].log.exception("in onAccountUpdate()")
try:
self.workers[worker_name].error_onAccount(e)
except Exception:
self.workers[worker_name].log.exception("in error_onAccountUpdate()")
self.config_lock.release()
def add_worker(self, worker_name, config):
with self.config_lock:
self.config['workers'][worker_name] = config['workers'][worker_name]
self.init_workers(config)
self.update_notify()
def run(self):
self.init_workers(self.config)
self.update_notify()
self.notify.listen()
def stop(self, worker_name=None, pause=False):
""" Used to stop the worker(s)
:param str worker_name: name of the worker to stop
:param bool pause: optional argument which tells worker if it was stopped or just paused
"""
if worker_name:
try:
# Kill only the specified worker
self.remove_market(worker_name)
except KeyError:
# Worker was not found meaning it does not exist or it is paused already
return
with self.config_lock:
account = self.config['workers'][worker_name]['account']
self.config['workers'].pop(worker_name)
self.accounts.remove(account)
if pause and worker_name in self.workers:
self.workers[worker_name].pause()
self.workers.pop(worker_name, None)
else:
# Kill all of the workers
if pause:
for worker in self.workers:
self.workers[worker].pause()
            self.workers = {}
# Update other workers
if len(self.workers) > 0:
self.update_notify()
else:
# No workers left, close websocket
self.notify.websocket.close()
def remove_worker(self, worker_name=None):
if worker_name:
self.workers[worker_name].purge()
else:
for worker in self.workers:
self.workers[worker].purge()
def remove_market(self, worker_name):
""" Remove the market only if the worker is the only one using it
"""
with self.config_lock:
market = self.config['workers'][worker_name]['market']
for name, worker in self.config['workers'].items():
if market == worker['market']:
break # Found the same market, do nothing
else:
# No markets found, safe to remove
self.markets.remove(market)
@staticmethod
def remove_offline_worker(config, worker_name, bitshares_instance):
# Initialize the base strategy to get control over the data
strategy = StrategyBase(worker_name, config, bitshares_instance=bitshares_instance)
strategy.clear_all_worker_data()
@staticmethod
def remove_offline_worker_data(worker_name):
StrategyBase.purge_all_local_worker_data(worker_name)
def do_next_tick(self, job):
""" Add a callable to be executed on the next tick """
self.jobs.add(job)
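# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): the minimal config shape consumed by init_workers(). The module,
# account and market names are placeholders; actually running this requires
# a reachable BitShares node and a fully configured strategy.
if __name__ == '__main__':
    example_config = {
        'workers': {
            'demo_worker': {
                'module': 'dexbot.strategies.relative_orders',  # assumed strategy module
                'account': 'demo-account',
                'market': 'USD/BTS',
            }
        }
    }
    infrastructure = WorkerInfrastructure(example_config)
    infrastructure.start()  # Thread.start() -> run(): init_workers() + Notify listener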
|
the-stack_106_23043 | from __future__ import print_function
from typing import List
from gphotospy import authorize
from gphotospy.album import *
from gphotospy.media import *
"""
https://dev.to/davidedelpapa/manage-your-google-photo-account-with-python-p-1-9m2
"""
def main():
service = authorize.init('credentials.json')
new_albums = mainname_to_id(service)
create_albums(service, new_albums)
def get_media(service: dict[str, str]):
try:
media_manager = Media(service)
# Retrieve the documents contents from the Docs service.
iterator = media_manager.list()
first = next(iterator)
print(first)
except Exception as err:
print(err)
def mainname_to_id(service: dict[str, str]) -> dict[str, List[str]]:
mainname_to_id_dict: dict[str, List[str]] = {}
try:
media_manager = Media(service)
# Retrieve the documents contents from the Docs service.
iterator = media_manager.list()
for i in range(10):
item = next(iterator)
print(item)
mainname = item['filename'].split('_')[0]
if not mainname in mainname_to_id_dict:
mainname_to_id_dict[mainname] = []
mainname_to_id_dict[mainname].append(item['id'])
print(mainname_to_id_dict)
return mainname_to_id_dict
except Exception as err:
print(err)
def create_albums(service: dict[str, str], album_contents: dict[str, List[str]]):
album_manager = Album(service)
existing_albums = {album['title']: album['id'] for album in get_albums(service)}
print(existing_albums)
for key in album_contents:
if not key in existing_albums:
album_id = album_manager.create(key)['id']
else:
album_id = existing_albums[key]
print(album_id)
chunks = [album_contents[key][x:x + 50] for x in range(0, len(album_contents[key]), 50)]
for chunk in chunks:
print(chunk)
album_manager.batchAddMediaItems(album_id, chunk)
def get_albums(service: dict[str, str]) -> List[dict]:
album_manager = Album(service)
album_iterator = album_manager.list()
albums = []
while True:
try:
album= next(album_iterator)
if not album:
break
albums.append(album)
except Exception as err:
print(err)
break
return albums
if __name__ == '__main__':
main()
|
the-stack_106_23044 | # Tai Sakuma <[email protected]>
##__________________________________________________________________||
def IsROOTNullPointer(tobject):
try:
tobject.GetName()
return False
except ReferenceError:
return True
##__________________________________________________________________||
def inspect_tree(tree):
ret = { }
ret['leaves'] = [inspect_leaf(leaf) for leaf in tree.GetListOfLeaves()]
return ret
##__________________________________________________________________||
def inspect_leaf(leaf):
ret = { }
ret.update(inspect_leaf_definition(leaf))
ret.update(inspect_leaf_size(leaf))
return ret
##__________________________________________________________________||
def inspect_leaf_definition(leaf):
leafcount = leaf.GetLeafCount()
isArray = not IsROOTNullPointer(leafcount)
ret = { }
ret['name'] = leaf.GetName()
ret['type'] = leaf.GetTypeName()
ret['isarray'] = '1' if isArray else '0'
ret['countname'] = leafcount.GetName() if isArray else None
ret['title'] = leaf.GetBranch().GetTitle()
return ret
##__________________________________________________________________||
def inspect_leaf_size(leaf):
ret = { }
zipbytes = leaf.GetBranch().GetZipBytes()/1024.0/1024.0 # MB
totalsize = leaf.GetBranch().GetTotalSize()/1024.0/1024.0 # MB
ret['size'] = zipbytes
ret['uncompressed_size'] = totalsize
ret['compression_factor'] = totalsize/zipbytes if zipbytes > 0 else 0
return ret
##__________________________________________________________________||
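##__________________________________________________________________||
## Hedged usage sketch (added for illustration, not part of the original
## module): summarize the leaves of a TTree. The file and tree names are
## placeholders; requires PyROOT.
if __name__ == '__main__':
    import ROOT
    tfile = ROOT.TFile.Open('tree.root')   # hypothetical input file
    tree = tfile.Get('Events')             # hypothetical tree name
    for leaf in inspect_tree(tree)['leaves']:
        print(leaf['name'], leaf['type'], round(leaf['size'], 3), 'MB')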
|
the-stack_106_23045 | # -*- coding: utf-8 -*-
# @Author: MaxST
# @Date: 2019-09-08 23:10:00
# @Last Modified by: MaxST
# @Last Modified time: 2019-09-15 20:17:33
import base64
from dynaconf import settings
from kivy.app import App
from kivy.core.image import Image as CoreImage
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.uix.image import Image
from kivy.uix.screenmanager import Screen
from kivymd.toast import toast
from kivymd.uix.button import MDIconButton
from kivymd.uix.list import ILeftBodyTouch, IRightBodyTouch
from db import User
from db import database_lock as db_lock
from errors import ContactExists, ContactNotExists, NotFoundUser
from jim_mes import Message
logger = Logger
class Contacts(Screen):
def __init__(self, **kwargs):
self.set_template()
self.set_viewclass()
self.app = App.get_running_app()
super().__init__(**kwargs)
self.app.events[f'done_{settings.USERS_REQUEST}'] = self.make_data
self.app.events[f'done_{settings.GET_CHATS}'] = self.make_data
def set_template(self):
Builder.load_file('templates/contacts.kv')
def set_viewclass(self):
self.viewclass_item = 'RVRowDel'
def get_raw_data(self, **kwargs):
user = User.by_name(settings.USER_NAME)
search = kwargs.get('search', '')
return user.get_chats(search) if kwargs.get('contacts', True) else user.not_contacts(search)
def make_data(self, *args, **kwargs):
        logger.info('Preparing contacts')
data = self.prepare_data(self.get_raw_data(**kwargs))
self.set_data(data)
def prepare_data(self, objects):
data = []
for x in objects:
username = str(x.username)
if x.avatar:
img = CoreImage('data:image/' + 'png;base64,' + base64.b64encode(x.avatar).decode('ascii')).texture
else:
img = CoreImage('./templates/img/avatar.png').texture
data.append({
'viewclass': self.viewclass_item,
'text': username,
'callback': self.select_active,
'callback_del': self.del_active,
'image': img,
})
return data
def set_data(self, data):
self.ids.rv_main.data = data
def select_active(self, row):
self.app.main_widget.ids.toolbar.title = row.text
self.app.show_screen('chat')
def del_active(self, row):
user = User.by_name(settings.USER_NAME)
name_contact = row.text
try:
with db_lock:
chat = user.del_contact(name_contact)
self.app.client.notify(f'send_{settings.MESSAGE}', msg=Message(**{
settings.ACTION: settings.DEL_CONTACT,
settings.USER: settings.USER_NAME,
settings.ACCOUNT_NAME: name_contact,
}))
self.app.send_chat(chat)
except (ContactExists, NotFoundUser, ContactNotExists) as e:
            toast('Error\n' + str(e))
logger.error(e)
else:
self.make_data()
class AvatarSampleWidget(ILeftBodyTouch, Image):
pass
class DelWidget(IRightBodyTouch, MDIconButton):
pass
|
the-stack_106_23047 | # This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import os
import pickle
import time
from datetime import datetime
from pprint import pprint
import sys
import logging
import traceback
from termcolor import colored
import itertools
import shutil
from p3iv_utils.consoleprint import Print2Console
from p3iv_utils.ofstream import create_output_dir, create_output_path, save_settings
from p3iv_utils.lanelet_map_reader import get_lanelet_map
from p3iv_types.vehicle import Vehicle
from p3iv_modules.execute import drive, predict
from p3iv_core.configurations.utils import load_configurations
from p3iv_core.bindings.dataset import SimulationBindings
def run(configurations, f_execute=drive):
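    """Simulate all planned vehicles over the configured timestamps.

    configurations: settings dict; the keys read here include
    'timestamp_begin', 'timestamp_end', ['temporal']['dt'], 'simulation_type'
    ('open-loop' or 'closed-loop'), 'meta_state' and 'save_dir'.
    f_execute: callable run for every vehicle whose objective defines a
    'toLanelet'; defaults to drive (predict is the other imported choice).
    Returns the ground-truth container holding all vehicles.
    """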
# Print system time
Print2Console.p("ss", ["Simulation start time:", time.ctime()], style="bright")
# Print settings
Print2Console.p("s", ["=" * 72], style="magenta", bold=True)
Print2Console.p("s", ["Simulation configurations:"], style="magenta", bold=True)
Print2Console.p("s", ["=" * 72], style="magenta", bold=True)
pprint(configurations)
# Load lanelet2 map
laneletmap = get_lanelet_map(configurations)
# Get ground-truth object data
bindings = SimulationBindings(configurations, laneletmap)
ground_truth = bindings.create_ground_truth(configurations["timestamp_begin"])
# Extract timestamps to be computed
timestamps = list(
range(configurations["timestamp_begin"], configurations["timestamp_end"] + 1, configurations["temporal"]["dt"])
)
# Perform computation
for i, ts_now in enumerate(timestamps):
# Print information
Print2Console.p("s", ["=" * 72], style="magenta", bold=True)
Print2Console.p("sf", ["Computing timestamp:", ts_now], first_col_w=38, style="magenta", bold=True)
Print2Console.p("s", ["=" * 72], style="magenta", bold=True)
# update planned motion from previous solution or from dataset
if configurations["simulation_type"] == "open-loop" or i == 0:
# update ground truth objects
bindings.update_open_loop_simulation(ground_truth, ts_now)
elif configurations["simulation_type"] == "closed-loop":
# check and get new vehicles
bindings.update_open_loop_simulation(ground_truth, ts_now)
for v in list(ground_truth.values()):
# overwrite open loop data if the vehicle is specified for planning
if v.id in list(configurations["meta_state"].keys()):
state_ts_now = v.timestamps.previous().plan_optimal.states[1]
v.timestamps.create_and_add(ts_now)
v.timestamps.latest().state = state_ts_now
else:
msg = "'simulation_type' in configurations is wrong.\n" + "Choose between 'open-loop' and 'closed-loop'"
raise Exception(msg)
# Compute the trajectory of vehicles who have a 'toLanelet' in their **objective**!
for vehicle in [_v for _v in ground_truth.vehicles() if _v.objective.toLanelet]:
try:
f_execute(vehicle, ground_truth)
# if you want to have plots after each timestamp, you can add them here
curr_save_dir = os.path.join(configurations["save_dir"], str(ts_now), str(vehicle.id))
os.makedirs(curr_save_dir)
# Update vehicle data
ground_truth.update(vehicle)
except:
traceback.print_exc()
msg = "Simulation terminated before timestamp " + str(configurations["timestamp_end"])
msg += "\nThere may be a problem in calculations. "
msg += "\nMaybe the vehicle has reached its destination?"
print(colored(msg, "red"))
break
else:
continue
break
Print2Console.p("s", ["=" * 72], style="magenta", bold=True)
Print2Console.p("s", ["Simulation completed!"], style="magenta", bold=True)
Print2Console.p("s", ["=" * 72], style="magenta", bold=True)
return ground_truth
|
the-stack_106_23048 | from pypy.translator.simplify import get_graph
from pypy.tool.compat import md5
def get_statistics(graph, translator, save_per_graph_details=None, ignore_stack_checks=False):
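    """Count graphs, blocks and operations reachable from `graph`.

    direct_call and indirect_call targets are followed transitively.  With
    ignore_stack_checks=True, calls into ll_stack_check* graphs are neither
    counted nor followed.  If save_per_graph_details is a filename, one
    sorted "md5-of-bytecode name nblocks nops" line per graph is written
    there.  Returns (num_graphs, num_blocks, num_ops)."""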
seen_graphs = {}
stack = [graph]
num_graphs = 0
num_blocks = 0
num_ops = 0
per_graph = {}
while stack:
graph = stack.pop()
if graph in seen_graphs:
continue
seen_graphs[graph] = True
num_graphs += 1
old_num_blocks = num_blocks
old_num_ops = num_ops
for block in graph.iterblocks():
num_blocks += 1
for op in block.operations:
if op.opname == "direct_call":
called_graph = get_graph(op.args[0], translator)
if called_graph is not None and ignore_stack_checks:
if called_graph.name.startswith('ll_stack_check'):
continue
if called_graph is not None:
stack.append(called_graph)
elif op.opname == "indirect_call":
called_graphs = op.args[-1].value
if called_graphs is not None:
stack.extend(called_graphs)
num_ops += 1
per_graph[graph] = (num_blocks-old_num_blocks, num_ops-old_num_ops)
if save_per_graph_details:
details = []
for graph, (nblocks, nops) in per_graph.iteritems():
try:
code = graph.func.func_code.co_code
except AttributeError:
code = "None"
hash = md5(code).hexdigest()
details.append((hash, graph.name, nblocks, nops))
details.sort()
f = open(save_per_graph_details, "w")
try:
for hash, name, nblocks, nops in details:
print >>f, hash, name, nblocks, nops
finally:
f.close()
return num_graphs, num_blocks, num_ops
def print_statistics(graph, translator, save_per_graph_details=None, ignore_stack_checks=False):
num_graphs, num_blocks, num_ops = get_statistics(graph, translator, save_per_graph_details,
ignore_stack_checks=ignore_stack_checks)
print ("Statistics:\nnumber of graphs %s\n"
"number of blocks %s\n"
"number of operations %s\n") % (num_graphs, num_blocks, num_ops)
|
the-stack_106_23051 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import tg
from nose.tools import assert_equal, assert_in, assert_not_in
from alluratest.controller import TestRestApiBase
from allura.model import Project, User
from allura.tests import decorators as td
from allura.tests import TestController
class TestUserProfile(TestController):
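    """Functional tests for the /u/<username>/profile/ pages: rendering,
    feeds, user-to-user messaging and pluggable profile sections."""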
@td.with_user_project('test-admin')
def test_profile(self):
r = self.app.get('/u/test-admin/profile/')
assert_equal('Test Admin',
r.html.find('h1', 'project_title').find('a').text)
sections = set([c for s in r.html.findAll(None, 'profile-section') for c in s['class'].split()])
assert_in('personal-data', sections)
assert_in('Username:test-admin', r.html.find(None, 'personal-data').getText())
assert_in('projects', sections)
assert_in('Test Project', r.html.find(None, 'projects').getText())
assert_in('Last Updated:', r.html.find(None, 'projects').getText())
assert_in('tools', sections)
assert_in('Admin', r.html.find(None, 'tools').getText())
assert_in('skills', sections)
assert_in('No skills entered', r.html.find(None, 'skills').getText())
def test_wrong_profile(self):
self.app.get('/u/no-such-user/profile/', status=404)
@td.with_user_project('test-user')
def test_missing_user(self):
User.query.remove(dict(username='test-user'))
p = Project.query.get(shortname='u/test-user')
assert p is not None and p.is_user_project
response = self.app.get('/u/test-user/profile/', status=404)
def test_differing_profile_proj_shortname(self):
User.upsert('foo_bar')
# default auth provider's user_project_shortname() converts _ to - for the project name
response = self.app.get('/u/foo_bar/', status=302)
assert_equal(response.location, 'http://localhost/u/foo-bar/')
response = self.app.get('/u/foo_bar/profile/xyz?a=b', status=302)
assert_equal(response.location, 'http://localhost/u/foo-bar/profile/xyz?a=b')
# unfortunately this doesn't work because the default auth provider's user_by_project_shortname()
# doesn't try converting back (and it probably shouldn't since you could get multiple users with conflicting proj names)
# at least this works with other auth providers that have a more complete implementation of both
# user_project_shortname() and user_by_project_shortname()
#self.app.get('/u/foo-bar/profile/')
def test_differing_profile_proj_shortname_rest_api(self):
User.upsert('foo_bar')
# default auth provider's user_project_shortname() converts _ to - for the project name
response = self.app.get('/rest/u/foo_bar/', status=302)
assert_equal(response.location, 'http://localhost/rest/u/foo-bar/')
@td.with_user_project('test-admin')
@td.with_wiki
def test_feed(self):
for ext in ['', '.rss', '.atom']:
r = self.app.get('/u/test-admin/profile/feed%s' % ext, status=200)
assert 'Recent posts by Test Admin' in r
assert 'Home modified by Test Admin' in r
@td.with_user_project('test-admin')
@td.with_user_project('test-user')
@mock.patch('allura.tasks.mail_tasks.sendsimplemail')
@mock.patch('allura.lib.helpers.gen_message_id')
@mock.patch('allura.model.User.can_send_user_message')
def test_send_message(self, check, gen_message_id, sendsimplemail):
check.return_value = True
gen_message_id.return_value = 'id'
test_user = User.by_username('test-user')
test_user.set_pref('email_address', '[email protected]')
response = self.app.get(
'/u/test-user/profile/send_message', status=200)
assert 'you currently have user messages disabled' not in response
assert '<b>From:</b> "Test Admin" <[email protected]>' in response
self.app.post('/u/test-user/profile/send_user_message',
params={'subject': 'test subject',
'message': 'test message',
'cc': 'on'})
sendsimplemail.post.assert_called_once_with(
cc=User.by_username('test-admin').get_pref('email_address'),
text=u'test message\n\n---\n\nThis message was sent to you via the Allura web mail form. You may reply to this message directly, or send a message to Test Admin at http://localhost/u/test-admin/profile/send_message\n',
toaddr=User.by_username('test-user').get_pref('email_address'),
fromaddr=User.by_username('test-admin').get_pref('email_address'),
reply_to=User.by_username('test-admin').get_pref('email_address'),
message_id=u'id',
subject=u'test subject')
sendsimplemail.reset_mock()
self.app.post('/u/test-user/profile/send_user_message',
params={'subject': 'test subject',
'message': 'test message'})
sendsimplemail.post.assert_called_once_with(
cc=None,
text=u'test message\n\n---\n\nThis message was sent to you via the Allura web mail form. You may reply to this message directly, or send a message to Test Admin at http://localhost/u/test-admin/profile/send_message\n',
toaddr=User.by_username('test-user').get_pref('email_address'),
fromaddr=User.by_username('test-admin').get_pref('email_address'),
reply_to=User.by_username('test-admin').get_pref('email_address'),
message_id=u'id',
subject=u'test subject')
check.return_value = False
response = self.app.get(
'/u/test-user/profile/send_message', status=200)
assert 'Sorry, messaging is rate-limited' in response
@td.with_user_project('test-user')
def test_send_message_for_anonymous(self):
r = self.app.get('/u/test-user/profile/send_message',
extra_environ={'username': '*anonymous'},
status=302)
assert 'You must be logged in to send user messages.' in self.webflash(
r)
r = self.app.post('/u/test-user/profile/send_user_message',
params={'subject': 'test subject',
'message': 'test message',
'cc': 'on'},
extra_environ={'username': '*anonymous'},
status=302)
assert 'You must be logged in to send user messages.' in self.webflash(
r)
@td.with_user_project('test-user')
def test_link_to_send_message_form(self):
User.by_username('test-admin').set_pref('email_address',
'[email protected]')
User.by_username('test-user').set_pref('email_address',
'[email protected]')
r = self.app.get('/u/test-user/profile',
status=200)
assert r.html.find('a', dict(href='send_message'))
@td.with_user_project('test-user')
def test_disable_user_messages(self):
User.by_username('test-admin').set_pref('email_address',
'[email protected]')
test_user = User.by_username('test-user')
test_user.set_pref('email_address', '[email protected]')
test_user.set_pref('disable_user_messages', True)
r = self.app.get('/u/test-user/profile')
assert '<a href="send_message">Send me a message</a>' not in r
r = self.app.get('/u/test-user/profile/send_message', status=302)
assert 'This user has disabled direct email messages' in self.webflash(
r)
@td.with_user_project('test-admin')
@td.with_user_project('test-user')
def test_user_messages_sender_disabled(self):
admin_user = User.by_username('test-admin')
admin_user.set_pref('email_address', '[email protected]')
admin_user.set_pref('disable_user_messages', True)
test_user = User.by_username('test-user')
test_user.set_pref('email_address', '[email protected]')
r = self.app.get('/u/test-user/profile/send_message', status=200)
assert 'you currently have user messages disabled' in r
@td.with_user_project('test-user')
def test_profile_sections(self):
project = Project.query.get(shortname='u/test-user')
app = project.app_instance('profile')
def ep(n):
m = mock.Mock()
m.name = n
m.load()().display.return_value = 'Section %s' % n
return m
eps = map(ep, ['a', 'b', 'c', 'd'])
order = {'user_profile_sections.order': 'b, d,c , f '}
if hasattr(type(app), '_sections'):
delattr(type(app), '_sections')
with mock.patch('allura.lib.helpers.iter_entry_points') as iep:
with mock.patch.dict(tg.config, order):
iep.return_value = eps
sections = app.profile_sections
assert_equal(sections, [
eps[1].load(),
eps[3].load(),
eps[2].load(),
eps[0].load()])
r = self.app.get('/u/test-user/profile')
assert_in('Section a', r.body)
assert_in('Section b', r.body)
assert_in('Section c', r.body)
assert_in('Section d', r.body)
assert_not_in('Section f', r.body)
class TestUserProfileHasAccessAPI(TestRestApiBase):
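    """Tests for the has_access REST endpoint under /rest/u/<username>/profile/."""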
@td.with_user_project('test-admin')
def test_has_access_no_params(self):
self.api_get('/rest/u/test-admin/profile/has_access', status=404)
self.api_get('/rest/u/test-admin/profile/has_access?user=root', status=404)
self.api_get('/rest/u/test-admin/profile/has_access?perm=read', status=404)
@td.with_user_project('test-admin')
def test_has_access_unknown_params(self):
"""Unknown user and/or permission always False for has_access API"""
r = self.api_get(
'/rest/u/test-admin/profile/has_access?user=babadook&perm=read',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
r = self.api_get(
'/rest/u/test-admin/profile/has_access?user=test-user&perm=jump',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
@td.with_user_project('test-admin')
def test_has_access_not_admin(self):
"""
User which has no 'admin' permission on neighborhood can't use
has_access API
"""
self.api_get(
'/rest/u/test-admin/profile/has_access?user=test-admin&perm=admin',
user='test-user',
status=403)
@td.with_user_project('test-admin')
def test_has_access(self):
r = self.api_get(
'/rest/u/test-admin/profile/has_access?user=test-admin&perm=admin',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], True)
r = self.api_get(
'/rest/u/test-admin/profile/has_access?user=test-user&perm=admin',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
|
the-stack_106_23052 | import sys
H, W, Q = map(int, input().split())
h = [[0] * W for _ in range(H)]
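# Added note: interactive solution. Every cell is probed once with a
# "1 1 i j" query against the fixed corner (1, 1) and the judge's answers are
# cached in h; each of the Q real queries below is then answered from this
# cache without further probing.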
for i in range(H):
for j in range(W):
print('1 1 {} {}'.format(i+1, j+1))
sys.stdout.flush()
h[i][j] = int(input())
for _ in range(Q):
Si, Sj, Ti, Tj = map(lambda x: int(x)-1 ,input().split())
if Si > Ti:
Si, Ti = Ti, Si
Sj, Tj = Tj, Sj
if Sj < Tj:
print(h[Ti][Tj] - h[Si][Sj])
else:
print(h[Ti][Tj] + h[Si][Sj] - 2 * h[Si][Tj])
sys.stdout.flush()
|