	from six import assertRegex
from . import *
class TestUserArgs(IntegrationTest):
    def __init__(self, *args, **kwargs):
        IntegrationTest.__init__(
            self, os.path.join(examples_dir, '10_custom_args'),
            configure=False, *args, **kwargs
        )
    def test_build_default(self):
        self.configure()
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from unnamed!\n')
    def test_build_with_args(self):
        self.configure(extra_args=['--name=foo'])
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from foo!\n')
    def test_help(self):
        os.chdir(self.srcdir)
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure']
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$')
    def test_help_explicit_srcdir(self):
        os.chdir(this_dir)
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure', self.srcdir]
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$')
# ----------------------------------------------------------------------
	"""This module collects several utility funictions we found useful at HUDORA. Nothing special in there.
Consider it BSD Licensed.
"""
from __future__ import unicode_literals
__revision__ = "$Revision$"
# ----------------------------------------------------------------------
	from LogDecoders import *
class McuLogDecoder(DebugLogDecoder):
    def __init__(self, dev_name, filter_dict, config):
        super(McuLogDecoder, self).__init__(dev_name, filter_dict)
        self.mcu_decode_output_folder = './mcu_log_files/'
        self.filter_out_count = 0  # Record the discarded log count
        self.filter_in_count = 0  # Record the kept log count
        self.config = config
    def load_one_file(self, file_name):
        # Input: do not include the file extension in the file name.
        with open(self.mcu_decode_output_folder + '{0}.txt'.format(file_name)) as f_t:
            one_row = f_t.readline()  # The whole raw log is stored on a single line.
        self.f_out = open(self.mcu_decode_output_folder + file_name + '_decoded.txt', 'w')  # Output handler
        return one_row
    def state_machine(self, row_all):
        states = {'UNKNOWN': 0, 'PREAMBLE': 1, 'COUNT': 2, 'TICK': 3,
                  'DATA': 5, 'LENGTH': 4, 'FINISHED': 6}  # UART state machine
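        # Frame layout as inferred from the transitions below (an assumption, not a documented spec):
        # '%' (0x25) + 'DBG:' preamble, a 4-byte sequence counter, a 4-byte time tick,
        # 4 skipped bytes (a leading 'A' nibble on the last one marks an application report),
        # a 2-byte payload length, and finally the payload itself.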
        str_buf = []
        raw_buf = []
        st = states['PREAMBLE']
        # Initialize local variable to prevent warning.
        seq_num = 0
        time_tick = 0
        parsed_msg = ''
        time_stamp = .0
        payload_len = 1
        max_len = 0
        app_rep_flag = False
        empty_msg_list = [0, 0, .0]  # Order: seq_num, timestamp, time tick,
        parsed_log_list = empty_msg_list.copy()
        row_buf = row_all.split(' ')
        print('Message byte length:', len(row_buf))
        while len(row_buf) > 0:
            if st == states['PREAMBLE']:
                new_byte = row_buf.pop(0)
                # print(new_byte)
                if new_byte == '25':  # '%'
                    str_buf.append(new_byte)
                    # TODO: change read byte to traversing.
                    new_byte = ''
                    for i in range(4):
                        new_byte += row_buf.pop(0)
                    if new_byte == '4442473A':  # 'DBG:'
                        str_buf.append(new_byte)
                        st = states['COUNT']
                        time_stamp = time.time()
                        parsed_log_list[1] = time_stamp
                    else:
                        str_buf = []
                else:
                    str_buf = []  # Empty the buf and restart
                    # str_buf.append(new_byte)
                    # if len(str_buf) > 200:  # read
                    #     print('Read too m')
                    #     self.dbg_run_flag = False
            elif st == states['COUNT']:
                # 4-byte message counter.
                str_buf = []
                for i in range(4):
                    str_buf.append(row_buf.pop(0))
                num_temp = self.hex_to_decimal(str_buf)
                if num_temp - seq_num != 1:
                    missing_log_msg = '[Warning] Inconsistent sequence number detected! This: {0}, Prev: {1}'.format(num_temp, seq_num)
                    print(missing_log_msg)
                # Track the latest sequence number for the next comparison.
                seq_num = num_temp
                parsed_log_list[0] = seq_num  # Update the parsed-log list
                str_buf = []
                st = states['TICK']
            elif st == states['TICK']:
                str_buf = []
                for i in range(4):
                    str_buf.append(row_buf.pop(0))
                time_tick = self.hex_to_decimal(str_buf)
                parsed_log_list[2] = time_tick  # Update the parsed-log list
                for i in range(4):
                    dummy = row_buf.pop(0)  # Skip the unused bytes.
                if len(dummy) == 0:
                    st = states['PREAMBLE']
                    continue
                if dummy[0] == 'A':  # This is an application report message
                    app_rep_flag = True
                    parsed_log_list.append('APPLICATION_REPORT')
                else:
                    app_rep_flag = False
                    st = states['LENGTH']
            elif st == states['LENGTH']:
                str_buf = []
                for i in range(2):
                    str_buf.append(row_buf.pop(0))
                payload_len = self.hex_to_decimal(str_buf)
                # if max_len < payload_len:
                #     max_len = payload_len
                #     print('[INFO]Max payload length:', max_len)
                if payload_len > 720:
                    st = states['UNKNOWN']
                    print('[ERROR] Found abnormally large payload length.')
                    continue
                st = states['DATA']
            elif st == states['DATA']:
                # Read in as many data bytes as the length field specified.
                str_buf = []
                for i in range(payload_len):
                    str_buf.append(row_buf.pop(0))
                raw_buf = parsed_log_list.copy()
                raw_buf.append(self.byte_concatenation(str_buf))
                print(str_buf)
                if app_rep_flag is True:
                    str_buf.reverse()  # There is another reverse in the hex to ascii function.
                    parsed_log_list.append(self.hex_to_ascii(str_buf))
                    self.application_report_export_processing(parsed_log_list)
                else:
                    disp_list = self.parse_one_msg_common(str_buf)
                    # Output order: msg_id_dec, msg_name, msg_src, msg_dest, msg_length, decoded_msg
                    disp_list = [str(x) for x in disp_list]
                    self.f_out.write('\t'.join(disp_list))
                    print(disp_list)
                    # TODO: Bookmarked. extract info from log.
                    # self.extract_info_from_log(disp_list)
                    parsed_log_list += disp_list  # parsed_log_list has the time info; disp_list only has the message info.
                    self.display_export_processing(parsed_log_list)
                    if len(parsed_log_list) >= 6 and parsed_log_list[4] != 'N/A':  # msg name
                        self.dy_extract_rsrp_snr_from_log(parsed_log_list)
                        self.extract_npusch_power_from_log(parsed_log_list)
                        # print(parsed_log_dict)
                st = states['FINISHED']
            elif st == states['FINISHED']:
                parsed_log_list = empty_msg_list.copy()
                self.f_out.flush()
                st = states['PREAMBLE']  # Recycle the processing state machine
            elif st == states['UNKNOWN']:
                print('[ERROR] Something went wrong. Resetting to PREAMBLE state.')
                st = states['PREAMBLE']
        # All the bytes are processed.
        self.f_out.flush()
        self.f_close()
    def display_export_processing(self, info_list):
        # TODO: change this to txt friendly format.
        self.res = self.packet_output_formatting(info_list)
        if True:
            res_disp = self.res.split('\n')[0] + '\n'  # Truncate the result and keep only the first line.
        else:
            res_disp = self.res
        if len(info_list) <= 5:
            print('[ERROR] Missing element in Info List.')
        try:
            is_filtered = self.check_filters(info_list[4])
        except IndexError:
            is_filtered = True
        if is_filtered is False:
            print(res_disp)
        # Apply the filter, exporting
        if self.config['Export decoded']:
            if self.config['Keep filtered logs'] is True:
                # Means write every log
                is_filtered = False
            if is_filtered is False:
                # This log needs to be exported
                if self.config['Export format'] == 'txt':
                    # self.f_exp.write(self.res)
                    self.file_io.write_debug_log_formatted(info_list)
                elif self.config['Export format'] == 'csv':
                    # self.f_exp_csv_writer.writerow(info_list)
                    self.file_io.write_debug_log_formatted(info_list)
    def application_report_export_processing(self, info_list):
        first_line = '#{0}\t{1}\t{2}\t{3}\t\n'.format(info_list[0], info_list[1],
                                                      info_list[2], info_list[3])
        whole_app_rep = first_line + info_list[4] + '\n'  # The 4th element is the actual msg. add double \n
        # Check filter
        is_filtered = self.check_filters('APPLICATION_REPORT')
        if is_filtered is False:
            if self.config['Run in Qt']:
                self.transfer_buf.append(whole_app_rep)
                self.dbg_uart_trigger.emit()
            else:
                print(whole_app_rep)
        if self.config['Export decoded']:
            if self.config['Keep filtered logs'] is True:
                # Means write every log
                is_filtered = False
            if is_filtered is False:
                # This log needs to be exported
                if self.config['Export format'] == 'txt':
                    # self.f_exp.write(whole_app_rep)
                    self.file_io.write_debug_log_formatted(whole_app_rep)
                elif self.config['Export format'] == 'csv':
                    # self.f_exp_csv_writer.writerow(info_list)
                    self.file_io.write_debug_log_formatted(info_list)
    def check_filters(self, log_name):
        is_filtered_flag = False  # True: not wanted log; False: wanted log.
        # Apply the filter, printing
        if self.filter_flag == 1:  # Filter out
            if log_name in self.filter_dict['FO']:  # Message name
                is_filtered_flag = True
                self.filter_out_count += 1
            else:
                self.filter_in_count += 1  # The log that is kept.
        elif self.filter_flag == 2:  # Filter in
            if log_name in self.filter_dict['FI']:  # Message in the set
                self.filter_in_count += 1
            else:
                is_filtered_flag = True
                self.filter_out_count += 1
        if self.filter_out_count % 1000 == 0 and self.filter_out_count > 0:
            filter_out_count_msg = '[INFO] Excluded log count: {0}'.format(self.filter_out_count)
            print(filter_out_count_msg)
            if self.config['Run in Qt']:
                self.sys_info_buf.append(filter_out_count_msg)
                self.dbg_uart_trigger.emit()  # Tell the main thread to update the system info monitor.
        if self.filter_in_count % 500 == 0 and self.filter_in_count > 0:
            filter_in_count_msg = '[INFO] Included log count: {0}'.format(self.filter_in_count)
            print(filter_in_count_msg)
            if self.config['Run in Qt']:
                self.sys_info_buf.append(filter_in_count_msg)
                self.dbg_uart_trigger.emit()
        return is_filtered_flag
    def byte_concatenation(self, b_list):
        # Convert ['AA', '3E', '4F'] to 'AA-3E-4F'.
        ret = ''
        for i in range(len(b_list)):
            if i != len(b_list) - 1:
                ret += b_list[i] + '-'
            else:
                ret += b_list[i]
        return ret
filter_out_list = {'N/A', 'UICC_DBG_LOG_P0', 'UICC_DBG_LOG_P1', 'UICC_DBG_LOG_P2'}  # Add items to only one of the two sets! E.g. to drop invalid messages, filter out 'N/A'.
filter_in_list = {}  # Add items to only one of the two sets!
filter_dict = {'FO': filter_out_list, 'FI': filter_in_list}
config = {'Device name': 'BC95',
          'Dbg port': 'COM3',
          'Filter dict': filter_dict,
          'Run in Qt': False,
          'Export decoded': True,
          'Export to file': True,  # Choose to export the decoded info. The raw log is mandatory.
          'Export filename time prefix': '%y%m%d_%H%M%S',
          'Keep filtered logs': False,  # Filtered logs are not printed, but you can still export them to file.
          'Time format': '%m-%d %H:%M:%S.%f',  # See time.strftime() for details.
          'Export format': 'csv'}  # txt or csv; "Export to file" must be enabled first.
mcu_decoder = McuLogDecoder('BC95', filter_dict, config)
mcu_decoder.state_machine(mcu_decoder.load_one_file('raw_dbg_log_short'))
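# Note: load_one_file() expects './mcu_log_files/raw_dbg_log_short.txt' to contain a single line of
# space-separated hex byte strings, e.g. '25 44 42 47 3A ...'; this is an assumption drawn from how
# state_machine() splits and consumes the row, not from a documented format.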
# ----------------------------------------------------------------------
	from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import os, sys
import argparse
import re
parser = argparse.ArgumentParser(description='Mask degenerate sites in alignment.')
parser.add_argument("--alignment", type = str, help="sequence(s) to be used, supplied as FASTA files", required=True)
parser.add_argument("--output", type = str, help="output file with masked sequence data.  FASTA file", required=True)
args = parser.parse_args()
    
##  Read sequence alignment
# alignment = SeqIO.parse(args.alignment, "fasta")
##  List of masked SeqRecords
masked_seqs = []
##  Mask sequences
with open(args.alignment, "r") as aln_handle:
    for record in SeqIO.parse(aln_handle, "fasta"):
        new_seq = str(record.seq.upper())
        new_seq = new_seq.replace("U", "T")
        new_seq_tuple = re.subn('[^ACGTN]', "N", new_seq)
        new_seq = new_seq_tuple[0]
        replacement_count = new_seq_tuple[1]
        
        ##  Create record
        id = record.id
        new_record = SeqRecord(Seq(new_seq), id, "", "")
        
        ##  Add record to list
        masked_seqs.append(new_record)
        
        ##  Write log info
        print(f'seq: {id}\treplacements: {replacement_count}')
    
##  Write output
SeqIO.write(masked_seqs, args.output, "fasta")
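##  Example invocation (file names are placeholders):
##      python mask_degenerate_sites.py --alignment input_aln.fasta --output masked_aln.fasta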
    
        
    
# ----------------------------------------------------------------------
	from .base import *  
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
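# For local development, DJANGO_SECRET_KEY can be supplied via the shell or a .env file read by
# env() (placeholder value below; assumes django-environ or an equivalent reader):
#   DJANGO_SECRET_KEY=local-dev-only-placeholder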
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
IP_DEVELOPMENT = ["192.168.100.%s" % i for i in range(1, 100)] + ["192.168.0.%s" % i for i in range(1, 100)]
ALLOWED_HOSTS = [
    "localhost",
    "0.0.0.0",
    "127.0.0.1",
    ".vercel.app",
] + IP_DEVELOPMENT 
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG  
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar']  
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']  
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions']  
# Your stuff...
# ------------------------------------------------------------------------------
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
    'DJANGO_DEFAULT_FROM_EMAIL',
    default='ThePucukan Sistem Delivery Manager<[email protected]>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[ThePucukan Sistem Delivery Manager]')
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = False
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = False
# ----------------------------------------------------------------------
	#!/usr/bin/env python3
import sys
sys.setrecursionlimit(3000)
# -----
# This section is just used to implement tail-recursion.
# You probably don't need to reverse this but you can try if you want ;p
class TR(Exception):
    SEEN = []
    
    def __init__(self, key, args, kwargs):
        self.key = key
        self.args = args
        self.kwargs = kwargs
def T(fn, name=''):
    def _fn(*args, **kwargs):
        key = id(_fn)
        if key in TR.SEEN:
            raise TR(key, args, kwargs)
        else:
            TR.SEEN.append(key)
            while True:
                try:
                    val = fn(*args, **kwargs)
                    TR.SEEN = TR.SEEN[:TR.SEEN.index(key)]
                    return val
                except TR as e:
                    if e.key != key:
                        raise
                    else:
                        args = e.args
                        kwargs = e.kwargs
                        
                    TR.SEEN = TR.SEEN[:TR.SEEN.index(key)+1]
    return _fn
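# T() wraps a function in a simple trampoline: a recursive self-call raises TR carrying the new
# arguments, and the outermost invocation catches it and replays the call in its while-loop,
# keeping Python's call stack shallow.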
# -----
# Sice machine:
____=lambda _:lambda __,**___:_(*__,**___)
_____=____(lambda _,*__:_)
______=____(lambda _,*__:__)
_______=____(lambda _,__:_)
________=____(lambda _,__:__)
_________=lambda *_:_
__________=lambda _,__,___:_____(______(_________(*(((),)*(_==())),___,__)))()
___________=lambda _:(_,)
____________=____(lambda *_,___=():__________(_,lambda:____________(______(_),___=___________(___)),lambda:___))
_____________=____(lambda *_:_________(*______(_),_____(_)))
______________=lambda _,__,___:__________(_,lambda:______________(_____(_),___(__),___),lambda:__)
_______________=T(lambda _,__,___:__________(_,lambda:_______________(_____(_),___(__),___),lambda:__))
________________=____(lambda *_:_______________(_____(____________(_)),_,_____________))
_________________=____(lambda *_:__________(______(_),lambda:_________________(_________(___________(_____(_)),*______(______(_)))),lambda:___________(_____(_))))
__________________=lambda _:_______________(_,0,lambda __:__+1)
___________________=lambda _,__,___:__________(_,lambda:___________________(______(_),__,_________(*___,__(_____(_)))),lambda:___)
____________________=lambda _,__:___________________(_,__,())
_____________________=lambda _,__,___:(__________(_______(_____(__)),lambda:__________(_______(_______(_____(__))),lambda:((_________(_____(___),*______(_)),_____________(__),______(___))),lambda:((_,_____________(__),_________(_____(_),*___)))),lambda:__________(_______(________(_____(__))),lambda:__________(_______(_______(________(_____(__)))),lambda:((______________(_____(___),_,_____________),_____________(__),______(___))),lambda:((______________(_____(___),_,________________),_____________(__),______(___))),),lambda:__________(_______(________(________(_____(__)))),lambda:__________(_______(_______(________(________(_____(__))))),lambda:(_,______________(_____(_______(_______(________(________(_____(__)))))),__,________________),___),lambda:(_,______________(_____(________(_______(________(________(_____(__)))))),__,_____________),___)),lambda:__________(_______(________(________(________(_____(__))))),lambda:__________(_______(_______(________(________(________(_____(__)))))),lambda:(_,_____________(__),_________(_____(_______(_______(________(________(________(_____(__))))))),*___)),lambda:(_,_____________(__),_________(_____(_______________(_____(________(_______(________(________(________(_____(__))))))),___,_____________)),*___))),lambda:__________(_______(________(________(________(________(_____(__)))))),lambda:__________(_______(_______(________(________(________(________(_____(__))))))),lambda:(_,__________(_____(___),lambda:_____________(__),lambda:_____________(_____________(__))),______(___)),lambda:(_,______________(_____(___),__,_____________),______(___))),lambda:__________(_______(________(________(________(________(________(_____(__))))))),lambda:__________(_______(_______(________(________(________(________(________(_____(__)))))))),lambda:(_,_____________(__),_________(_______________(_____(___),_____(______(___)),___________),*______(______(___)))),lambda:(_,_____________(__),_________(_______________(_____(___),_____(______(___)),_____),*______(______(___))))),lambda:())))))))
______________________=T(lambda _,__,___:__________(_____(__),lambda:______________________(*_____________________(_,__,___)),lambda:_))
_______________________=lambda _,__:____________________(______________________(((),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),()),__,_________(*____________________(____________________(_,lambda _:((),)*_),_________________),*(((),(),(),(),(),(),(),(),(),(),(),(),(),(),(),())))),__________________)
# -----
def load(cs, i=0):
    objs = []
    while True:
        if cs[i+1] == ')':
            return tuple(objs), i+1
        elif cs[i+1] == '(':
            obj, i = load(cs, i+1)
            objs.append(obj)
        elif cs[i+1] == ',':
            i += 1
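# load() parses a string of nested parentheses such as '((),((),()))' into nested Python tuples;
# the character at the current index is assumed to be an opening '(' and is never inspected.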
# this is apparently "too nested" for the native python parser, so we need to use a custom parser
prog_string = open('./prog', 'r').read()
prog, _ = load(prog_string)
flag = input('flag plz: ').encode('ascii')
print('checking...')
# --- takes 1-2 minutes to check flag
o = _______________________(flag, prog)
# ---
output = bytes(o[:o.index(0)]).decode('ascii')
print(output)
if output == 'Correct!':
    print('Flag: %s' % flag.decode('ascii'))
# ----------------------------------------------------------------------
	#
# @lc app=leetcode id=82 lang=python3
#
# [82] Remove Duplicates from Sorted List II
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None
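# Example: 1->2->3->3->4->4->5 becomes 1->2->5 (every value that appears more than once is removed entirely).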
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        dummy = pre = ListNode(0)
        dummy.next = head
        while head and head.next:
            if head.val == head.next.val:
                while head and head.next and head.val == head.next.val:
                    head = head.next
                head = head.next
                pre.next = head
            else:
                pre = pre.next
                head = head.next
        return dummy.next
# @lc code=end
# Accepted
# 168/168 cases passed(32 ms)
# Your runtime beats 99.85 % of python3 submissions
# Your memory usage beats 100 % of python3 submissions(13 MB)
# ----------------------------------------------------------------------
	import sys
# These are required libraries for using TensorFlow with a GPU
sys.path.append("C:/Users/Jonathan/AppData/Local/Programs/Python/Python37/Lib")
sys.path.append("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/bin")
sys.path.append("C:/cuda/bin")
import os
import csv
import tensorflow as tf
# This will be our class that we train to give us a hood angle output for a given range input
class HoodAngleModel:
	# Constructor for HoodAngleModel class
	def __init__(self, fileName="hoodAngleInterpolationValues.csv"):
		# These are the inputs and outputs to be trained on, for now just empty
		self.X = []
		self.Y = []
		
		# Configure TensorFlow to use the GPU
		self.configGPUs()
		# Load the range and hood angle data to be trained on
		self.loadData(fileName)
		# Check if we already trained a model
		if not os.path.exists('HoodAngleModel/'):
			# If we didn't already train a model make one, train it, and save it
			self.makeModel()
			self.trainModel()
			self.model.save("HoodAngleModel")
		else:
			# If we did already train a model just load it
			self.model = tf.keras.models.load_model("HoodAngleModel")
	# Configure TensorFlow to use the GPU
	def configGPUs(self):
		gpus = tf.config.experimental.list_physical_devices('GPU')
		if gpus:
			try:
				tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
				logicalGPUs = tf.config.experimental.list_logical_devices('GPU')
				print(len(gpus), "Physical GPUs,", len(logicalGPUs), "Logical GPU")
			except RuntimeError as e:
				print(e)
	# Create a neural net with a SGD optimizer and MSE loss function
	def makeModel(self):
		# Create a neural net with a shape of [1, 8, 16, 8, 1]
		self.model = tf.keras.models.Sequential()
		self.model.add(tf.keras.layers.Dense(8, activation='relu'))
		self.model.add(tf.keras.layers.Dense(16, activation='relu'))
		self.model.add(tf.keras.layers.Dense(8, activation='relu'))
		self.model.add(tf.keras.layers.Dense(1, activation='linear'))
		# Compile it with SGD optimizer and MSE loss function
		self.model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.01), 
						   loss=tf.keras.losses.MeanSquaredError(),
						   metrics=[])
	
	# Load range and hood angle data from a csv
	def loadData(self, fileName):
		with open(fileName, 'r') as csvFile:
			csvReader = csv.reader(csvFile)
			for line in csvReader:
				self.X.append(float(line[0]))
				self.Y.append(float(line[1]))
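	# The CSV is assumed to hold one "range,hoodAngle" pair per row with no header row, e.g.:
	#   1.52,34.0
	#   2.10,29.5
	# (values above are illustrative; the format is inferred from loadData() reading columns 0 and 1)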
	# Train the neural net on the range and hood angle data
	def trainModel(self, ep=100000):
		self.model.fit(self.X, self.Y, epochs=ep, batch_size=len(self.X), verbose=2)
	# Predict a hood angle value for a given range value
	def predict(self, x):
		return self.model.predict([x])[0][0]
	# This is strictly for demoing purposes
	def demoAccuracy(self):
		while True:
			x = float(input())
			print(round(self.predict(x), 2))
# Main function
def main():
	# Create a HoodAngleModel object
	hoodAngleModel = HoodAngleModel()
	# Print out the model's loss
	print(f"Loss: {hoodAngleModel.model.evaluate(hoodAngleModel.X, hoodAngleModel.Y)}")
	# Demo the accuracy of the model
	hoodAngleModel.demoAccuracy()
if __name__ == '__main__':
	main()
# ----------------------------------------------------------------------
	import unittest
from src.parsing import IncrementalParser
class IncrementalParserTests(unittest.TestCase):
    def test_incremental_parser(self):
        ip = IncrementalParser('some 50 users')
        self.assertEqual(ip.extract(r'(\w+)\s*?(\d+)'), ['some', '50'])
        self.assertEqual(ip.text(), 'users')
        self.assertEqual(ip.extract(r'(\w+)'), ['users'])
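# Optional entry point so the tests can also be run by executing this file directly:
if __name__ == '__main__':
    unittest.main()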
# ----------------------------------------------------------------------
	#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2017 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import pytest
from supervisor.states import ProcessStates
from unittest.mock import call, Mock
from supvisors.address import AddressStatus
from supvisors.statemachine import *
from supvisors.ttypes import AddressStates, SupvisorsStates
@pytest.fixture
def supvisors_ctx(supvisors):
    """ Create a Supvisors-like structure filled with some nodes. """
    nodes = supvisors.context.nodes
    nodes['127.0.0.1']._state = AddressStates.RUNNING
    nodes['10.0.0.1']._state = AddressStates.SILENT
    nodes['10.0.0.2']._state = AddressStates.RUNNING
    nodes['10.0.0.3']._state = AddressStates.ISOLATING
    nodes['10.0.0.4']._state = AddressStates.RUNNING
    nodes['10.0.0.5']._state = AddressStates.ISOLATED
    return supvisors
def test_abstract_state(supvisors_ctx):
    """ Test the Abstract state of the self.fsm. """
    state = AbstractState(supvisors_ctx)
    # check attributes at creation
    assert state.supvisors is supvisors_ctx
    assert state.local_node_name == '127.0.0.1'
    # call empty methods
    state.enter()
    state.next()
    state.exit()
    # test check_nodes method
    # declare local and master address running
    supvisors_ctx.context._master_node_name = '10.0.0.3'
    supvisors_ctx.context.nodes['127.0.0.1']._state = AddressStates.RUNNING
    supvisors_ctx.context.nodes['10.0.0.3']._state = AddressStates.RUNNING
    assert state.check_nodes() is None
    # transition to INITIALIZATION state if the local address or master address is not RUNNING
    supvisors_ctx.context.nodes['127.0.0.1']._state = AddressStates.SILENT
    assert state.check_nodes() == SupvisorsStates.INITIALIZATION
    supvisors_ctx.context.nodes['127.0.0.1']._state = AddressStates.RUNNING
    supvisors_ctx.context.nodes['10.0.0.3']._state = AddressStates.SILENT
    assert state.check_nodes() == SupvisorsStates.INITIALIZATION
    supvisors_ctx.context.nodes['127.0.0.1']._state = AddressStates.SILENT
    assert state.check_nodes() == SupvisorsStates.INITIALIZATION
    # test abort_jobs method
    state.abort_jobs()
    assert supvisors_ctx.failure_handler.abort.called
    assert supvisors_ctx.starter.abort.called
def test_initialization_state(mocker, supvisors_ctx):
    """ Test the Initialization state of the fsm. """
    state = InitializationState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # 1. test enter method: master and start_date are reset
    # test that all active nodes have been reset to UNKNOWN
    state.enter()
    assert state.context.master_node_name == ''
    nodes = supvisors_ctx.context.nodes
    assert nodes['127.0.0.1'].state == AddressStates.UNKNOWN
    assert nodes['10.0.0.1'].state == AddressStates.SILENT
    assert nodes['10.0.0.2'].state == AddressStates.UNKNOWN
    assert nodes['10.0.0.3'].state == AddressStates.ISOLATING
    assert nodes['10.0.0.4'].state == AddressStates.UNKNOWN
    assert nodes['10.0.0.5'].state == AddressStates.ISOLATED
    # 2. test next method
    # trigger log for synchro time out
    state.context.start_date = 0
    # test that Supvisors wait for all nodes to be running or a given timeout is reached
    # test case no node is running, especially local node
    result = state.next()
    assert result == SupvisorsStates.INITIALIZATION
    # test case where addresses are still unknown and timeout is not reached
    nodes['127.0.0.1']._state = AddressStates.RUNNING
    nodes['10.0.0.2']._state = AddressStates.RUNNING
    nodes['10.0.0.4']._state = AddressStates.SILENT
    result = state.next()
    assert result == SupvisorsStates.INITIALIZATION
    # test case where no more nodes are still unknown
    nodes['10.0.0.1']._state = AddressStates.SILENT
    nodes['10.0.0.3']._state = AddressStates.ISOLATED
    result = state.next()
    assert result == SupvisorsStates.DEPLOYMENT
    # test case where end of synchro is forced based on core nodes running
    supvisors_ctx.options.force_synchro_if = {'10.0.0.2', '10.0.0.4'}
    nodes['10.0.0.3']._state = AddressStates.UNKNOWN
    nodes['10.0.0.4']._state = AddressStates.RUNNING
    # SYNCHRO_TIMEOUT_MIN not passed yet
    state.context.start_date = time() - 10
    result = state.next()
    assert result == SupvisorsStates.INITIALIZATION
    # no master set
    state.context.start_date = 0
    result = state.next()
    assert result == SupvisorsStates.DEPLOYMENT
    # master known, not in core nodes and not running
    supvisors_ctx.context.master_node_name = '10.0.0.3'
    result = state.next()
    assert result == SupvisorsStates.INITIALIZATION
    # master known, not in core nodes and running
    supvisors_ctx.context.master_node_name = '127.0.0.1'
    result = state.next()
    assert result == SupvisorsStates.DEPLOYMENT
    # 3. test exit method
    # test when master_node_name is already set: no change
    state.exit()
    assert supvisors_ctx.context.master_node_name == '127.0.0.1'
    # test when master_node_name is not set and no core nodes
    # check master is the lowest string among running node names
    supvisors_ctx.context.master_node_name = None
    supvisors_ctx.options.force_synchro_if = {}
    state.exit()
    assert supvisors_ctx.context.running_nodes() == ['127.0.0.1', '10.0.0.2', '10.0.0.4']
    assert supvisors_ctx.context.master_node_name == '10.0.0.2'
    # test when master_node_name is not set and forced nodes are used
    # check master is the lowest string among the intersection between running node names and forced nodes
    supvisors_ctx.context.master_node_name = None
    supvisors_ctx.options.force_synchro_if = {'10.0.0.3', '10.0.0.4'}
    state.exit()
    assert supvisors_ctx.context.running_nodes() == ['127.0.0.1', '10.0.0.2', '10.0.0.4']
    assert supvisors_ctx.context.master_node_name == '10.0.0.4'
def test_master_deployment_state(mocker, supvisors_ctx):
    """ Test the Deployment state of the fsm. """
    state = MasterDeploymentState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # test enter method
    mocked_starter = supvisors_ctx.starter.start_applications
    supvisors_ctx.fsm.redeploy_mark = True
    state.enter()
    assert mocked_starter.called
    assert not supvisors_ctx.fsm.redeploy_mark
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.INITIALIZATION
    assert not supvisors_ctx.starter.is_starting_completed.called
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # test next method if the local node is master
    supvisors_ctx.context._is_master = True
    # stay in DEPLOYMENT if a start sequence is in progress
    supvisors_ctx.starter.is_starting_completed.return_value = False
    result = state.next()
    assert result == SupvisorsStates.DEPLOYMENT
    # return OPERATION and no start sequence is in progress
    supvisors_ctx.starter.is_starting_completed.return_value = True
    result = state.next()
    assert result == SupvisorsStates.OPERATION
    # no exit implementation. just call it without test
    state.exit()
def test_master_operation_state(mocker, supvisors_ctx):
    """ Test the Operation state of the fsm. """
    mocked_start = supvisors_ctx.starter.is_starting_completed
    # create instance
    state = MasterOperationState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # no enter implementation. just call it without test
    state.enter()
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.INITIALIZATION
    assert not mocked_start.called
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # do not leave OPERATION state if a starting or a stopping is in progress
    mocked_start.return_value = False
    result = state.next()
    assert result == SupvisorsStates.OPERATION
    mocked_start.return_value = True
    mocked_stop = mocker.patch.object(supvisors_ctx.stopper, 'is_stopping_completed', return_value=False)
    result = state.next()
    assert result == SupvisorsStates.OPERATION
    mocked_stop.return_value = True
    # create address context
    for node_name in supvisors_ctx.address_mapper.node_names:
        status = AddressStatus(node_name, supvisors_ctx.logger)
        supvisors_ctx.context.nodes[node_name] = status
    # no starting or stopping is in progress
    # stay in OPERATION if no conflict
    mocked_conflict = mocker.patch.object(supvisors_ctx.context, 'conflicting', return_value=False)
    # mark for re-deployment
    supvisors_ctx.fsm.redeploy_mark = True
    result = state.next()
    assert result == SupvisorsStates.DEPLOYMENT
    # transit to CONCILIATION if conflict detected
    mocked_conflict.return_value = True
    result = state.next()
    assert result == SupvisorsStates.CONCILIATION
    # no exit implementation. just call it without test
    state.exit()
def test_master_conciliation_state(mocker, supvisors_ctx):
    """ Test the Conciliation state of the fsm. """
    mocked_conciliate = mocker.patch('supvisors.statemachine.conciliate_conflicts')
    mocked_start = supvisors_ctx.starter.is_starting_completed
    mocked_stop = supvisors_ctx.stopper.is_stopping_completed
    # create instance
    state = MasterConciliationState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # test enter method
    mocker.patch.object(supvisors_ctx.context, 'conflicts', return_value=[1, 2, 3])
    state.enter()
    assert mocked_conciliate.call_args_list == [call(supvisors_ctx, 0, [1, 2, 3])]
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.INITIALIZATION
    assert not mocked_start.called
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # do not leave CONCILIATION state if a starting or a stopping is in progress
    mocked_start.return_value = False
    mocked_stop.return_value = False
    result = state.next()
    assert result == SupvisorsStates.CONCILIATION
    mocked_start.return_value = True
    mocked_stop.return_value = False
    result = state.next()
    assert result == SupvisorsStates.CONCILIATION
    mocked_start.return_value = False
    mocked_stop.return_value = True
    result = state.next()
    assert result == SupvisorsStates.CONCILIATION
    # consider that no starting or stopping is in progress
    mocked_start.return_value = True
    mocked_stop.return_value = True
    # if local address and master address are RUNNING and conflict still detected, re-enter CONCILIATION
    mocker.patch.object(supvisors_ctx.context, 'conflicting', return_value=True)
    mocked_enter = mocker.patch.object(state, 'enter')
    result = state.next()
    assert mocked_enter.call_count == 1
    assert result == SupvisorsStates.CONCILIATION
    # transit to OPERATION if local node and master node are RUNNING and no conflict detected
    supvisors_ctx.context.conflicting.return_value = False
    result = state.next()
    assert result == SupvisorsStates.OPERATION
    # no exit implementation. just call it without test
    state.exit()
def test_master_restarting_state(mocker, supvisors_ctx):
    """ Test the Restarting state of the fsm. """
    mocked_starter = supvisors_ctx.starter.abort
    mocked_stopper = supvisors_ctx.stopper.stop_applications
    mocked_stopping = supvisors_ctx.stopper.is_stopping_completed
    # create instance to test
    state = MasterRestartingState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # test enter method: starting and stopping in progress are aborted
    state.enter()
    assert mocked_starter.call_count == 1
    assert mocked_stopper.call_count == 1
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.SHUTDOWN
    assert not mocked_stopping.called
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # test next method: all processes are stopped
    mocked_stopping.return_value = True
    result = state.next()
    assert result == SupvisorsStates.SHUTDOWN
    mocked_stopping.return_value = False
    result = state.next()
    assert result == SupvisorsStates.RESTARTING
    # test exit method: call to pusher send_restart for all addresses
    assert not state.supvisors.zmq.pusher.send_restart.called
    state.exit()
    assert state.supvisors.zmq.pusher.send_restart.call_args_list == [call('127.0.0.1')]
def test_master_shutting_down_state(mocker, supvisors_ctx):
    """ Test the ShuttingDown state of the fsm. """
    mocked_starter = supvisors_ctx.starter.abort
    mocked_stopper = supvisors_ctx.stopper.stop_applications
    mocked_stopping = supvisors_ctx.stopper.is_stopping_completed
    # create instance to test
    state = MasterShuttingDownState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # test enter method: starting and stopping in progress are aborted
    state.enter()
    assert mocked_starter.call_count == 1
    assert mocked_stopper.call_count == 1
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.SHUTDOWN
    assert not mocked_stopping.called
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # test next method: all processes are stopped
    result = state.next()
    assert result == SupvisorsStates.SHUTDOWN
    mocked_stopping.return_value = False
    result = state.next()
    assert result == SupvisorsStates.SHUTTING_DOWN
    # test exit method: call to pusher send_shutdown for all addresses
    assert not state.supvisors.zmq.pusher.send_shutdown.called
    state.exit()
    assert state.supvisors.zmq.pusher.send_shutdown.call_args_list == [call('127.0.0.1')]
def test_shutdown_state(supvisors_ctx):
    """ Test the ShutDown state of the fsm. """
    state = ShutdownState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # no enter / next / exit implementation. just call it without test
    state.enter()
    state.next()
    state.exit()
def test_slave_main_state(mocker, supvisors_ctx):
    """ Test the SlaveMain state of the fsm. """
    # create instance to test
    state = SlaveMainState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # no enter implementation. just call it without test
    state.enter()
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.INITIALIZATION
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # test next method: no next state proposed
    assert state.next() is None
    # no exit implementation. just call it without test
    state.exit()
def test_slave_restarting_state(mocker, supvisors_ctx):
    """ Test the SlaveRestarting state of the fsm. """
    # create instance to test
    state = SlaveRestartingState(supvisors_ctx)
    assert isinstance(state, AbstractState)
    # no enter implementation. just call it without test
    state.enter()
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.SHUTDOWN
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # test next method: no next state proposed
    assert state.next() is None
    # test exit
    assert not state.supvisors.zmq.pusher.send_restart.called
    state.exit()
    assert state.supvisors.zmq.pusher.send_restart.call_args_list == [call('127.0.0.1')]
def test_slave_shutting_down_state(mocker, supvisors_ctx):
    """ Test the SlaveShuttingDown state of the fsm. """
    # create instance to test
    state = SlaveShuttingDownState(supvisors_ctx)
    assert isinstance(state, SlaveRestartingState)
    # no enter implementation. just call it without test
    state.enter()
    # test next method if check_nodes return something
    mocker.patch.object(state, 'check_nodes', return_value=SupvisorsStates.INITIALIZATION)
    assert state.next() == SupvisorsStates.SHUTDOWN
    # test next method if check_nodes return nothing
    state.check_nodes.return_value = None
    # test next method: no next state proposed
    assert state.next() is None
    # test exit
    assert not state.supvisors.zmq.pusher.send_shutdown.called
    state.exit()
    assert state.supvisors.zmq.pusher.send_shutdown.call_args_list == [call('127.0.0.1')]
@pytest.fixture
def fsm(supvisors):
    """ Create the FiniteStateMachine instance to test. """
    return FiniteStateMachine(supvisors)
def test_creation(supvisors, fsm):
    """ Test the values set at construction. """
    assert fsm.supvisors is supvisors
    assert not fsm.redeploy_mark
    # test that the INITIALIZATION state is triggered at creation
    assert fsm.state == SupvisorsStates.INITIALIZATION
    assert isinstance(fsm.instance, InitializationState)
def test_state_string(fsm):
    """ Test the string conversion of state machine. """
    # test string conversion for all states
    for state in SupvisorsStates:
        fsm.state = state
        assert fsm.state.name == state.name
def test_serial(fsm):
    """ Test the serialization of state machine. """
    # test serialization for all states
    for state in SupvisorsStates:
        fsm.state = state
        assert fsm.serial() == {'statecode': state.value, 'statename': state.name}
# Patch all state events
MASTER_STATES = [cls.__name__ for cls in FiniteStateMachine._MasterStateInstances.values()]
SLAVE_STATES = ['InitializationState', 'SlaveMainState', 'SlaveRestartingState', 'SlaveShuttingDownState',
                'ShutdownState']
EVENTS = ['enter', 'next', 'exit']
@pytest.fixture
def mock_master_events(mocker):
    return [mocker.patch('supvisors.statemachine.%s.%s' % (cls, evt), return_value=None)
            for cls in MASTER_STATES
            for evt in EVENTS]
@pytest.fixture
def mock_slave_events(mocker):
    return [mocker.patch('supvisors.statemachine.%s.%s' % (cls, evt), return_value=None)
            for cls in SLAVE_STATES
            for evt in EVENTS]
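# Both fixtures return mocks ordered as [state0.enter, state0.next, state0.exit, state1.enter, ...],
# so index 3*i + 1 is the mocked next() of the i-th class in MASTER_STATES / SLAVE_STATES.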
def compare_calls(call_counts, mock_events):
    """ Compare call counts of mocked methods. """
    for call_count, mocked in zip(call_counts, mock_events):
        assert mocked.call_count == call_count
        mocked.reset_mock()
def test_master_simple_set_state(fsm, mock_master_events):
    """ Test single transitions of the state machine using set_state method.
    As it is a Master FSM, transitions are checked.
    Beware of the fixture sequence. If mock_master_events is set before fsm, mocks would capture the calls triggered
    from the FiniteStateMachine constructor.
    """
    instance_ref = fsm.instance
    # test set_state with identical state parameter
    fsm.set_state(SupvisorsStates.INITIALIZATION)
    compare_calls([0, 0, 0, 0, 0, 0], mock_master_events)
    assert fsm.instance is instance_ref
    assert fsm.state == SupvisorsStates.INITIALIZATION
    # test set_state with not authorized transition for master
    fsm.context._is_master = True
    fsm.set_state(SupvisorsStates.OPERATION)
    compare_calls([0, 0, 0, 0, 0, 0], mock_master_events)
    assert fsm.instance is instance_ref
    assert fsm.state == SupvisorsStates.INITIALIZATION
    # test set_state with authorized transition
    fsm.set_state(SupvisorsStates.DEPLOYMENT)
    compare_calls([0, 0, 1, 1, 1, 0], mock_master_events)
    assert fsm.instance is not instance_ref
    assert fsm.state == SupvisorsStates.DEPLOYMENT
def test_slave_simple_set_state(fsm, mock_slave_events):
    """ Test single transitions of the state machine using set_state method.
    Same transition rules apply.
    """
    instance_ref = fsm.instance
    # test set_state with identical state parameter
    fsm.set_state(SupvisorsStates.INITIALIZATION)
    compare_calls([0, 0, 0, 0, 0, 0], mock_slave_events)
    assert fsm.instance is instance_ref
    assert fsm.state == SupvisorsStates.INITIALIZATION
    # test set_state with not authorized transition
    fsm.set_state(SupvisorsStates.OPERATION)
    compare_calls([0, 0, 0, 0, 0, 0], mock_slave_events)
    assert fsm.instance is instance_ref
    assert fsm.state == SupvisorsStates.INITIALIZATION
    # test set_state with authorized transition
    fsm.set_state(SupvisorsStates.DEPLOYMENT)
    compare_calls([0, 0, 1, 1, 1, 0], mock_slave_events)
    assert fsm.instance is not instance_ref
    assert fsm.state == SupvisorsStates.DEPLOYMENT
    # test set_state with unauthorized transition
    fsm.set_state(SupvisorsStates.SHUTDOWN)
    compare_calls([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], mock_slave_events)
    assert fsm.state == SupvisorsStates.DEPLOYMENT
    # test set_state with unauthorized transition but forced
    fsm.set_state(SupvisorsStates.SHUTDOWN, True)
    compare_calls([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0], mock_slave_events)
    assert fsm.state == SupvisorsStates.SHUTDOWN
def test_master_complex_set_state(fsm, mock_master_events):
    """ Test multiple transitions of the Master FSM using set_state method. """
    mock_master_events[4].return_value = SupvisorsStates.OPERATION
    instance_ref = fsm.instance
    # test set_state with authorized transition
    fsm.context._is_master = True
    fsm.set_state(SupvisorsStates.DEPLOYMENT)
    compare_calls([0, 0, 1, 1, 1, 1, 1, 1, 0], mock_master_events)
    assert fsm.instance is not instance_ref
    assert fsm.state == SupvisorsStates.OPERATION
def test_master_no_next(fsm, mock_master_events):
    """ Test no transition of the state machine using next_method. """
    mock_master_events[1].return_value = SupvisorsStates.INITIALIZATION
    instance_ref = fsm.instance
    # test set_state with authorized transition
    fsm.context._is_master = True
    fsm.next()
    compare_calls([0, 1, 0], mock_master_events)
    assert fsm.instance is instance_ref
    assert fsm.state == SupvisorsStates.INITIALIZATION
def test_master_simple_next(fsm, mock_master_events):
    """ Test single transition of the state machine using next_method. """
    mock_master_events[1].return_value = SupvisorsStates.DEPLOYMENT
    mock_master_events[4].return_value = SupvisorsStates.DEPLOYMENT
    instance_ref = fsm.instance
    # test set_state with authorized transition
    fsm.context._is_master = True
    fsm.next()
    compare_calls([0, 1, 1, 1, 1, 0], mock_master_events)
    assert fsm.instance is not instance_ref
    assert fsm.state == SupvisorsStates.DEPLOYMENT
def test_master_complex_next(fsm, mock_master_events):
    """ Test multiple transitions of the state machine using next_method. """
    mock_master_events[1].side_effect = [SupvisorsStates.DEPLOYMENT, SupvisorsStates.DEPLOYMENT]
    mock_master_events[4].side_effect = [SupvisorsStates.OPERATION, SupvisorsStates.OPERATION]
    mock_master_events[7].side_effect = [SupvisorsStates.CONCILIATION, SupvisorsStates.INITIALIZATION,
                                         SupvisorsStates.RESTARTING]
    mock_master_events[10].side_effect = [SupvisorsStates.OPERATION]
    mock_master_events[13].return_value = SupvisorsStates.SHUTDOWN
    instance_ref = fsm.instance
    # test set_state with authorized transition
    fsm.context._is_master = True
    fsm.next()
    compare_calls([1, 2, 2, 2, 2, 2, 3, 3, 3, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0], mock_master_events)
    assert fsm.instance is not instance_ref
    assert fsm.state == SupvisorsStates.SHUTDOWN
def test_timer_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a timer event. """
    # apply patches
    mocked_event = mocker.patch.object(fsm.supvisors.context, 'on_timer_event', return_value=['proc_1', 'proc_2'])
    mocked_next = mocker.patch.object(fsm, 'next')
    mocked_add = fsm.supvisors.failure_handler.add_default_job
    mocked_trigger = fsm.supvisors.failure_handler.trigger_jobs
    mocked_isolation = mocker.patch.object(fsm.supvisors.context, 'handle_isolation', return_value=['2', '3'])
    # test when not master
    assert not fsm.context.is_master
    result = fsm.on_timer_event()
    # check result: marked processes are started
    assert result == ['2', '3']
    assert mocked_event.call_count == 1
    assert mocked_next.call_count == 1
    assert not mocked_add.called
    assert not mocked_trigger.called
    assert mocked_isolation.call_count == 1
    # reset mocks
    mocker.resetall()
    # test when the local node is master
    fsm.context._is_master = True
    assert fsm.context.is_master
    result = fsm.on_timer_event()
    # check result: marked processes are started
    assert result == ['2', '3']
    assert mocked_event.call_count == 1
    assert mocked_next.call_count == 1
    assert mocked_add.call_args_list == [call('proc_1'), call('proc_2')]
    assert mocked_trigger.call_count == 1
    assert mocked_isolation.call_count == 1
def test_tick_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a tick event. """
    # inject tick event and test call to context on_tick_event
    mocked_evt = mocker.patch.object(fsm.supvisors.context, 'on_tick_event')
    fsm.on_tick_event('10.0.0.1', {'tick': 1234})
    assert mocked_evt.call_count == 1
    assert mocked_evt.call_args == call('10.0.0.1', {'tick': 1234})
def test_process_state_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a process state event. """
    # prepare context
    fsm.supvisors.context._is_master = True
    process = Mock(application_name='appli', forced_state=None, **{'crashed.return_value': True})
    # get patches
    mocked_ctx = mocker.patch.object(fsm.supvisors.context, 'on_process_state_event', return_value=None)
    mocked_start_evt = fsm.supvisors.starter.on_event
    mocked_stop_evt = fsm.supvisors.stopper.on_event
    mocked_add = fsm.supvisors.failure_handler.add_default_job
    # context.on_process_event is always called
    # test that starter and stopper are not involved when corresponding process is not found
    fsm.on_process_state_event('10.0.0.1', {'process_name': 'dummy_proc'})
    assert mocked_ctx.call_args_list == [call('10.0.0.1', {'process_name': 'dummy_proc'})]
    assert not mocked_start_evt.called
    assert not mocked_stop_evt.called
    assert not mocked_add.called
    # when process is found but local node is not master, only starter and stopper are called
    mocked_ctx.return_value = process
    mocked_ctx.reset_mock()
    fsm.supvisors.context._is_master = False
    fsm.on_process_state_event('10.0.0.1', {'process_name': 'dummy_proc'})
    assert mocked_ctx.call_args_list == [call('10.0.0.1', {'process_name': 'dummy_proc'})]
    assert mocked_start_evt.call_args_list == [call(process)]
    assert mocked_stop_evt.call_args_list == [call(process)]
    assert not mocked_add.called
    # reset mocks
    mocked_ctx.reset_mock()
    mocked_start_evt.reset_mock()
    mocked_stop_evt.reset_mock()
    # process is found and local node is master. starter and stopper are called and the process has crashed
    # test with running_failure_strategy set to CONTINUE / RESTART_PROCESS so job is not added to failure handler
    fsm.supvisors.context._is_master = True
    for strategy in [RunningFailureStrategies.CONTINUE, RunningFailureStrategies.RESTART_PROCESS]:
        process.rules.running_failure_strategy = strategy
        fsm.on_process_state_event('10.0.0.1', {'process_name': 'dummy_proc'})
        assert mocked_ctx.call_args_list == [call('10.0.0.1', {'process_name': 'dummy_proc'})]
        assert mocked_start_evt.call_args_list == [call(process)]
        assert mocked_stop_evt.call_args_list == [call(process)]
        assert mocked_add.call_args_list == []
        # reset mocks
        mocked_ctx.reset_mock()
        mocked_start_evt.reset_mock()
        mocked_stop_evt.reset_mock()
    # test with running_failure_strategy set to STOP_APPLICATION
    # job is added to failure handler
    process.rules.running_failure_strategy = RunningFailureStrategies.STOP_APPLICATION
    fsm.on_process_state_event('10.0.0.1', {'process_name': 'dummy_proc'})
    assert mocked_ctx.call_args_list == [call('10.0.0.1', {'process_name': 'dummy_proc'})]
    assert mocked_start_evt.call_args_list == [call(process)]
    assert mocked_stop_evt.call_args_list == [call(process)]
    assert mocked_add.call_args_list == [call(process)]
    # reset mocks
    mocked_ctx.reset_mock()
    mocked_start_evt.reset_mock()
    mocked_stop_evt.reset_mock()
    mocked_add.reset_mock()
    # test with running_failure_strategy set to RESTART_APPLICATION
    # job is added to failure handler only if process crash is 'real' (not forced)
    # test with no forced state
    process.rules.running_failure_strategy = RunningFailureStrategies.RESTART_APPLICATION
    fsm.on_process_state_event('10.0.0.1', {'process_name': 'dummy_proc'})
    assert mocked_ctx.call_args_list == [call('10.0.0.1', {'process_name': 'dummy_proc'})]
    assert mocked_start_evt.call_args_list == [call(process)]
    assert mocked_stop_evt.call_args_list == [call(process)]
    assert mocked_add.call_args_list == [call(process)]
    # reset mocks
    mocked_ctx.reset_mock()
    mocked_start_evt.reset_mock()
    mocked_stop_evt.reset_mock()
    mocked_add.reset_mock()
    # test with forced state
    process.forced_state = ProcessStates.FATAL
    fsm.on_process_state_event('10.0.0.1', {'process_name': 'dummy_proc'})
    assert mocked_ctx.call_args_list == [call('10.0.0.1', {'process_name': 'dummy_proc'})]
    assert mocked_start_evt.call_args_list == [call(process)]
    assert mocked_stop_evt.call_args_list == [call(process)]
    assert not mocked_add.called
    # reset mocks
    mocked_ctx.reset_mock()
    mocked_start_evt.reset_mock()
    mocked_stop_evt.reset_mock()
    # test when process has not crashed
    process.crashed.return_value = False
    for strategy in RunningFailureStrategies:
        process.rules.running_failure_strategy = strategy
        fsm.on_process_state_event('10.0.0.1', {'process_name': 'dummy_proc'})
        assert mocked_ctx.call_args_list == [call('10.0.0.1', {'process_name': 'dummy_proc'})]
        assert mocked_start_evt.call_args_list == [call(process)]
        assert mocked_stop_evt.call_args_list == [call(process)]
        assert not mocked_add.called
        # reset mocks
        mocked_ctx.reset_mock()
        mocked_start_evt.reset_mock()
        mocked_stop_evt.reset_mock()
        mocked_add.reset_mock()
def test_on_process_added_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a process added event. """
    mocked_load = mocker.patch.object(fsm.context, 'load_processes')
    fsm.on_process_added_event('10.0.0.1', {'info': 'dummy_info'})
    assert mocked_load.call_args_list == [call('10.0.0.1', [{'info': 'dummy_info'}])]
def test_on_process_removed_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a process removed event. """
    mocked_context = mocker.patch.object(fsm.context, 'on_process_removed_event')
    fsm.on_process_removed_event('10.0.0.1', {'info': 'dummy_info'})
    assert mocked_context.call_args_list == [call('10.0.0.1', {'info': 'dummy_info'})]
def test_on_process_info(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a process information. """
    # inject process info and test call to context load_processes
    mocked_load = mocker.patch.object(fsm.context, 'load_processes')
    fsm.on_process_info('10.0.0.1', {'info': 'dummy_info'})
    assert mocked_load.call_args_list == [call('10.0.0.1', {'info': 'dummy_info'})]
def test_on_state_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a Master state event. """
    mocked_state = mocker.patch.object(fsm, 'set_state')
    fsm.context.master_node_name = '10.0.0.1'
    # test event not sent by Master node
    for state in SupvisorsStates:
        payload = {'statecode': state}
        fsm.on_state_event('10.0.0.2', payload)
        assert not mocked_state.called
    # test event sent by Master node
    for state in SupvisorsStates:
        payload = {'statecode': state}
        fsm.on_state_event('10.0.0.1', payload)
        assert mocked_state.call_args_list == [call(state)]
        mocker.resetall()
def test_on_authorization(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of an authorization event. """
    # prepare context
    mocked_auth = mocker.patch.object(fsm.context, 'on_authorization', return_value=False)
    # set initial condition
    assert fsm.supvisors.address_mapper.local_node_name == '127.0.0.1'
    nodes = fsm.context.nodes
    nodes['127.0.0.1']._state = AddressStates.RUNNING
    nodes['10.0.0.5']._state = AddressStates.RUNNING
    # test rejected authorization
    fsm.on_authorization('10.0.0.1', False, '10.0.0.5', SupvisorsStates.INITIALIZATION)
    assert mocked_auth.call_args_list == [call('10.0.0.1', False)]
    assert fsm.state == SupvisorsStates.INITIALIZATION
    assert fsm.context.master_node_name == ''
    assert not fsm.redeploy_mark
    # reset mocks
    mocked_auth.reset_mock()
    mocked_auth.return_value = True
    # test authorization when no master node provided
    fsm.on_authorization('10.0.0.1', True, '', SupvisorsStates.INITIALIZATION)
    assert mocked_auth.call_args == call('10.0.0.1', True)
    assert fsm.state == SupvisorsStates.INITIALIZATION
    assert fsm.context.master_node_name == ''
    assert not fsm.redeploy_mark
    # reset mocks
    mocked_auth.reset_mock()
    # test authorization and master node assignment
    fsm.on_authorization('10.0.0.1', True, '10.0.0.5', SupvisorsStates.INITIALIZATION)
    assert mocked_auth.call_args == call('10.0.0.1', True)
    assert fsm.state == SupvisorsStates.INITIALIZATION
    assert fsm.context.master_node_name == '10.0.0.5'
    assert not fsm.redeploy_mark
    # reset mocks
    mocked_auth.reset_mock()
    # test authorization and master node operational
    fsm.on_authorization('10.0.0.5', True, '10.0.0.5', SupvisorsStates.OPERATION)
    assert mocked_auth.call_args == call('10.0.0.5', True)
    assert fsm.state == SupvisorsStates.OPERATION
    assert fsm.context._master_node_name == '10.0.0.5'
    assert not fsm.redeploy_mark
    # reset mocks
    mocked_auth.reset_mock()
    # test authorization and master node conflict
    fsm.on_authorization('10.0.0.3', True, '10.0.0.4', SupvisorsStates.OPERATION)
    assert mocked_auth.call_args == call('10.0.0.3', True)
    assert fsm.state == SupvisorsStates.INITIALIZATION
    assert fsm.context.master_node_name == ''
    assert not fsm.redeploy_mark
    # change context while instance is not master
    nodes['127.0.0.1']._state = AddressStates.RUNNING
    nodes['10.0.0.5']._state = AddressStates.RUNNING
    # as the local instance is not master, there is no automatic transition
    fsm.set_state(SupvisorsStates.DEPLOYMENT)
    assert fsm.state == SupvisorsStates.DEPLOYMENT
    # set current instance as master
    fsm.supvisors.context._is_master = True
    # test authorization when no master node provided
    fsm.on_authorization('10.0.0.4', True, '', SupvisorsStates.INITIALIZATION)
    assert mocked_auth.call_args == call('10.0.0.4', True)
    assert fsm.state == SupvisorsStates.DEPLOYMENT
    assert fsm.supvisors.context.master_node_name == '10.0.0.5'
    assert fsm.redeploy_mark
    # test authorization and master node conflict
    fsm.on_authorization('10.0.0.5', True, '10.0.0.4', SupvisorsStates.OPERATION)
    assert mocked_auth.call_args == call('10.0.0.5', True)
    assert fsm.state == SupvisorsStates.INITIALIZATION
    assert fsm.supvisors.context.master_node_name == ''
    assert fsm.redeploy_mark
def test_restart_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a restart event. """
    # inject restart event and test call to fsm set_state RESTARTING
    mocked_fsm = mocker.patch.object(fsm, 'set_state')
    mocked_zmq = fsm.supvisors.zmq.pusher.send_restart_all
    fsm.supvisors.context.master_node_name = '10.0.0.1'
    # test when not master
    fsm.on_restart()
    assert not mocked_fsm.called
    assert mocked_zmq.call_args_list == [call('10.0.0.1')]
    mocked_zmq.reset_mock()
    # test when master
    fsm.context._is_master = True
    fsm.on_restart()
    assert not mocked_zmq.called
    assert mocked_fsm.call_args_list == [call(SupvisorsStates.RESTARTING)]
def test_shutdown_event(mocker, fsm):
    """ Test the actions triggered in state machine upon reception of a shutdown event. """
    # inject shutdown event and test call to fsm set_state SHUTTING_DOWN
    mocked_fsm = mocker.patch.object(fsm, 'set_state')
    mocked_zmq = fsm.supvisors.zmq.pusher.send_shutdown_all
    fsm.supvisors.context.master_node_name = '10.0.0.1'
    # test when not master
    fsm.on_shutdown()
    assert not mocked_fsm.called
    assert mocked_zmq.call_args_list == [call('10.0.0.1')]
    mocked_zmq.reset_mock()
    # test when master
    fsm.context._is_master = True
    fsm.on_shutdown()
    assert not mocked_zmq.called
    assert mocked_fsm.call_args_list == [call(SupvisorsStates.SHUTTING_DOWN)]
 | 
| 
	from life import random_state, get_next_state
import numpy as np
if __name__ == "__main__":
    test_state1 = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    test_state2 = [[1, 1, 1], [0, 0, 1], [1, 1, 1]]
    test_state3 = np.array([[1, 0, 1, 0], [0, 0, 1, 0], [0, 1, 1, 0]])
    correct_state1 = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    correct_state2 = [[0, 1, 1], [0, 0, 0], [0, 1, 1]]
    correct_state3 = [[0, 1, 0, 0], [0, 0, 1, 1], [0, 1, 1, 0]]
    # test that the shape and state returned by get_next_state match the expected state
    test_list = [test_state1, test_state2, test_state3]
    correct_list = [correct_state1, correct_state2, correct_state3]
    print(type(test_state1))
    for t, c in zip(test_list, correct_list):
        next_state = get_next_state(np.array(t))
        if np.array_equal(next_state, c):
            print("Correct")
        else:
            print("Incorrect!!")
            print("Expected: " + c)
            print("Returned: " + next_state)
 | 
| 
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
""" HHB Command Line Tools """
import argparse
import logging
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
from core.arguments_manage import ArgumentManage, CommandType, HHBException, ArgumentFilter
from core.arguments_manage import update_arguments_by_file
from core.common import ALL_ARGUMENTS_INFO, import_module_for_register, collect_arguments_info
from core.common import ArgInfo, ALL_ARGUMENTS_DESC
LOG = 25
logging.addLevelName(LOG, "LOG")
def _main(argv):
    """ HHB commmand line interface. """
    arg_manage = ArgumentManage(argv)
    arg_manage.check_cmd_arguments()
    parser = argparse.ArgumentParser(
        prog="HHB",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="HHB command line tools",
        epilog=__doc__,
        allow_abbrev=False,
        add_help=False,
    )
    # add command line parameters
    curr_command_type = arg_manage.get_command_type()
    if curr_command_type == CommandType.SUBCOMMAND:
        arg_manage.set_subcommand(parser)
    else:
        arg_manage.set_main_command(parser)
        ALL_ARGUMENTS_DESC["main_command"] = collect_arguments_info(parser._actions)
    # print help info
    if arg_manage.have_help:
        arg_manage.print_help_info(parser)
        return 0
    # generate readme file
    if arg_manage.have_generate_readme:
        arg_manage.generate_readme(parser)
        return 0
    # parse command line parameters
    args = parser.parse_args(arg_manage.origin_argv[1:])
    if args.config_file:
        update_arguments_by_file(args, arg_manage.origin_argv[1:])
    args_filter = ArgumentFilter(args)
    # config logger
    logging.basicConfig(
        format="[%(asctime)s] (%(name)s %(levelname)s): %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
    )
    logger = logging.getLogger("HHB")
    logger.setLevel(25 - args.verbose * 10)
    # run command
    arg_manage.run_command(args_filter, curr_command_type)
def main():
    ALL_MODULES_FOR_REGISTER = [
        "importer",
        "quantizer",
        "codegen",
        "simulate",
        "benchmark",
        "profiler",
    ]
    import_module_for_register(ALL_MODULES_FOR_REGISTER)
    argv = sys.argv
    sys.exit(_main(argv))
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\nCtrl-C detected.")
 | 
| 
	# import networkx as nx
from .transition import Transition
class ShapeAutomaton:
    def __init__(self):
        self.locations = set()
        self.transitions = set()
        self.initial_locations = set()
        self.final_locations = set()
        # self.parameters = set()
    def deepcopy_own(self):
        clone = ShapeAutomaton()
        d = dict()
        for location in self.locations:
            tmp = location.deepcopy_own()
            clone.add_location(tmp)
            # d[tmp.name] = location  # this ensures that I can keep track of the originals, so I can keep the original structure
            d[location.name] = tmp
        for transition in self.transitions:
            cloned_source = d[transition.source.name]
            cloned_target = d[transition.target.name]
            cloned_letter = transition.letter.deepcopy_own()
            clone.add_transition(Transition(transition.name, cloned_source, cloned_target, cloned_letter))
        # for parameter in self.parameters:
        #     clone.add_parameter(parameter)
        return clone
    # for easier handling in expression2aut
    def accept(self, visitor):
        visitor.visit_automaton(self)
        pass
    def undo_initial(self, location):
        if location in self.locations:
            location.is_initial = False
        if location in self.initial_locations:
            self.initial_locations.remove(location)
    def undo_final(self, location):
        if location in self.locations:
            location.is_final = False
        if location in self.final_locations:
            self.final_locations.remove(location)
    # aux fct for set of all incoming transitions
    def incoming(self, location):
        out = set()
        for transition in self.transitions:
            if transition.target == location:
                out.add(transition)
        return out
    def outgoing(self, location, include_dead_ends=True):
        out = set()
        for transition in self.transitions:
            if transition.source == location:
                if include_dead_ends:
                    out.add(transition)
                # otherwise skip dead ends: keep the transition only if its
                # target has outgoing transitions of its own or is final
                elif self.outgoing(transition.target) or transition.target.is_final:
                    out.add(transition)
        return out
    # use this if you want the nodes instead of the edges
    def adjacent(self, location, include_dead_ends=True):
        # returns next layer of nodes, by default includes even nonfinal dead ends
        adjacent_nodes = set()
        for transition in self.outgoing(location, include_dead_ends):
            adjacent_nodes.add(transition.target)
        return adjacent_nodes
    # def visualize(self):
    #     G = nx.DiGraph()
    #
    #     for location in self.locations:
    #         G.add_node(location)
    #
    #     for transition in self.transitions:
    #         G.add_edge(transition.source, transition.target, weight = 5)
    #         #G.edges[transition.source, transition.target]['weight'] = 5
    #
    #     pos = nx.spring_layout(G)
    #     plt.subplot(111)
    #
    #     ## fits better, but without info on final/initial
    #     # nx.draw(G, pos, edge_color = 'cyan', with_labels=True, node_color ='pink',
    #     #         labels={location:location.name for location in self.locations})
    #
    #
    #     nx.draw(G, pos, edge_color = 'black', node_color = 'cyan', with_labels=True)
    #
    #     nx.draw_networkx_edge_labels(G, pos, edge_labels={(edge.source, edge.target):str(edge.letter)
    #                                                       for edge in self.transitions}, font_color='green')
    #     plt.axis('off')
    #     plt.tight_layout()
    #     plt.show()
    # aux for set of locations in the layer adjacent to the final locations (used in expression2aut.kleene)
    def last_nonfinal_layer(self):
        out = set()
        for final_location in self.final_locations:
            for transition in self.incoming(final_location):
                out.add(transition.source)
        return out
    # transition
    def add_transition(self, transition):
        self.transitions.add(transition)
    def remove_transition(self, transition):
        self.transitions.remove(transition)
    @property
    def transitions(self):
        return self.__transitions
    @transitions.setter
    def transitions(self, transitions):
        self.__transitions = transitions
    # location
    def add_location(self, location):
        self.locations.add(location)
        if location.is_initial:
            self.initial_locations.add(location)
        if location.is_final:
            self.final_locations.add(location)
    def remove_location(self, location):
        self.locations.remove(location)
        if location.is_initial:
            self.initial_locations.remove(location)
        if location.is_final:
            self.final_locations.remove(location)
    @property
    def locations(self):
        return self.__locations
    @locations.setter
    def locations(self, locations):
        self.__locations = locations
        for location in locations:
            if location.is_initial:
                self.initial_locations.add(location)
            if location.is_final:
                self.final_locations.add(location)
    @property
    def initial_locations(self):
        return self.__initial_locations
    @initial_locations.setter
    def initial_locations(self, initial_locations):
        self.__initial_locations = initial_locations
    @property
    def final_locations(self):
        return self.__final_locations
    @final_locations.setter
    def final_locations(self, final_locations):
        self.__final_locations = final_locations
    # #parameter
    #     def add_parameter(self,parameter):
    #         self.parameters.add(parameter)
    #
    #     @property
    #     def parameters(self):
    #         return self.__parameters
    #
    #     @parameters.setter
    #     def parameters(self, parameters):
    #         self.__parameters = parameters
    def __str__(self):
        out = ''
        for s in self.locations:
            out = out + str(s) + '\n'
        for t in self.transitions:
            out = out + str(t) + '\n'
        return out
    # TODO for prettier printing one could easily print initial locations first, then normal ones, then final ones
 | 
| 
	### Interpolating bad channels ###
import os
from copy import deepcopy
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
######## Marking bad channels ########
print(raw.info['bads'])
picks = mne.pick_channels_regexp(raw.ch_names, regexp='EEG 05.')
raw.plot(order=picks, n_channels=len(picks))
picks = mne.pick_channels_regexp(raw.ch_names, regexp='MEG 2..3')
raw.plot(order=picks, n_channels=len(picks))
original_bads = deepcopy(raw.info['bads'])
raw.info['bads'].append('EEG 050')               # add a single channel
raw.info['bads'].extend(['EEG 051', 'EEG 052'])  # add a list of channels
bad_chan = raw.info['bads'].pop(-1)  # remove the last entry in the list
raw.info['bads'] = original_bads     # change the whole list at once
# default is exclude='bads':
good_eeg = mne.pick_types(raw.info, meg=False, eeg=True)
all_eeg = mne.pick_types(raw.info, meg=False, eeg=True, exclude=[])
print(np.setdiff1d(all_eeg, good_eeg))
print(np.array(raw.ch_names)[np.setdiff1d(all_eeg, good_eeg)])
######## When to look for bad channels ########
raw2 = raw.copy()
raw2.info['bads'] = []
events = mne.find_events(raw2, stim_channel='STI 014')
epochs = mne.Epochs(raw2, events=events)['2'].average().plot()
######## Interpolating bad channels ########
raw.crop(tmin=0, tmax=3).load_data()
eeg_data = raw.copy().pick_types(meg=False, eeg=True, exclude=[])
eeg_data_interp = eeg_data.copy().interpolate_bads(reset_bads=False)
for title, data in zip(['orig.', 'interp.'], [eeg_data, eeg_data_interp]):
    fig = data.plot(butterfly=True, color='#00000022', bad_color='r')
    fig.subplots_adjust(top=0.9)
    fig.suptitle(title, size='xx-large', weight='bold') | 
| 
	from enum import Enum
class ConnectorTypes(Enum):
    MSSQL = 1
    ORACLE = 2
    POSTGRESQL = 3
    EXCEL = 4
    CSV = 5
    Kafka = 6
    MYSQL = 7
    Impala = 8
    Soap = 9
    SqLite = 10
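# Minimal usage sketch (standard Enum lookups):
#     ConnectorTypes.MSSQL.value     -> 1
#     ConnectorTypes(3)              -> ConnectorTypes.POSTGRESQL
#     ConnectorTypes['Kafka'].name   -> 'Kafka'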
 | 
| 
	# coding=utf-8
import pytest
from mockito import expect
import elib_wx
@pytest.mark.weather
def test_weather_from_icao(with_db):
    icao = 'EBBR'
    expect(elib_wx.avwx.metar).fetch(icao).thenReturn('raw metar str')
    expect(elib_wx.avwx.metar).parse(icao, 'raw metar str').thenReturn(('metar data', 'metar units'))
    expect(elib_wx.Weather).fill_from_metar_data()
    wx = elib_wx.Weather(icao)
    assert wx.source_type == 'ICAO'
    assert wx.metar_data == 'metar data'
    assert wx.metar_units == 'metar units'
    assert wx.station_name == 'Brussels Airport'
@pytest.mark.weather
def test_weather_from_icao_unknown_icao():
    icao = 'KLXX'
    expect(elib_wx.avwx.metar).fetch(icao).thenReturn('raw metar str')
    expect(elib_wx.avwx.metar).parse(icao, 'raw metar str').thenReturn(('metar data', 'metar units'))
    expect(elib_wx.Weather).fill_from_metar_data()
    wx = elib_wx.Weather(icao)
    assert 'unknown airport (KLXX)' == wx.station_name
@pytest.mark.weather
def test_weather_from_icao_wrong_icao():
    icao = '1234'
    with pytest.raises(elib_wx.BadStationError):
        elib_wx.Weather(icao)
 | 
| 
	from typing import (
    Type,
)
from eth.rlp.blocks import BaseBlock
from eth.vm.forks.frontier import (
    FrontierVM,
)
from eth.vm.state import BaseState
from .blocks import NogasBlock
from .headers import (
    compute_nogas_difficulty,
    configure_nogas_header,
    create_nogas_header_from_parent,
)
from .state import NogasState
class NogasVM(FrontierVM):
    # fork name
    fork = 'nogas'
    # classes
    block_class: Type[BaseBlock] = NogasBlock
    _state_class: Type[BaseState] = NogasState
    # Methods
    create_header_from_parent = staticmethod(create_nogas_header_from_parent)  # type: ignore
    compute_difficulty = staticmethod(compute_nogas_difficulty)    # type: ignore
    configure_header = configure_nogas_header
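# A minimal sketch of wiring this fork into a chain class, assuming the usual
# py-evm `Chain.configure` / `vm_configuration` API (names below may need
# adjusting to the py-evm version in use):
#     from eth import constants
#     from eth.chains.base import MiningChain
#     NogasChain = MiningChain.configure(
#         __name__='NogasChain',
#         vm_configuration=((constants.GENESIS_BLOCK_NUMBER, NogasVM),),
#     )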
 | 
| 
	import csv
from pymongo import MongoClient
client = MongoClient()
db = client.gallery
user_collection = db.users
wallet_count_greater = 0
wallet_count_to_amount = {}
while True:
    count = user_collection.count_documents(
        {"addresses.{}".format(wallet_count_greater): {"$exists": True}}
    )
    amount = count
    if amount == 0:
        break
    wallet_count_to_amount[wallet_count_greater] = amount
    wallet_count_greater += 1
total = 0
for i in range(wallet_count_greater):
    back = wallet_count_greater - i - 1
    wallet_count_to_amount[back] = wallet_count_to_amount[back] - total
    total += wallet_count_to_amount[back]
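# The loop above converts the cumulative counts ("at least i+1 addresses") into
# exact counts ("exactly i+1 addresses"). For example (hypothetical numbers), a
# cumulative {0: 10, 1: 4, 2: 1} becomes {2: 1, 1: 3, 0: 6}: one user with
# exactly 3 wallets, three with exactly 2, six with exactly 1, and `total`
# ends up at 10, the number of users counted.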
for k, v in wallet_count_to_amount.items():
    print(f"{k + 1} wallets: {v}")
    print(f"{k + 1} ratio: {v / total * 100}%")
 | 
| 
#!/usr/bin/env python3
import random
from util import gist, playlist
from util.spotify import get_spotify_client
lofi = {
    "name": "lofi",
    "id": "5h9LqGUUE4FKQfVwgAu1OA",
    "backup": "31k9ZXIfUi9v5mpbfg6FQH",
    "base": "5adSO5spsp0tb48t2MyoD6",
}
japan = {
    "name": "japan",
    "id": "6Cu6fL6djm63Em0i93IRUW",
    "backup": "19yEs2hB6J5FwHrGMsIlZQ",
    "base": "7EdczIpYSY7aUBSMqgGguV",
}
def main(id: str, backup: str, base: str):
    def randomize_tracks(lofi_base: list, lofi_list: list):
        try:
            list_size = 250 - 1  # We count -1 bc of initial track
            list_sample = random.sample(lofi_list, (list_size - len(lofi_base)))
            final_sample = random.sample(lofi_base + list_sample, list_size)
            return [x['track']['uri'] for x in [initial_track, *final_sample]]
        except Exception:
            raise Exception("Could not sample lofibase or lofilist")
    if playlist.edited_this_week(_spotify, id):
        print("Exiting, Ran this week")
        return
    print("getting playlist Backup")
    lofi_list = playlist.getAsync(_spotify, backup, True)["items"]
    print("getting playlist base")
    lofi_base = playlist.get(_spotify, base, True)['items']
    print("deduplifying list")
    lofi_list = playlist.deduplify_list(lofi_list, lofi_base, disabled)
    initial_track = random.choice(lofi_base)
    lofi_base.remove(initial_track)
    print(f"chose the initial track: {initial_track['track']['name']}")
    print("randomizing")
    weekly_playlistIds = randomize_tracks(lofi_base, lofi_list)
    print(weekly_playlistIds)
    print("clearing playlist")
    playlist.clear(_spotify, id)
    print("adding songs to playlist")
    playlist.add(
        _spotify=_spotify,
        tracks_to_add=weekly_playlistIds,
        playlistId=id
    )
if __name__ == '__main__':
    _spotify = get_spotify_client()
    print("loading disabled tracks...")
    data = gist.load("autofy.json")
    disabled = playlist.getAsync(_spotify, data['disabled'])["items"]
    for x in (lofi, japan):
        print(f'shuffling {x["name"]}')
        main(x['id'], x['backup'], x['base'])
 | 
| 
from six import string_types
from six.moves import UserDict
import types
from ._registry import register_output
from .base_output import OutputInterface
class GreedyDict(UserDict, object):
    def __setitem__(self, key, value):
        if isinstance(value, types.GeneratorType):
            value = [val for val in value]
        super(GreedyDict, self).__setitem__(key, value)
    def __iter__(self):
        for val in self.data.values():
            yield val
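# A small sketch of the behaviour above: GreedyDict materializes generator
# values on assignment, e.g.
#     d = GreedyDict()
#     d['squares'] = (i * i for i in range(3))
#     d['squares']  -> [0, 1, 4]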
@register_output
class InMemoryOutput(OutputInterface):
    def __init__(self, iterable=None, hash_field=None,
                 tokenized_corpora=None,
                 vectorized_corpora=None, modeled_corpora=None):
        super(InMemoryOutput, self).__init__()
        self.corpus = GreedyDict()
        if iterable:
            self.import_from_iterable(iterable, hash_field)
        self.tokenized_corpora = tokenized_corpora if tokenized_corpora else GreedyDict()
        self.vectorized_corpora = vectorized_corpora if vectorized_corpora else GreedyDict()
        self.modeled_corpora = modeled_corpora if modeled_corpora else GreedyDict()
    def import_from_iterable(self, iterable, field_to_hash):
        """
        iterable: generally a list of dicts, but possibly a list of strings
            This is your data.  Your dictionary structure defines the schema
            of the stored corpus.
        """
        self.hash_field = field_to_hash
        for item in iterable:
            if isinstance(item, string_types):
                item = {field_to_hash: item}
            elif field_to_hash not in item and field_to_hash in next(iter(item.values())):
                item = next(iter(item.values()))
            id = hash(item[field_to_hash])
            self.corpus[id] = item
    # TODO: generalize for datetimes
    # TODO: validate input data to ensure that it has valid year data
    def get_date_filtered_data(self, field_to_get, start, end, filter_field="year"):
        return self.get_filtered_data(field_to_get,
                                      "{}<=int({}['{}'])<={}".format(start, "{}",
                                                                     filter_field, end))
    def get_filtered_data(self, field_to_get, filter=""):
        if not filter:
            for doc_id, doc in self.corpus.items():
                yield doc_id, doc[field_to_get]
        else:
            for doc_id, doc in self.corpus.items():
                if eval(filter.format(doc)):
                    yield doc_id, doc[field_to_get]
    def save(self, filename):
        saved_data = {"iterable": self.corpus,
                      "hash_field": self.hash_field,
                      "modeled_corpora": self.modeled_corpora,
                      "vectorized_corpora": self.vectorized_corpora,
                      "tokenized_corpora": self.tokenized_corpora}
        return super(InMemoryOutput, self).save(filename, saved_data)
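# Minimal usage sketch (the documents and field names below are hypothetical):
#     output = InMemoryOutput()
#     output.import_from_iterable(
#         [{"text": "first doc", "year": 2001},
#          {"text": "second doc", "year": 2005}],
#         field_to_hash="text")
#     for doc_id, text in output.get_date_filtered_data("text", 2000, 2010):
#         print(doc_id, text)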
 | 
| 
	import autolens as al
class TestPipelineGeneralSettings:
    def test__hyper_galaxies_tag(self):
        general = al.setup.General(hyper_galaxies=False)
        assert general.hyper_galaxies_tag == ""
        general = al.setup.General(hyper_galaxies=True)
        assert general.hyper_galaxies_tag == "_galaxies"
    def test__hyper_image_sky_tag(self):
        general = al.setup.General(hyper_image_sky=False)
        assert general.hyper_image_sky_tag == ""
        general = al.setup.General(hyper_image_sky=True)
        assert general.hyper_image_sky_tag == "_bg_sky"
    def test__hyper_background_noise_tag(self):
        general = al.setup.General(hyper_background_noise=False)
        assert general.hyper_background_noise_tag == ""
        general = al.setup.General(hyper_background_noise=True)
        assert general.hyper_background_noise_tag == "_bg_noise"
    def test__tag(self):
        general = al.setup.General(
            hyper_galaxies=True, hyper_image_sky=True, hyper_background_noise=True
        )
        assert general.tag == "general__hyper_galaxies_bg_sky_bg_noise"
        general = al.setup.General(hyper_galaxies=True, hyper_background_noise=True)
        assert general.tag == "general__hyper_galaxies_bg_noise"
class TestPipelineSourceSettings:
    def test__pixelization_tag(self):
        source = al.setup.Source(pixelization=None)
        assert source.pixelization_tag == ""
        source = al.setup.Source(pixelization=al.pix.Rectangular)
        assert source.pixelization_tag == "pix_rect"
        source = al.setup.Source(pixelization=al.pix.VoronoiBrightnessImage)
        assert source.pixelization_tag == "pix_voro_image"
    def test__regularization_tag(self):
        source = al.setup.Source(regularization=None)
        assert source.regularization_tag == ""
        source = al.setup.Source(regularization=al.reg.Constant)
        assert source.regularization_tag == "__reg_const"
        source = al.setup.Source(regularization=al.reg.AdaptiveBrightness)
        assert source.regularization_tag == "__reg_adapt_bright"
    def test__lens_light_centre_tag(self):
        source = al.setup.Source(lens_light_centre=None)
        assert source.lens_light_centre_tag == ""
        source = al.setup.Source(lens_light_centre=(2.0, 2.0))
        assert source.lens_light_centre_tag == "__lens_light_centre_(2.00,2.00)"
        source = al.setup.Source(lens_light_centre=(3.0, 4.0))
        assert source.lens_light_centre_tag == "__lens_light_centre_(3.00,4.00)"
        source = al.setup.Source(lens_light_centre=(3.027, 4.033))
        assert source.lens_light_centre_tag == "__lens_light_centre_(3.03,4.03)"
    def test__lens_mass_centre_tag(self):
        source = al.setup.Source(lens_mass_centre=None)
        assert source.lens_mass_centre_tag == ""
        source = al.setup.Source(lens_mass_centre=(2.0, 2.0))
        assert source.lens_mass_centre_tag == "__lens_mass_centre_(2.00,2.00)"
        source = al.setup.Source(lens_mass_centre=(3.0, 4.0))
        assert source.lens_mass_centre_tag == "__lens_mass_centre_(3.00,4.00)"
        source = al.setup.Source(lens_mass_centre=(3.027, 4.033))
        assert source.lens_mass_centre_tag == "__lens_mass_centre_(3.03,4.03)"
    def test__align_light_mass_centre_tag__is_empty_string_if_both_lens_light_and_mass_centres_input(
        self
    ):
        source = al.setup.Source(align_light_mass_centre=False)
        assert source.align_light_mass_centre_tag == ""
        source = al.setup.Source(align_light_mass_centre=True)
        assert source.align_light_mass_centre_tag == "__align_light_mass_centre"
        source = al.setup.Source(
            lens_light_centre=(0.0, 0.0),
            lens_mass_centre=(1.0, 1.0),
            align_light_mass_centre=True,
        )
        assert source.align_light_mass_centre_tag == ""
    def test__lens_light_bulge_only_tag(self):
        source = al.setup.Source(lens_light_bulge_only=False)
        assert source.lens_light_bulge_only_tag == ""
        source = al.setup.Source(lens_light_bulge_only=True)
        assert source.lens_light_bulge_only_tag == "__bulge_only"
    def test__no_shear_tag(self):
        source = al.setup.Source(no_shear=False)
        assert source.no_shear_tag == "__with_shear"
        source = al.setup.Source(no_shear=True)
        assert source.no_shear_tag == "__no_shear"
    def test__fix_lens_light_tag(self):
        source = al.setup.Source(fix_lens_light=False)
        assert source.fix_lens_light_tag == ""
        source = al.setup.Source(fix_lens_light=True)
        assert source.fix_lens_light_tag == "__fix_lens_light"
    def test__number_of_gaussians_tag(self):
        source = al.setup.Source()
        assert source.number_of_gaussians_tag == ""
        source = al.setup.Source(number_of_gaussians=1)
        assert source.number_of_gaussians_tag == "__gaussians_x1"
        source = al.setup.Source(number_of_gaussians=2)
        assert source.number_of_gaussians_tag == "__gaussians_x2"
    def test__tag(self):
        source = al.setup.Source(
            pixelization=al.pix.Rectangular,
            regularization=al.reg.Constant,
            lens_light_centre=(1.0, 2.0),
            lens_mass_centre=(3.0, 4.0),
            align_light_mass_centre=False,
            no_shear=True,
            fix_lens_light=True,
        )
        source.type_tag = source.inversion_tag
        assert (
            source.tag
            == "source__pix_rect__reg_const__no_shear__lens_light_centre_(1.00,2.00)__lens_mass_centre_(3.00,4.00)__fix_lens_light"
        )
        assert source.tag_beginner == "source__pix_rect__reg_const"
        source = al.setup.Source(
            pixelization=al.pix.Rectangular,
            regularization=al.reg.Constant,
            align_light_mass_centre=True,
            number_of_gaussians=1,
            fix_lens_light=True,
            lens_light_bulge_only=True,
        )
        source.type_tag = "test"
        assert (
            source.tag
            == "source__test__gaussians_x1__with_shear__align_light_mass_centre__bulge_only__fix_lens_light"
        )
    def test__tag_beginner(self):
        source = al.setup.Source(
            pixelization=al.pix.Rectangular, regularization=al.reg.Constant
        )
        assert source.tag_beginner_no_inversion == "source"
        assert source.tag_beginner == "source__pix_rect__reg_const"
class TestPipelineLightSettings:
    def test__align_bulge_disk_tags(self):
        light = al.setup.Light(align_bulge_disk_centre=False)
        assert light.align_bulge_disk_centre_tag == ""
        light = al.setup.Light(align_bulge_disk_centre=True)
        assert light.align_bulge_disk_centre_tag == "_centre"
        light = al.setup.Light(align_bulge_disk_axis_ratio=False)
        assert light.align_bulge_disk_axis_ratio_tag == ""
        light = al.setup.Light(align_bulge_disk_axis_ratio=True)
        assert light.align_bulge_disk_axis_ratio_tag == "_axis_ratio"
        light = al.setup.Light(align_bulge_disk_phi=False)
        assert light.align_bulge_disk_phi_tag == ""
        light = al.setup.Light(align_bulge_disk_phi=True)
        assert light.align_bulge_disk_phi_tag == "_phi"
    def test__bulge_disk_tag(self):
        light = al.setup.Light(
            align_bulge_disk_centre=False,
            align_bulge_disk_axis_ratio=False,
            align_bulge_disk_phi=False,
        )
        assert light.align_bulge_disk_tag == ""
        light = al.setup.Light(
            align_bulge_disk_centre=True,
            align_bulge_disk_axis_ratio=False,
            align_bulge_disk_phi=False,
        )
        assert light.align_bulge_disk_tag == "__align_bulge_disk_centre"
        light = al.setup.Light(
            align_bulge_disk_centre=True,
            align_bulge_disk_axis_ratio=False,
            align_bulge_disk_phi=True,
        )
        assert light.align_bulge_disk_tag == "__align_bulge_disk_centre_phi"
        light = al.setup.Light(
            align_bulge_disk_centre=True,
            align_bulge_disk_axis_ratio=True,
            align_bulge_disk_phi=True,
        )
        assert light.align_bulge_disk_tag == "__align_bulge_disk_centre_axis_ratio_phi"
    def test__disk_as_sersic_tag(self):
        light = al.setup.Light(disk_as_sersic=False)
        assert light.disk_as_sersic_tag == "__disk_exp"
        light = al.setup.Light(disk_as_sersic=True)
        assert light.disk_as_sersic_tag == "__disk_sersic"
    def test__number_of_gaussians_tag(self):
        source = al.setup.Source()
        assert source.number_of_gaussians_tag == ""
        source = al.setup.Source(number_of_gaussians=1)
        assert source.number_of_gaussians_tag == "__gaussians_x1"
        source = al.setup.Source(number_of_gaussians=2)
        assert source.number_of_gaussians_tag == "__gaussians_x2"
    def test__tag(self):
        light = al.setup.Light(align_bulge_disk_phi=True)
        light.type_tag = ""
        assert light.tag == "light____align_bulge_disk_phi__disk_exp"
        light = al.setup.Light(
            align_bulge_disk_centre=True,
            align_bulge_disk_axis_ratio=True,
            disk_as_sersic=True,
        )
        light.type_tag = "lol"
        assert (
            light.tag == "light__lol__align_bulge_disk_centre_axis_ratio__disk_sersic"
        )
        light = al.setup.Light(
            align_bulge_disk_centre=True,
            align_bulge_disk_axis_ratio=True,
            disk_as_sersic=True,
            number_of_gaussians=2,
        )
        light.type_tag = "test"
        assert light.tag == "light__test__gaussians_x2"
    def test__tag_beginner(self):
        light = al.setup.Light(align_bulge_disk_phi=True)
        assert light.tag_beginner == "light__align_bulge_disk_phi__disk_exp"
        light = al.setup.Light(
            align_bulge_disk_centre=True,
            align_bulge_disk_axis_ratio=True,
            disk_as_sersic=True,
        )
        assert (
            light.tag_beginner
            == "light__align_bulge_disk_centre_axis_ratio__disk_sersic"
        )
        light = al.setup.Light(
            align_bulge_disk_centre=True,
            align_bulge_disk_axis_ratio=True,
            disk_as_sersic=True,
            number_of_gaussians=2,
        )
        assert light.tag_beginner == "light__gaussians_x2"
class TestPipelineMassSettings:
    def test__no_shear_tag(self):
        mass = al.setup.Mass(no_shear=False)
        assert mass.no_shear_tag == "__with_shear"
        mass = al.setup.Mass(no_shear=True)
        assert mass.no_shear_tag == "__no_shear"
    def test__align_light_dark_tag(self):
        mass = al.setup.Mass(align_light_dark_centre=False)
        assert mass.align_light_dark_centre_tag == ""
        mass = al.setup.Mass(align_light_dark_centre=True)
        assert mass.align_light_dark_centre_tag == "__align_light_dark_centre"
    def test__align_bulge_dark_tag(self):
        mass = al.setup.Mass(align_bulge_dark_centre=False)
        assert mass.align_bulge_dark_centre_tag == ""
        mass = al.setup.Mass(align_bulge_dark_centre=True)
        assert mass.align_bulge_dark_centre_tag == "__align_bulge_dark_centre"
    def test__fix_lens_light_tag(self):
        mass = al.setup.Mass(fix_lens_light=False)
        assert mass.fix_lens_light_tag == ""
        mass = al.setup.Mass(fix_lens_light=True)
        assert mass.fix_lens_light_tag == "__fix_lens_light"
    def test__tag(self):
        mass = al.setup.Mass(
            no_shear=True, align_light_dark_centre=True, fix_lens_light=True
        )
        mass.type_tag = ""
        assert mass.tag == "mass____no_shear__align_light_dark_centre__fix_lens_light"
        mass = al.setup.Mass(align_bulge_dark_centre=True)
        mass.type_tag = "test"
        assert mass.tag == "mass__test__with_shear__align_bulge_dark_centre"
    def test__tag_beginner(self):
        mass = al.setup.Mass(
            no_shear=True, align_light_dark_centre=True, fix_lens_light=True
        )
        assert (
            mass.tag_beginner
            == "mass__no_shear__align_light_dark_centre__fix_lens_light"
        )
        mass = al.setup.Mass(align_bulge_dark_centre=True)
        assert mass.tag_beginner == "mass__with_shear__align_bulge_dark_centre"
 | 
| 
	r"""
Homology :mod:`homology`
========================
Tools for "symmetrizing" a period matrix.
There exists a symplectic transformation on the period matrix of a real curve
such that the corresponding a- and b-cycles have certain transformation
properties under the anti-holomorphic involution on said Riemann surface.
.. note::
   The algorithm described in Kalla, Klein actually operates on the transposes
   of the a- and b-period matrices. All intermediate functions assume the input
   period matrices are transposed. The primary function in this module,
   :func:`symmetrize_periods`, performs these transpositions automatically.
Functions
---------
.. autosummary::
    symmetrize_periods
    symmetric_transformation_matrix
References
----------
.. [KallaKlein] C. Kalla, C. Klein "Computation of the Topological Type of a
   Real Riemann Surface"
Contents
--------
"""
import numpy
from sage.all import (
    real, imag, Matrix, ZZ, QQ, RDF, CDF, GF, identity_matrix, zero_matrix)
def Re(M):
    return M.apply_map(real)
def Im(M):
    return M.apply_map(imag)
def involution_matrix(Pa, Pb, tol=1e-4):
    r"""Returns the transformation matrix `R` corresponding to the anti-holomorphic
    involution on the periods of the Riemann surface.
    Given an arbitrary `2g x g` period matrix `[Pa, Pb]^T` of a genus `g`
    Riemann surface `X` the action of the anti-holomorphic involution on `X` of
    these periods is given by left-multiplication by a `2g x 2g` matrix `R`.
    That is, .. math::
        [\tau P_a^T, \tau P_b^T]^T = R [P_a^T, P_b^T]^T
    Parameters
    ----------
    Pa : complex matrix
    Pb : complex matrix
        The a- and b-periods, respectively, of a genus `g` Riemann surface.
    tol : double
        (Default: 1e-4) Tolerance used to verify integrality of transformation
        matrix. Dependent on precision of period matrices.
    Returns
    -------
    R : complex matrix
        The anti-holomorphic involution matrix.
    Todo
    ----
    For numerical stability, replace matrix inversion with linear system
    solves.
    """
    g,g = Pa.dimensions()
    R_RDF = Matrix(RDF, 2*g, 2*g)
    Ig = identity_matrix(RDF, g)
    M = Im(Pb.T)*Re(Pa) - Im(Pa.T)*Re(Pb)
    Minv = M.inverse()
    R_RDF[:g,:g] = (2*Re(Pb)*Minv*Im(Pa.T) + Ig).T
    R_RDF[:g,g:] = -2*Re(Pa)*Minv*Im(Pa.T)
    R_RDF[g:,:g] = 2*Re(Pb)*Minv*Im(Pb.T)
    R_RDF[g:,g:] = -(2*Re(Pb)*Minv*Im(Pa.T) + Ig)
    R = R_RDF.round().change_ring(ZZ)
    # sanity check: make sure that R_RDF is close to integral. we perform this
    # test here since the matrix returned should be over ZZ
    error = (R_RDF.round() - R_RDF).norm()
    if error > tol:
        raise ValueError("The anti-holomorphic involution matrix is not "
                         "integral. Try increasing the precision of the input "
                         "period matrices.")
    return R
def integer_kernel_basis(R):
    r"""Returns the Z-basis `[S1 \\ S2]` of the kernel of the anti-holomorphic
    involution matrix `R`.
    The `2g x g` matrix `[S1 \\ S2]` represents a Z-basis of the kernel space
    .. math::
        K_\mathbb{Z} = \text{ker}(R^T - \mathbb{I}_{2g})
    That is, the basis of the space of all vectors fixed by the
    anti-holomorphic involution `R`.
    Used as input in `N1_matrix`.
    Parameters
    ----------
    R : integer matrix
        The anti-holomorphic involution matrix of a genus `g` Riemann surface.
    Returns
    -------
    S : integer matrix
        A `2g x g` matrix where each column is a basis element of the fixed
        point space of `R`.
    """
    twog, twog = R.dimensions()
    g = twog//2
    K = R.T - identity_matrix(ZZ, twog)
    r = K.rank()
    # sanity check: the rank of the kernel should be the genus of the curve
    if r != g:
        raise ValueError("The rank of the integer kernel of K should be "
                         "equal to the genus.")
    # compute the integer kernel from the Smith normal form of K
    D,U,V = K.smith_form()
    S = V[:,g:]
    return S
def N1_matrix(Pa, Pb, S, tol=1e-4):
    r"""Returns the matrix `N1` from the integer kernel of the anti-holomorphic
    involution matrix.
    This matrix `N1` is used directly to determine the topological type of a
    Riemann surface. Used as input in `symmetric_block_diagonalize`.
    Parameters
    ----------
    Pa : complex matrix
    Pb : complex matrix
        The a- and b-periods, respectively, of a genus `g` Riemann surface.
    S : integer matrix
        A `2g x g` Z-basis of the kernel of the anti-holomorphic involution.
        (See `integer_kernel_basis`.)
    tol : double
        (Default: 1e-4) Tolerance used to verify integrality of the matrix.
        Dependent on precision of period matrices.
    Returns
    -------
    N1 : GF(2) matrix
        A `g x g` matrix from which we can compute the topological type.
    """
    # compute the Smith normal form of S, itself
    g = S.ncols()
    S1 = S[:g,:]
    S2 = S[g:,:]
    ES, US, VS = S.smith_form()
    # construct the matrix N1 piece by piece
    Nper = zero_matrix(RDF, 2*g,g)
    Nper[:g,:] = -Re(Pb)[:,:]
    Nper[g:,:] = Re(Pa)[:,:]
    Nhat = (S1.T*Re(Pa) + S2.T*Re(Pb)).inverse()
    Ntilde = 2*US*Nper*Nhat
    N1_RDF = VS*Ntilde[:g,:]
    N1 = N1_RDF.round().change_ring(GF(2))
    # sanity check: N1 should be integral
    error = (N1_RDF.round() - N1_RDF).norm()
    if error > tol:
        raise ValueError("The N1 matrix is not integral. Try increasing the "
                         "precision of the input period matrices.")
    return N1
def symmetric_block_diagonalize(N1):
    r"""Returns matrices `H` and `Q` such that `N1 = Q*H*Q.T` and `H` is block
    diagonal.
    The algorithm used here is as follows. Whenever a row operation is
    performed (via multiplication on the left by a transformation matrix `q`)
    the corresponding symmetric column operation is also performed via
    multiplication on the right by `q^T`.
    For each column `j` of `N1`:
    1. If column `j` consists only of zeros then swap with the last column with
       non-zero entries.
    2. If there is a `1` in position `j` of the column (i.e. a `1` lies on the
       diagonal in this column) then eliminate further entries below as in
       standard Gaussian elimination.
    3. Otherwise, if there is a `1` in the column, but not in position `j` then
       rows are swapped in a way that it appears in the position `j+1` of the
       column. Eliminate further entries below as in standard Gaussian
       elimination.
    4. After elimination, if `1` lies on the diagonal in column `j` then
       increment `j` by one. If instead the block matrix `[0 1 \\ 1 0]` lies
       along the diagonal then eliminate under the `(j,j+1)` element (the upper
       right element) of this `2 x 2` block and increment `j` by two.
    5. Repeat until `j` passes the final column or until further columns
       consists of all zeros.
    6. Finally, perform the appropriate transformations such that all `2 x 2`
       blocks in `H` appear first in the diagonalization. (Uses the
       `diagonal_locations` helper function.)
    Parameters
    ----------
    N1 : GF(2) matrix
    Returns
    -------
    H : GF(2) matrix
        Symmetric `g x g` matrix where the diagonal elements consist of either
        a "1" or a `2 x 2` block matrix `[0 1 \\ 1 0]`.
    Q : GF(2) matrix
        The corresponding transformation matrix.
    """
    g = N1.nrows()
    H = zero_matrix(GF(2), g)
    Q = identity_matrix(GF(2), g)
    # if N1 is the zero matrix then H is also the zero matrix (and Q is the
    # identity transformation)
    if (N1 % 2) == 0:
        return H,Q
    # perform the "modified gaussian elimination"
    B = Matrix(GF(2),[[0,1],[1,0]])
    H = N1.change_ring(GF(2))
    j = 0
    while (j < g) and (H[:,j:] != 0):
        # if the current column is zero then swap with the last non-zero column
        if H.column(j) == 0:
            last_non_zero_col = max(k for k in range(j,g) if H.column(k) != 0)
            Q.swap_columns(j,last_non_zero_col)
            H = Q.T*N1*Q
        # if the current diagonal element is 1 then gaussian eliminate as
        # usual. otherwise, swap rows so that a "1" appears in H[j+1,j] and
        # then eliminate from H[j+1,j]
        if H[j,j] == 1:
            rows_to_eliminate = (r for r in range(g) if H[r,j] == 1 and r != j)
            for r in rows_to_eliminate:
                Q.add_multiple_of_column(r,j,1)
            H = Q.T*N1*Q
        else:
            # find the first non-zero element in the column after the diagonal
            # element and swap rows with this element
            first_non_zero = min(k for k in range(j,g) if H[k,j] != 0)
            Q.swap_columns(j+1,first_non_zero)
            H = Q.T*N1*Q
            # eliminate *all* other ones in the column, including those above
            # the element (j,j+1)
            rows_to_eliminate = (r for r in range(g) if H[r,j] == 1 and r != j+1)
            for r in rows_to_eliminate:
                Q.add_multiple_of_column(r,j+1,1)
            H = Q.T*N1*Q
        # increment the column based on the diagonal element
        if H[j,j] == 1:
            j += 1
        elif H[j:(j+2),j:(j+2)] == B:
            # in the block diagonal case, need to eliminate below the j+1 term
            rows_to_eliminate = (r for r in range(g) if H[r,j+1] == 1 and r != j)
            for r in rows_to_eliminate:
                Q.add_multiple_of_column(r,j,1)
            H = Q.T*N1*Q
            j += 2
    # finally, check if there are blocks of "special" form. that is, shift all
    # blocks such that they occur first along the diagonal of H
    index_one, index_B = diagonal_locations(H)
    while index_one < index_B:
        j = index_B
        Qtilde = zero_matrix(GF(2), g)
        Qtilde[0,0] = 1
        Qtilde[j,0] = 1; Qtilde[j+1,0] = 1
        Qtilde[0,j] = 1; Qtilde[0,j+1] = 1
        Qtilde[j:(j+2),j:(j+2)] = B
        Q = Q*Qtilde
        H = Q.T*N1*Q
        # continue until none are left
        index_one, index_B = diagonal_locations(H)
    # above, we used Q to store column operations on N1. switch to rows
    # operations on H so that N1 = Q*H*Q.T
    Q = Q.T.inverse()
    return H,Q
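# Illustrative example for symmetric_block_diagonalize (hypothetical input):
#     N1 = Matrix(GF(2), [[0, 1, 0], [1, 0, 0], [0, 0, 1]])
#     H, Q = symmetric_block_diagonalize(N1)
#     # here H equals N1 (the 2x2 block already comes first, followed by a 1)
#     # and Q is the identity, so N1 == Q*H*Q.T holds trivially.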
def diagonal_locations(H):
    r"""Returns the indices of the last `1` along the diagonal and the first block
    along the diagonal of `H`.
    Parameters
    ----------
    H : symmetric GF(2) matrix
        Contains either 1's along the diagonal or anti-symmetric blocks.
    Returns
    -------
    index_one : integer
        The first occurrence of a `1` along the diagonal of `H`. Equal to `g`
        if there are no ones along the diagonal.
    index_B : integer
        The last occurrence of a block along the diagonal of `H`. Equal to
        `-1` if there are no blocks along the diagonal.
    """
    g = H.nrows()
    B = Matrix(GF(2),[[0,1],[1,0]])
    try:
        index_one = min(j for j in range(g) if H[j,j] == 1)
    except ValueError:
        index_one = g
    try:
        index_B = max(j for j in range(g-1) if H[j:(j+2),j:(j+2)] == B)
    except ValueError:
        index_B = -1
    return index_one, index_B
def symmetric_transformation_matrix(Pa, Pb, S, H, Q, tol=1e-4):
    r"""Returns the symplectic matrix `\Gamma` mapping the period matrices `Pa,Pb`
    to a symmetric period matrices.
    A helper function to :func:`symmetrize_periods`.
    Parameters
    ----------
    Pa : complex matrix
        A `g x g` a-period matrix.
    Pb : complex matrix
        A `g x g` b-period matrix.
    S : integer matrix
        Integer kernel basis matrix.
    H : integer matrix
        Topological type classification matrix.
    Q : integer matrix
        The transformation matrix from `symmetric_block_diagonalize`.
    tol : double
        (Default: 1e-4) Tolerance used to verify integrality of intermediate
        matrices. Dependent on precision of period matrices.
    Returns
    -------
    Gamma : integer matrix
        A `2g x 2g` symplectic matrix.
    """
    # compute A and B
    g,g = Pa.dimensions()
    rhs = S*Q.change_ring(ZZ)
    A = rhs[:g,:g].T
    B = rhs[g:,:g].T
    H = H.change_ring(ZZ)
    # compute C and D
    half = QQ(1)/QQ(2)
    temp = (A*Re(Pa) + B*Re(Pb)).inverse()
    CT = half*A.T*H - Re(Pb)*temp
    CT_ZZ = CT.round().change_ring(ZZ)
    C = CT_ZZ.T
    DT = half*B.T*H + Re(Pa)*temp
    DT_ZZ = DT.round().change_ring(ZZ)
    D = DT_ZZ.T
    # sanity checks: make sure C and D are integral
    C_error = (CT.round() - CT).norm()
    D_error = (DT.round() - DT).norm()
    if (C_error > tol) or (D_error > tol):
        raise ValueError("The symmetric transformation matrix is not integral. "
                         "Try increasing the precision of the input period "
                         "matrices.")
    # construct Gamma
    Gamma = zero_matrix(ZZ, 2*g, 2*g)
    Gamma[:g,:g] = A
    Gamma[:g,g:] = B
    Gamma[g:,:g] = C
    Gamma[g:,g:] = D
    return Gamma
def symmetrize_periods(Pa, Pb, tol=1e-4):
    r"""Returns symmetric a- and b-periods `Pa_symm` and `Pb_symm`, as well as the
    corresponding symplectic operator `Gamma` such that `Gamma [Pa \\ Pb] =
    [Pa_symm \\ Pb_symm]`.
    Parameters
    ----------
    Pa : complex matrix
    Pb : complex matrix
        The a- and b-periods, respectively, of a genus `g` Riemann surface.
    tol : double
        (Default: 1e-4) Tolerance used to verify integrality of intermediate
        matrices. Dependent on precision of period matrices.
    Returns
    -------
    Pa_symm : complex matrix
    Pb_symm : complex matrix
        Symmetric a- and b-periods, respectively, of a genus `g` Riemann surface.
    Notes
    -----
    The algorithm described in Kalla, Klein actually operates on the transposes
    of the a- and b-period matrices.
    """
    # coerce from numpy, if necessary
    if isinstance(Pa, numpy.ndarray):
        Pa = Matrix(CDF, numpy.ascontiguousarray(Pa))
    if isinstance(Pb, numpy.ndarray):
        Pb = Matrix(CDF, numpy.ascontiguousarray(Pb))
    # the intermediate functions operate on the transposes of the period matrices
    Pa = Pa.T
    Pb = Pb.T
    # use above functions to obtain topological type matrix
    g,g = Pa.dimensions()
    R = involution_matrix(Pa, Pb, tol=tol)
    S = integer_kernel_basis(R)
    N1 = N1_matrix(Pa, Pb, S, tol=tol)
    H,Q = symmetric_block_diagonalize(N1)
    Gamma = symmetric_transformation_matrix(Pa, Pb, S, H, Q, tol=tol)
    # compute the corresponding symmetric periods
    stacked_periods = zero_matrix(CDF, 2*g, g)
    stacked_periods[:g,:] = Pa
    stacked_periods[g:,:] = Pb
    stacked_symmetric_periods = Gamma*stacked_periods
    Pa_symm = stacked_symmetric_periods[:g,:]
    Pb_symm = stacked_symmetric_periods[g:,:]
    # transpose results back
    Pa_symm = Pa_symm.T
    Pb_symm = Pb_symm.T
    return Pa_symm, Pb_symm
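# --- Added usage note (not part of the original module) ---
# Hypothetical call pattern, assuming `Pa` and `Pb` are g x g numpy arrays of
# a- and b-periods computed elsewhere (names are illustrative only):
#
#     Pa_symm, Pb_symm = symmetrize_periods(Pa, Pb, tol=1e-4)
#
# If the integrality check inside `symmetric_transformation_matrix` fails,
# recompute the periods at higher precision and call again.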
 | 
| 
	try:
    from sia.sia import Sia
except ImportError:
    from sia import Sia
__authors__ = 'Dmytro Striletskyi, Alexander Ruban'
__email__ = '[email protected]'
__version__ = '0.0.1a0'
 | 
| 
	# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
BALANCE_MAP = {
    LB_METHOD_ROUND_ROBIN: 'round-robin',
    LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
    LB_METHOD_SOURCE_IP: 'ip-hash'}
LB_PROTOCOL_TCP = 'TCP'
LB_PROTOCOL_HTTP = 'HTTP'
LB_PROTOCOL_HTTPS = 'HTTPS'
LB_PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS'
PROTOCOL_MAP = {
    LB_PROTOCOL_TCP: 'tcp',
    LB_PROTOCOL_HTTP: 'http',
    LB_PROTOCOL_HTTPS: 'https',
    LB_PROTOCOL_TERMINATED_HTTPS: 'https'}
LB_HEALTH_MONITOR_PING = 'PING'
LB_HEALTH_MONITOR_TCP = 'TCP'
LB_HEALTH_MONITOR_HTTP = 'HTTP'
LB_HEALTH_MONITOR_HTTPS = 'HTTPS'
HEALTH_MONITOR_MAP = {
    LB_HEALTH_MONITOR_PING: 'icmp',
    LB_HEALTH_MONITOR_TCP: 'tcp',
    LB_HEALTH_MONITOR_HTTP: 'http',
    LB_HEALTH_MONITOR_HTTPS: 'tcp'}
LB_SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
LB_SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
LB_SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
SESSION_PERSISTENCE_METHOD_MAP = {
    LB_SESSION_PERSISTENCE_SOURCE_IP: 'sourceip',
    LB_SESSION_PERSISTENCE_APP_COOKIE: 'cookie',
    LB_SESSION_PERSISTENCE_HTTP_COOKIE: 'cookie'}
SESSION_PERSISTENCE_COOKIE_MAP = {
    LB_SESSION_PERSISTENCE_APP_COOKIE: 'app',
    LB_SESSION_PERSISTENCE_HTTP_COOKIE: 'insert'}
L7_POLICY_ACTION_REJECT = 'REJECT'
L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL'
L7_POLICY_ACTION_REDIRECT_TO_URL = 'REDIRECT_TO_URL'
L7_RULE_TYPE_HOST_NAME = 'HOST_NAME'
L7_RULE_TYPE_PATH = 'PATH'
L7_RULE_TYPE_FILE_TYPE = 'FILE_TYPE'
L7_RULE_TYPE_HEADER = 'HEADER'
L7_RULE_TYPE_COOKIE = 'COOKIE'
L7_RULE_COMPARE_TYPE_REGEX = 'REGEX'
L7_RULE_COMPARE_TYPE_STARTS_WITH = 'STARTS_WITH'
L7_RULE_COMPARE_TYPE_ENDS_WITH = 'ENDS_WITH'
L7_RULE_COMPARE_TYPE_CONTAINS = 'CONTAINS'
L7_RULE_COMPARE_TYPE_EQUAL_TO = 'EQUAL_TO'
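# --- Added illustrative note (not part of the original module) ---
# These maps translate LBaaS API constants into the backend's own keywords.
# For example, the maps defined above produce:
#
#     BALANCE_MAP[LB_METHOD_LEAST_CONNECTIONS]        -> 'leastconn'
#     PROTOCOL_MAP[LB_PROTOCOL_TERMINATED_HTTPS]      -> 'https'
#     HEALTH_MONITOR_MAP[LB_HEALTH_MONITOR_HTTPS]     -> 'tcp'
#     SESSION_PERSISTENCE_COOKIE_MAP[
#         LB_SESSION_PERSISTENCE_HTTP_COOKIE]         -> 'insert'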
 | 
| 
	import logging
import random
import time
from lib.membase.helper.cluster_helper import ClusterOperationHelper
from lib.remote.remote_util import RemoteMachineShellConnection
from .newtuq import QueryTests
from couchbase_helper.cluster import Cluster
from couchbase_helper.tuq_generators import TuqGenerators
from couchbase_helper.query_definitions import SQLDefinitionGenerator
from membase.api.rest_client import RestConnection
from deepdiff import DeepDiff
log = logging.getLogger(__name__)
class BaseSecondaryIndexingTests(QueryTests):
    def setUp(self):
        super(BaseSecondaryIndexingTests, self).setUp()
        self.ansi_join = self.input.param("ansi_join", False)
        self.index_lost_during_move_out = []
        self.verify_using_index_status = self.input.param("verify_using_index_status", False)
        self.use_replica_when_active_down = self.input.param("use_replica_when_active_down", True)
        self.use_where_clause_in_index = self.input.param("use_where_clause_in_index", False)
        self.scan_consistency = self.input.param("scan_consistency", "request_plus")
        self.scan_vector_per_values = self.input.param("scan_vector_per_values", None)
        self.timeout_for_index_online = self.input.param("timeout_for_index_online", 600)
        self.verify_query_result = self.input.param("verify_query_result", True)
        self.verify_explain_result = self.input.param("verify_explain_result", True)
        self.defer_build = self.input.param("defer_build", True)
        self.build_index_after_create = self.input.param("build_index_after_create", True)
        self.run_query_with_explain = self.input.param("run_query_with_explain", True)
        self.run_query = self.input.param("run_query", True)
        self.graceful = self.input.param("graceful", False)
        self.groups = self.input.param("groups", "all").split(":")
        self.use_rest = self.input.param("use_rest", False)
        self.plasma_dgm = self.input.param("plasma_dgm", False)
        if not self.use_rest:
            query_definition_generator = SQLDefinitionGenerator()
            if self.dataset == "default" or self.dataset == "employee":
                self.query_definitions = query_definition_generator.generate_employee_data_query_definitions()
            if self.dataset == "simple":
                self.query_definitions = query_definition_generator.generate_simple_data_query_definitions()
            if self.dataset == "sabre":
                self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions()
            if self.dataset == "bigdata":
                self.query_definitions = query_definition_generator.generate_big_data_query_definitions()
            if self.dataset == "array":
                self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
            self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
        self.ops_map = self._create_operation_map()
        self.find_nodes_in_list()
        self.generate_map_nodes_out_dist()
        self.memory_create_list = []
        self.memory_drop_list = []
        self.skip_cleanup = self.input.param("skip_cleanup", False)
        self.index_loglevel = self.input.param("index_loglevel", None)
        if self.index_loglevel:
            self.set_indexer_logLevel(self.index_loglevel)
        if self.dgm_run:
            self._load_doc_data_all_buckets(gen_load=self.gens_load)
        self.gsi_thread = Cluster()
        self.defer_build = self.defer_build and self.use_gsi_for_secondary
        self.num_index_replicas = self.input.param("num_index_replica", 0)
    def tearDown(self):
        super(BaseSecondaryIndexingTests, self).tearDown()
    def create_index(self, bucket, query_definition, deploy_node_info=None, desc=None):
        create_task = self.async_create_index(bucket, query_definition, deploy_node_info, desc=desc)
        create_task.result()
        if self.build_index_after_create:
            if self.defer_build:
                build_index_task = self.async_build_index(bucket, [query_definition.index_name])
                build_index_task.result()
            check = self.n1ql_helper.is_index_ready_and_in_list(bucket, query_definition.index_name,
                                                            server=self.n1ql_node)
            self.assertTrue(check, "index {0} failed to be created".format(query_definition.index_name))
    def async_create_index(self, bucket, query_definition, deploy_node_info=None, desc=None):
        index_where_clause = None
        if self.use_where_clause_in_index:
            index_where_clause = query_definition.index_where_clause
        self.query = query_definition.generate_index_create_query(bucket=bucket,
                                                                  use_gsi_for_secondary=self.use_gsi_for_secondary,
                                                                  deploy_node_info=deploy_node_info,
                                                                  defer_build=self.defer_build,
                                                                  index_where_clause=index_where_clause, num_replica=self.num_index_replicas, desc=desc)
        create_index_task = self.gsi_thread.async_create_index(server=self.n1ql_node, bucket=bucket,
                                                               query=self.query, n1ql_helper=self.n1ql_helper,
                                                               index_name=query_definition.index_name,
                                                               defer_build=self.defer_build)
        return create_index_task
    def create_index_using_rest(self, bucket, query_definition, exprType='N1QL', deploy_node_info=None, desc=None):
        ind_content = query_definition.generate_gsi_index_create_query_using_rest(bucket=bucket,
                                                                                  deploy_node_info=deploy_node_info,
                                                                                  defer_build=None,
                                                                                  index_where_clause=None,
                                                                                  gsi_type=self.gsi_type,
                                                                                  desc=desc)
        log.info("Creating index {0}...".format(query_definition.index_name))
        return self.rest.create_index_with_rest(ind_content)
    def async_build_index(self, bucket, index_list=None):
        if not index_list:
            index_list = []
        self.query = self.n1ql_helper.gen_build_index_query(bucket=bucket, index_list=index_list)
        self.log.info(self.query)
        build_index_task = self.gsi_thread.async_build_index(server=self.n1ql_node, bucket=bucket,
                                                             query=self.query, n1ql_helper=self.n1ql_helper)
        return build_index_task
    def async_monitor_index(self, bucket, index_name = None):
        monitor_index_task = self.gsi_thread.async_monitor_index(server=self.n1ql_node, bucket=bucket,
                                                                 n1ql_helper=self.n1ql_helper, index_name=index_name,
                                                                 timeout=self.timeout_for_index_online)
        return monitor_index_task
    def multi_create_index(self, buckets=None, query_definitions=None, deploy_node_info=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = "{0}:{1}".format(bucket.name, query_definition.index_name)
                if index_info not in self.memory_create_list:
                    self.memory_create_list.append(index_info)
                    self.create_index(bucket.name, query_definition, deploy_node_info)
    def multi_create_index_using_rest(self, buckets=None, query_definitions=None, deploy_node_info=None):
        self.index_id_map = {}
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            if bucket not in list(self.index_id_map.keys()):
                self.index_id_map[bucket] = {}
            for query_definition in query_definitions:
                id_map = self.create_index_using_rest(bucket=bucket, query_definition=query_definition,
                                                      deploy_node_info=deploy_node_info)
                self.index_id_map[bucket][query_definition] = id_map["id"]
    def async_multi_create_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        create_index_tasks = []
        self.index_lost_during_move_out = []
        self.log.info(self.index_nodes_out)
        index_node_count = 0
        for query_definition in query_definitions:
            index_info = "{0}".format(query_definition.index_name)
            if index_info not in self.memory_create_list:
                self.memory_create_list.append(index_info)
                self.deploy_node_info = None
                if index_node_count < len(self.index_nodes_out):
                    self.deploy_node_info = ["{0}:{1}".format(
                        self.index_nodes_out[index_node_count].ip,
                        self.index_nodes_out[index_node_count].port)]
                    if query_definition.index_name not in self.index_lost_during_move_out:
                        self.index_lost_during_move_out.append(query_definition.index_name)
                    index_node_count += 1
                for bucket in buckets:
                    create_index_tasks.append(self.async_create_index(
                        bucket.name, query_definition, deploy_node_info=self.deploy_node_info))
                self.sleep(3)
        if self.defer_build and self.build_index_after_create:
            index_list = []
            for task in create_index_tasks:
                task.result()
            for query_definition in query_definitions:
                if query_definition.index_name not in index_list:
                    index_list.append(query_definition.index_name)
            for bucket in self.buckets:
                build_index_task = self.async_build_index(bucket, index_list)
                build_index_task.result()
            monitor_index_tasks = []
            for index_name in index_list:
                for bucket in self.buckets:
                    monitor_index_tasks.append(self.async_monitor_index(bucket.name, index_name))
            return monitor_index_tasks
        else:
            return create_index_tasks
    def multi_drop_index_using_rest(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                self.drop_index_using_rest(bucket, query_definition)
    def multi_drop_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = query_definition.generate_index_drop_query(bucket = bucket.name)
                index_create_info = "{0}:{1}".format(bucket.name, query_definition.index_name)
                if index_info not in self.memory_drop_list:
                    self.memory_drop_list.append(index_info)
                    self.drop_index(bucket.name, query_definition)
                if index_create_info in self.memory_create_list:
                    self.memory_create_list.remove(index_create_info)
    def async_multi_drop_index(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        drop_index_tasks = []
        for bucket in buckets:
            for query_definition in query_definitions:
                index_info = query_definition.generate_index_drop_query(bucket = bucket.name)
                if index_info not in self.memory_drop_list:
                    self.memory_drop_list.append(index_info)
                    drop_index_tasks.append(self.async_drop_index(bucket.name, query_definition))
        return drop_index_tasks
    def drop_index(self, bucket, query_definition, verify_drop=True):
        try:
            self.query = query_definition.generate_index_drop_query(bucket = bucket,
            use_gsi_for_secondary = self.use_gsi_for_secondary, use_gsi_for_primary = self.use_gsi_for_primary)
            actual_result = self.n1ql_helper.run_cbq_query(query = self.query, server = self.n1ql_node)
            if verify_drop:
                check = self.n1ql_helper._is_index_in_list(bucket, query_definition.index_name, server = self.n1ql_node)
                self.assertFalse(check, "index {0} failed to be deleted".format(query_definition.index_name))
        except Exception as ex:
                self.log.info(ex)
                query = "select * from system:indexes"
                actual_result = self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_node)
                self.log.info(actual_result)
    def drop_index_using_rest(self, bucket, query_definition, verify_drop=True):
        self.log.info("Dropping index: {0}...".format(query_definition.index_name))
        self.rest.drop_index_with_rest(self.index_id_map[bucket][query_definition])
        if verify_drop:
            check = self.n1ql_helper._is_index_in_list(bucket, query_definition.index_name, server=self.n1ql_node)
            self.assertFalse(check, "Index {0} failed to be deleted".format(query_definition.index_name))
            del(self.index_id_map[bucket][query_definition])
    def async_drop_index(self, bucket, query_definition):
        self.query = query_definition.generate_index_drop_query(bucket=bucket,
                                                                use_gsi_for_secondary=self.use_gsi_for_secondary,
                                                                use_gsi_for_primary=self.use_gsi_for_primary)
        drop_index_task = self.gsi_thread.async_drop_index(server=self.n1ql_node, bucket=bucket, query=self.query,
                                                           n1ql_helper=self.n1ql_helper,
                                                           index_name=query_definition.index_name)
        return drop_index_task
    def query_using_index_with_explain(self, bucket, query_definition):
        self.query = query_definition.generate_query_with_explain(bucket=bucket)
        actual_result = self.n1ql_helper.run_cbq_query(query=self.query, server=self.n1ql_node)
        self.log.info(actual_result)
        if self.verify_explain_result:
            check = self.n1ql_helper.verify_index_with_explain(actual_result, query_definition.index_name)
            self.assertTrue(check, "Index %s not found" % (query_definition.index_name))
    def async_query_using_index_with_explain(self, bucket, query_definition):
        self.query = query_definition.generate_query_with_explain(bucket=bucket)
        query_with_index_task = self.gsi_thread.async_n1ql_query_verification(server=self.n1ql_node, bucket=bucket,
                                                                              query=self.query,
                                                                              n1ql_helper=self.n1ql_helper,
                                                                              is_explain_query=True,
                                                                              index_name=query_definition.index_name,
                                                                              verify_results=self.verify_explain_result)
        return query_with_index_task
    def multi_query_using_index_with_explain(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                self.query_using_index_with_explain(bucket.name,
                    query_definition)
    def async_multi_query_using_index_with_explain(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        async_query_with_explain_tasks = []
        for bucket in buckets:
            for query_definition in query_definitions:
                async_query_with_explain_tasks.append(self.async_query_using_index_with_explain(bucket.name,
                                                                                                query_definition))
        return async_query_with_explain_tasks
    def query_using_index(self, bucket, query_definition, expected_result=None, scan_consistency=None,
                          scan_vector=None, verify_results=True):
        if not scan_consistency:
            scan_consistency = self.scan_consistency
        self.gen_results.query = query_definition.generate_query(bucket=bucket)
        if expected_result == None:
            expected_result = self.gen_results.generate_expected_result(print_expected_result=False)
        self.query = self.gen_results.query
        log.info("Query : {0}".format(self.query))
        msg, check = self.n1ql_helper.run_query_and_verify_result(query=self.query, server=self.n1ql_node, timeout=500,
                                            expected_result=expected_result, scan_consistency=scan_consistency,
                                            scan_vector=scan_vector, verify_results=verify_results)
        self.assertTrue(check, msg)
    def async_query_using_index(self, bucket, query_definition, expected_result = None, scan_consistency = None, scan_vector = None):
        self.gen_results.query = query_definition.generate_query(bucket = bucket)
        self.log.info("Query : {0}".format(self.gen_results.query))
        if expected_result == None:
            expected_result = self.gen_results.generate_expected_result(print_expected_result = False)
        self.query = self.gen_results.query
        query_with_index_task = self.gsi_thread.async_n1ql_query_verification(
                 server = self.n1ql_node, bucket = bucket,
                 query = self.query, n1ql_helper = self.n1ql_helper,
                 expected_result=expected_result, index_name = query_definition.index_name,
                  scan_consistency = scan_consistency, scan_vector = scan_vector,
                  verify_results = self.verify_query_result)
        return query_with_index_task
    def query_using_index_with_emptyset(self, bucket, query_definition):
        self.gen_results.query = query_definition.generate_query(bucket = bucket)
        self.log.info("Query : {0}".format(self.gen_results.query))
        self.query = self.gen_results.query
        actual_result = self.n1ql_helper.run_cbq_query(query = self.query, server = self.n1ql_node)
        self.assertTrue(len(actual_result["results"]) == 0, "Result is not empty {0}".format(actual_result["results"]))
    def multi_query_using_index_with_emptyresult(self, buckets=None, query_definitions=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            for query_definition in query_definitions:
                self.query_using_index_with_emptyset(bucket.name, query_definition)
    def multi_query_using_index(self, buckets=None, query_definitions=None,
                                expected_results=None, scan_consistency=None,
                                scan_vectors=None, verify_results=True):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        for bucket in buckets:
            scan_vector = None
            if scan_vectors != None:
                scan_vector = scan_vectors[bucket.name]
            for query_definition in query_definitions:
                if expected_results:
                    expected_result = expected_results[query_definition.index_name]
                else:
                    expected_result = None
                self.query_using_index(bucket=bucket.name, query_definition=query_definition,
                                       expected_result=expected_result, scan_consistency=scan_consistency,
                                       scan_vector=scan_vector, verify_results=verify_results)
    def async_multi_query_using_index(self, buckets =[], query_definitions = [], expected_results = {}, scan_consistency = None, scan_vectors = None):
        multi_query_tasks = []
        for bucket in buckets:
            scan_vector = None
            if scan_vectors != None:
                scan_vector = scan_vectors[bucket.name]
            for query_definition in query_definitions:
                if expected_results:
                    multi_query_tasks.append(self.async_query_using_index(bucket.name, query_definition, expected_results[query_definition.index_name],
                     scan_consistency = scan_consistency, scan_vector = scan_vector))
                else:
                    multi_query_tasks.append(self.async_query_using_index(bucket.name, query_definition, None,
                     scan_consistency = scan_consistency, scan_vector = scan_vector))
        return multi_query_tasks
    def async_check_and_run_operations(self, buckets=[], initial=False, before=False, after=False,
                                       in_between=False, scan_consistency=None, scan_vectors=None):
        #self.verify_query_result = True
        #self.verify_explain_result = True
        if initial:
            self._set_query_explain_flags("initial")
            self.log.info(self.ops_map["initial"])
            return self.async_run_multi_operations(buckets = buckets,
                create_index = self.ops_map["initial"]["create_index"],
                drop_index = self.ops_map["initial"]["drop_index"],
                query = self.ops_map["initial"]["query_ops"],
                query_with_explain = self.ops_map["initial"]["query_explain_ops"],
                scan_consistency = scan_consistency,
                scan_vectors = scan_vectors)
        if before:
            self._set_query_explain_flags("before")
            self.log.info(self.ops_map["before"])
            return self.async_run_multi_operations(buckets = buckets,
                create_index = self.ops_map["before"]["create_index"],
                drop_index = self.ops_map["before"]["drop_index"],
                query = self.ops_map["before"]["query_ops"],
                query_with_explain = self.ops_map["before"]["query_explain_ops"],
                scan_consistency = scan_consistency,
                scan_vectors = scan_vectors)
        if in_between:
            self._set_query_explain_flags("in_between")
            self.log.info(self.ops_map["initial"])
            return self.async_run_multi_operations(buckets = buckets,
                create_index = self.ops_map["in_between"]["create_index"],
                drop_index = self.ops_map["in_between"]["drop_index"],
                query = self.ops_map["in_between"]["query_ops"],
                query_with_explain = self.ops_map["in_between"]["query_explain_ops"],
                scan_consistency = scan_consistency,
                scan_vectors = scan_vectors)
        if after:
            self._set_query_explain_flags("after")
            self.log.info(self.ops_map["initial"])
            return self.async_run_multi_operations(buckets = buckets,
                create_index = self.ops_map["after"]["create_index"],
                drop_index = self.ops_map["after"]["drop_index"],
                query = self.ops_map["after"]["query_ops"],
                query_with_explain = self.ops_map["after"]["query_explain_ops"],
                scan_consistency = "request_plus",
                scan_vectors = scan_vectors)
    def run_multi_operations(self, buckets = [], query_definitions = [], expected_results = {},
        create_index = False, drop_index = False, query_with_explain = False, query = False,
         scan_consistency = None, scan_vectors = None):
        try:
            if create_index:
                self.multi_create_index(buckets, query_definitions)
            if query_with_explain:
                self.multi_query_using_index_with_explain(buckets, query_definitions)
            if query:
                self.multi_query_using_index(buckets, query_definitions,
                 expected_results, scan_consistency = scan_consistency,
                 scan_vectors = scan_vectors)
        except Exception as ex:
            self.log.info(ex)
            raise
        finally:
            if drop_index and not self.skip_cleanup:
                self.multi_drop_index(buckets, query_definitions)
    def async_run_multi_operations(self, buckets=None, query_definitions=None, expected_results=None,
                                   create_index=False, drop_index=False, query_with_explain=False, query=False,
                                   scan_consistency=None, scan_vectors=None):
        tasks = []
        if not query_definitions:
            query_definitions = self.query_definitions
        try:
            if create_index:
                tasks += self.async_multi_create_index(buckets, query_definitions)
            if query_with_explain:
                tasks += self.async_multi_query_using_index_with_explain(buckets, query_definitions)
            if query:
                tasks  += self.async_multi_query_using_index(buckets, query_definitions, expected_results,
                 scan_consistency = scan_consistency, scan_vectors = scan_vectors)
            if drop_index:
                tasks += self.async_multi_drop_index(self.buckets, query_definitions)
        except Exception as ex:
            self.log.info(ex)
            raise
        return tasks
    def async_run_operations(self, phase, buckets=None, query_definitions=None, expected_results=None,
                             scan_consistency=None, scan_vectors=None):
        if not buckets:
            buckets = self.buckets
        if not query_definitions:
            query_definitions = self.query_definitions
        if not scan_consistency:
            scan_consistency = self.scan_consistency
        tasks = []
        operation_map = self.generate_operation_map(phase)
        self.log.info("=== {0}: {1} ===".format(phase.upper(), operation_map))
        nodes_out = []
        if isinstance(self.nodes_out_dist, str):
            for service in self.nodes_out_dist.split("-"):
                nodes_out.append(service.split(":")[0])
        if operation_map:
            try:
                if "create_index" in operation_map:
                    if ("index" in nodes_out or "n1ql" in nodes_out) and phase == "in_between":
                        tasks = []
                    else:
                        tasks += self.async_multi_create_index(buckets, query_definitions)
                if "query_with_explain" in operation_map:
                    if "n1ql" in nodes_out and phase == "in_between":
                        tasks = []
                    else:
                        tasks += self.async_multi_query_using_index_with_explain(buckets, query_definitions)
                if "query" in operation_map:
                    if "n1ql" in nodes_out and phase == "in_between":
                        tasks = []
                    else:
                        tasks += self.async_multi_query_using_index(buckets, query_definitions, expected_results,
                                                                 scan_consistency=scan_consistency,
                                                                 scan_vectors=scan_vectors)
                if "drop_index" in operation_map:
                    if "index" in nodes_out or "n1ql" in nodes_out:
                        if phase == "in_between":
                            tasks = []
                    else:
                        tasks += self.async_multi_drop_index(self.buckets, query_definitions)
            except Exception as ex:
                log.info(ex)
                raise
        return tasks
    def run_full_table_scan_using_rest(self, bucket, query_definition, verify_result=False):
        expected_result = []
        actual_result = []
        full_scan_query = "SELECT * FROM {0} WHERE {1}".format(bucket.name, query_definition.index_where_clause)
        self.gen_results.query = full_scan_query
        temp = self.gen_results.generate_expected_result(print_expected_result=False)
        for item in temp:
            expected_result.append(list(item.values()))
        if self.scan_consistency == "request_plus":
            body = {"stale": "False"}
        else:
            body = {"stale": "ok"}
        content = self.rest.full_table_scan_gsi_index_with_rest(self.index_id_map[bucket][query_definition], body)
        if verify_result:
            doc_id_list = []
            for item in content:
                if item["docid"] not in doc_id_list:
                    for doc in self.full_docs_list:
                        if doc["_id"] == item["docid"]:
                            actual_result.append([doc])
                            doc_id_list.append(item["docid"])
            actual_result = [item[0] for item in actual_result]
            expected_result = [item[0] for item in expected_result]
            self.assertEqual(len(actual_result), len(expected_result),
                             "Actual Items {0} are not equal to expected Items {1}".
                             format(len(actual_result), len(expected_result)))
            msg = "The number of rows match but the results mismatch, please check"
            for item in actual_result:
                if item not in expected_result:
                    raise Exception(msg)
    def run_lookup_gsi_index_with_rest(self, bucket, query_definition):
        pass
    def run_range_scan_with_rest(self, bucket, query_definition):
        pass
    def gen_scan_vector(self, use_percentage = 1.0, use_random = False):
        servers = self.get_kv_nodes(servers= self.servers[:self.nodes_init])
        sequence_bucket_map = self.get_vbucket_seqnos(servers, self.buckets)
        scan_vectors ={}
        if use_percentage == 1.0:
            for bucket in self.buckets:
                scan_vector = []
                self.log.info("analyzing for bucket {0}".format(bucket.name))
                map = sequence_bucket_map[bucket.name]
                for i in range(1024):
                    key = "vb_" + str(i)
                    value = [ int(map[key]["abs_high_seqno"]), map[key]["uuid"] ]
                    scan_vector.append(value)
                scan_vectors[bucket.name] = scan_vector
        else:
            for bucket in self.buckets:
                scan_vector = {}
                total = int(self.vbuckets*use_percentage)
                vbuckets_number_list = list(range(0, total))
                if use_random:
                    vbuckets_number_list  =  random.sample(range(0, self.vbuckets), total)
                self.log.info("analyzing for bucket {0}".format(bucket.name))
                map = sequence_bucket_map[bucket.name]
                for key in list(map.keys()):
                    vb = int(key.split("vb_")[1])
                    if vb in vbuckets_number_list:
                        value = [ int(map[key]["abs_high_seqno"]), map[key]["uuid"] ]
                        scan_vector[str(vb)] = value
                scan_vectors[bucket.name] = scan_vector
        return scan_vectors
    def check_missing_and_extra(self, actual, expected):
        missing = []
        extra = []
        for item in actual:
            if not (item in expected):
                 extra.append(item)
        for item in expected:
            if not (item in actual):
                missing.append(item)
        return missing, extra
    def _verify_results(self, actual_result, expected_result, missing_count = 1, extra_count = 1):
        actual_result = self._gen_dict(actual_result)
        expected_result = self._gen_dict(expected_result)
        if len(actual_result) != len(expected_result):
            missing, extra = self.check_missing_and_extra(actual_result, expected_result)
            self.log.error("Missing items: %s.\n Extra items: %s" % (missing[:missing_count], extra[:extra_count]))
            self.fail("Results are incorrect.Actual num %s. Expected num: %s.\n" % (
                                            len(actual_result), len(expected_result)))
        if self.max_verify is not None:
            actual_result = actual_result[:self.max_verify]
            expected_result = expected_result[:self.max_verify]
        msg = "Results are incorrect.\n Actual first and last 100:  %s.\n ... \n %s" +\
        "Expected first and last 100: %s.\n  ... \n %s"
        self.assertTrue(actual_result == expected_result,
                          msg % (actual_result[:100], actual_result[-100:],
                                 expected_result[:100], expected_result[-100:]))
    def verify_index_absence(self, query_definitions, buckets):
        server = self.get_nodes_from_services_map(service_type = "n1ql")
        for bucket in buckets:
            for query_definition in query_definitions:
                check = self.n1ql_helper._is_index_in_list(bucket.name, query_definition.index_name, server = server)
                self.assertFalse(check, " {0} was not absent as expected".format(query_definition.index_name))
    def _gen_dict(self, result):
        result_set = []
        if result != None and len(result) > 0:
            for val in result:
                for key in list(val.keys()):
                    result_set.append(val[key])
        return result_set
    def _verify_index_map(self):
        if not self.verify_using_index_status:
            return
        index_map = self.get_index_map()
        index_bucket_map = self.n1ql_helper.gen_index_map(self.n1ql_node)
        msg = "difference in index map found, expected {0} \n actual {1}".format(index_bucket_map, index_map)
        self.assertTrue(len(list(index_map.keys())) == len(self.buckets),
            "numer of buckets mismatch :: "+msg)
        for bucket in self.buckets:
            self.assertTrue((bucket.name in list(index_map.keys())), " bucket name not present in index map {0}".format(index_map))
        for bucket_name in list(index_bucket_map.keys()):
            self.assertTrue(len(list(index_bucket_map[bucket_name].keys())) == len(list(index_map[bucket_name].keys())), "number of indexes mismatch ::"+msg)
            for index_name in list(index_bucket_map[bucket_name].keys()):
                msg1 ="index_name {0} not found in {1}".format(index_name, list(index_map[bucket_name].keys()))
                self.assertTrue(index_name in list(index_map[bucket_name].keys()), msg1+" :: "+ msg)
    def _verify_primary_index_count(self):
        bucket_map = self.get_buckets_itemCount()
        count = 0
        while not self._verify_items_count() and count < 15:
            self.log.info("All Items Yet to be Indexed...")
            self.sleep(5)
            count += 1
        self.assertTrue(self._verify_items_count(), "All Items didn't get Indexed...")
        self.log.info("All the documents are indexed...")
        self.sleep(10)
        index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(self.buckets, self.n1ql_node)
        self.log.info(bucket_map)
        self.log.info(index_bucket_map)
        for bucket_name in list(bucket_map.keys()):
            actual_item_count = index_bucket_map[bucket_name]
            expected_item_count = bucket_map[bucket_name]
            self.assertTrue(str(actual_item_count) == str(expected_item_count),
                "Bucket {0}, mismatch in item count for index :{1} : expected {2} != actual {3} ".format
                    (bucket_name, "primary", expected_item_count, actual_item_count))
    def _verify_items_count(self):
        """
        Compares Items indexed count is sample
        as items in the bucket.
        """
        index_map = self.get_index_stats()
        for bucket_name in list(index_map.keys()):
            self.log.info("Bucket: {0}".format(bucket_name))
            for index_name, index_val in index_map[bucket_name].items():
                self.log.info("Index: {0}".format(index_name))
                self.log.info("number of docs pending: {0}".format(index_val["num_docs_pending"]))
                self.log.info("number of docs queued: {0}".format(index_val["num_docs_queued"]))
                if index_val["num_docs_pending"] and index_val["num_docs_queued"]:
                    return False
        return True
    def _verify_bucket_count_with_index_count(self, query_definitions=None, buckets=None):
        """
        :param bucket:
        :param index:
        :return:
        """
        count = 0
        if not query_definitions:
            query_definitions = self.query_definitions
        if not buckets:
            buckets = self.buckets
        while not self._verify_items_count() and count < 15:
            self.log.info("All Items Yet to be Indexed...")
            self.sleep(10)
            count += 1
        if not self._verify_items_count():
            raise Exception("All Items didn't get Indexed...")
        bucket_map = self.get_buckets_itemCount()
        for bucket in buckets:
            bucket_count = bucket_map[bucket.name]
            for query in query_definitions:
                index_count = self.n1ql_helper.get_index_count_using_index(bucket,
                                                                           query.index_name, self.n1ql_node)
                self.assertTrue(int(index_count) == int(bucket_count),
                        "Bucket {0}, mismatch in item count for index :{1} : expected {2} != actual {3} ".format
                        (bucket.name, query.index_name, bucket_count, index_count))
        self.log.info("Items Indexed Verified with bucket count...")
    def _check_all_bucket_items_indexed(self, query_definitions=None, buckets=None):
        """
        :param bucket:
        :param index:
        :return:
        """
        count = 0
        while not self._verify_items_count() and count < 15:
            self.log.info("All Items Yet to be Indexed...")
            self.sleep(10)
            count += 1
        self.assertTrue(self._verify_items_count(), "All Items didn't get Indexed...")
    def _create_operation_map(self):
        map_initial = {"create_index":False, "query_ops": False, "query_explain_ops": False, "drop_index": False}
        map_before = {"create_index":False, "query_ops": False, "query_explain_ops": False, "drop_index": False}
        map_in_between = {"create_index":False, "query_ops": False, "query_explain_ops": False, "drop_index": False}
        map_after = {"create_index":False, "query_ops": False, "query_explain_ops": False, "drop_index": False}
        initial = self.input.param("initial", "")
        for op_type in initial.split(":"):
            if op_type != '':
                map_initial[op_type] = True
        before = self.input.param("before", "")
        for op_type in before.split(":"):
            if op_type != '':
                map_before[op_type] = True
        in_between = self.input.param("in_between", "")
        for op_type in in_between.split(":"):
            if op_type != '':
                map_in_between[op_type] = True
        after = self.input.param("after", "")
        for op_type in after.split(":"):
            if op_type != '':
                map_after[op_type] = True
        return {"initial":map_initial, "before":map_before, "in_between": map_in_between, "after": map_after}
    def generate_operation_map(self, phase):
        operation_map = []
        self.verify_query_result = False
        self.verify_explain_result = False
        ops = self.input.param(phase, "")
        for type in ops.split("-"):
            for op_type in type.split(":"):
                if "verify_query_result" in op_type:
                    self.verify_query_result = True
                    continue
                if "verify_explain_result" in op_type:
                    self.verify_explain_result = True
                    continue
                if op_type != '':
                    operation_map.append(op_type)
        return operation_map
    def _query_explain_in_async(self):
        tasks = self.async_run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = False, drop_index = False,
            query_with_explain = self.run_query_with_explain,
            query = False, scan_consistency = self.scan_consistency)
        for task in tasks:
            task.result()
        tasks = self.async_run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = False, drop_index = False,
            query_with_explain = False, query = self.run_query,
            scan_consistency = self.scan_consistency)
        for task in tasks:
            task.result()
    def _set_query_explain_flags(self, phase):
        if ("query_ops" in list(self.ops_map[phase].keys())) and self.ops_map[phase]["query_ops"]:
            self.ops_map[phase]["query_explain_ops"] = True
        if ("do_not_verify_query_result" in list(self.ops_map[phase].keys())) and self.ops_map[phase]["do_not_verify_query_result"]:
            self.verify_query_result = False
            self.ops_map[phase]["query_explain_ops"] = False
        if ("do_not_verify_explain_result" in list(self.ops_map[phase].keys())) and self.ops_map[phase]["do_not_verify_explain_result"]:
            self.verify_explain_result = False
            self.ops_map[phase]["query_explain_ops"] = False
        self.log.info(self.ops_map)
    def fail_if_no_buckets(self):
        if not self.buckets:
            self.fail('FAIL: This test requires buckets')
    def block_incoming_network_from_node(self, node1, node2):
        shell = RemoteMachineShellConnection(node1)
        self.log.info("Adding {0} into iptables rules on {1}".format(
            node1.ip, node2.ip))
        command = "iptables -A INPUT -s {0} -j REJECT".format(node2.ip)
        shell.execute_command(command)
    def resume_blocked_incoming_network_from_node(self, node1, node2):
        shell = RemoteMachineShellConnection(node1)
        self.log.info("Adding {0} into iptables rules on {1}".format(
            node1.ip, node2.ip))
        command = "iptables -D INPUT -s {0} -j REJECT".format(node2.ip)
        shell.execute_command(command)
    def set_indexer_logLevel(self, loglevel="info"):
        """
        :param loglevel:
        Possible Values
            -- info
            -- debug
            -- warn
            -- verbose
            -- Silent
            -- Fatal
            -- Error
            -- Timing
            -- Trace
        """
        self.log.info("Setting indexer log level to {0}".format(loglevel))
        server = self.get_nodes_from_services_map(service_type="index")
        rest = RestConnection(server)
        status = rest.set_indexer_params("logLevel", loglevel)
    def wait_until_cluster_is_healthy(self):
        master_node = self.master
        if self.targetMaster:
            if len(self.servers) > 1:
                master_node = self.servers[1]
        rest = RestConnection(master_node)
        is_cluster_healthy = False
        count = 0
        while not is_cluster_healthy and count < 10:
            count += 1
            cluster_nodes = rest.node_statuses()
            for node in cluster_nodes:
                if node.status != "healthy":
                    is_cluster_healthy = False
                    log.info("Node {0} is in {1} state...".format(node.ip,
                                                                  node.status))
                    self.sleep(5)
                    break
                else:
                    is_cluster_healthy = True
        return is_cluster_healthy
    def wait_until_indexes_online(self, timeout=600, defer_build=False):
        rest = RestConnection(self.master)
        init_time = time.time()
        check = False
        timed_out = False
        while not check:
            index_status = rest.get_index_status()
            next_time = time.time()
            for index_info in index_status.values():
                for index_state in index_info.values():
                    if defer_build:
                        if index_state["status"] == "Created":
                            check = True
                        else:
                            check = False
                            time.sleep(1)
                            break
                    else:
                        if index_state["status"] == "Ready":
                            check = True
                        else:
                            check = False
                            time.sleep(1)
                            break
            if next_time - init_time > timeout:
                timed_out = True
                check = next_time - init_time > timeout
        if timed_out:
            check = False
        return check
    def wait_until_specific_index_online(self, index_name='', timeout=600, defer_build=False):
        rest = RestConnection(self.master)
        init_time = time.time()
        check = False
        timed_out = False
        while not check:
            index_status = rest.get_index_status()
            next_time = time.time()
            log.info(index_status)
            for index_info in list(index_status.values()):
                for idx_name in list(index_info.keys()):
                    if idx_name == index_name:
                        for index_state in list(index_info.values()):
                            if defer_build:
                                if index_state["status"] == "Created":
                                    check = True
                                else:
                                    check = False
                                    time.sleep(1)
                                    next_time = time.time()
                                    break
                            else:
                                if index_state["status"] == "Ready":
                                    check = True
                                else:
                                    check = False
                                    time.sleep(1)
                                    next_time = time.time()
                                    break
            if next_time - init_time > timeout:
                timed_out = True
                check = next_time - init_time > timeout
        if timed_out:
            check = False
        return check
    def verify_index_in_index_map(self, index_name='', timeout=600):
        rest = RestConnection(self.master)
        init_time = time.time()
        check = False
        seen = False
        next_time = init_time
        timed_out = False
        while not check:
            index_status = rest.get_index_status()
            log.info(index_status)
            for index_info in list(index_status.values()):
                for idx_name in list(index_info.keys()):
                    if idx_name == index_name:
                        seen = True
                        check = True
                        break
                    else:
                        check = False
                        time.sleep(1)
                        next_time = time.time()
                        break
            if seen:
                check = True
            if next_time - init_time > timeout:
                timed_out = True
                check = next_time - init_time > timeout
        if timed_out:
            check = False
        return check
    def get_dgm_for_plasma(self, indexer_nodes=None, memory_quota=256):
        """
        Internal Method to create OOM scenario
        :return:
        """
        def validate_disk_writes(indexer_nodes=None):
            if not indexer_nodes:
                indexer_nodes = self.get_nodes_from_services_map(
                    service_type="index", get_all_nodes=True)
            for node in indexer_nodes:
                indexer_rest = RestConnection(node)
                content = indexer_rest.get_index_storage_stats()
                for index in list(content.values()):
                    for stats in list(index.values()):
                        if stats["MainStore"]["resident_ratio"] >= 1.00:
                            return False
            return True
        def kv_mutations(self, docs=1):
            if not docs:
                docs = self.docs_per_day
            gens_load = self.generate_docs(docs)
            self.full_docs_list = self.generate_full_docs_list(gens_load)
            self.gen_results = TuqGenerators(self.log, self.full_docs_list)
            self.load(gens_load, buckets=self.buckets, flag=self.item_flag,
                  verify_data=False, batch_size=self.batch_size)
        if self.gsi_type != "plasma":
            return
        if not self.plasma_dgm:
            return
        log.info("Trying to get all indexes in DGM...")
        log.info("Setting indexer memory quota to {0} MB...".format(memory_quota))
        node = self.get_nodes_from_services_map(service_type="index")
        rest = RestConnection(node)
        rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=memory_quota)
        cnt = 0
        docs = 50 + self.docs_per_day
        while cnt < 100:
            if validate_disk_writes(indexer_nodes):
                log.info("========== DGM is achieved ==========")
                return True
            kv_mutations(self, docs)
            self.sleep(30)
            cnt += 1
            docs += 20
        return False
    def reboot_node(self, node):
        self.log.info("Rebooting node '{0}'....".format(node.ip))
        shell = RemoteMachineShellConnection(node)
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        shell.disconnect()
        # wait for restart and warmup on all node
        self.sleep(self.wait_timeout * 5)
        # disable firewall on these nodes
        self.stop_firewall_on_node(node)
        # wait till node is ready after warmup
        ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self,
                                                             wait_if_warmup=True)
 | 
| 
	from glob import glob
from os.path import join, basename
from typing import Dict, Any
class TkdndEvent(object):
    """
    see http://www.ellogon.org/petasis/tcltk-projects/tkdnd/tkdnd-man-page
    for details on the fields
    The longer attribute names (action instead of %A) were made up for
    this API.
    Not all attributes are visible yet, since I have not thought
    through what conversions they should receive and I don't want to
    unnecessarily change their types later.
    """
    substitutions = {
        "%A": "action",
        "%b": "button",
        "%D": "data",
        "%m": "modifiers",
        "%T": "type",
        "%W": "targetWindow",
        "%X": "mouseX",
        "%Y": "mouseY",
    }
    @classmethod
    def makeEvent(cls, *args):
        ev = cls()
        for (k, v), arg in zip(sorted(TkdndEvent.substitutions.items()), args):
            setattr(ev, v, arg)
        # it would be cool for this to decode text data according to the charset in the type
        for attr in ['button', 'mouseX', 'mouseY']:
            setattr(ev, attr, int(getattr(ev, attr)))
        return (ev,)
    tclSubstitutions = ' '.join(sorted(substitutions.keys()))
    def __repr__(self):
        return "<TkdndEvent %r>" % self.__dict__
class Hover(object):
    def __init__(self, widget, style):
        self.widget, self.style = widget, style
        self.oldStyle: Dict[Any, Any] = {}
    def set(self, ev):
        for k, v in list(self.style.items()):
            self.oldStyle[k] = self.widget.cget(k)
        self.widget.configure(**self.style)
        return ev.action
    def restore(self, ev):
        self.widget.configure(**self.oldStyle)
def initTkdnd(tk, tkdndBuildDir):
    """
    pass the 'tk' attribute of any Tkinter object, and the top dir of
    your built tkdnd package
    """
    tk.call('source', join(tkdndBuildDir, 'library/tkdnd.tcl'))
    for dll in glob(
            join(tkdndBuildDir,
                 '*tkdnd*' + tk.call('info', 'sharedlibextension'))):
        tk.call('tkdnd::initialise', join(tkdndBuildDir, 'library'),
                join('..', basename(dll)), 'tkdnd')
def dragSourceRegister(widget, action='copy', datatype='text/uri-list',
                       data=''):
    """
    if the 'data' param is callable, it will be called every time to
    look up the current data.
    If the callable returns None (or data is None to begin with), the drag
    is not started.
    """
    widget.tk.call('tkdnd::drag_source', 'register', widget._w)
    # with normal Tkinter bind(), the result of your handler isn't
    # actually returned so the drag doesn't get launched. This is a
    # corrected version of what bind() does when you pass a function,
    # but I don't block my tuple from getting returned (as a tcl list)
    def init():
        dataValue = data() if callable(data) else data
        if dataValue is None:
            return
        return (action, datatype, dataValue)
    funcId = widget._register(
        init,
        widget._substitute,
        1  # needscleanup
    )
    widget.bind("<<DragInitCmd>>", funcId)
def dropTargetRegister(
        widget,
        typeList=None,
        onDropEnter=None,
        onDropPosition=None,
        onDropLeave=None,
        onDrop=None,
        hoverStyle=None,
):
    """
    the optional callbacks will be called with a TkdndEvent
    argument.
    onDropEnter, onDropPosition, and onDrop are supposed to return an
    action (perhaps the value in event.action). The return value seems
    to have no effect, but it might just be that errors are getting
    silenced.
    Passing hoverStyle sets onDropEnter to call
    widget.configure(**hoverStyle) and onDropLeave to restore the
    widget's style. onDrop is also wrapped to do a restore.
    """
    if hoverStyle is not None:
        hover = Hover(widget, hoverStyle)
        def wrappedDrop(ev):
            hover.restore(ev)
            if onDrop:
                return onDrop(ev)
        return dropTargetRegister(widget,
                                  typeList=typeList,
                                  onDropEnter=hover.set,
                                  onDropLeave=hover.restore,
                                  onDropPosition=onDropPosition,
                                  onDrop=wrappedDrop)
    if typeList is None:
        typeList = ['*']
    widget.tk.call(*(['tkdnd::drop_target', 'register', widget._w] + typeList))
    for sequence, handler in [
        ('<<DropEnter>>', onDropEnter),
        ('<<DropPosition>>', onDropPosition),
        ('<<DropLeave>>', onDropLeave),
        ('<<Drop>>', onDrop),
    ]:
        if not handler:
            continue
        func = widget._register(handler,
                                subst=TkdndEvent.makeEvent,
                                needcleanup=1)
        widget.bind(sequence, func + " " + TkdndEvent.tclSubstitutions)
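# Minimal usage sketch (not part of the original module): it assumes a local
# tkdnd build at './tkdnd-build' (hypothetical path) and Python 3's tkinter;
# the widgets and callbacks below are illustrative only.
if __name__ == '__main__':
    import tkinter
    root = tkinter.Tk()
    initTkdnd(root.tk, './tkdnd-build')  # hypothetical build directory
    src = tkinter.Label(root, text='drag me', bg='lightblue')
    src.pack()
    dragSourceRegister(src, data='file:///tmp/example.txt')
    dst = tkinter.Label(root, text='drop here', bg='white', width=20)
    dst.pack()
    dropTargetRegister(dst,
                       onDrop=lambda ev: print('dropped:', ev.data),
                       hoverStyle=dict(bg='yellow'))
    root.mainloop()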
 | 
| 
	from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import str
from builtins import range
from . import MultipleSplinesFunction
from . import SplineFunction
from . import Point
from . import Axis
import numpy as np
class GradeableFunction(MultipleSplinesFunction.MultipleSplinesFunction):
    """GradeableFunction."""
    def __init__(self, gradeable, tolerance = dict()):
        f = gradeable
        xaxis = Axis.Axis(f.params['xrange'], f.params['width'])
        yaxis = Axis.Axis(f.params['yrange'][::-1], f.params['height'])
        MultipleSplinesFunction.MultipleSplinesFunction.__init__(self, xaxis, yaxis, path_info = f, tolerance = tolerance)
        self.set_default_tolerance('point_distance_squared', 400) # threshold for finding a point close to a point
        self.set_default_tolerance('point_distance', 20) # threshold for finding a point close to an x value
        # transform from polar
        #if f.params['xscale'] == 'polar' and f.params['yscale'] == 'polar':
        if 'coordinates' in f.params and f.params['coordinates'] == 'polar':
            from .PolarTransform import PolarTransform
            self.pt = PolarTransform(gradeable, self)
            f = self.pt.getTransformedFunctionData()
            xaxis = Axis.Axis(f.params['xrange'], f.params['width'])
            yaxis = Axis.Axis(f.params['yrange'][::-1], f.params['height'])
            MultipleSplinesFunction.MultipleSplinesFunction.__init__(self, xaxis, yaxis, path_info = f, tolerance = tolerance)
            self.pt.resampleNewSplines()
    def samplePoints(self):
        spline_samples = []
        
        for f in self.functions:
            print("number of functions: " + str(len(f.functions)))
            curve_samples = []
            # these are the spline function objects
            for curve in f.functions:
                # these are the curve function objects
                curve_samples.extend(self.sample_x_and_y(curve, 0.02))
            curve_samples.append(self.sample_last_t_1(f.functions[-1]))
            spline_samples.append(curve_samples)
        return spline_samples
    def sample_x_and_y(self, curve, step):
        samples = []
        x_t = curve.x
        y_t = curve.y
        for t in np.arange(0, 1, step):
            # interpolate the x and y values
            x = np.polyval(x_t, t)
            y = np.polyval(y_t, t)
            samples.append([x, y])
        return samples
    def sample_last_t_1(self, curve):
        x = np.polyval(curve.x, 1)
        y = np.polyval(curve.y, 1)
        return [x, y]
            
    def create_from_path_info(self, path_info):
        dtol = 100
        self.functions = []
        self.points = []
        xvals = []
        for i in range(len(path_info)):
            if 'spline' in path_info[i]:
                spline = SplineFunction.SplineFunction(self.xaxis, self.yaxis, path_info[i]['spline'])
                self.functions.append(spline)
                xvals += spline.get_domain()
                self.domain = [np.min(xvals), np.max(xvals)]
            if 'point' in path_info[i]:
                [px_x, px_y] = path_info[i]['point']
                point = Point.Point(self, px_x, px_y)
                d, p = self.closest_point_to_point(point)
                if d >= dtol:
                    self.points.append(point)
            if 'tag' in path_info[i]:
                tag = path_info[i]['tag']
                if len(self.functions) > 0:
                    self.functions[-1].set_tag(tag)
                elif len(self.points) > 0:
                    self.points[-1].set_tag(tag)
        # set the gradeable object list to the tagable list
        self.set_tagables(None)
        if len(self.functions) > 0:
            self.set_tagables(self.functions)
        if len(self.points) > 0:
            self.set_tagables(self.points)
## for Points ##
    # returns the distance (squared) and the point
    def closest_point_to_point(self, point):
        """Return the square pixel distance to the closest point and a Point instance.
        Args:
            point: a Point instance
        Returns:
            float, Point: 
            minDistanceSquared: the square of the pixel distance between point
                                and the closest point, or float('inf') if no
                                point exists.
            minPoint: the closest Point to x, or None if no point exists.
        """
        minDistanceSquared = float('inf')
        minPoint = None
        for p in self.points:
            d = p.get_px_distance_squared(point)
            if d < minDistanceSquared:
                minDistanceSquared = d
                minPoint = p
        return minDistanceSquared, minPoint
    # returns the distance and the point
    def closest_point_to_x(self, x):
        """Return the distance to the closest point and a Point instance.
        Args:
            x: a value in the range of the x axis.
        Returns:
            float, Point:
            minDistance: the absolute distance between x and the point, or
                         float('inf') if no point exists.
            minPoint: the closest Point to x, or None if no point exists.
        """
        minDistance = float('inf')
        minPoint = None
        for p in self.points:
            d = p.get_x_distance(x)
            if d < minDistance:
                minDistance = d
                minPoint = p
        return minDistance, minPoint
    # returns None if no point is close enough
    def get_point_at(self, point=False, x=False, y=False, distTolerance=None,
                     squareDistTolerance=None):
        """ Return a reference to the Point declared at the given value.
        Args:
            point(default: False): a Point instance at the value of interest.
            x(default: False): the x coordinate of interest.
            y(default: False): the y coordinate of interest.
            distTolerance(default: None): the pixel distance tolerance if 
                                          only the x coordinate is given. If None
                                          default constant 'point_distance' is used.
            squareDistTolerance(default: None): the square pixel distance tolerance
                                          if point, or x and y are given. If
                                          None, default constant 'point_distance_squared'
                                          is used.
        Note:    
           There are three use cases:
              1) point not False: use the Point instance as the target to locate a point in the function.
              2) x and y not False: use (x, y) as the target to locate a point in the function.
              3) x not False: use only the x coordinate to locate a point in the function, returning the first Point with the given x value.
        Returns:
            Point: 
            the first Point instance within tolerances of the given arguments, or None
        """
        if distTolerance is None:
            distTolerance = self.tolerance['point_distance'] / self.xscale
        else:
            distTolerance /= self.xscale
        if squareDistTolerance is None:
            squareDistTolerance = self.tolerance['point_distance_squared']
                
        if point is not False:
            distanceSquared, foundPoint = self.closest_point_to_point(point)
            if distanceSquared < squareDistTolerance:
                return foundPoint
        if y is not False and x is not False:
            point = Point.Point(self, x, y, pixel=False)
            return self.get_point_at(point=point)
        if x is not False:
            distance, foundPoint = self.closest_point_to_x(x)
            if distance < distTolerance:
                return foundPoint
        return None
    def has_point_at(self, point=False, x=False, y=False, distTolerance=None,
                     squareDistTolerance=None):
        """ Return whether a point is declared at the given value.
        Args:
            point(default: False): a Point instance at the value of interest.
            x(default: False): the x coordinate of interest.
            y(default: False): the y coordinate of interest.
            distTolerance(default: None): the pixel distance tolerance if 
                                          only the x coordinate is given. If None
                                          default constant 'point_distance' is used.
            squareDistTolerance(default: None): the square pixel distance tolerance
                                          if point, or x and y are given. If
                                          None, default constant 'point_distance_squared'
                                          is used.
        Note:    
           There are three use cases:
              1) point not False: use the Point instance as the target to locate a point in the function.
              2) x and y not False: use (x, y) as the target to locate a point in the function.
              3) x not False: use only the x coordinate to locate a point in the function, returning the first Point with the given x value.
        Returns:
            bool:
            true if there is a Point declared within tolerances of the given
            argument(s), false otherwise.
        """
        return self.get_point_at(point=point, x=x, y=y,
                                 distTolerance=distTolerance,
                                 squareDistTolerance=squareDistTolerance) is not None
    def get_number_of_points(self):
        """Return the number of points declared in the function."""
        return len(self.points)
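# Standalone sketch (illustrative, not used by the class): the nearest-point
# scan performed by closest_point_to_point/get_point_at, with the default
# 'point_distance_squared' tolerance of 400 square pixels.
def _closest_point_sketch(points, target, square_tolerance=400):
    """points and target are (x, y) pixel tuples; returns the accepted point or None."""
    best_d2, best_point = float('inf'), None
    for px, py in points:
        d2 = (px - target[0]) ** 2 + (py - target[1]) ** 2
        if d2 < best_d2:
            best_d2, best_point = d2, (px, py)
    return best_point if best_d2 < square_tolerance else None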
 | 
| 
	from __future__ import print_function
import os
import shutil
import sys
import stat
from . import webext
def setup_parser(parser):
    parser.what_group.add_argument('--firefox', action='store_true',
        help="Install Firefox addon.")
    firefox_group = parser.add_argument_group('Firefox options')
    firefox_group.add_argument('--native-only', action='store_true')
    firefox_group.add_argument('--xpi', default='/Volumes/CGroot/systems/packages/Firefox/sgactions/latest.xpi')
def main(args):
    if not (args.all or args.firefox):
        return
    if args.native_only or not os.path.exists(args.xpi):
        args.xpi = None
    if not (args.native_only or args.xpi):
        print("Firefox XPI does not exist.", file=sys.stderr)
        exit(1)
    for home in args.home:
        print('Setting up firefox in', home)
        setup_one_home(home, args)
    return True
def setup_one_home(home, args):
    if args.xpi:
        _copy_extension_into_home(home, args)
    if sys.platform == 'darwin':
        native_dir = os.path.join(home, 'Library/Application Support/Mozilla/NativeMessagingHosts')
    else:
        native_dir = os.path.join(home, '.mozilla/native-messaging-hosts')
    webext.install_native_messenger(native_dir, allowed_extensions=[
        '@sgactions',
    ])
def _copy_extension_into_home(home, args):
    count = 0 
    home_stat = os.stat(home)
    for rel_profile in 'Library/Application Support/Firefox/Profiles', '.mozilla/firefox':
        profile_root = os.path.join(home, rel_profile)
        if not os.path.exists(profile_root):
            continue
        for name in os.listdir(profile_root):
            ext_dir = os.path.join(profile_root, name, 'extensions')
            if not os.path.exists(ext_dir):
                continue
            _copy_extension(ext_dir, args, home_stat)
            count += 1
    if not count:
        print('    WARNING: No Firefox profiles found!')
def _copy_extension(ext_dir, args, home_stat):
    dst = os.path.join(ext_dir, '@sgactions.xpi')
    exists = os.path.exists(dst)
    print('    {} extension at {}'.format('Replacing' if exists else 'Copying',  dst))
    if not args.dry_run:
        if exists:
            os.unlink(dst)
        shutil.copyfile(args.xpi, dst)
        if not os.getuid():
            print('        Setting ownership and permissions')
            os.chown(dst, home_stat.st_uid, home_stat.st_gid)
            os.chmod(dst, 0o755)
 | 
| 
	import numpy as np
from multiagent.core import World, Agent, Landmark, Border
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
    def make_world(self):
        world = World()
        # set any world properties first
        world.dim_c = 2
        num_agents = 6
        num_landmarks = 6
        world.collaborative = False
        # Control partial observability of the agents
        self.vision_range = 3  # multiplier of agent size
        self.land_vision_count = 4
        self.agent_vision_count = 3
        # add agents
        world.agents = [Agent() for _ in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = True
            agent.silent = True
            agent.size = 0.15 / (num_agents / 6)
        # add landmarks
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.05 / (num_landmarks / 6)
        self.occ_land_dist = world.agents[0].size - world.landmarks[0].size
        self.reset_world(world)
        return world
    def reset_world(self, world):
        # random properties for agents
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.35, 0.35, 0.85])
        # random properties for landmarks
        for i, landmark in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for i, landmark in enumerate(world.landmarks):
            landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
    def benchmark_data(self, agent, world):
        rew = 0
        collisions = 0
        occupied_landmarks = 0
        min_dists = 0
        dists = [np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos))) for l in world.landmarks]
        rew -= min(dists)
        if min(dists) < 0.1:
            occupied_landmarks += 1
        # for l in world.landmarks:
        #     dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
        #     min_dists += min(dists)
        #     rew -= min(dists)
        #     if min(dists) < 0.1:
        #         occupied_landmarks += 1
        if agent.collide:
            for a in world.agents:
                if a is not agent:
                    if self.is_collision(a, agent):
                        rew -= 1
                        collisions += 1
        info = {'success': [], 'collisions': [], 'rew': [], 'min_dists': [], 'occ_land': []}
        info['collisions'].append(collisions)
        info['occ_land'].append(occupied_landmarks)
        info['rew'].append(rew)
        info['min_dists'].append(min(dists))
        return info
        # return (rew, collisions, min_dists, occupied_landmarks)
    def is_collision(self, agent1, agent2):
        delta_pos = agent1.state.p_pos - agent2.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        dist_min = agent1.size + agent2.size
        return dist < dist_min
    def reward(self, agent, world):
        # Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
        rew = 0
        dists = [np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos))) for l in world.landmarks]
        rew -= min(dists)
        if not min(dists) < self.occ_land_dist:
            rew -= 1
        if agent.collide:
            for a in world.agents:
                if a is not agent:
                    if self.is_collision(a, agent):
                        rew -= 1
        return rew
    def observation(self, agent, world):
        # get positions of all entities in this agent's reference frame
        entity_pos = self.get_land_vision(agent, world)
        other_pos = self.get_agent_vision(agent, world)
        return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos)  # + comm
    def get_agent_vision(self, agent, world):
        dist = []
        for other in world.agents:
            dist.append(np.sqrt(np.sum(np.square(other.state.p_pos - agent.state.p_pos))))
        sorted_dist = np.argsort(dist)
        agent_idx = sorted_dist[1:self.agent_vision_count+1]  # max. number of agents it can observe
        agent_vision = []
        for i, other in enumerate(world.agents):
            if i in agent_idx:
                agent_vision.append(other.state.p_pos - agent.state.p_pos)
        return agent_vision
    def get_land_vision(self, agent, world):
        dist = []
        for entity in world.landmarks:
            dist.append(np.sqrt(np.sum(np.square(entity.state.p_pos - agent.state.p_pos))))
        # Ascending sort, and retrieve the index of landmarks in that order
        sorted_dist = np.argsort(dist)
        land_idx = sorted_dist[0:self.land_vision_count]  # max. number of landmarks that it can observe
        # Check if these landmarks are in the vision range and populate observation
        land_vision = []
        for i, entity in enumerate(world.landmarks):
            if i in land_idx:
                land_vision.append(entity.state.p_pos - agent.state.p_pos)
        return land_vision
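# Standalone sketch (illustrative, not called by the scenario): the k-nearest
# selection used by get_agent_vision/get_land_vision, i.e. sort distances with
# np.argsort and keep the relative positions of the k closest entities.
def _k_nearest_offsets(agent_pos, entity_positions, k):
    entity_positions = np.asarray(entity_positions, dtype=float)
    dists = np.sqrt(np.sum(np.square(entity_positions - agent_pos), axis=1))
    nearest = np.argsort(dists)[:k]
    return [entity_positions[i] - agent_pos for i in nearest]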
 | 
| 
	from __future__ import print_function
import os.path
from tracker import KalmanTracker, ORBTracker, ReIDTracker
from imutils.video import WebcamVideoStream
import numpy as np
import cv2
from tracker_utils import bbox_to_centroid
import random
import colorsys
from human_detection import DetectorAPI
import face_recognition
import os
import sys
import glob
from collections import OrderedDict
from threading import Thread
import PySimpleGUI as sg
import datetime
import shutil
import mss
sct = mss.mss()
monitor = {"top": 0, "left": 0, "width": 1366, "height": 768}
# initializing data structures for face_recognition
known_face_names = []
known_face_encodings = []
face_locations = []
face_encodings = []
face_names = []
# reading jpg names from folder & adding them  to list
file = [os.path.basename(x) for x in glob.glob(r'images//' + '*.jpg')]
known_face_names.extend(file)
# adding faces from folder to encodings
for filename in glob.glob('images/*.jpg'):
    filename_image = face_recognition.load_image_file(filename)
    filename_encoding = face_recognition.face_encodings(filename_image)[0]
    known_face_encodings.append(filename_encoding)
#  getting rid of .jpg from list
known_face_names = " ".join(known_face_names).replace(".jpg", " ").split()
class ScreenFeed:
    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.hope, args=())
        t.daemon = True
        t.start()
        return self
    def hope(self):
        while True:
            self.x = np.array(sct.grab(monitor))
    def read(self):
        return self.x
class Presence:
    @staticmethod
    def select_area(first_frame):
        # The Polish literals are kept as-is because they are reused elsewhere:
        # "zaznacz drzwi" = "select the doors" (window title), "koordynaty" = "coordinates" (file name).
        area = cv2.selectROI("zaznacz drzwi", first_frame)
        save_crds = open("koordynaty", "w")
        save_crds.write(str(area[0]) + "\n" + str(area[1]) + "\n" + str(area[2]) + "\n" + str(area[3]))
        save_crds.close()
        return int(area[0]), int(area[1]), int(area[2]), int(area[3])
    @staticmethod
    def close():
        cv2.destroyWindow("zaznacz drzwi")
class Distance:
    def __init__(self):
        self.known_distance = 100.0
        self.known_width = 14.0
    def focal_distance(self):
        self.img = cv2.imread("mensura.jpg")
        # self.resize = cv2.resize(self.img, (940, 560))
        self.ref = cv2.selectROI("distance", self.img)
        self.ref_width = int(self.ref[2])
        self.focalLength = (self.ref_width * self.known_distance) / self.known_width
        return self.focalLength
    def skip(self, focalLength):
        self.focalLength = focalLength
        return self.focalLength
    def distance_to_camera(self,  bbox):
        distance = (self.known_width * self.focalLength) / (int(bbox[2]) - int(bbox[0]))
        return distance
    @staticmethod
    def close():
        cv2.destroyWindow("distance")
# associates tracker IDs with recognized face names
class Associate:
    def __init__(self):
        plox = OrderedDict()
        cokolwiek = []
        listaID = set()
        self.plox = plox
        self.present_list = []
        self.ploxiterate = self.plox.copy()
        self.cokolwiek = cokolwiek
        self.listaID = listaID
        self.sendhelp =[]
        self.keystodel = []
    def counter(self, tracks):
        self.tracks = tracks
        self.plox[len(self.tracks)] = None
        return self.plox, self.tracks
    def associating(self, face_names, ID):
        self.ID = ID
        self.face_names = face_names
        self.listaID.add(self.ID)
        for name in self.face_names:
            if name is not None:
                if name not in self.cokolwiek:
                    if name != "Unknown":
                        self.cokolwiek.append(name)
        self.sendhelp = list(zip(self.listaID, self.cokolwiek))
        return self.sendhelp, self.listaID, self.cokolwiek
    def make(self):
        for key in self.plox.keys():
            if key <= len(self.sendhelp) - 1:
                self.plox[key] = self.sendhelp[key]
                self.ploxiterate = self.plox.copy()
            else:
                pass
        return self.plox
    def delnone(self):
        try:
            for key, value in self.ploxiterate.items():
                if value is None:
                    del self.plox[key]
        except KeyError:
            pass
        return self.plox
    def clean(self):
        self.listaIDCOPY = self.listaID.copy()
        self.IDintracks = [x[0] for x in self.tracks]
        for identificator in self.listaIDCOPY:
            if identificator not in self.IDintracks:
                self.listaID.remove(identificator)
        for element in self.sendhelp:
            listcheck = element[0]
            if listcheck not in self.IDintracks:
                self.sendhelp.remove(element)
                self.cokolwiek.remove(element[1])
        return self.plox, self.sendhelp, self.listaID, self.cokolwiek
    def check(self):
        print(self.plox)
    def check_frq(self, present_id):
        for key, value in  self.plox.items():
            for ID in present_id:
                if ID in value and self.plox[key] not in self.present_list:
                    self.present_list.append(self.plox[key])
        return self.present_list
# main tracking class
class SORT:
    def __init__(self, src=None, tracker='Kalman', detector='faster-rcnn', benchmark=False):
        """
         Sets key parameters for SORT
        :param src: path to video file
        :param tracker: (string) 'ORB', 'Kalman' or 'ReID', determines which Tracker class will be used for tracking
        :param benchmark: (bool) determines whether the track will perform a test on the MOT benchmark
        ---- attributes ---
        detections (list) - relevant for 'benchmark' mode, data structure for holding all the detections from file
        frame_count (int) - relevant for 'benchmark' mode, frame counter, used for indexing and looping through frames
        """
        if tracker == 'Kalman': self.tracker = KalmanTracker()
        elif tracker == 'ORB': self.tracker = ORBTracker()
        elif tracker == 'ReID': self.tracker = ReIDTracker()
        screen = ScreenFeed()
        self.benchmark = benchmark
        # self.src = screen.start()
        if src is not None:
            # old version, in case multithreading breaks something, which will inevitably happen
            # self.src = cv2.VideoCapture(src)
            self.src = WebcamVideoStream(src=src).start()
        self.detector = None
        if self.benchmark:
            SORT.check_data_path()
            self.sequences = ['PETS09-S2L1', 'TUD-Campus', 'TUD-Stadtmitte', 'ETH-Bahnhof']
            """
            More sequences:
            'ETH-Sunnyday', 'ETH-Pedcross2', 'KITTI-13', 'KITTI-17', 'ADL-Rundle-6', 'ADL-Rundle-8', 'Venice-2'
            """
            self.seq_idx = None
            self.load_next_seq()
        else:
            if detector == 'faster-rcnn':
                model_path = './faster_rcnn_inception_v2/frozen_inference_graph.pb'
                self.score_threshold = 0.9  # threshold for box score from neural network
            self.detector = DetectorAPI(path_to_ckpt=model_path)
            self.start_tracking()
    def load_next_seq(self):
        """
        When switching sequence - propagate the sequence index and reset the frame count
        Load pre-made detections for .txt file (from MOT benchmark). Starts tracking on next sequence
        """
        if self.seq_idx == len(self.sequences) - 1:
            print('SORT finished going over all the input sequences... closing tracker')
            return
        # Load detection from next sequence and reset the frame count for it
        if self.seq_idx is None:
            self.seq_idx = 0
        else:
            self.seq_idx += 1
        self.frame_count = 1
        # Load detections for new sequence
        file_path = 'data/%s/det.txt' % self.sequences[self.seq_idx]
        self.detections = np.loadtxt(file_path, delimiter=',')
        # reset the tracker and start tracking on new sequence
        self.tracker.reset()
        self.start_tracking()
    def next_frame(self):
        """
        Method for handling the correct way to fetch the next frame according to the 'src' or
         'benchmark' attribute applied
        :return: (np.ndarray) next frame, (np.ndarray) detections for that frame
        """
        if self.benchmark:
            frame = SORT.show_source(self.sequences[self.seq_idx], self.frame_count)
            new_detections = self.detections[self.detections[:, 0] == self.frame_count, 2:7]
            new_detections[:, 2:4] += new_detections[:, 0:2]  # convert from [x1,y1,w,h] to [x1,y1,x2,y2]
            self.frame_count += 1
            return frame, new_detections[:, :4]
        else:
            frame = self.src.read()
            boxes, scores, classes, num = self.detector.processFrame(frame)
            # suppress boxes with scores lower than the threshold
            boxes_nms = []
            for i in range(len(boxes)):
                if classes[i] == 1 and scores[i] > self.score_threshold:    # Class 1 represents person
                    boxes_nms.append(boxes[i])
            return frame, boxes_nms
    def face_rec(self, frame, startX, startY,endX, endY):
        framef = cv2.rectangle(frame, (int(startX), int(startY)), (int(endX), int(endY)), 2)
        rgb_frame = framef[:, :, ::-1]  # 95% sure this is redundant because it's already BGR, but who knows, so leaving it
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
        self.face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            self.face_names.append(name)
        return self.face_names
    def start_tracking(self):
        """
        Main driver method for the SORT class, starts tracking detections from source.
        Receives list of associated detections for each frame from its tracker (Kalman or ORB),
        Shows the frame with color specific bounding boxes surrounding each unique track.
        """
        associate = Associate()
        distance = Distance()
        frame, detections = self.next_frame()
        layout = [[sg.Button("skip focal length", key="-SKIP_FOCAL-"), sg.Button("choose doors", key="-DOORS-")],
                  [sg.Button("retake focus length", key="-GET_FOCAL-"), sg.Button("skip doors", key="-SKIP_DOORS-")],
                  [sg.Text("distance"), sg.Input()], # value[0]
                  [sg.Text("focal length") ,sg.Input()], # value[1]
                  [sg.Combo(['podgląd', 'speed'])], # value[2]
                  [sg.Submit(key="-SEND-"), sg.Cancel()]]
        window = sg.Window('menu2', layout)
        while True:
            event, values = window.read()
            if event == "-SKIP_FOCAL-":
                distance.skip(int(values[1]))
            if event == "-GET_FOCAL-":
                print(distance.focal_distance())
                distance.close()
            if event == "-DOORS-":
                coordinates = Presence.select_area(frame)
                Presence.close()
            if event == "-SKIP_DOORS-":
                save_crds = open("koordynaty", "r")
                coordinates = save_crds.readlines()
            if event == sg.WIN_CLOSED or event == 'Exit' or event =="-SEND-":
                break
        window.close()
        while True:
            # Fetch the next frame from video source, if no frames are fetched, stop loop
            frame, detections = self.next_frame()
            if frame is None:
                break
            # Send new detections to set tracker
            if isinstance(self.tracker, KalmanTracker):
                tracks = self.tracker.update(detections)
            elif isinstance(self.tracker, ORBTracker) or isinstance(self.tracker, ReIDTracker):
                tracks = self.tracker.update(frame, detections)
            else:
                raise Exception('[ERROR] Tracker type not specified for SORT')
            associate.counter(tracks)
            # Look through each track and display it on frame (each track is a tuple (ID, [x1,y1,x2,y2])
            try:
                for ID, bbox in tracks:
                    bbox = self.verify_bbox_format(bbox)
                    # Generate pseudo-random colors for bounding boxes for each unique ID
                    random.seed(ID)
                    bbox_distance = distance.distance_to_camera(bbox)
                    # Make sure the colors are strong and bright and draw the bounding box around the track
                    h, s, l = random.random(), 0.5 + random.random() / 2.0, 0.4 + random.random() / 5.0
                    color = [int(256 * i) for i in colorsys.hls_to_rgb(h, l, s)]
                    startX, startY, endX, endY = bbox
                    cv2.rectangle(frame, (int(startX), int(startY)), (int(endX), int(endY)), color, 2)
                    self.face_rec(frame,startX, startY, endX, endY)
                    associate.associating(self.face_names, ID)
                    associate.make()
                    associate.clean()
                    associate.delnone()
                    associate.check()
                    # Calculate centroid from bbox, display it and its unique ID
                    centroid = bbox_to_centroid(bbox)
                    text = "ID {}".format(ID)
                    cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                    cv2.circle(frame, (centroid[0], centroid[1]), 4, color, -1)
                    present =[]
                    if startX >= int(coordinates[0]) and startY >= int(coordinates[1]) and \
                            endX <= int(coordinates[0]) + int(coordinates[2]) and \
                            endY <= int(coordinates[1]) + int(coordinates[3]) and bbox_distance > int(values[0]):
                        present.append(ID)
                    real_present_popup = [associate.check_frq(present)]
                cv2.imshow("Video Feed", frame)
            except TypeError:
                pass
            # if the `q` key was pressed, break from the loop
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print('SORT operation terminated by user... closing tracker')
                sg.popup(real_present_popup, no_titlebar=True)
                return
        if self.benchmark:
            self.load_next_seq()
    def verify_bbox_format(self, bbox):
        """
        Fixes bounding box format according to video type (e.g. benchmark test or video capture)
        :param bbox: (array) list of bounding boxes
        :return: (array) reformatted bounding box
        """
        if self.benchmark:
            return bbox.astype("int")
        else:
            bbox.astype("int")
            return [bbox[1], bbox[0], bbox[3], bbox[2]]
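# Hedged standalone helper (not called by the script): the [x1, y1, w, h] ->
# [x1, y1, x2, y2] conversion that next_frame() applies to MOT benchmark
# detections, isolated here for clarity.
def _xywh_to_xyxy(detections):
    detections = np.array(detections, dtype=float)
    detections[:, 2:4] += detections[:, 0:2]
    return detections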
def main():
    """ Starts the tracker on source video. Can start multiple instances of SORT in parallel """
    # path_to_video = "http://192.168.1.39:4747/video"
    path_to_video = 0
    SORT(path_to_video)
if __name__ == '__main__':
    main()
 | 
| 
	import json
from sendinblue.client import Client
api = Client('xjTqIa43VmQ0GnyX')
data = api.get_forms()
# data = api.get('scenarios/getForms')
# data = api.get_list(6)
print(json.dumps(data))
# print('\n')
# to retrieve all campaigns of type 'classic' & status 'queued'
# data = { "type":"classic",
# 	"status":"queued",
# 	"page":1,
# 	"page_limit":10
# }
#
# campaigns = api.get_campaigns_v2(data)
 | 
| 
	import inspect
import re
import types
import pymlconf
from . import statuses, static
from .request import Request
from .response import Response
class Application:
    __requestfactory__ = Request
    __responsefactory__ = Response
    builtinsettings = '''
    debug: true
    cookie:
      http_only: false
      secure: false
    '''
    def __init__(self, **context):
        self.routes = {}
        self.events = {}
        self.settings = pymlconf.Root(self.builtinsettings, context=context)
    def _findhandler(self, request):
        patterns = self.routes.get(request.verb)
        if not patterns:
            raise statuses.methodnotallowed()
        for pattern, func, info in patterns:
            match = pattern.match(request.path)
            if not match:
                continue
            arguments = [a for a in match.groups() if a is not None]
            querystrings = {
                k: v for k, v in request.query.items()
                if k in info['kwonly']
            }
            return func, arguments, querystrings
        raise statuses.notfound()
    def __call__(self, environ, startresponse):
        request = self.__requestfactory__(self, environ)
        response = self.__responsefactory__(self, startresponse)
        try:
            handler, arguments, querystrings = self._findhandler(request)
            body = handler(request, response, *arguments, **querystrings)
            if isinstance(body, types.GeneratorType):
                response.firstchunk = next(body)
            response.body = body
        except statuses.HTTPStatus as ex:
            ex.setupresponse(response, stacktrace=self.settings.debug)
        # Setting cookies in response headers, if any
        cookie = request.cookies.output()
        if cookie:
            for line in cookie.split('\r\n'):
                response.headers.add(line)
        return response.start()
    def route(self, pattern='/', verb=None):
        def decorator(f):
            routes = self.routes.setdefault(verb or f.__name__, [])
            info = dict(
                kwonly={
                    k for k, v in inspect.signature(f).parameters.items()
                    if v.kind == inspect.Parameter.KEYWORD_ONLY
                }
            )
            routes.append((re.compile(f'^{pattern}$'), f, info))
        return decorator
    def when(self, f):
        callbacks = self.events.setdefault(f.__name__, set())
        if f not in callbacks:
            callbacks.add(f)
    def hook(self, name, *a, **kw):
        callbacks = self.events.get(name)
        if not callbacks:
            return
        for c in callbacks:
            c(*a, **kw)
    def staticfile(self, pattern, filename):
        return self.route(pattern)(static.file(filename))
    def staticdirectory(self, directory):
        return self.route(r'/(.*)')(static.directory(directory))
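# Minimal usage sketch (illustrative; 'demo_app', 'hello' and 'greeting' are
# made-up names, not part of the module). route() keys handlers by HTTP verb
# (defaulting to the function name), regex groups become positional arguments,
# and keyword-only parameters are filled from the query string by _findhandler().
def demo_app():
    app = Application()
    @app.route(r'/hello/(\w+)', verb='get')
    def hello(request, response, name, *, greeting='hi'):
        return f'{greeting}, {name}'
    return app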
 | 
| 
	# coding=utf-8
"""replace feature tests."""
from pytest_bdd import (
    scenario,
    then,
    when,
)
from openshift.dynamic.exceptions import NotFoundError
@scenario('replace.feature', 'Replace a resource that does not exist')
def test_replace_a_resource_that_does_not_exist():
    """replace a resource that does not exist."""
    pass
@scenario('replace.feature', 'Replace a resource that exists')
def test_replace_a_resource_that_exists():
    """replace a resource that exists."""
    pass
@when('I replace <group_version>.<kind> <name> in <namespace> with <update>')
def replace_resource_in_namespace(context, client, group_version, kind, name, namespace, update, definition_loader):
    """I replace <group_version>.<kind> <name> in <namespace> with <update>."""
    replace = definition_loader(update)
    resource = client.resources.get(api_version=group_version, kind=kind)
    replace['metadata']['resourceVersion'] = resource.get(replace['metadata']['name'], namespace).metadata.resourceVersion
    context['instance'] = resource.replace(body=replace, namespace=namespace)
@when('I try to replace <group_version>.<kind> <name> in <namespace> with <update>')
def attempt_replace_resource_in_namespace(context, client, group_version, kind, name, namespace, update, definition_loader):
    """I try to replace <group_version>.<kind> <name> in <namespace> with <update>."""
    replace = definition_loader(update)
    resource = client.resources.get(api_version=group_version, kind=kind)
    try:
        replace['metadata']['resourceVersion'] = resource.get(replace['metadata']['name'], namespace).metadata.resourceVersion
    except NotFoundError:
        replace['metadata']['resourceVersion'] = "0"
    try:
        context['instance'] = resource.replace(body=replace, namespace=namespace)
    except Exception as e:
        context['exc'] = e
@then('<group_version>.<kind> <name> in <namespace> should match the content of <update>')
def resource_should_match_update(context, client, group_version, kind, name, namespace, update, definition_loader, object_contains):
    """<group_version>.<kind> <name> in <namespace> should match the content of <update>."""
    replace = definition_loader(update)
    resource = client.resources.get(api_version=group_version, kind=kind)
    cluster_instance = resource.get(name=name, namespace=namespace).to_dict()
    assert object_contains(cluster_instance, replace)
 | 
| 
	'''
Image Preprocessing script.
Converts image data to a numpy array, resizing each image to 128x128 pixels
'''
from PIL import Image
import numpy as np
import os
def load_data(root):
    img = []
    label = []
    for image in os.listdir(root):
        im = Image.open(os.path.join(root,image))
        im = im.resize((128,128))
        img.append(np.array(im))
        label.append(image.split(".")[0])
    return np.array(img) , np.array(label) | 
| 
	import disnake, youtube_dl
import src.core.embeds as embeds
import src.core.functions as funcs
from disnake.ext import commands
prefix = funcs.get_prefix()
class Music(commands.Cog):
    def __init__(self, bot: commands.Bot):
        self.bot = bot
    @commands.group(invoke_without_command=True, description="Connect/Leave VC")
    @commands.has_guild_permissions(connect=True)
    async def vc(self, ctx: commands.Context, command: str):
        await ctx.reply("Command not Found!!")
    @commands.group(
        invoke_without_command=True, description="Play, Pause, Resume, Stop Music"
    )
    @commands.has_guild_permissions(connect=True)
    async def music(self, ctx: commands.Context, command: str):
        await ctx.reply("Command not Found!!")
    @vc.command(
        description="Joins the VC you are currently in", aliases=["connect", "c"]
    )
    @commands.has_guild_permissions(connect=True)
    async def join(self, ctx: commands.Context):
        if ctx.author.voice is None:
            await ctx.reply("You are not Connected to a Voice Channel!!")
            return
        if ctx.voice_client is None:
            voice_channel = ctx.author.voice.channel
            try:
                await voice_channel.connect()
                await ctx.reply("Connected!!")
            except disnake.HTTPException:
                await ctx.reply("Can't Connect to this Voice Channel!!")
        else:
            await ctx.reply("I am already in a Voice Channel!!")
    @vc.command(description="Leaves VC", aliases=["disconnect", "dc"])
    @commands.has_guild_permissions(connect=True)
    async def leave(self, ctx: commands.Context):
        if ctx.voice_client:
            await ctx.reply("Disconnected!!")
            await ctx.voice_client.disconnect()
        else:
            await ctx.reply("I am not Connected to any Voice Channel!!")
    @music.command(description="Plays the Music")
    @commands.has_guild_permissions(connect=True)
    async def play(self, ctx: commands.Context, *, music_name: str):
        vc = ctx.voice_client
        if vc:
            FFMPEG_OPTIONS = {
                "before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
                "options": "-vn",
            }
            YDL_OPTIONS = {"format": "bestaudio"}
            with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
                info = {}
                url = ""
                if music_name.startswith("https://"):
                    info = ydl.extract_info(music_name, download=False)
                    url = info["formats"][0]["url"]
                else:
                    info_ = ydl.extract_info(f"ytsearch:{music_name}", download=False)
                    url_ = info_["entries"][0]["webpage_url"]
                    info = ydl.extract_info(url_, download=False)
                    url = info["formats"][0]["url"]
                if info:
                    await ctx.reply(embed=embeds.music_playing_embed(info))
                source = disnake.FFmpegPCMAudio(url, **FFMPEG_OPTIONS)
                vc.play(source)
        else:
            await ctx.reply("I am not Connected to any Voice Channel!!")
    @music.command(description="Pauses the Music")
    @commands.has_guild_permissions(connect=True)
    async def pause(self, ctx: commands.Context):
        vc = ctx.voice_client
        if vc:
            if ctx.voice_client.is_playing():
                await ctx.reply("Song Paused!!")
                ctx.voice_client.pause()
            else:
                await ctx.reply("No Song is Playing!!")
        else:
            await ctx.reply("I am not Connected to any Voice Channel!!")
    @music.command(description="Resumes the Music")
    @commands.has_guild_permissions(connect=True)
    async def resume(self, ctx: commands.Context):
        vc = ctx.voice_client
        if vc:
            if ctx.voice_client.is_paused():
                await ctx.reply("Song Resumed!!")
                ctx.voice_client.resume()
            else:
                await ctx.reply("No Song is Paused!!")
        else:
            await ctx.reply(" I am not Connected to any Voice Channel!!")
    @music.command(description="Adjusts the Volume as per given amount")
    @commands.has_guild_permissions(connect=True)
    async def volume(self, ctx: commands.Context, volume: int):
        vc = ctx.voice_client
        if vc:
            if 0 <= volume <= 100:
                volume = volume / 100
                vc.source = disnake.PCMVolumeTransformer(original=vc.source, volume=1.0)
                vc.source.volume = volume
                await ctx.reply(f"Changed volume to {volume * 100}%")
            else:
                await ctx.reply("Volume must be between 0 to 100 (Inclusive)")
        else:
            await ctx.reply("I am not Connected to any Voice Channel!!")
    @music.command(description="Stops the Music")
    @commands.has_guild_permissions(connect=True)
    async def stop(self, ctx: commands.Context):
        vc = ctx.voice_client
        if vc:
            if ctx.voice_client.is_playing() or ctx.voice_client.is_paused():
                await ctx.reply("Song Stopped!!")
                ctx.voice_client.stop()
            else:
                await ctx.reply("No Song is Playing")
        else:
            await ctx.reply("I am not Connected to any Voice Channel!!")
def setup(bot: commands.Bot):
    bot.add_cog(Music(bot))
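# Hedged usage note (illustrative; the extension path and token are
# placeholders, not part of this cog):
#
#     bot = commands.Bot(command_prefix=prefix)
#     bot.load_extension("cogs.music")  # adjust to your project layout
#     bot.run("YOUR_BOT_TOKEN")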
 | 
| 
	import sys
sys.path.append('.')
sys.path.append('..')
sys.path.append('../../')
sys.path.append('../../../')
import numpy
import os
import pandas
from sklearn.model_selection import train_test_split
from environment import DATA_DIR
CAPTIONS_FILE = "text_descriptions.csv"
SCORES_FILE = "scores_v2.csv"
DEV_CAPTIONS_FILE = "dev_text_descriptions.csv"
DEV_SCORES_FILE = "dev_scores.csv"
TEST_CAPTIONS_FILE = "test_text_descriptions.csv"
def get_score(data, short=True):
	"""
		Retrieve a list of memorability scores.
	Args:
		data: pandas.DataFrame with score fields
		short: bool, whether get long or short-term scores
	Return:
		a pandas.DataFrame with 'video_id' and 'score' columns
	"""
	if short:
		data = data[['video_id', 'part_1_scores']]
		return data.rename(columns={'part_1_scores': "score"})
	data = data[['video_id', 'part_2_scores']]
	return data.rename(columns={'part_2_scores': "score"})
def bow(data):
	"""
		Aggregate words within all captions for a given video ID in
		a Bag-Of-Words way.
	Args:
		data: pandas.DataFrame of videoID, url and descriptions
	Return:
		a pandas.DataFrame with 1 BOW sentence per video ID
	"""
	bow_df = []
	ids = sorted(list(set(data['video_id'])))
	for i in ids:
		subset_df = data.loc[data['video_id'] == i]
		caption = ' '.join(subset_df['description'].values)
		video_id = subset_df['video_id'].iloc[0]
		video_url = subset_df['video_url'].iloc[0]
		bow_df.append(
			(video_id, video_url, caption.lower()))
	bow_df = pandas.DataFrame(bow_df, 
		columns=['video_id', 'video_url', 'description'])
	return bow_df
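# Hedged standalone sketch (toy data, never called by the script) showing what
# bow() produces: one lower-cased bag-of-words caption per video_id.
def _bow_example():
	frame = pandas.DataFrame({
		'video_id': [1, 1, 2],
		'video_url': ['u1', 'u1', 'u2'],
		'description': ['A dog runs', 'Dog in park', 'A red car']})
	return bow(frame)  # two rows: "a dog runs dog in park" and "a red car"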
def save_folds(folds, to_dir):
	"""
		Save prepared lists of balanced folds datasets
	Args:
		folds: list of samples, labels and numbering
		to_dir: str, output directory
	"""
	os.makedirs(to_dir, exist_ok=True)
	for f, fold in enumerate(folds):
		train = []
		val = []
		[train.extend(i_fold) for i_fold in folds[:-1]]
		val.extend(folds[-1])
		folds += [folds.pop(0)]
		filename = 'training-fold_{:d}_outof_{:d}.txt'.format(
			int(f + 1), len(folds))
		write_samples(os.path.join(to_dir, filename), train)
		filename = filename.replace('training', 'validation')
		write_samples(os.path.join(to_dir, filename), val)
def write_samples(file, samples):
	with open(file, 'w') as f:
			for s in samples:
				s = '\t'.join(s)
				f.write(s + '\n')
def read_samples_file(captions_dir):
	"""
		Retrieve the samples that are split across 2 files
	"""
	captions = pandas.read_csv(os.path.join(
		captions_dir, CAPTIONS_FILE))
	captions = pandas.concat([captions, pandas.read_csv(os.path.join(
		captions_dir, DEV_CAPTIONS_FILE))], ignore_index=True)
	scores = pandas.read_csv(os.path.join(
		captions_dir, SCORES_FILE))
	scores = pandas.concat([scores, pandas.read_csv(os.path.join(
		captions_dir, DEV_SCORES_FILE))], ignore_index=True)
	return captions, scores
def _main_():
	captions_dir = sys.argv[1]
	kfolds = int(sys.argv[2])
	captions, scores = read_samples_file(captions_dir)
	captions = bow(captions)
	test_captions = pandas.read_csv(os.path.join(
		captions_dir, TEST_CAPTIONS_FILE))
	test_captions = bow(test_captions)
	for t, term in enumerate(["long", "short"]):
		df_scores = get_score(scores, short=bool(t))
		df = pandas.merge(captions, df_scores, on=['video_id'])
		
		if kfolds > 0:
			folds = [[] for f in range(kfolds)]
			for row, sample in df.iterrows():
				fold = row % kfolds
				counter = '{:05d}'.format(row)
				folds[fold].append(
					(counter, str(sample['score']), 
						sample['description'], 
						str(sample['video_id']) + '.mp4'))
			save_folds(folds=folds, 
				to_dir=os.path.join(DATA_DIR, "MEMTEXT-" + term.upper()))
		else:
			train, val = train_test_split(df, test_size=0.3,
				random_state=1234)
			ds = []
			count = 0
			for row, sample in train.iterrows():
				counter = '{:05d}'.format(count)
				ds.append((counter, str(sample['score']), 
					sample["description"], 
					str(sample['video_id']) + '.mp4'))
				count += 1
			write_samples(os.path.join(
				DATA_DIR, 'MEMTEXT-' + term.upper(), 'training.txt'), ds)
			
			ds = []
			for row, sample in val.iterrows():
				counter = '{:05d}'.format(count)
				ds.append((counter, str(sample['score']), 
					sample["description"], 
					str(sample['video_id']) + '.mp4'))
				count += 1
			write_samples(os.path.join(
				DATA_DIR, 'MEMTEXT-' + term.upper(), 'validation.txt'), ds)
			
			ds = []
			for row, sample in test_captions.iterrows():
				counter = '{:05d}'.format(count)
				ds.append((counter, str(0.0), 
					sample["description"], 
					str(sample['video_id']) + '.mp4'))
				count += 1
			write_samples(os.path.join(
				DATA_DIR, 'MEMTEXT-' + term.upper(), 'test.txt'), ds)
if __name__ == "__main__":
	_main_() | 
| 
	from flask import jsonify, request
from web.models import Pastebin
from web.api import bp
@bp.route("/pastebins/<int:id>", methods=["GET"])
def get_pastebin(id):
    """
    Returns pastebin with given id
    """
    return jsonify(Pastebin.query.get_or_404(id).to_dict())
    
@bp.route("/pastebins", methods=["GET"])
def get_pastebins():
    """
    Returns 50 pastebins per page
    @param page
    """
    page = request.args.get("page")
    if not page:
        start = 0
        limit = 50
    else:
        start = 50 * int(page)
        limit = 50 + 50 * int(page)
    return jsonify([pastebin.to_dict() for pastebin in Pastebin.query.all()[start:limit]])
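# Hedged helper sketch (not used by the routes): mirrors the start/limit slice
# computed in get_pastebins above, e.g. page=2 -> items [100:150].
def _page_bounds(page=None, per_page=50):
    if not page:
        return 0, per_page
    return per_page * int(page), per_page * (int(page) + 1)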
 | 
| 
	from pandas.core.dtypes.dtypes import register_extension_dtype
from .base import NumpyExtensionArray, NumpyFloat32ExtensionDtype
@register_extension_dtype
class StandardDeviationDtype(NumpyFloat32ExtensionDtype):
    """Dtype for standard deviation of observables: J, F, D or other"""
    name = 'Stddev'
    mtztype = "Q"
    
    @classmethod
    def construct_array_type(cls):
        return StandardDeviationArray
class StandardDeviationArray(NumpyExtensionArray):
    """ExtensionArray for supporting StandardDeviationDtype"""    
    _dtype = StandardDeviationDtype()
    pass
@register_extension_dtype
class StandardDeviationFriedelSFDtype(NumpyFloat32ExtensionDtype):
    """Dtype for standard deviation of F(+) or F(-)"""
    name = 'StddevFriedelSF'
    mtztype = "L"
    @classmethod
    def construct_array_type(cls):
        return StandardDeviationFriedelSFArray
class StandardDeviationFriedelSFArray(NumpyExtensionArray):
    """ExtensionArray for supporting StandardDeviationFriedelSFDtype"""
    _dtype = StandardDeviationFriedelSFDtype()
    pass
@register_extension_dtype
class StandardDeviationFriedelIDtype(NumpyFloat32ExtensionDtype):
    """Dtype for standard deviation of I(+) or I(-)"""
    name = 'StddevFriedelI'
    mtztype = "M"
    @classmethod
    def construct_array_type(cls):
        return StandardDeviationFriedelIArray
class StandardDeviationFriedelIArray(NumpyExtensionArray):
    """ExtensionArray for supporting StandardDeviationFriedelIDtype"""
    _dtype = StandardDeviationFriedelIDtype()
    pass
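# Hedged usage note (assumes pandas resolves the registered dtype names through
# its extension registry, which register_extension_dtype provides):
#
#     import pandas as pd
#     sigmas = pd.Series([0.8, 1.2, 0.5], dtype="float32").astype("Stddev")
#     print(sigmas.dtype)  # Stddev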
 | 
| 
	from abc import ABCMeta, abstractmethod
from enum import Enum
class LoadableTypeEnum(Enum):
    Achievement = "Achievement"
    Character = "Character"
    Guild = "Guild"
    GuildUpgrade = "GuildUpgrade"
    Item = "Item"
    ItemStat = "ItemStat"
    Mastery = "Mastery"
    MiniPet = "MiniPet"
    Skill = "Skill"
    Skin = "Skin"
    Title = "Title"
class LoadableObjectVisitorBase(object, metaclass=ABCMeta):
    @abstractmethod
    def collect(self, obj):
        raise NotImplementedError()
class LoadableObjectBase(object, metaclass=ABCMeta):
    """Base class for all GW2 API objects that can be retreived from API"""
    @abstractmethod
    def _populate_inner(self, json):
        raise NotImplementedError()
    def populate(self, json):
        self._populate_inner(json)
class LoadableObjectContainer():
    """Container for GW2 API object pending loading"""
    _visitors = []
    def register_visitor(visitor: LoadableObjectVisitorBase):
        LoadableObjectContainer._visitors.append(visitor)
    def __init__(self, id, type: LoadableTypeEnum):
        self.id = id
        self.loadable_type = type
        self.object = None
        for visitor in LoadableObjectContainer._visitors:
            visitor.collect(self)
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, value):
        self._id = value
    @property
    def object(self):
        return self._object
    @object.setter
    def object(self, value: LoadableObjectBase):
        self._object = value
    @property
    def has_loaded(self):
        return self.object is not None
class LoadableObjectVisitorBase(object, metaclass=ABCMeta):
    @abstractmethod
    def collect(self, obj: LoadableObjectContainer):
        raise NotImplementedError()
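# Hedged usage sketch (illustrative names): a visitor that records every
# container created; registration and construction are shown as comments to
# avoid side effects at import time.
class _CollectingVisitor(LoadableObjectVisitorBase):
    def __init__(self):
        self.pending = []
    def collect(self, obj: LoadableObjectContainer):
        self.pending.append(obj)
# LoadableObjectContainer.register_visitor(_CollectingVisitor())
# LoadableObjectContainer(1234, LoadableTypeEnum.Item)  # collected by the visitor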
 | 
| 
	# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from .. import cntk_py
def map_if_possible(obj):
    from cntk.ops.variables import Variable, Parameter, Constant
    from cntk.ops.functions import Function
    from cntk.learner import Learner
    from cntk.io import MinibatchSource, MinibatchData, StreamConfiguration
    from cntk.axis import Axis
    typemap = { 
            cntk_py.Variable: Variable,
            cntk_py.Parameter: Parameter,
            cntk_py.Constant: Constant,
            cntk_py.Function: Function, 
            cntk_py.Learner: Learner, 
            cntk_py.MinibatchSource: MinibatchSource,
            cntk_py.MinibatchData: MinibatchData,
            cntk_py.StreamConfiguration: StreamConfiguration, 
            cntk_py.Axis: Axis,
            }
    # Some types, like NumPy arrays, don't allow setting __class__
    if obj.__class__ in typemap:
        obj.__class__ = typemap[obj.__class__]
            
def typemap(f):
    '''
    Upcasts Swig types to cntk types that inherit from Swig.
    '''
    from functools import wraps
    @wraps(f)
    def wrapper(*args, **kwds):
        result = f(*args, **kwds)
        if isinstance(result, (tuple, list, set)):
            for r in result:
                map_if_possible(r)
        elif isinstance(result, dict):
            for k,v in result.items():
                map_if_possible(k)
                map_if_possible(v)
        else:
            try:
                map_if_possible(result)
            except TypeError:
                pass
        return result
    return wrapper
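# Usage sketch (the wrapped call below is hypothetical; only the decorator itself is
# defined in this module): decorate any helper that returns raw cntk_py/Swig objects so
# callers receive the Python-side classes instead.
#
#     @typemap
#     def _load_function(path):
#         return cntk_py.Function.load_model(path)   # hypothetical cntk_py call
#
# Results that cannot be remapped (ints, NumPy arrays, ...) are returned unchanged.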
 | 
| 
	"""Simple implementation of double linked lists with sentinels. """
class Node:
    """Implemetation of a list node. """
    def __init__(self, data=None, next=None, prev=None):
        """Init node object.
        Parameters:
            data (...): Data that is stored in this node
            next (Node): Reference to the next node in the list
            prev (Node): Reference to the previous node in the list
        Returns:
            None
        Raises:
            None
        """
        self._data = data
        self._next = next
        self._prev = prev
    def __str__(self):
        """Get string representation of the data from a node.
        Parameters:
            None
        Returns:
            [...] (...): String representation of the data that is stored in this node object
        Raises:
            None
        """
        return str(self._data)
    def get_data(self):
        """Get data from a node object.
        Parameters:
            None
        Returns:
            [...] (...): Data that is stored in this node object
        Raises:
            None
        """
        return self._data
    def set_data(self, data):
        """Set data in a node object.
        Parameters:
            data (...): Data that will be stored in this node
        Returns:
            None
        Raises:
            None
        """
        self._data = data
    def get_next(self):
        """Get the next node object after this one.
        Parameters:
            None
        Returns:
            [...] (Node): Next node object after this one
        Raises:
            None
        """
        return self._next
    def get_prev(self):
        """Get the previous node object before this one.
        Parameters:
            None
        Returns:
            [...] (Node): Previous node object before this one
        Raises:
            None
        """
        return self._prev
    def set_next(self, next):
        """Set the reference to the next node object after this one.
        Parameters:
            next (Node): Reference to the next node in the list
        Returns:
            None
        Raises:
            None
        """
        self._next = next
    def set_prev(self, prev):
        """Set the reference to the previous node object before this one.
        Parameters:
            prev (Node): Reference to the previous node in the list
        Returns:
            None
        Raises:
            None
        """
        self._prev = prev
class DoubleLinkedList(object):
    """Implemetation of a double linked list. """
    def __init__(self):
        """Init list object.
        Parameters:
            None
        Returns:
            None
        Raises:
            None
        """
        self._head = Node()
        self._tail = Node()
        self._head.set_next(self._tail)
        self._tail.set_prev(self._head)
        self._size = 0
    def __len__(self):
        """Get length of the list object.
        Parameters:
            None
        Returns:
            [...] (int): Number of data nodes in the list object
        Raises:
            None
        """
        return self._size
    def __str__(self):
        """Get string representation of the list object.
        Parameters:
            None
        Returns:
            output (str): String representation of the list object
        Raises:
            None
        """
        current_node = self._head
        output = ""
        while current_node:
            output += str(current_node)
            if current_node.get_next():
                output += " -> "
            current_node = current_node.get_next()
        return output
    def _increase_size(self):
        """Increase size attribute of the list object by one.
        Parameters:
            None
        Returns:
            None
        Raises:
            None
        """
        self._size += 1
    def _decrease_size(self):
        """Decrease size attribute of the list object by one.
        Parameters:
            None
        Returns:
            None
        Raises:
            None
        """
        if self._size > 0:
            self._size -= 1
    def size(self):
        """Returns number of node objects in the list.
        Parameters:
            None
        Returns:
            [...] (int): Number of items in the array
        Raises:
            None
        """
        return self._size
    def is_empty(self):
        """Returns True if the list is empty.
        Parameters:
            None
        Returns:
            [...] (bool): Indicator if array is empty
        Raises:
            None
        """
        return self._size == 0
    def set_head(self, node):
        """Set the head attribute to a node reference.
        Parameters:
            node (Node): Reference to a node object
        Returns:
            None
        Raises:
            None
        """
        self._head = node
    def set_tail(self, node):
        """Set the tail attribute to a node reference.
        Parameters:
            node (Node): Reference to a node object
        Returns:
            None
        Raises:
            None
        """
        self._tail = node
    def insert_at(self, index, data):
        """Insert a node containing the data into the list at the index.
        Parameters:
            index (int): Index to add the node at
            data (...): Data to add to the node
        Returns:
            None
        Raises:
            IndexError: If the index is out of range
        """
        if 0 > index or index > self._size:
            return IndexError('index out of range!')
        current_node = self._head.get_next()
        current_node_idx = 0
        while current_node:
            if index == current_node_idx:
                node = Node(data, current_node, current_node.get_prev())
                current_node.get_prev().set_next(node)
                current_node.set_prev(node)
            current_node = current_node.get_next()
            current_node_idx += 1
        self._increase_size()
    def append(self, data):
        """Append a node containing the data at the end of the list.
        Parameters:
            data (...): Data to add to the node
        Returns:
            None
        Raises:
            None
        """
        node = Node(data, self._tail, self._tail.get_prev())
        self._tail.get_prev().set_next(node)
        self._tail.set_prev(node)
        self._increase_size()
    def prepend(self, data):
        """Prepend a node containing the data in the front of the list.
        Parameters:
            data (...): Data to add to the node
        Returns:
            None
        Raises:
            None
        """
        node = Node(data, self._head.get_next(), self._head)
        self._head.get_next().set_prev(node)
        self._head.set_next(node)
        self._increase_size()
    def delete_at(self, index):
        """Delete a node in the list at the index.
        Parameters:
            index (int): Index to delete the node at
        Returns:
            None
        Raises:
            IndexError: If the index is out of range
        """
        if 0 > index or index > self._size - 1:
            return IndexError('index out of range!')
        current_node = self._head.get_next()
        current_node_idx = 0
        while current_node:
            if index == current_node_idx:
                current_node.get_prev().set_next(current_node.get_next())
                current_node.get_next().set_prev(current_node.get_prev())
            current_node = current_node.get_next()
            current_node_idx += 1
        self._decrease_size()
    def pop_front(self):
        """Remove front node and return its data.
        Parameters:
            None
        Returns:
            pop_data (...): Data from the first node
        Raises:
            IndexError: If the list is empty
        """
        if self._size > 0:
            pop_data = self._head.get_next().get_data()
            self._head.set_next(self._head.get_next().get_next())
            self._head.get_next().set_prev(self._head)
            self._decrease_size()
            return pop_data
        else:
            return IndexError("can not pop from empty list!")
    def pop_back(self):
        """Remove last node and return its data.
        Parameters:
            None
        Returns:
            pop_data (...): Data from the last node
        Raises:
            IndexError: If the list is empty
        """
        if self._size > 0:
            pop_data = self._tail.get_prev().get_data()
            self._tail.set_prev(self._tail.get_prev().get_prev())
            self._tail.get_prev().set_next(self._tail)
            self._decrease_size()
            return pop_data
        else:
            return IndexError("can not pop from empty list!")
    def data_at(self, index):
        """Return data of the node at the index.
        Parameters:
            None
        Returns:
            data (...): Data from the node at the index
        Raises:
            IndexError: If the index is out of range
        """
        if 0 > index or index > self._size - 1:
            return IndexError('index out of range!')
        current_node = self._head.get_next()
        current_node_idx = 0
        while current_node_idx != index:
            current_node = current_node.get_next()
            current_node_idx += 1
        data = current_node.get_data()
        return data
    def find(self, data):
        """Search and return the index of the next node with data equal to the input.
        Parameters:
            data (...): Data to find in the list
        Returns:
            found_node_idx (int): Index of the node that contains the same data as the input
        Raises:
            None
        """
        current_node = self._head.get_next()
        current_node_idx = 0
        found_node = None
        found_node_idx = None
        while current_node and not found_node:
            if current_node.get_data() == data:
                found_node = current_node
                found_node_idx = current_node_idx
            else:
                current_node = current_node.get_next()
                current_node_idx += 1
        return found_node_idx
    def contains(self, data):
        """Returns True if the list contains the data.
        Parameters:
            data (...): Data to find in the list
        Returns:
            [...] (bool): Indicator if input data was found
        Raises:
            None
        """
        return self.find(data) is not None
    def remove_first(self, data):
        """Remove the first node with data equal to the input.
        Parameters:
            data (...): Data to remove in the list
        Returns:
            None
        Raises:
            None
        """
        index = self.find(data)
        if index is not None:
            self.delete_at(index)
    def remove_all(self, data):
        """Remove all nodes with data equal to the input.
        Parameters:
            data (...): Data to remove in the list
        Returns:
            None
        Raises:
            None
        """
        index = self.find(data)
        while index is not None:
            self.delete_at(index)
            index = self.find(data)
    def reverse(self):
        """Reverse the order of nodes in the list.
        Parameters:
            None
        Returns:
            None
        Raises:
            None
        """
        current_node = self._head
        while current_node is not None:
            next_node = current_node.get_next()
            current_node.set_next(current_node.get_prev())
            current_node.set_prev(next_node)
            current_node = current_node.get_prev()
        head_node = self._head
        self._head = self._tail
        self._tail = head_node
def main():
    print("Init single linked list.")
    dll = DoubleLinkedList()
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Fill double linked list.")
    dll.insert_at(0, 'first_item')
    dll.insert_at(0, 'second_item')
    dll.insert_at(2, 'third_item')
    dll.append('appended_item')
    dll.append('another_appended_item')
    dll.append('another_appended_item')
    dll.prepend('prepended_item')
    dll.prepend('another_prepended_item')
    dll.prepend('another_prepended_item')
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Show data from the list via using index.")
    print("First entry: ", dll.data_at(0))
    print("Third entry: ", dll.data_at(2))
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Find data in the list.")
    print("Find 'prepended_item': ", dll.find('prepended_item'))
    print("Find 'prepended_item_2': ", dll.find('prepended_item_2'))
    print("Contains 'second_item': ", dll.contains('second_item'))
    print("Contains 'second_item_2': ", dll.contains('second_item_2'))
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Remove data from the list.")
    print("Remove the first 'another_appended_item': ",
          dll.remove_first('another_appended_item'))
    print("Remove all 'another_prepended_item': ",
          dll.remove_all('another_prepended_item'))
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Delete data from the list using the index.")
    dll.delete_at(0)
    dll.delete_at(2)
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Pop data from the list.")
    print("Pop front: ", dll.pop_front())
    print("Pop_back: ", dll.pop_back())
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Check 'out of range' insertion and deletion.")
    print(dll.insert_at(5, 'test'))
    print(DoubleLinkedList().delete_at(0))
    print(DoubleLinkedList().pop_back())
    print(DoubleLinkedList().pop_front(), "\n")
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
    print("Add a few items and reverse the list.")
    dll.append('added_back')
    dll.append('added_back_2')
    dll.prepend('added_front')
    dll.prepend('added_front_2')
    print("List content: ", dll)
    dll.reverse()
    print("List content: ", dll)
    print("Size: ", dll.size(), "\n")
if __name__ == "__main__":
    main()
 | 
| 
	import dataclasses
from docarray import DocumentArray
from jina import Executor, Flow, requests
def test_executor_dataclass():
    @dataclasses.dataclass
    class MyDataClassExecutor(Executor):
        my_field: str
        @requests(on=['/search'])
        def baz(self, docs, **kwargs):
            for doc in docs:
                doc.tags['metas_name'] = self.metas.name
                doc.tags['my_field'] = self.my_field
    f = Flow().add(
        uses=MyDataClassExecutor,
        uses_with={'my_field': 'this is my field'},
        uses_metas={'name': 'test-name-updated'},
        uses_requests={'/foo': 'baz'},
    )
    with f:
        res = f.post(on='/foo', inputs=DocumentArray.empty(2))
    assert len(res) == 2
    for r in res:
        assert r.tags['metas_name'] == 'test-name-updated'
        assert r.tags['my_field'] == 'this is my field'
 | 
| 
	# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .pool_usage_metrics import PoolUsageMetrics
from .image_reference import ImageReference
from .node_agent_sku import NodeAgentSku
from .authentication_token_settings import AuthenticationTokenSettings
from .usage_statistics import UsageStatistics
from .resource_statistics import ResourceStatistics
from .pool_statistics import PoolStatistics
from .job_statistics import JobStatistics
from .name_value_pair import NameValuePair
from .delete_certificate_error import DeleteCertificateError
from .certificate import Certificate
from .application_package_reference import ApplicationPackageReference
from .application_summary import ApplicationSummary
from .certificate_add_parameter import CertificateAddParameter
from .file_properties import FileProperties
from .node_file import NodeFile
from .schedule import Schedule
from .job_constraints import JobConstraints
from .resource_file import ResourceFile
from .environment_setting import EnvironmentSetting
from .exit_options import ExitOptions
from .exit_code_mapping import ExitCodeMapping
from .exit_code_range_mapping import ExitCodeRangeMapping
from .exit_conditions import ExitConditions
from .auto_user_specification import AutoUserSpecification
from .user_identity import UserIdentity
from .user_account import UserAccount
from .task_constraints import TaskConstraints
from .job_manager_task import JobManagerTask
from .job_preparation_task import JobPreparationTask
from .job_release_task import JobReleaseTask
from .task_scheduling_policy import TaskSchedulingPolicy
from .start_task import StartTask
from .certificate_reference import CertificateReference
from .metadata_item import MetadataItem
from .cloud_service_configuration import CloudServiceConfiguration
from .os_disk import OSDisk
from .windows_configuration import WindowsConfiguration
from .virtual_machine_configuration import VirtualMachineConfiguration
from .network_configuration import NetworkConfiguration
from .pool_specification import PoolSpecification
from .auto_pool_specification import AutoPoolSpecification
from .pool_information import PoolInformation
from .job_specification import JobSpecification
from .recent_job import RecentJob
from .job_schedule_execution_information import JobScheduleExecutionInformation
from .job_schedule_statistics import JobScheduleStatistics
from .cloud_job_schedule import CloudJobSchedule
from .job_schedule_add_parameter import JobScheduleAddParameter
from .job_scheduling_error import JobSchedulingError
from .job_execution_information import JobExecutionInformation
from .cloud_job import CloudJob
from .job_add_parameter import JobAddParameter
from .task_scheduling_error import TaskSchedulingError
from .job_preparation_task_execution_information import JobPreparationTaskExecutionInformation
from .job_release_task_execution_information import JobReleaseTaskExecutionInformation
from .job_preparation_and_release_task_execution_information import JobPreparationAndReleaseTaskExecutionInformation
from .auto_scale_run_error import AutoScaleRunError
from .auto_scale_run import AutoScaleRun
from .resize_error import ResizeError
from .cloud_pool import CloudPool
from .pool_add_parameter import PoolAddParameter
from .affinity_information import AffinityInformation
from .task_execution_information import TaskExecutionInformation
from .compute_node_information import ComputeNodeInformation
from .multi_instance_settings import MultiInstanceSettings
from .task_statistics import TaskStatistics
from .task_id_range import TaskIdRange
from .task_dependencies import TaskDependencies
from .cloud_task import CloudTask
from .task_add_parameter import TaskAddParameter
from .task_add_collection_parameter import TaskAddCollectionParameter
from .error_message import ErrorMessage
from .batch_error_detail import BatchErrorDetail
from .batch_error import BatchError, BatchErrorException
from .task_add_result import TaskAddResult
from .task_add_collection_result import TaskAddCollectionResult
from .subtask_information import SubtaskInformation
from .cloud_task_list_subtasks_result import CloudTaskListSubtasksResult
from .task_information import TaskInformation
from .start_task_information import StartTaskInformation
from .compute_node_error import ComputeNodeError
from .compute_node import ComputeNode
from .compute_node_user import ComputeNodeUser
from .compute_node_get_remote_login_settings_result import ComputeNodeGetRemoteLoginSettingsResult
from .job_schedule_patch_parameter import JobSchedulePatchParameter
from .job_schedule_update_parameter import JobScheduleUpdateParameter
from .job_disable_parameter import JobDisableParameter
from .job_terminate_parameter import JobTerminateParameter
from .job_patch_parameter import JobPatchParameter
from .job_update_parameter import JobUpdateParameter
from .pool_enable_auto_scale_parameter import PoolEnableAutoScaleParameter
from .pool_evaluate_auto_scale_parameter import PoolEvaluateAutoScaleParameter
from .pool_resize_parameter import PoolResizeParameter
from .pool_update_properties_parameter import PoolUpdatePropertiesParameter
from .pool_upgrade_os_parameter import PoolUpgradeOSParameter
from .pool_patch_parameter import PoolPatchParameter
from .task_update_parameter import TaskUpdateParameter
from .node_update_user_parameter import NodeUpdateUserParameter
from .node_reboot_parameter import NodeRebootParameter
from .node_reimage_parameter import NodeReimageParameter
from .node_disable_scheduling_parameter import NodeDisableSchedulingParameter
from .node_remove_parameter import NodeRemoveParameter
from .application_list_options import ApplicationListOptions
from .application_get_options import ApplicationGetOptions
from .pool_list_usage_metrics_options import PoolListUsageMetricsOptions
from .pool_get_all_lifetime_statistics_options import PoolGetAllLifetimeStatisticsOptions
from .pool_add_options import PoolAddOptions
from .pool_list_options import PoolListOptions
from .pool_delete_options import PoolDeleteOptions
from .pool_exists_options import PoolExistsOptions
from .pool_get_options import PoolGetOptions
from .pool_patch_options import PoolPatchOptions
from .pool_disable_auto_scale_options import PoolDisableAutoScaleOptions
from .pool_enable_auto_scale_options import PoolEnableAutoScaleOptions
from .pool_evaluate_auto_scale_options import PoolEvaluateAutoScaleOptions
from .pool_resize_options import PoolResizeOptions
from .pool_stop_resize_options import PoolStopResizeOptions
from .pool_update_properties_options import PoolUpdatePropertiesOptions
from .pool_upgrade_os_options import PoolUpgradeOsOptions
from .pool_remove_nodes_options import PoolRemoveNodesOptions
from .account_list_node_agent_skus_options import AccountListNodeAgentSkusOptions
from .job_get_all_lifetime_statistics_options import JobGetAllLifetimeStatisticsOptions
from .job_delete_options import JobDeleteOptions
from .job_get_options import JobGetOptions
from .job_patch_options import JobPatchOptions
from .job_update_options import JobUpdateOptions
from .job_disable_options import JobDisableOptions
from .job_enable_options import JobEnableOptions
from .job_terminate_options import JobTerminateOptions
from .job_add_options import JobAddOptions
from .job_list_options import JobListOptions
from .job_list_from_job_schedule_options import JobListFromJobScheduleOptions
from .job_list_preparation_and_release_task_status_options import JobListPreparationAndReleaseTaskStatusOptions
from .certificate_add_options import CertificateAddOptions
from .certificate_list_options import CertificateListOptions
from .certificate_cancel_deletion_options import CertificateCancelDeletionOptions
from .certificate_delete_options import CertificateDeleteOptions
from .certificate_get_options import CertificateGetOptions
from .file_delete_from_task_options import FileDeleteFromTaskOptions
from .file_get_from_task_options import FileGetFromTaskOptions
from .file_get_properties_from_task_options import FileGetPropertiesFromTaskOptions
from .file_delete_from_compute_node_options import FileDeleteFromComputeNodeOptions
from .file_get_from_compute_node_options import FileGetFromComputeNodeOptions
from .file_get_properties_from_compute_node_options import FileGetPropertiesFromComputeNodeOptions
from .file_list_from_task_options import FileListFromTaskOptions
from .file_list_from_compute_node_options import FileListFromComputeNodeOptions
from .job_schedule_exists_options import JobScheduleExistsOptions
from .job_schedule_delete_options import JobScheduleDeleteOptions
from .job_schedule_get_options import JobScheduleGetOptions
from .job_schedule_patch_options import JobSchedulePatchOptions
from .job_schedule_update_options import JobScheduleUpdateOptions
from .job_schedule_disable_options import JobScheduleDisableOptions
from .job_schedule_enable_options import JobScheduleEnableOptions
from .job_schedule_terminate_options import JobScheduleTerminateOptions
from .job_schedule_add_options import JobScheduleAddOptions
from .job_schedule_list_options import JobScheduleListOptions
from .task_add_options import TaskAddOptions
from .task_list_options import TaskListOptions
from .task_add_collection_options import TaskAddCollectionOptions
from .task_delete_options import TaskDeleteOptions
from .task_get_options import TaskGetOptions
from .task_update_options import TaskUpdateOptions
from .task_list_subtasks_options import TaskListSubtasksOptions
from .task_terminate_options import TaskTerminateOptions
from .task_reactivate_options import TaskReactivateOptions
from .compute_node_add_user_options import ComputeNodeAddUserOptions
from .compute_node_delete_user_options import ComputeNodeDeleteUserOptions
from .compute_node_update_user_options import ComputeNodeUpdateUserOptions
from .compute_node_get_options import ComputeNodeGetOptions
from .compute_node_reboot_options import ComputeNodeRebootOptions
from .compute_node_reimage_options import ComputeNodeReimageOptions
from .compute_node_disable_scheduling_options import ComputeNodeDisableSchedulingOptions
from .compute_node_enable_scheduling_options import ComputeNodeEnableSchedulingOptions
from .compute_node_get_remote_login_settings_options import ComputeNodeGetRemoteLoginSettingsOptions
from .compute_node_get_remote_desktop_options import ComputeNodeGetRemoteDesktopOptions
from .compute_node_list_options import ComputeNodeListOptions
from .application_summary_paged import ApplicationSummaryPaged
from .pool_usage_metrics_paged import PoolUsageMetricsPaged
from .cloud_pool_paged import CloudPoolPaged
from .node_agent_sku_paged import NodeAgentSkuPaged
from .cloud_job_paged import CloudJobPaged
from .job_preparation_and_release_task_execution_information_paged import JobPreparationAndReleaseTaskExecutionInformationPaged
from .certificate_paged import CertificatePaged
from .node_file_paged import NodeFilePaged
from .cloud_job_schedule_paged import CloudJobSchedulePaged
from .cloud_task_paged import CloudTaskPaged
from .compute_node_paged import ComputeNodePaged
from .batch_service_client_enums import (
    OSType,
    AccessScope,
    CertificateState,
    CertificateFormat,
    JobAction,
    DependencyAction,
    AutoUserScope,
    ElevationLevel,
    ComputeNodeFillType,
    CertificateStoreLocation,
    CertificateVisibility,
    CachingType,
    PoolLifetimeOption,
    OnAllTasksComplete,
    OnTaskFailure,
    JobScheduleState,
    SchedulingErrorCategory,
    JobState,
    JobPreparationTaskState,
    JobReleaseTaskState,
    PoolState,
    AllocationState,
    TaskState,
    TaskAddStatus,
    SubtaskState,
    StartTaskState,
    ComputeNodeState,
    SchedulingState,
    DisableJobOption,
    ComputeNodeDeallocationOption,
    ComputeNodeRebootOption,
    ComputeNodeReimageOption,
    DisableComputeNodeSchedulingOption,
)
__all__ = [
    'PoolUsageMetrics',
    'ImageReference',
    'NodeAgentSku',
    'AuthenticationTokenSettings',
    'UsageStatistics',
    'ResourceStatistics',
    'PoolStatistics',
    'JobStatistics',
    'NameValuePair',
    'DeleteCertificateError',
    'Certificate',
    'ApplicationPackageReference',
    'ApplicationSummary',
    'CertificateAddParameter',
    'FileProperties',
    'NodeFile',
    'Schedule',
    'JobConstraints',
    'ResourceFile',
    'EnvironmentSetting',
    'ExitOptions',
    'ExitCodeMapping',
    'ExitCodeRangeMapping',
    'ExitConditions',
    'AutoUserSpecification',
    'UserIdentity',
    'UserAccount',
    'TaskConstraints',
    'JobManagerTask',
    'JobPreparationTask',
    'JobReleaseTask',
    'TaskSchedulingPolicy',
    'StartTask',
    'CertificateReference',
    'MetadataItem',
    'CloudServiceConfiguration',
    'OSDisk',
    'WindowsConfiguration',
    'VirtualMachineConfiguration',
    'NetworkConfiguration',
    'PoolSpecification',
    'AutoPoolSpecification',
    'PoolInformation',
    'JobSpecification',
    'RecentJob',
    'JobScheduleExecutionInformation',
    'JobScheduleStatistics',
    'CloudJobSchedule',
    'JobScheduleAddParameter',
    'JobSchedulingError',
    'JobExecutionInformation',
    'CloudJob',
    'JobAddParameter',
    'TaskSchedulingError',
    'JobPreparationTaskExecutionInformation',
    'JobReleaseTaskExecutionInformation',
    'JobPreparationAndReleaseTaskExecutionInformation',
    'AutoScaleRunError',
    'AutoScaleRun',
    'ResizeError',
    'CloudPool',
    'PoolAddParameter',
    'AffinityInformation',
    'TaskExecutionInformation',
    'ComputeNodeInformation',
    'MultiInstanceSettings',
    'TaskStatistics',
    'TaskIdRange',
    'TaskDependencies',
    'CloudTask',
    'TaskAddParameter',
    'TaskAddCollectionParameter',
    'ErrorMessage',
    'BatchErrorDetail',
    'BatchError', 'BatchErrorException',
    'TaskAddResult',
    'TaskAddCollectionResult',
    'SubtaskInformation',
    'CloudTaskListSubtasksResult',
    'TaskInformation',
    'StartTaskInformation',
    'ComputeNodeError',
    'ComputeNode',
    'ComputeNodeUser',
    'ComputeNodeGetRemoteLoginSettingsResult',
    'JobSchedulePatchParameter',
    'JobScheduleUpdateParameter',
    'JobDisableParameter',
    'JobTerminateParameter',
    'JobPatchParameter',
    'JobUpdateParameter',
    'PoolEnableAutoScaleParameter',
    'PoolEvaluateAutoScaleParameter',
    'PoolResizeParameter',
    'PoolUpdatePropertiesParameter',
    'PoolUpgradeOSParameter',
    'PoolPatchParameter',
    'TaskUpdateParameter',
    'NodeUpdateUserParameter',
    'NodeRebootParameter',
    'NodeReimageParameter',
    'NodeDisableSchedulingParameter',
    'NodeRemoveParameter',
    'ApplicationListOptions',
    'ApplicationGetOptions',
    'PoolListUsageMetricsOptions',
    'PoolGetAllLifetimeStatisticsOptions',
    'PoolAddOptions',
    'PoolListOptions',
    'PoolDeleteOptions',
    'PoolExistsOptions',
    'PoolGetOptions',
    'PoolPatchOptions',
    'PoolDisableAutoScaleOptions',
    'PoolEnableAutoScaleOptions',
    'PoolEvaluateAutoScaleOptions',
    'PoolResizeOptions',
    'PoolStopResizeOptions',
    'PoolUpdatePropertiesOptions',
    'PoolUpgradeOsOptions',
    'PoolRemoveNodesOptions',
    'AccountListNodeAgentSkusOptions',
    'JobGetAllLifetimeStatisticsOptions',
    'JobDeleteOptions',
    'JobGetOptions',
    'JobPatchOptions',
    'JobUpdateOptions',
    'JobDisableOptions',
    'JobEnableOptions',
    'JobTerminateOptions',
    'JobAddOptions',
    'JobListOptions',
    'JobListFromJobScheduleOptions',
    'JobListPreparationAndReleaseTaskStatusOptions',
    'CertificateAddOptions',
    'CertificateListOptions',
    'CertificateCancelDeletionOptions',
    'CertificateDeleteOptions',
    'CertificateGetOptions',
    'FileDeleteFromTaskOptions',
    'FileGetFromTaskOptions',
    'FileGetPropertiesFromTaskOptions',
    'FileDeleteFromComputeNodeOptions',
    'FileGetFromComputeNodeOptions',
    'FileGetPropertiesFromComputeNodeOptions',
    'FileListFromTaskOptions',
    'FileListFromComputeNodeOptions',
    'JobScheduleExistsOptions',
    'JobScheduleDeleteOptions',
    'JobScheduleGetOptions',
    'JobSchedulePatchOptions',
    'JobScheduleUpdateOptions',
    'JobScheduleDisableOptions',
    'JobScheduleEnableOptions',
    'JobScheduleTerminateOptions',
    'JobScheduleAddOptions',
    'JobScheduleListOptions',
    'TaskAddOptions',
    'TaskListOptions',
    'TaskAddCollectionOptions',
    'TaskDeleteOptions',
    'TaskGetOptions',
    'TaskUpdateOptions',
    'TaskListSubtasksOptions',
    'TaskTerminateOptions',
    'TaskReactivateOptions',
    'ComputeNodeAddUserOptions',
    'ComputeNodeDeleteUserOptions',
    'ComputeNodeUpdateUserOptions',
    'ComputeNodeGetOptions',
    'ComputeNodeRebootOptions',
    'ComputeNodeReimageOptions',
    'ComputeNodeDisableSchedulingOptions',
    'ComputeNodeEnableSchedulingOptions',
    'ComputeNodeGetRemoteLoginSettingsOptions',
    'ComputeNodeGetRemoteDesktopOptions',
    'ComputeNodeListOptions',
    'ApplicationSummaryPaged',
    'PoolUsageMetricsPaged',
    'CloudPoolPaged',
    'NodeAgentSkuPaged',
    'CloudJobPaged',
    'JobPreparationAndReleaseTaskExecutionInformationPaged',
    'CertificatePaged',
    'NodeFilePaged',
    'CloudJobSchedulePaged',
    'CloudTaskPaged',
    'ComputeNodePaged',
    'OSType',
    'AccessScope',
    'CertificateState',
    'CertificateFormat',
    'JobAction',
    'DependencyAction',
    'AutoUserScope',
    'ElevationLevel',
    'ComputeNodeFillType',
    'CertificateStoreLocation',
    'CertificateVisibility',
    'CachingType',
    'PoolLifetimeOption',
    'OnAllTasksComplete',
    'OnTaskFailure',
    'JobScheduleState',
    'SchedulingErrorCategory',
    'JobState',
    'JobPreparationTaskState',
    'JobReleaseTaskState',
    'PoolState',
    'AllocationState',
    'TaskState',
    'TaskAddStatus',
    'SubtaskState',
    'StartTaskState',
    'ComputeNodeState',
    'SchedulingState',
    'DisableJobOption',
    'ComputeNodeDeallocationOption',
    'ComputeNodeRebootOption',
    'ComputeNodeReimageOption',
    'DisableComputeNodeSchedulingOption',
]
 | 
| 
	from setuptools import find_packages, setup
setup(
    name="torch_metrics",
    version="1.1.7",
    description="Metrics for model evaluation in pytorch",
    url="https://github.com/chinokenochkan/torch-metrics",
    author="Chi Nok Enoch Kan @chinokenochkan",
    author_email="[email protected]",
    packages=find_packages(include=["torch_metrics", "torch_metrics.*"]),
)
 | 
| 
	import requests
import sys
def banner():
	
	print (" .-----------------------------.           ")
	print (" |  Hi Hackers                 |           ")
	print ("|  Tool   : f1nd0d3           |")
	print (" |  Author : @karthi_the_hacker|           ")
	print (" |           Jai Hind          |           ")
	print (" '-----------------------------'           ")
	print ("                 ^      (\_/)    ")
	print ("                 '----- (O.o)    ")
	print ("                        (> <)    ")
	print (" ")
def all():
	url = sys.argv[2]
	try:
		r = requests.get(url , allow_redirects=False)
		code = r.status_code
		print(url + "   <---- [ {} ]".format(r.status_code) )
		sys.exit()
	except:
		sys.exit()
def single():
	try:
		url = sys.argv[3]
		inl = sys.argv[2]
		r = requests.get(url , allow_redirects=False)
		if ( int(r.status_code) == int(inl)):
			print( url +"   <---- [ {} ]".format(r.status_code) )
			sys.exit()
	
	except:
		sys.exit()
def helpl():
	banner()
	print ("For single domain and all status code : python f1nd0d3.py --all http://yourserver.com/ ")
	print ("For multiple domain and particular single status code : cat live-domain.txt | xargs -n1 -p50 python c3cilia.py 301   " )
	
if (len(sys.argv)<=1):
	banner()
	print("You must provide a target. Use -h or --help for help.")
	print(" ")
	sys.exit()
if (str(sys.argv[1]) == "-h" or str(sys.argv[1]) == "--help"):
	helpl()
	sys.exit()
if (str(sys.argv[1]) == "-a" or str(sys.argv[1]) == "--all"):
	all()
if (str(sys.argv[1]) == "-c" or str(sys.argv[1]) =="--code"):
	single()	
 | 
| 
	# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import numpy as np
import acl
import cv2
import time
import asyncore
import pickle
import socket
import struct
sys.path.append("../../../common")
from atlas_utils.acl_resource import AclResource
from atlas_utils.acl_model import Model
from atlas_utils.acl_image import AclImage
# config for ascend detection
LABELS = ["ascend"]
MODEL_PATH = "../model/yolov3_ascend_logo.om"
# yolov3 input image size
MODEL_WIDTH = 416
MODEL_HEIGHT = 416
STRIDE_LIST = [8, 16, 32]
# object detection parameters
CONF_THRESHOLD = 0.3
IOU_THRESHOLD = 0.45
CLASS_NUM = len(LABELS)
NUM_CHANNEL = 3 * (CLASS_NUM + 5)
# colors for drawing detection results
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255), (255, 255, 0)]
# RealSense D435 Ethernet config
print('Number of arguments:', len(sys.argv), 'arguments.')
print('Argument List:', str(sys.argv))
LOCAL_IP_ADDRESS = '192.168.8.136'  # 200DK ip
MC_IP_ADDRESS = '192.168.8.102'  # PC ip
PORT = 1024
CHUNK_SIZE = 4096
class ImageClient(asyncore.dispatcher):
    """
    UDP client for each camera server
    """
    def __init__(self, server, source):
        asyncore.dispatcher.__init__(self, server)
        self._address = server.getsockname()[0]
        self._port = source[1]
        self._buffer = bytearray()
        self._window_name = self._port
        self._remaining_bytes = 0
        self._frame_id = 0
        self._image_data = np.array([])
        self._frame_data = b''
        self._frame_length = 0
        self._timestamp = 0
    def handle_read(self):
        """
        Read data
        :return: null
        """
        if self._remaining_bytes == 0:
            # get the expected frame size
            self._frame_length = struct.unpack('<I', self.recv(4))[0]
            # get the timestamp of the current frame
            self._timestamp = struct.unpack('<d', self.recv(8))[0]
            self._remaining_bytes = self._frame_length
        # request the frame data until the frame is completely in buffer
        receive_frame_data = self.recv(self._remaining_bytes)
        self._buffer += receive_frame_data
        self._remaining_bytes -= len(receive_frame_data)
        # once the frame is fully received, process/display it
        if len(self._buffer) == self._frame_length:
            self.handle_frame()
    def handle_frame(self):
        """
        Execute model and send result
        :return: null
        """
        # convert the frame from string to numerical data
        self._image_data = pickle.loads(self._buffer)
        # print(self._image_data.shape)
        self._buffer = bytearray()
        self._frame_id += 1
        # yolov3 model inference with image data
        yolov3_inference_result = inference(yolov3_model, self._image_data)
        # send inference result from 200DK to PC
        send_result_data = pickle.dumps(yolov3_inference_result)
        # capture the length of the data portion of the message
        data_length = struct.pack('<I', len(send_result_data))
        # for the message transmission
        self._frame_data = b''.join([data_length, send_result_data])
        self.send(self._frame_data)
    def get_img(self):
        """
        Get image data
        :return: self._image_data
        """
        return self._image_data
    def readable(self):
        """
        Readable or not
        :return: True
        """
        return True
class EtherSenseClient(asyncore.dispatcher):
    """
    UDP server
    """
    def __init__(self):
        asyncore.dispatcher.__init__(self)
        self._server_address = (LOCAL_IP_ADDRESS, PORT)
        # create a socket for TCP connection between the client and server
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.settimeout(5)
        self.bind(self._server_address)
        self.listen(10)
        self._image_data = np.array([])
        self.handler = 0
    def writable(self):
        """
        Don't want write notifies
        :return: False
        """
        return False
    def readable(self):
        """
        Readable or not
        :return: True
        """
        return True
    def handle_connect(self):
        """
        Print connection messages
        :return: null
        """
        print("connection received")
    def handle_accept(self):
        """
        Accept an incoming connection and delegate frame handling to an ImageClient
        :return: null
        """
        pair = self.accept()
        if pair is not None:
            sock_, addr_ = pair
            print('Incoming connection from %s' % repr(addr_))
            # when a connection is attempted, delegate image receival to the ImageClient
            self.handler = ImageClient(sock_, addr_)
            self._image_data = self.handler.get_img()
def preprocess_cv2(original_bgr_img):
    """
    Preprocess cv2 bgr image for yolov3 input
    :param original_bgr_img: original image with BGR format
    :return: processed image, MODEL_WIDTH, MODEL_HEIGHT
    """
    shape = original_bgr_img.shape[:2]  # [height, width]
    net_h = MODEL_HEIGHT
    net_w = MODEL_WIDTH
    scale = min(float(net_h) / float(shape[0]), float(net_w) / float(shape[1]))
    new_h = int(shape[0] * scale)
    new_w = int(shape[1] * scale)
    dw = (net_w - new_w) / 2
    dh = (net_h - new_h) / 2
    # yolov3 letterbox (aspect-preserving) scaling
    img = cv2.resize(original_bgr_img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(0, 0, 0))
    return img, shape[1], shape[0]
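# Worked example of the letterbox math above (values assume a 1280x720 BGR frame):
#     scale = min(416/720, 416/1280) = 0.325  ->  frame resized to 416x234
#     dh = (416 - 234) / 2 = 91               ->  91 px of black padding on top and bottom
# so the returned image is always 416x416 with the original aspect ratio preserved.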
def overlap(xy_min1, xy_max1, xy_min2, xy_max2):
    """
    Calculate the width/height of overlap area between bbox1 and bbox2
    :param xy_min1: x_min or y_min of bbox1
    :param xy_max1: x_max or y_max of bbox1
    :param xy_min2: x_min or y_min of bbox2
    :param xy_max2: x_max or y_max of bbox2
    :return:
    """
    left = max(xy_min1, xy_min2)
    right = min(xy_max1, xy_max2)
    return right - left
def cal_iou(prediction_box, ground_truth_box):
    """
    Calculate IOU between box and truth
    :param prediction_box: model prediction bounding box: [x_min, y_min, x_max, y_max]
    :param ground_truth_box: ground truth bounding box: [x_min, y_min, x_max, y_max]
    :return: IOU between prediction_box and ground_truth_box
    """
    overlap_w = overlap(prediction_box[0], prediction_box[2], ground_truth_box[0], ground_truth_box[2])
    overlap_h = overlap(prediction_box[1], prediction_box[3], ground_truth_box[1], ground_truth_box[3])
    if overlap_w <= 0 or overlap_h <= 0:
        return 0
    inter_area = overlap_w * overlap_h
    union_area = (prediction_box[2] - prediction_box[0]) * (prediction_box[3] - prediction_box[1]) + \
                 (ground_truth_box[2] - ground_truth_box[0]) * (ground_truth_box[3] - ground_truth_box[1]) - inter_area
    return inter_area * 1.0 / union_area
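# Worked example of cal_iou (hypothetical boxes): for [0, 0, 10, 10] and [5, 5, 15, 15]
# the overlap is 5 x 5 = 25 and the union is 100 + 100 - 25 = 175, so IoU = 25 / 175 ~= 0.143.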
def apply_nms(original_boxes, thresh):
    """
    Hard NMS post-processing for yolov3 output boxes
    :param original_boxes: yolov3 output boxes
    :param thresh: NMS IOU threshold
    :return: nms result list
    """
    nms_result = []
    for class_num in range(CLASS_NUM):
        one_class_boxes = original_boxes[class_num]
        sorted_boxes = sorted(one_class_boxes, key=lambda d: d[5])[::-1]
        result_box_id = dict()
        for box_id in range(len(sorted_boxes)):
            if box_id in result_box_id:
                continue
            truth = sorted_boxes[box_id]
            for box_id_else in range(box_id + 1, len(sorted_boxes)):
                if box_id_else in result_box_id:
                    continue
                box_else = sorted_boxes[box_id_else]
                iou = cal_iou(box_else, truth)
                if iou >= thresh:
                    result_box_id[box_id_else] = 1
        for box_id in range(len(sorted_boxes)):
            if box_id not in result_box_id:
                nms_result.append(sorted_boxes[box_id])
    return nms_result
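# Worked example of apply_nms (hypothetical boxes for the single "ascend" class, format
# [x_min, y_min, x_max, y_max, class, score]):
#     apply_nms([[[0, 0, 10, 10, 0, 0.9], [1, 1, 10, 10, 0, 0.8]]], IOU_THRESHOLD)
# The two boxes overlap with IoU = 81 / 100 = 0.81 >= 0.45, so the lower-scoring box is
# suppressed and only [0, 0, 10, 10, 0, 0.9] is kept.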
def decode(conv_output, img_w, img_h):
    """
    Decode 3 output feature maps to object detection result
    :param conv_output: 3 output feature maps
    :param img_w: original image width
    :param img_h: original image height
    :return: object detection result
    """
    conv_output_h, conv_output_w, _ = conv_output.shape
    feature_map = conv_output.reshape((conv_output_h * conv_output_w, 3, 5 + CLASS_NUM))
    resize_ratio = min(MODEL_WIDTH / img_w, MODEL_HEIGHT / img_h)
    dw = (MODEL_WIDTH - resize_ratio * img_w) / 2
    dh = (MODEL_HEIGHT - resize_ratio * img_h) / 2
    bbox = np.zeros((conv_output_h * conv_output_w, 3, 4))
    bbox[..., 0] = np.maximum((feature_map[..., 0] - feature_map[..., 2] / 2.0 - dw) / resize_ratio, 0)  # x_min
    bbox[..., 1] = np.maximum((feature_map[..., 1] - feature_map[..., 3] / 2.0 - dh) / resize_ratio, 0)  # y_min
    bbox[..., 2] = np.minimum((feature_map[..., 0] + feature_map[..., 2] / 2.0 - dw) / resize_ratio, img_w)  # x_max
    bbox[..., 3] = np.minimum((feature_map[..., 1] + feature_map[..., 3] / 2.0 - dh) / resize_ratio, img_h)  # y_max
    feature_map[..., :4] = bbox
    feature_map = feature_map.reshape((-1, 5 + CLASS_NUM))
    feature_map[:, 4] = feature_map[:, 4] * feature_map[:, 5:].max(1)
    feature_map = feature_map[feature_map[:, 4] >= CONF_THRESHOLD]
    feature_map[:, 5] = np.argmax(feature_map[:, 5:], axis=-1)
    all_boxes = [[]]
    for box_index in range(feature_map.shape[0]):
        each_box = [int(feature_map[box_index, iy]) for iy in range(4)]
        each_box.append(int(feature_map[box_index, 5]))
        each_box.append(feature_map[box_index, 4])
        all_boxes[each_box[4] - 1].append(each_box)
    return all_boxes
def convert_labels(label_list):
    """
    Convert label index to class name
    :param label_list: index number of label
    :return: class name of label
    """
    label_names = []
    if isinstance(label_list, np.ndarray):
        label_list = label_list.tolist()
        label_names = [LABELS[int(index)] for index in label_list]
    return label_names
def post_process(infer_output, img_w, img_h):
    """
    Convert yolov3 output feature maps to detection result
    :param infer_output: 3 output feature maps
    :param img_w: original image width
    :param img_h: original image height
    :return: object detection result with detection_classes, detection_boxes, detection_scores
    """
    result_dict = dict()
    all_boxes = [[]]
    for feature_map_id in range(3):
        feature_map = infer_output[feature_map_id].reshape(
            (MODEL_HEIGHT // STRIDE_LIST[feature_map_id], MODEL_WIDTH // STRIDE_LIST[feature_map_id], NUM_CHANNEL))
        boxes = decode(feature_map, img_w, img_h)
        all_boxes = [all_boxes[iy] + boxes[iy] for iy in range(CLASS_NUM)]
    nms_result = apply_nms(all_boxes, IOU_THRESHOLD)
    if not nms_result:
        result_dict['detection_classes'] = []
        result_dict['detection_boxes'] = []
        result_dict['detection_scores'] = []
    else:
        nms_result_array = np.array(nms_result)
        picked_boxes = nms_result_array[:, 0:4]
        picked_boxes = picked_boxes[:, [1, 0, 3, 2]]
        picked_classes = convert_labels(nms_result_array[:, 4])
        picked_score = nms_result_array[:, 5]
        result_dict['detection_classes'] = picked_classes
        result_dict['detection_boxes'] = picked_boxes.tolist()
        result_dict['detection_scores'] = picked_score.tolist()
    return result_dict
def inference(model, bgr_img):
    """
    Yolov3 model inference pipeline
    :param model: yolov3 om model
    :param bgr_img: opencv bgr image
    :return: yolov3 detection result
    """
    if bgr_img.shape[0] > 0:
        t_preprocess = 0
        t_inference = 0
        t_post_process = 0
        # preprocess image
        t1 = time.time()
        processed_bgr_img, img_w, img_h = preprocess_cv2(bgr_img)
        t2 = time.time()
        t_preprocess += (t2 - t1)
        # model inference
        inference_result_list = model.execute([processed_bgr_img, ])
        t3 = time.time()
        t_inference += (t3 - t2)
        # post process
        inference_result = post_process(inference_result_list, img_w, img_h)
        t4 = time.time()
        t_post_process += (t4 - t3)
        print("*" * 40)
        print("result = ", inference_result)
        print("preprocess cost:", t2 - t1)
        print("forward cost:", t3 - t2)
        print("post process cost:", t4 - t3)
        print("FPS:", 1 / (t4 - t1))
        print("*" * 40)
        return inference_result
    else:
        return dict()
if __name__ == "__main__":
    # acl resource init
    acl_resource = AclResource()
    acl_resource.init()
    # load om model
    yolov3_model = Model(MODEL_PATH)
    # send the multicast message
    multicast_group = (LOCAL_IP_ADDRESS, PORT)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    connections = {}
    try:
        # Send data to the multicast group
        print('sending "%s" to %s' % ('EtherSensePing', multicast_group))
        sent = sock.sendto('EtherSensePing'.encode(), multicast_group)
        # defer waiting for a response using Asyncore
        client = EtherSenseClient()
        # print("data shape:", client._image_data.shape)
        asyncore.loop()
    except socket.timeout:
        print('timed out, no more responses')
    finally:
        print('closing socket', file=sys.stderr)
        sock.close()
 | 
| 
	import os
from gaesessions import SessionMiddleware
__author__ = 'rob'
def webapp_add_wsgi_middleware(app):
    app = SessionMiddleware(app,
                            cookie_key="\x77\xcb\xef~\x83\x12\xbev\xfeZ\x1aG\xb9^\x89:\xf8\x7f+Y+\x15\x91\xe8\x985\xd9aHY\xf1x\x99]'\xd3\xb2\x13\xa4\xc3\x92\xa50\xae\xb8\x90\xbb(\xba\x95\x1f\x07\xd2\xa0\xc0B*q\xe7\xc4e.\xae9")
    return app
   | 
| 
	import time
import collections
class Structure(object):
    def __init__(self, data, api):
        self.api = api
        self.structure_id = data.id_
        self.structure_name = data.attributes['name']
        self.refresh()
    def refresh(self):
        structure_state = self.api.refresh_attributes('structures', self.structure_id)
        self.current_hvac_mode = structure_state.attributes['structure-heat-cool-mode']
        self.current_system_mode = structure_state.attributes['mode']
        self.is_home = structure_state.attributes['home']
        self.relationships = structure_state.relationships
        self.active_schedule = structure_state.attributes['active-schedule-id']
        try:
            structure_schedules = self.api.get_schedules(self.structure_id)
            """Get all schedules as dictionaries and place in a list"""
            self.schedules_list = []
            """ Add No Schedule key in order to be able to set the structure schedule to No Schedule using the set_schedule function """
            self.schedules_dictionary = {
                "No Schedule": None,
            }
            for schedule in structure_schedules.resources:
                self.schedules_list.append({
                    schedule.attributes['name']: schedule.id_,
                })
            """Combine all the schedule dictionaries into a single dictionary"""
            for dictc in self.schedules_list:
                self.schedules_dictionary.update(dictc)
        except AttributeError:
            print("This structure doesn't have any schedules")
        except:
            print("Something went wrong while attempting to get Flair structure schedules")
    def set_schedule(self, schedule_id):
        resource_type = 'structures'
        attributes = {
        'active-schedule-id': schedule_id,
        }
        relationships = {}
        self.api.control_structure(self, resource_type, attributes, relationships)
        self.refresh()
    def set_structure_mode(self, hvac_mode):
        """ Possible HVAC modes are heat, cool, auto, and float. Float means off """
        resource_type = 'structures'
        attributes = {
        'structure-heat-cool-mode': hvac_mode,
        }
        relationships = {}
        self.api.control_structure(self, resource_type, attributes, relationships)
        self.refresh()
    def set_system_mode(self, system_mode):
        """ Possible System modes are auto and manual """
        resource_type = 'structures'
        attributes = {
        'mode': system_mode,
        }
        relationships = {}
        self.api.control_structure(self, resource_type, attributes, relationships)
        self.refresh()
    def set_home_away_mode(self, home_mode):
        """ Home mode is True and Away mode is False """
        resource_type = 'structures'
        attributes = {
        'home': home_mode,
        }
        relationships = {}
        self.api.control_structure(self, resource_type, attributes, relationships)
        self.refresh()
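# Usage sketch (assumes an authenticated api client object from the surrounding package):
#     structure = Structure(data, api)
#     structure.set_structure_mode('float')   # per the docstring above, "float" means off
#     structure.set_home_away_mode(True)      # True marks the structure as Home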
 | 
| 
	from crescent.core import Resource
from .constants import ResourceRequiredProperties
class ApiMapping(Resource):
    __TYPE = "AWS::ApiGatewayV2::ApiMapping"
    def __init__(self, id: str):
        super(ApiMapping, self).__init__(
            id=id,
            type=self.__TYPE,
            required_properties=ResourceRequiredProperties.API_MAPPING
        )
    def ApiId(self, api_id: str):
        return self._set_property(self.ApiId.__name__, api_id)
    def ApiMappingKey(self, api_mapping_key: str):
        return self._set_property(self.ApiMappingKey.__name__, api_mapping_key)
    def DomainName(self, domain_name: str):
        return self._set_property(self.DomainName.__name__, domain_name)
    def Stage(self, stage: str):
        return self._set_property(self.Stage.__name__, stage) | 
| 
	import gym
import gym_gobang
env=gym.make('Gobang-v0')
env.play('single')
#env.play('dual')
 | 
| 
	#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov  3 12:28:37 2019
@author: leewayleaf
"""
def main():
    print("Hello mannieeeeeee")
# -------------------------------------- #
if __name__ == "__main__":
    main()
 | 
| 
	class FCore:
    """Provides the basis for a functionality core with default implementations."""
    def __init__(self, bot):
        self.bot = bot
    
    def get_commands(self) -> dict:
        """Return a dictionary of all commands and their corresponding callbacks."""
        return dict()
    def terminal_control(self):
        """Provide in-terminal functionality after being given control."""
        print("This core does not have terminal functionality.")
    
    def not_command(self, update, context):
        """Called if the bot recieves an unrecognised command."""
        pass
    
    def message(self, update, context):
        """Called if the bot recieves a message."""
        pass | 
| 
	import json
from sqlalchemy.ext.declarative import DeclarativeMeta
class JsonEncoder(json.JSONEncoder):
    @staticmethod
    def to_dict(obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            # an SQLAlchemy class
            fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                data = obj.__getattribute__(field)
                if data is not None:
                    try:
                        json.dumps(data)  # this will fail on non-encodable values, like other classes
                        fields[field] = data
                    except TypeError:
                        pass
            return fields
        else:
            return {}
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            return JsonEncoder.to_dict(obj)
        return json.JSONEncoder.default(self, obj)
    @staticmethod
    def to_json(object_to_convert):
        return json.dumps(object_to_convert, cls=JsonEncoder)
    @staticmethod
    def dict_to_json(dictionary):
        return json.dumps(dictionary)
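# Hedged usage sketch (the declarative `User` model and the `session` below are
# assumptions, not part of this module):
#     user = session.query(User).first()
#     print(JsonEncoder.to_json(user))            # serialize one mapped object
#     print(JsonEncoder.dict_to_json({"ok": 1}))  # plain dict passthrough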
 | 
| 
	# app4.py
import numpy as np
import pandas as pd
import altair as alt
import plotly.express as px
import plotly.graph_objs as go
import pickle as pkle
import os.path
import streamlit as st
def app():
    st.title('Writeup')
    
    st.write('We created several different visualizations of the data set from both a macro and a micro lens by\
    first illustrating the general statistical distributions of the data scientist candidate population in terms\
    of city development index, training hours, experience, latest jobs, and education levels (in the Analysis 1 Tab),\
    then drilling down to gender- and education-level-specific breakdowns to see if there are any noticeable employment\
    trends among groups of different gender identities and academic achievements. The City Development Indices of the cities\
    that the candidates reside in are extremely left-skewed, with the median at 0.86 and an overwhelming majority of residents\
    residing in cities with well-established Infrastructure, Waste Management, Health, Education, and City Product, as defined by\
    the United Nations. The specific country/regional information wasn’t provided in the dataset, so the developers refrained\
    from making potentially biased assumptions, but it’s interesting to note that there’s a spike at 0.62 from a moderately large\
    group of candidates residing in less developed cities. Our box plot distributions of CDI by Education Level show that Masters,\
    Graduates, and PhD’s are highly centered around cities with high CDI’s, while high school and primary school grads are scattered\
    towards lower CDI’s with slightly more outliers. We hypothesize that candidates in developing cities may have access to online/open-source\
    material that can supplement or replace formal training in DS, hence the supply of such candidates in the job market.')
    st.write('60% of the candidates in the dataset hold Graduate Degrees, 22.8% Undergraduate degrees, 2.15% PhDs,\
    and 10.5% high school degrees. The developers found this distribution jarringly different from the job market situation\
    in the U.S., where close to 50-60% of data scientist job applicants hold Master’s degrees or higher, so we deemed it a factor highly\
    dependent on the region/continent, which is unknown. The years of experience by education level are as expected, with PhD’s and Master’s\
    students having an upper bound of 20+ years, followed by Undergraduate degree holders, then High School and Primary Schoolers. Since Data\
    Scientists used to be primarily PhD’s or academic scholars, it’s not surprising that those groups have more experience than others.\
    The experience distribution by major was quite contrary to our hypothesis - that STEM degree holders would have more YoE on average - with\
    all disciplines having pretty much equivalent distributions.')
    st.write('We must note that our dataset is intrinsically imbalanced in terms of the candidates’ experience and gender, with nearly 40% of the\
    candidates having 20+ years of work experience as a Data Scientist. Another limitation of the dataset was its ambiguity in certain\
    columns, including training hours, which the developers interpreted as the number of hours formally trained as a data scientist apart from general\
    work experience. This information would have been more meaningful if it were the number of hours per week spent on training oneself as a\
    better Data Professional, but since it isn’t, candidates with more relevant work experience as a Data Scientist tend to have longer training hours, hence\
    the apparent correlation between having relevant work experience and higher training hours.')
    st.write('Last New Job distribution was quite interesting, with 60% of the candidates only having worked at their current job for less than a year.\
    Given that DS’s are predominantly hired in tech companies or at tech functions, it’s not surprising that frequent job switches are common\
    and not necessarily frowned upon, compared to more traditional industries.')
    st.write('We include two gender-related distributions before deep-diving into gender in Analysis 2. As the dataset has 1.5x more male than female candidates,\
    it was hard to recognize the data points that represent female data scientists in the Distribution of Data Scientists by Experience and Last New Job.\
    In almost all cases the number of male data scientists is much higher than that of female data scientists, so the points for female data scientists were covered.\
    This graph showed that the respondents who had worked for their previous company for 4 years tended to have longer working experience.\
    On the other hand, the ones with shorter working experience have changed their positions or jobs more often.')
    st.write('To classify the data scientists by their gender and education level, we added two drop-down menus, so the users can easily select\
    the data that matches a certain condition and use only that subset to create plots. Using these two options, we created three bar plots\
    which show the distribution of data scientists by their enrolled university types, majors, and company types. The majority of the data\
    scientists in the given data set are not currently enrolled in university. However, most Ph.D.-level data scientists answered that they\
    are currently enrolled in university. Also, the proportion of data scientists who are currently attending university full-time was much\
    higher in the female data scientists group than in the male group.')
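    # Hedged sketch of the drop-down filtering described above (the dataframe
    # `df` and the column names are assumptions, not taken from this file):
    #     gender = st.selectbox('Gender', df['gender'].unique())
    #     level = st.selectbox('Education level', df['education_level'].unique())
    #     subset = df[(df['gender'] == gender) & (df['education_level'] == level)]
    #     st.bar_chart(subset['company_type'].value_counts())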
    st.write('In the major graph, as was expected, the majority of data scientists studied STEM majors regardless of gender,\
    and those who did not attend university are classified as ‘other’ in the major graph. The number of data scientists who studied Arts\
    during their undergrad was the lowest in this distribution graph.')
    st.write('Lastly, to find which type of companies hire data scientists the most, we drew a graph that shows the company type and size\
    that the respondents are currently working for. According to their answers, the majority works for small private firms regardless of gender.\
    However, when we selected only Ph.D.-level data scientists, the result was different. The proportion of respondents\
    who work for the public sector has increased.')
 | 
| 
	from __future__ import absolute_import, division, print_function
import os
import numpy as np
from vivarium.library.units import units
from vivarium.core.process import Process
from vivarium.core.composition import (
    simulate_process_in_experiment,
    plot_simulation_output,
    PROCESS_OUT_DIR,
)
NAME = 'growth_protein'
class GrowthProtein(Process):
 
    defaults = {
        'initial_protein': 5e7,
        'growth_rate': 0.006,
        'global_deriver_key': 'global_deriver',
        'mass_deriver_key': 'mass_deriver',
    }
    def __init__(self, initial_parameters=None):
        if initial_parameters is None:
            initial_parameters = {}
        self.growth_rate = self.or_default(initial_parameters, 'growth_rate')
        self.global_deriver_key = self.or_default(
            initial_parameters, 'global_deriver_key')
        self.mass_deriver_key = self.or_default(
            initial_parameters, 'mass_deriver_key')
        # default state
        # 1000 proteins per fg
        self.initial_protein = self.or_default(
            initial_parameters, 'initial_protein')  # counts of protein
        self.divide_protein = self.initial_protein * 2
        parameters = {
            'growth_rate': self.growth_rate}
        parameters.update(initial_parameters)
        super(GrowthProtein, self).__init__(parameters)
    def ports_schema(self):
        return {
            'internal': {
                'protein': {
                    '_default': self.initial_protein,
                    '_divider': 'split',
                    '_emit': True,
                    '_properties': {
                        'mw': 2.09e4 * units.g / units.mol}}},  # the median E. coli protein is 209 amino acids long, and AAs ~ 100 Da
            'global': {
                'volume': {
                    '_updater': 'set',
                    '_divider': 'split'},
                'divide': {
                    '_default': False,
                    '_updater': 'set'}}}
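        # note: the 'split' dividers above ask vivarium to halve the protein
        # count and the volume between the two daughter cells at division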
    def derivers(self):
        return {
            self.global_deriver_key: {
                'deriver': 'globals',
                'port_mapping': {
                    'global': 'global'},
                'config': {}},
            self.mass_deriver_key: {
                'deriver': 'mass',
                'port_mapping': {
                    'global': 'global'},
                'config': {}},
        }
    def next_update(self, timestep, states):
        protein = states['internal']['protein']
        total_protein = protein * np.exp(self.parameters['growth_rate'] * timestep)
        new_protein = int(total_protein - protein)
        extra = total_protein - int(total_protein)
        # stochastically round the fractional remainder: add one more protein
        # with probability `extra` so expected growth matches exp(growth_rate * timestep)
        where = np.random.random()
        if where < extra:
            new_protein += 1
        divide = False
        if protein >= self.divide_protein:
            divide = True
        return {
            'internal': {
                'protein': new_protein},
            'global': {
                'divide': divide}}
if __name__ == '__main__':
    out_dir = os.path.join(PROCESS_OUT_DIR, NAME)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    process = GrowthProtein()
    settings = {'total_time': 10}
    timeseries = simulate_process_in_experiment(process, settings)
    plot_simulation_output(timeseries, {}, out_dir)
 | 
| 
	"""The mikettle component."""
 | 
| 
	import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def normalizing(matrix, k):
    for i in range(k):
        mean = np.mean(matrix[:, i])
        std = np.std(matrix[:, i])
        matrix[:, i] = (matrix[:, i] - mean) / std
def fit(learning_rate, trials):
    coefficients = np.zeros(number_of_features + 1)
    coefficients = np.reshape(coefficients, (14, 1))
    for i in range(trials):
        h = np.dot(x_train, coefficients)
        coefficients = coefficients - np.dot(np.transpose(x_train), (h - y_train)) * learning_rate * (1 / m)
    return coefficients
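# The regularized variants below implement ridge-style batch gradient descent;
# in matrix form each update matches the code:
#     coefficients := coefficients * (1 - learning_rate * landa / m)
#                     - (learning_rate / m) * X^T (X coefficients - y)
# Note that the intercept column of ones is shrunk along with the other
# coefficients, which is a simplification of the textbook formulation.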
def regularization_fit(learning_rate, trials, landa):
    coefficients = np.zeros(number_of_features + 1)
    coefficients = np.reshape(coefficients, (14, 1))
    for i in range(trials):
        h = np.dot(x_train, coefficients)
        coefficients = coefficients * (1 - (learning_rate * landa / m)) - np.dot(np.transpose(x_train),
                                                                                 (h - y_train)) * learning_rate * (
                               1 / m)
    return coefficients
def regularization_fit_valid(learning_rate, trials, landa):
    coefficients = np.zeros(number_of_features + 1)
    coefficients = np.reshape(coefficients, (14, 1))
    k = len(new_x_train)
    for i in range(trials):
        h = np.dot(new_x_train, coefficients)
        coefficients = coefficients * (1 - (learning_rate * landa / k)) - np.dot(np.transpose(new_x_train),
                                                                                 (h - new_y_train)) * learning_rate * (
                               1 / k)
    return coefficients
def predict(coefficients):
    return np.dot(x_test, coefficients)
def find_hyper_parameter():
    lamdas = [500, 750, 1000]
    error = sys.maxsize
    landa = 0
    for l in lamdas:
        theta = regularization_fit_valid(0.01, 500, l)
        y_predicted = np.dot(x_valid, theta)
        er = np.sqrt(np.square(np.subtract(y_valid, y_predicted)).mean())
        if er < error:
            error = er
            landa = l
    return landa
def plot_errors_normal(learning_rate, k, j):
    errors = []
    iterations = []
    for i in range(100, 1100, 100):
        theta = fit(learning_rate, i)
        y_predicted = predict(theta)
        error = np.sqrt(np.square(np.subtract(y_test, y_predicted)).mean())
        errors.append(error)
        iterations.append(i)
    a[k][j].plot(iterations, errors)
    a[k][j].set_title(learning_rate)
def plot_errors_regularization(learning_rate, k, j):
    errors = []
    iterations = []
    for i in range(100, 1100, 100):
        theta = regularization_fit(learning_rate, i, landa)
        y_predicted = predict(theta)
        error = np.sqrt(np.square(np.subtract(y_test, y_predicted)).mean())
        errors.append(error)
        iterations.append(i)
    b[k][j].plot(iterations, errors)
    b[k][j].set_title(learning_rate)
def normal_gradient_descent():
    plot_errors_normal(0.1, 0, 0)
    plot_errors_normal(0.05, 0, 1)
    plot_errors_normal(0.01, 1, 0)
    plot_errors_normal(0.001, 1, 1)
    plt.suptitle("MSE Error/Number of Iteration")
def regularization_gradient_descent():
    plot_errors_regularization(0.1, 0, 0)
    plot_errors_regularization(0.05, 0, 1)
    plot_errors_regularization(0.01, 1, 0)
    plot_errors_regularization(0.001, 1, 1)
    plt.suptitle("With Regularization\nMSE Error/Number of Iteration")
housing = pd.read_csv("housing.csv")
# one-hot representation for the categorical data
housing['categorical'] = pd.Categorical(housing['ocean_proximity'])
categorical = pd.get_dummies(housing['ocean_proximity'], prefix='ocean_proximity')
housing = pd.concat([housing, categorical], axis=1)
housing = housing.drop(['categorical'], axis=1)
housing = housing.drop(['ocean_proximity'], axis=1)
# some cells in total_bedrooms are NaN; we drop those rows
housing.dropna(subset=["total_bedrooms"], inplace=True)
# separating test and training sets
cut = int(7 * len(housing) / 10)
number_of_features = 13
housing = housing.to_numpy()
# normalize the features to the same scale (z-score standardization)
labels = housing[..., 8]
housing = np.delete(housing, 8, axis=1)
normalizing(housing, number_of_features)
# train set :
x_train = housing[0:cut]
m = len(x_train)
first_column_train = np.ones((m, 1))
x_train = np.hstack((first_column_train, x_train))
y_train = labels[0:cut]
y_train = np.reshape(y_train, (m, 1))
# test set :
x_test = housing[cut:len(housing)]
first_column_test = np.ones((len(x_test), 1))
x_test = np.hstack((first_column_test, x_test))
y_test = labels[cut:len(housing)]
y_test = np.reshape(y_test, (len(x_test), 1))
# validation set: for finding the best landa in regularization
cut_v = int(4 * len(x_train) / 5)
new_x_train = x_train[0:cut_v]
new_y_train = y_train[0:cut_v]
x_valid = x_train[cut_v:len(x_train)]
y_valid = y_train[cut_v:len(y_train)]
landa = 750
# **************
# WE CAN USE THE FOLLOWING COMMENTED LINE TO DETERMINE LAMBDA WITH VALIDATION SET
# landa = find_hyper_parameter()
# **************
# normal gradient descent :
fig1, a = plt.subplots(2, 2)
normal_gradient_descent()
# gradient descent with regularization :
fig2, b = plt.subplots(2, 2)
regularization_gradient_descent()
plt.show()
 | 
| 
	#!/usr/bin/env python
# pylint: disable=no-value-for-parameter
from jumpscale.loader import j
from jumpscale.clients.stellar.exceptions import NoTrustLine,TemporaryProblem
import jumpscale
import click
import time
import stellar_sdk
from requests.exceptions import ConnectionError
_ASSET_ISSUERS = {
    "TEST": {
        "TFT": "GA47YZA3PKFUZMPLQ3B5F2E3CJIB57TGGU7SPCQT2WAEYKN766PWIMB3",
        "TFTA": "GB55A4RR4G2MIORJTQA4L6FENZU7K4W7ATGY6YOT2CW47M5SZYGYKSCT",
    },
    "STD": {
        "TFT": "GBOVQKJYHXRR3DX6NOX2RRYFRCUMSADGDESTDNBDS6CDVLGVESRTAC47",
        "TFTA": "GBUT4GP5GJ6B3XW5PXENHQA7TXJI5GOPW3NF4W3ZIW6OOO4ISY6WNLN2",
    },
}
_HORIZON_NETWORKS = {"TEST": "https://horizon-testnet.stellar.org", "STD": "https://horizon.stellar.org"}
def fetch_new_transaction_memo_hashes(wallet, address, cursor):
    tx_list_result = wallet.list_transactions(address, cursor)
    tft_tx_memo_hashes = [tx.memo_hash_as_hex for tx in tx_list_result["transactions"] if tx.memo_hash is not None]
    return tft_tx_memo_hashes, tx_list_result["cursor"]
def already_issued_for_payment(payment_from_sales_service, issuer_memo_hashes):
    return payment_from_sales_service.transaction_hash in issuer_memo_hashes
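# Idempotency note: every TFT issuance is stamped with the hash of the TFTA
# payment it compensates (passed as memo_hash in wallet.transfer below), and
# already_issued_for_payment() checks the issuer's transaction memos, so a
# payment is never converted twice, even across restarts of this service.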
def _get_horizon_server(network:str):
    server_url = _HORIZON_NETWORKS[network]
    return stellar_sdk.Server(horizon_url=server_url)
def fetch_new_payments_to_process(
    wallet, destination:str, cursor:str, asset, already_issued_memo_hashes, 
):
    payment_list_result = wallet.list_payments(destination, asset=asset, cursor=cursor)
    new_cursor = payment_list_result["cursor"]
    payments_to_process = []
    from_address_payments = [
        p for p in payment_list_result["payments"] if p.to_address == p.my_address
    ]
    for payment in from_address_payments:
        if not already_issued_for_payment(payment, already_issued_memo_hashes):
            payments_to_process.append(payment)
    return payments_to_process, new_cursor
@click.command(help="Convert burned TFTA's to TFT's")
@click.option("--walletname", type=str, default="tftatotftissuer")
@click.option("--preview/--no-preview",default=False)
def convert_tfta_to_tft(walletname,preview):
    wallet = j.clients.stellar.get(walletname)
    network = wallet.network.value
    print(f"Starting service to convert TFTA to TFT on the {network} network")
    tfta_issuer = _ASSET_ISSUERS[network]["TFTA"]
    tft_issuer = _ASSET_ISSUERS[network]["TFT"]
    tfta_payments_cursor = ""
    tft_transactions_cursor = ""
    tft_issuer_memo_hashes = []
    tft_tx_memo_hashes, tft_transactions_cursor = fetch_new_transaction_memo_hashes(
        wallet, tft_issuer, tft_transactions_cursor
    )
    tft_issuer_memo_hashes = [*tft_issuer_memo_hashes, *tft_tx_memo_hashes]
    payments_to_process,tfta_payments_cursor = fetch_new_payments_to_process(
        wallet,
        tfta_issuer,
        tfta_payments_cursor,
        f"TFTA:{tfta_issuer}",
        tft_issuer_memo_hashes,
    )
    while True:
        time.sleep(60)  # Make sure that when we fetch the issuances everything is up to date
        try:
            tft_tx_memo_hashes, tft_transactions_cursor = fetch_new_transaction_memo_hashes(
                wallet, tft_issuer, tft_transactions_cursor
            )
            tft_issuer_memo_hashes = [*tft_issuer_memo_hashes, *tft_tx_memo_hashes]
            for p in payments_to_process:
                if not already_issued_for_payment(p, tft_issuer_memo_hashes):
                    j.logger.info(f"Issuing {p.balance.balance} TFT to {p.from_address} for payment {p.transaction_hash}")
                    if preview:
                        continue
                    try:
                        wallet.transfer(
                            p.from_address,
                            amount=p.balance.balance,
                            asset=f"TFT:{tft_issuer}",
                            memo_hash=p.transaction_hash,
                            fund_transaction=False,
                            from_address=tft_issuer,
                        )
                    except NoTrustLine:
                        j.logger.error(f"{p.from_address} has no TFT trustline")
                    except TemporaryProblem as e:
                        j.logger.error(f"Temporary problem: {e}")
            payments_to_process, tfta_payments_cursor = fetch_new_payments_to_process(
                wallet,
                tfta_issuer,
                tfta_payments_cursor,
                f"TFTA:{tfta_issuer}",
                tft_issuer_memo_hashes,
            )
        except (jumpscale.core.exceptions.exceptions.Timeout, ConnectionError):
            continue
if __name__ == "__main__":
    convert_tfta_to_tft()
 | 
| 
	# import necessary modules
from discord.ext import commands
# creates a bot instance with "$" as the command prefix
bot = commands.Bot("$")
TOKEN = "TOKEN"
#       ^^^^^^^ FILL IN THIS! This is the generated token for your bot from the Discord Developer Page
# a command in discord.py is <command-prefix><command-name>
# this creates a command that can be triggered by `$hello`, ie. "hello" is the command name
@bot.command()
async def hello(ctx):  # note: discord commands must be coroutines, so define the function with `async def`
    # the ctx argument is automatically passed as the first positional argument
    # you can access the author (type discord.Member) and channel (type discord.TextChannel) of the command as follows:
    message_author = ctx.author
    message_channel = ctx.channel
    # prints "<username> said hello" to the console
    print("{} said hello".format(message_author))
    # Member.send(...) is a coroutine, so it must be awaited
    # this sends a message "Hello, <username>!" to the message channel
    await message_channel.send("Hello, {}!".format(message_author.name))
    # more info on string formatting: https://pyformat.info
# This is how you define a discord.py event
@bot.event
async def on_ready():  # the event `on_ready` is triggered when the bot is ready to function
    print("The bot is READY!")
    print("Logged in as: {}".format(bot.user.name))
# starts the bot with the corresponding token
bot.run(TOKEN)
 | 
| 
	from django.core.exceptions import ImproperlyConfigured
try:
    from django.contrib.gis.db import models
except ImproperlyConfigured:
    from django.db import models
    class DummyField(models.Field):
        def __init__(self, dim=None, srid=None, geography=None, *args, **kwargs):
            super(DummyField, self).__init__(*args, **kwargs)
    models.GeoManager = models.Manager
    models.GeometryField = DummyField
    models.LineStringField = DummyField
    models.MultiPointField = DummyField
    models.MultiPolygonField = DummyField
    models.PointField = DummyField
    models.PolygonField = DummyField
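    # With GeoDjango unavailable (e.g. GDAL/GEOS not installed), the geometry
    # fields above degrade to no-op dummy fields so that models importing them
    # can still be loaded.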
 | 
| 
	# This is a sample Python program that trains a simple TensorFlow California Housing model.
# This implementation will work on your *local computer* or in the *AWS Cloud*.
# To run training and inference *locally* set: `config = get_config(LOCAL_MODE)`
# To run training and inference on the *cloud* set: `config = get_config(CLOUD_MODE)` and set a valid IAM role value in get_config()
#
# Prerequisites:
#   1. Install required Python packages:
#      `pip install -r requirements.txt`
#   2. Docker Desktop installed and running on your computer:
#      `docker ps`
#   3. You should have AWS credentials configured on your local machine
#      in order to be able to pull the docker image from ECR.
###############################################################################################
import os
import numpy as np
import pandas as pd
from sklearn.datasets import *
import sklearn.model_selection
from sklearn.preprocessing import StandardScaler
from sagemaker.tensorflow import TensorFlow
DUMMY_IAM_ROLE = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'
def download_training_and_eval_data():
    if os.path.isfile('./data/train/x_train.npy') and \
            os.path.isfile('./data/test/x_test.npy') and \
            os.path.isfile('./data/train/y_train.npy') and \
            os.path.isfile('./data/test/y_test.npy'):
        print('Training and evaluation datasets exist. Skipping Download')
    else:
        print('Downloading training and evaluation dataset')
        data_dir = os.path.join(os.getcwd(), 'data')
        os.makedirs(data_dir, exist_ok=True)
        train_dir = os.path.join(os.getcwd(), 'data/train')
        os.makedirs(train_dir, exist_ok=True)
        test_dir = os.path.join(os.getcwd(), 'data/test')
        os.makedirs(test_dir, exist_ok=True)
        data_set = fetch_california_housing()
        X = pd.DataFrame(data_set.data, columns=data_set.feature_names)
        Y = pd.DataFrame(data_set.target)
        # We partition the dataset into 2/3 training and 1/3 test set.
        x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.33)
        scaler = StandardScaler()
        scaler.fit(x_train)
        x_train = scaler.transform(x_train)
        x_test = scaler.transform(x_test)
        np.save(os.path.join(train_dir, 'x_train.npy'), x_train)
        np.save(os.path.join(test_dir, 'x_test.npy'), x_test)
        np.save(os.path.join(train_dir, 'y_train.npy'), y_train)
        np.save(os.path.join(test_dir, 'y_test.npy'), y_test)
        print('Downloading completed')
def do_inference_on_local_endpoint(predictor):
    print(f'\nStarting Inference on endpoint (local).')
    x_test = np.load('./data/test/x_test.npy')
    y_test = np.load('./data/test/y_test.npy')
    results = predictor.predict(x_test[:10])['predictions']
    flat_list = [float('%.1f' % (item)) for sublist in results for item in sublist]
    print('predictions: \t{}'.format(np.array(flat_list)))
    print('target values: \t{}'.format(y_test[:10].round(decimals=1)))
def main():
    download_training_and_eval_data()
    print('Starting model training.')
    print(
        'Note: if launching for the first time in local mode, container image download might take a few minutes to complete.')
    california_housing_estimator = TensorFlow(entry_point='california_housing_tf2.py',
                                              source_dir='code',
                                              role=DUMMY_IAM_ROLE,
                                              instance_count=1,
                                              instance_type='local',
                                              framework_version='2.4.1',
                                              py_version='py37')
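    # instance_type='local' runs the training job in a Docker container on this
    # machine (SageMaker local mode); swapping it for a real instance type such
    # as 'ml.m5.large' (and a valid IAM role) would run the same script as a
    # cloud training job.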
    inputs = {'train': 'file://./data/train', 'test': 'file://./data/test'}
    california_housing_estimator.fit(inputs)
    print('Completed model training')
    print('Deploying endpoint in local mode')
    predictor = california_housing_estimator.deploy(initial_instance_count=1, instance_type='local')
    do_inference_on_local_endpoint(predictor)
    print('About to delete the endpoint to stop paying (if in cloud mode).')
    predictor.delete_endpoint(predictor.endpoint_name)
if __name__ == "__main__":
    main()
 | 
| 
	import unittest
import os
import test
PEM_FILEPATH = os.path.join(os.path.abspath('.'), 'forwarders.pem')
def forwarders(*args):
    return test.scloud("forwarders", *args)
class TestForwarders(unittest.TestCase):
    def setUp(self):
        # retrieve the selected tenant name
        code, self.tname, _ = test.scloud("get", "tenant")
        self.assertEqual(0, code)
        self.assertIsNotNone(self.tname)
    def tearDown(self):
        code, result, _ = forwarders("delete-certificates")
        self.assertEqual(0, code)
    def test_create_certificates(self):
        code, result, _ = forwarders("create-certificate", PEM_FILEPATH)
        print "\n--Create-Certificate-Tests--\n"
        print "Validate Success Code"
        print code
        self.assertEqual(0, code)
        self.assertTrue("content" in result)
        print "Validate certificate"
        #Create existing certificate - 400
        print "Create existing certificate"
        code, _, err = forwarders("create-certificate", PEM_FILEPATH)
        self.assertTrue(test.is400(err))
        #Clean up
        print "Delete certificate"
        code, result, _ = forwarders("delete-certificates")
        self.assertEqual(0, code)
    def test_get_certificates(self):
        print "\n--Get-Certificate-Tests--\n"
        print "Create-Certificate"
        code, result, _ = forwarders("create-certificate", PEM_FILEPATH)
        self.assertEqual(0, code)
        self.assertTrue("content" in result)
        print "Get-Certificate"
        code, result, _ = forwarders("list-certificates")
        self.assertEqual(0, code)
        print "Delete All certificates"
        code, result, _ = forwarders("delete-certificates")
        self.assertEqual(0, code)
        #Get certificates when no certificates exist
        print "Get-Certificate-None-Exists"
        code, result, _ = forwarders("list-certificates")
        print "Validate no certificates"
        self.assertFalse("content" in result)
        self.assertEqual(0, code)
    def test_delete_certificates(self):
        print "\n--Delete-Certificates-Tests--\n"
        print "Create-Certificate"
        code, result, _ = forwarders("create-certificate", PEM_FILEPATH)
        self.assertEqual(0, code)
        self.assertTrue("content" in result)
        code, result, _ = forwarders("delete-certificates")
        self.assertEqual(0, code)
        code, result, _ = forwarders("list-certificates")
        self.assertEqual(0, code)
        self.assertFalse("content" in result)
        print "Validate all certificates are deleted"
    def test_get_spec_json(self):
        print "\n--Get-Spec-Json--\n"
        code, result, _ = forwarders("get-spec-json")
        self.assertEqual(0, code)
        self.assertTrue(result)
    def test_get_spec_yaml(self):
        print "\n--Get-Spec-Yml--\n"
        code, result, _ = forwarders("get-spec-yaml")
        self.assertEqual(0, code)
        self.assertTrue(result)
if __name__ == "__main__":
    unittest.main()
 | 
| 
	# -*- coding: utf-8 -*-
"""
This module provides most of the core features of PyAlaOCL.
It defines the various functions and classes that constitutes the OCL
library.
"""
#import setup
#__version__ = setup.getVersion()
import logging
log = logging.getLogger(__name__)
__all__ = (
    'floor',
    'isUndefined',
    'oclIsUndefined',
    'isEmpty',
    'notEmpty',
    'implies',
    'Invalid',
    'oclIsKindOf',
    'oclIsTypeOf',
    'registerIsKindOfFunction',
    'registerIsTypeOfFunction',
    'Collection',
    'Set',
    'Bag',
    'Seq',
    'asSet',
    'asBag',
    'asSeq',
    'isCollection',
    'asCollection',
    'emptyCollection',
    'listAll',
)
import inspect
import logging
logging.getLogger('pyalaocl').addHandler(logging.NullHandler())
class InfixedOperator:  # old-class style required
    def __init__(self, function):
        self.function = function
    def __rrshift__(self, other):
        return \
            InfixedOperator(
                lambda x, self=self, other=other: self.function(other, x))
    def __rshift__(self, other):
        return self.function(other)
    def __call__(self, value1, value2):
        return self.function(value1, value2)
class PostfixedOperator:
    def __init__(self, function):
        self.function = function
    def __rrshift__(self, other):
        return self.function(other)
def _isEmpty(value):
    if value is None:
        return True
    else:
        try:
            empty =value.isEmpty()
        except AttributeError:
            try:
                l = len(value)
            except (TypeError,AttributeError):
                return False
            else:
                return l == 0
        else:
            return empty
implies = InfixedOperator(lambda x,y: y if x else True)
isEmpty = PostfixedOperator(_isEmpty)
notEmpty = PostfixedOperator(lambda x: not _isEmpty(x))
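# Hedged usage sketch of the operator wrappers defined above (both operands are
# evaluated eagerly, unlike a lazy logical implication):
#     (x > 0) >>implies>> (y > 0)    # -> bool
#     []      >>isEmpty              # -> True
#     [1, 2]  >>notEmpty             # -> True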
def floor(r):
    """ Return the largest integer which is not greater than the parameter.
    """
    import math
    return math.floor(r)
def isUndefined(value):
    """
    Indicates if the given parameter is undefined (None) or not.
    :param value: any kind of value.
    :type value: any
    :return: True if the value is None.
    :rtype: bool
    Examples:
        >>> print isUndefined(3)
        False
        >>> print isUndefined(None)
        True
    """
    try:
        return value is None
    except:
        return True  # see OCL 11.3.4
def oclIsUndefined(value):
    return isUndefined(value)
class Invalid(Exception):
    def __init__(self,msg):
        super(Invalid,self).__init__(msg)
_OCL_IS_KIND_OF_DELEGATES = []
_OCL_IS_TYPE_OF_DELEGATES = []
def registerIsKindOfFunction(function):
    global _OCL_IS_KIND_OF_DELEGATES
    if function not in _OCL_IS_KIND_OF_DELEGATES:
        _OCL_IS_KIND_OF_DELEGATES.append(function)
def registerIsTypeOfFunction(function):
    global _OCL_IS_TYPE_OF_DELEGATES
    if function not in _OCL_IS_TYPE_OF_DELEGATES:
        _OCL_IS_TYPE_OF_DELEGATES.append(function)
def oclIsKindOf(value1,value2):
    """
    Evaluates to True if the value is an instance of the type given as the
    second parameter or of one of its subtypes, directly or indirectly.
    Use the method oclIsTypeOf if you want to check if a value is *exactly*
    of a given type.
    :param value: A scalar value, a collection or an object.
    :type value: Any
    :param aType: The type to check the value against
                  (e.g. int, float, str, unicode, bool or a class)
    :type aType: type
    :return: True if value is compatible with the type aType.
    :rtype: bool
    Examples:
        >>> print oclIsKindOf(3,int)
        True
        >>> print oclIsKindOf("3",int)
        False
        >>> print oclIsKindOf(2.5,float)
        True
        >>> print oclIsKindOf("hello",basestring)
        True
        >>> print oclIsKindOf(True,bool)
        True
        >>> class Person(object): pass
        >>> print oclIsKindOf(Person(),Person)
        True
        >>> print oclIsKindOf(Person(),object)
        True
        >>>
    """
    if inspect.isclass(value2) and isinstance(value1, value2):
        return True
    else:
        for is_kind_of_function in _OCL_IS_KIND_OF_DELEGATES:
            if is_kind_of_function(value1, value2):
                return True
        return False
def oclIsTypeOf(value1,value2):
    """
    Return True if the type of the value is *exactly* the type given as a
    second parameter. This function does not take into account sub-typing
    relationships. If this is what is intended, use oclIsKindOf instead.
    :param value: A scalar value, a collection or an object.
    :type value: Any
    :param aType: The type to check the value against
                (e.g. int, float, str, unicode, bool or a class)
    :type aType: type
    :return: True if the value is exactly of type aType.
    :rtype: bool
    Examples:
        >>> print oclIsTypeOf("hello",str)
        True
        >>> print oclIsTypeOf("hello",basestring)
        False
        >>> print oclIsTypeOf(u"çüabè",unicode)
        True
    """
    if type(value1) == value2:
        return True
    else:
        for is_type_of_function in _OCL_IS_TYPE_OF_DELEGATES:
            if is_type_of_function(value1, value2):
                return True
        return False
def evaluate(value,expression):
    """
    Evaluate an expression on a given value
    :param value:
    :type value:
    :param expression:
    :type expression:
    :return:
    :rtype:
    Examples:
        >>> evaluate(1,lambda x:x*2)
        2
        >>> evaluate('hello',len)
        5
        >>> evaluate('hello',str.islower)
        True
        >>> evaluate('hello','islower')
        True
        >>> evaluate('hello','len(_)')
        5
        >>> evaluate('hello','_.islower()')
        True
        >>> class A(object):
        ...    a = 3
        ...    def __init__(self):
        ...        self.b = 10
        ...    def c(self):
        ...        return 25
        >>> evaluate(A(),'a')
        3
        >>> evaluate(A(),'b')
        10
        >>> evaluate(A(),'c')
        25
    """
    if callable(expression):
        return expression(value)
    elif isinstance(expression,(str,unicode)):
        try:
            r = getattr(value,expression)
        except AttributeError:
            if '_' in expression:
                _ = value
                return eval(expression)
            else:
                msg = "evaluate(): %s is not an attribute of the type %s" \
                    % (expression,type(value))
                raise Invalid(msg)
        if callable(r):
            return r()
        else:
            return r
def evaluatePredicate(value,predicate):
    r = evaluate(value,predicate)
    t = type(r)
    if t is not bool:
        msg = "Predicate expected. Returned a value of type" \
              " %s instead of a boolean" % t
        raise Invalid(msg)
    else:
        return r
def flatten(value):
    """
    Return an OCL collection with all the elements at the first level.
    :param value: The collection to be flatten
    :rtype value: iterable[iterable]
    :return: A flatten collection.
    :rtype: Seq
    """
    try:
        return value.flatten()
    except AttributeError:
        if isCollection(value):
            flat = []
            for e in value:
                flat.extend(flatten(e))
            return flat
        else:
            return [value]
#==============================================================================
#                              Collections
#==============================================================================
from abc import ABCMeta, abstractmethod
# noinspection PyClassicStyleClass
class GenericCollection:  # old-class style required
    """
    Class used both to define brand new OCL collections (classes under
    Collection) and to define JavaCollectionExtension. Due to restrictions
    of class instrumentation we use an old-style class, hence object is not
    the base class.
    """
    def __init__(self):
        pass
    def __len__(self):
        """
        Return the size of the collection.
        Not in OCL but  pythonic.
        :return: The number of elements in the collection
        :rtype: int
        Examples:
            >>> len(Set(2,2,3))
            2
            >>> len(Bag(1,1,1))
            3
            >>> len(Set(Set()))
            1
            >>> len(Set())
            0
        """
        return self.size()
    def isEmpty(self):
        return self.size() == 0
    def notEmpty(self):
        return not self.isEmpty()
    def includes(self,value):
        """
        Return True if the value is in the collection.
        :param value: Any kind of value.
        :type value: any
        :return: True if the element is in set, False otherwise.
        :rtype: bool
        Examples:
            >>> Set(1,3,"a").includes("3")
            False
            >>> Set(1,3,"a").includes(3)
            True
            >>> Set(Set()).includes(Set())
            True
            >>> Set().includes(Set())
            False
            >>> 3 in Set(2,3,1)
            True
            >>> "hello" in Set("a","b")
            False
            >>> Bag(10,"a",3,3,10,10).includes(10)
            True
            >>> Bag(2).includes(5)
            False
            >>> 2 in Bag(2,2)
            True
            >>> Seq(10,2,2,3).includes(3)
            True
            >>> 2 in Seq(1,0,1)
            False
        """
        return value in self
    def excludes(self,value):
        return value not in self
    def includesAll(self,elements):
        for e in elements:
            if e not in self:
                return False
        return True
    def excludesAll(self,elements):
        for e in elements:
            if e in self:
                return False
        return True
    def __or__(self,anyCollection):
        return self.union(anyCollection)
    def any(self,predicate):
        """
        Return any element in the collection that satisfies the predicate.
        This operation is non-deterministic as various elements may satisfy
        the predicate.
        If no element satisfies the predicate an exception is raised.
        See OCL-11.9.1
        :param predicate: A predicate, that is a function returning a boolean.
        :type predicate: X->bool
        :return: Any element satisfying the predicate.
        :rtype X:
        Examples:
            >>> Set(1,2,-5,-10).any(lambda x:x<0) in [-5,-10]
            True
            >>> Set(1,2).any(lambda x:x<0)
            Traceback (most recent call last):
              ...
            Invalid: .any(...) failed: No such element.
        """
        # noinspection PyTypeChecker
        for e in self:
            if evaluatePredicate(e,predicate):
                return e
        raise Invalid(".any(...) failed: No such element.")
    def max(self):
        return max(self)
    def min(self):
        return min(self)
    def sum(self):
        """
        Sum of the number in the collection.
        Examples:
            >>> Set(1,0.5,-5).sum()
            -3.5
            >>> Set().sum()
            0
        """
        return sum(self)
    def selectByKind(self,aType):
        return self.select(lambda e:oclIsKindOf(e,aType))
    def selectByType(self,aType):
        return self.select(lambda e:oclIsTypeOf(e,aType))
    def reject(self,predicate):
        """
        Discard from the set all elements that satisfy the predicate.
        :param predicate: A predicate, that is a function returning a boolean.
        :return: The set without the rejected elements.
        :rtype set:
        Examples:
            >>> Set(2,3,2.5,-5).reject(lambda e:e>2) == Set(2,-5)
            True
            >>> Set(Set(1,2,3,4),Set()).reject(lambda e:e.size()>3) \
                     == Set(Set())
            True
        """
        return self.select(lambda e:not evaluatePredicate(e,predicate))
    def collect(self,expression):
        return self.collectNested(expression).flatten()
    def __getattr__(self,name):
        """
        :param name:
        :return:
        Examples:
            >>> class P(object):
            ...      def __init__(self,x):
            ...         self.a = x
            >>> P1 = P(1)
            >>> P4 = P(4)
            >>> P1.a
            1
            >>> P4.a
            4
            >>> Set(P1,P4).a == Bag(1,4)
            True
        """
        return self.collect(lambda e:getattr(e,name))
    def forAll(self,predicate):
        """
        Return True if the predicate given as parameter is satisfied by all
        elements of the collection.
        :param predicate: A predicate, that is a function returning a boolean.
        :type predicate: X->bool
        :return: Whether or not the predicate is satisfied by all elements.
        :rtype bool:
        Examples:
            >>> Set(2,3,5,-5).forAll(lambda e:e>=0)
            False
            >>> Set(2,3,5).forAll(lambda e:e>=0)
            True
            >>> Set().forAll(lambda e:e>=0)
            True
            >>> Bag(4,4,4).forAll(lambda e:e==4)
            True
            >>> Seq(Bag(1),Set(2),Seq(3)).forAll(lambda e:e.size()==1)
            True
        """
        # noinspection PyTypeChecker
        for e in self:
            if not evaluatePredicate(e,predicate):
                return False
        return True
    def exists(self,predicate):
        """
        Return True if the predicate given as parameter is satisfied by at
        least one element of the collection.
        :param predicate: A predicate, that is a function returning a boolean.
        :type predicate: X->bool
        :return: Whether or not the predicate is satisfied by at least one
                element.
        :rtype bool:
        Examples:
            >>> Set(2,3,5,-5).exists(lambda e:e<0)
            True
            >>> Set(2,3,5).exists(lambda e:e<0)
            False
            >>> Set().exists(lambda e:e>=0)
            False
            >>> Bag(Set(),Set(),Set(2),Set(3)).exists(lambda e:e.size()==1)
            True
        """
        # noinspection PyTypeChecker
        for e in self:
            if evaluatePredicate(e,predicate):
                return True
        return False
    def one(self,predicate):
        """
        Return True if the predicate given as parameter is satisfied by
        one and only one element in the collection.
        :param predicate: A predicate, that is a function returning a boolean.
        :type predicate: X->bool
        :return: Whether or not the predicate is satisfied by exactly one
                element.
        :rtype bool:
        Examples:
            >>> Set(2,3,5,-5).one(lambda e:e<0)
            True
            >>> Bag(2,3,5,-5,-5).one(lambda e:e<0)
            False
            >>> Set().one(lambda e:e>=0)
            False
            >>> Seq().one(lambda e:e>=0)
            False
            >>> Seq(1).one(lambda e:e>=0)
            True
            >>> Bag(Set(2),Set(),Set(3),Set()).one(lambda e:e.size()==0)
            False
        """
        foundOne = False
        # noinspection PyTypeChecker
        for e in self:
            found = evaluatePredicate(e,predicate)
            if found and foundOne:
                return False
            elif found:
                foundOne = True
        return foundOne
    def closure(self,expression):
        """
        Return the transitive closure of the expression for all element in
        the collection.
        See OCL, section 7.6.5.
        :param expression: The expression to be applied again and again.
        :type: X->X
        :return: A sequence representing the transitive closure including the
        source elements.
        :type: Seq[X]
        Examples:
            >>> def f(x):
            ...     successors = {1:[2], 2:[1, 2, 3], 3:[4], 4:[], \
                                  5:[5], 6:[5], 7:[5, 7]}
            ...     return successors[x]
            >>> Set(1).closure(f) == Seq(1,2,3,4)
            True
            >>> Set(5).closure(f) == Seq(5)
            True
            >>> Seq(6,6,3).closure(f) == Seq(6,3,5,4)
            True
        """
        # FIXME: returns always a sequence, but the type changes in OCL.
        from collections import deque
        sources = list(self)
        to_visit = deque(sources)
        visited = []
        while len(to_visit) != 0:
            current = to_visit.popleft()
            if current not in visited:
                result = evaluate(current,expression)
                if isCollection(result):
                    successors = listAll(result)
                else:
                    successors = [result]
                # print "visited %s -> %s" % (current,successors)
                for s in successors:
                    if s not in visited:
                        to_visit.append(s)
                visited.append(current)
        return Seq.new(visited)
    def iterate(self):
        # FIXME: Not implemented (See 7.6.6)
        raise NotImplementedError()
    def isUnique(self,expression):
        return not self.collect(expression).hasDuplicates()
from collections import deque
class Collection(object, GenericCollection):
    """
    Base class for OCL collections.
    Collections are either:
    * sets (Set),
    * ordered set (OrderedSet)
    * bags (Bag),
    * sequences (Seq)
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def size(self):
        pass
    @abstractmethod
    def count(self,element):
        pass
    @abstractmethod
    def including(self,value):
        pass
    @abstractmethod
    def excluding(self,value):
        pass
    @abstractmethod
    def union(self,value):
        pass
    @abstractmethod
    def select(self,expression):
        pass
    @abstractmethod
    def flatten(self):
        pass
    @abstractmethod
    def collectNested(self,expression):
        pass
    @abstractmethod
    def hasDuplicates(self):
        pass
    @abstractmethod
    def duplicates(self):
        pass
    @abstractmethod
    def selectWithCount(self,number):
        pass
    @abstractmethod
    def sortedBy(self,expression):
        pass
    def asCollection(self):
        return self
    @abstractmethod
    def emptyCollection(self):
        return self
    @abstractmethod
    def asSet(self):
        pass
    @abstractmethod
    def asBag(self):
        pass
    @abstractmethod
    def asSeq(self):
        pass
    @abstractmethod
    def __str__(self):
        pass
    def __repr__(self):
        return self.__str__()
    @abstractmethod
    def __eq__(self,value):
        pass
    def __ne__(self,value):
        return not self.__eq__(value)
    @abstractmethod
    def __hash__(self):
        pass
    @abstractmethod
    def __contains__(self,item):
        pass
    @abstractmethod
    def __iter__(self):
        pass
#------------------------------------------------------------------------------
#   OCL Sets
#------------------------------------------------------------------------------
def asSet(collection):
    """
    Convert the given collection to a Set
    :param collection:
    :return:
    :rtype: Set
    """
    try:
        return collection.asSet()
    except AttributeError:
        return Set.new(collection)
class Set(Collection):
    """
    Set of elements.
    This class mimics OCL Sets. Being a set, there are no duplicates and no
    ordering of elements. By contrast to OCL Sets, here a set can contain
    any kind of elements at the same time. OCL sets are homogeneous,
    all elements being of the same type (or at least same supertype).
    """
    def __init__(self,*args):
        """
        Create a set from some elements.
        Eliminate duplicates if any.
        Examples:
           >>> Set(10,"a",3,10,10) == Set(10,"a",3)
           True
           >>> Set() <> Set(10,"a",3)
           True
           >>> Set(10,10).size()
           1
           >>> Set(Set()).size()
           1
           >>> Set("hello").size()
           1
           >>> Set(Set(2),Set(2)).size()
           1
        """
        # We cannot have Counter here. So list is ok (see listAll)
        super(Set, self).__init__()
        self.theSet = set(list(args))
    @classmethod
    def new(cls,anyCollection=()):
        newSet = cls()
        newSet.theSet = set(anyCollection)
        return newSet
    def emptyCollection(self):
        return Set.new()
    def size(self):
        """
        Return the size of the set.
        :return: The size of the set.
        :rtype: int
        Examples:
            >>> Set(1,4,2,1,1).size()
            3
            >>> Set().size()
            0
        """
        return len(self.theSet)
    def isEmpty(self):
        """
        Examples:
            >>> Set().isEmpty()
            True
            >>> Set(Set()).isEmpty()
            False
            >>> Set(2,3).isEmpty()
            False
        """
        return False if self.theSet else True
    def count(self,value):
        """
        Return the number of occurrence of the value in the set (0 or 1).
        :param value: The element to search in the set.
        :type value: any
        :return: 1 if the element is in the set, 0 otherwise.
        :rtype: int
        Examples:
            >>> Set(1,3,"a").count("3")
            0
            >>> Set(1,3,3,3).count(3)
            1
        """
        return 1 if value in self.theSet else 0
    def includes(self,value):
        return value in self.theSet
    def including(self,value):
        """
        Add the element to the set if not already there.
        :param value: The element to add to the set.
        :type value: any
        :return: A set including this element.
        :rtype: Set
        Examples:
            >>> Set(1,3,"a").including("3") == Set(1,"3",3,"a")
            True
            >>> Set(1,3,3,3).including(3) == Set(3,1)
            True
        """
        fresh = set(self.theSet)
        fresh.add(value)
        return Set.new(fresh)
    def excluding(self,value):
        """
        Excludes a value from the set (if there).
        :param value: The element to exclude from the set.
        :type value: any
        :return: A set without this element.
        :rtype: Set
        Examples:
            >>> Set(1,3,"a").excluding("3") == Set(1,3,"a")
            True
            >>> Set(1,3,3,3).excluding(3) == Set(1)
            True
        """
        fresh = set(self.theSet)
        fresh.discard(value)
        return Set.new(fresh)
    def union(self,anyCollection):
        """
        Add all elements from the collection given to the set.
        :param anyCollection: A collection of values to be added to this set.
        :type anyCollection: collection
        :return: A set including all values added plus previous set elements.
        :rtype: Set
        Examples:
            >>> Set(1,3,'a').union([2,3,2]) == Set(1,3,"a",2)
            True
            >>> Set(1,3,3,3).union(Set(2,1,8)) == Set(1,2,3,8)
            True
            >>> Set().union(Set()) == Set()
            True
            >>> Set(1,3) | [2,3] == Set(1,2,3)
            True
        """
        assert isCollection(anyCollection), \
            'Any collection expected, but found %s' % anyCollection
        # We don't need to take special care with Counter as we remove
        # duplicates
        fresh = set(self.theSet)
        fresh = fresh | set(anyCollection)
        return Set.new(fresh)
    def __or__(self,anyCollection):
        return self.union(anyCollection)
    def intersection(self,anyCollection):
        """
        Retain only elements in the intersection between this set and the
        given collection.
        :param anyCollection: A collection of values to intersect with this set.
        :type anyCollection: collection
        :return: A set containing only the elements present both in this set
        and in the collection.
        :rtype: Set
        Examples:
            >>> Set(1,3,"a").intersection(["a","a",8]) == Set("a")
            True
            >>> Set(1,3,3,3).intersection(Set(1,3)) == Set(1,3)
            True
            >>> Set(2).intersection(Set()) == Set()
            True
            >>> Set(2) & Set(3,2) == Set(2)
            True
        """
        assert isCollection(anyCollection), \
            'Any collection expected, but found %s' % anyCollection
        # Don't need to take special care with Counter as we remove duplicates
        fresh = set(self.theSet)
        fresh = fresh & set(anyCollection)
        return Set.new(fresh)
    def __and__(self,anyCollection):
        return self.intersection(anyCollection)
    def difference(self,anyCollection):
        """
        Remove from the set all values in the collection.
        :param anyCollection: Any collection of values to be discarded from
        this set.
        :type anyCollection: collection
        :return: This set without the values in the collection.
        :rtype: Set
        Examples:
            >>> Set(1,3,"a").difference([2,3,2,'z']) == Set(1,"a")
            True
            >>> Set(1,3,3,3).difference(Set(1,3)) == Set()
            True
            >>> Set().difference(Set()) == Set()
            True
            >>> Set(1,3) - [2,3] == Set(1)
            True
        """
        assert isCollection(anyCollection), \
            'Any collection expected, but found %s' % anyCollection
        fresh = set(self.theSet)
        # No need for take special care with Counter as we remove duplicates
        fresh = fresh - set(anyCollection)
        return Set.new(fresh)
    def __sub__(self,anyCollection):
        return self.difference(anyCollection)
    def symmetricDifference(self,anyCollection):
        """
        Return the elements that are in either set but not in both.
        In fact this method accepts any collection, but it is first converted
        to a set.
        :param anyCollection: A collection to make the difference with.
        :type anyCollection: collection
        :return: The symmetric difference.
        :rtype: Set
        Examples:
            >>> Set(1,2).symmetricDifference(Set(3,2)) == Set(1,3)
            True
            >>> Set(Set()).symmetricDifference(Set()) == Set(Set())
            True
        """
        assert isCollection(anyCollection), \
            'Any collection expected, but found %s' % anyCollection
        fresh = set(self.theSet)
        other_set = set(anyCollection)
        fresh = (fresh | other_set) - (fresh & other_set)
        return Set.new(fresh)
    def hasDuplicates(self):
        """
        Always return False for sets.
        This method is an extension to OCL. It is defined on sets just
        for consistency; it is more useful for Bags or Sequences.
        :return: False
        :rtype: bool
        """
        return False
    def duplicates(self):
        """ Return always an empty bag for a set """
        return Bag.new()
    def selectWithCount(self, number):
        """ Return the set if 1 is selected as count. Otherwise return
        an empty set because there is no duplicated elements in a set.
        :param number:
        :type number:
        :return:
        :rtype:
        """
        if number==1:
            return self
        else:
            return Set()
    def flatten(self):
        """
        If the set is a set of collections, then return the set-union of all
        its elements.
        :return: The flattened set.
        :rtype: Set
        Examples:
            >>> Set(Set(2)).flatten() == Set(2)
            True
            >>> Set(Set(Set(2)),Set(2)).flatten() == Set(2)
            True
            >>> Set(Set(2,3),Set(4),Set(),Bag("a"),Bag(2,2)).flatten() \
                   == Set(2,3,4,"a")
            True
            #>>> Set().flatten() == Set()
            # True
            # >>> Set(2,3).flatten() == Set(2,3)
            # True
            # >>> Set(2,Set(3),Set(Set(2))).flatten() == Set(2,3)
            # True
        """
        fresh = set()
        for e in self.theSet:
            if isCollection(e):
                flat_set = set(flatten(e))
            else:
                flat_set = {e}
            fresh = fresh | flat_set
        return Set.new(fresh)
    def select(self,predicate):
        """
        Retain in the set only the elements satisfying the predicate.
        :param predicate: A predicate, that is a function returning a boolean.
        :return: The set with only the selected elements.
        :rtype Set:
        Examples:
            >>> Set(2,3,2.5,-5).select(lambda e:e>2) == Set(3,2.5)
            True
            >>> Set(Set(1,2,3,4),Set()).select(lambda e:e.size()>3) \
                    == Set(Set(1,2,3,4))
            True
        """
        return Set.new(set([e for e in self if evaluatePredicate(e,predicate)]))
    def collectNested(self,expression):
        """
        Return a bag of values resulting from the evaluation of the given expression
        on all elements of the set.
        The transformation from this set to a bag is due to the fact that
        the expression can generate duplicates.
        :param expression: A function returning any kind of value.
        :type expression: X -> Y
        :return: The bag of values produced.
        :rtype Bag[Y]:
        Examples:
            >>> Set(2,3,5,-5).collectNested(lambda e:e*e) == Bag(25,25,4,9)
            True
            >>> Set(2,3).collectNested(lambda e:Bag(e,e)) \
                    == Bag(Bag(2,2),Bag(3,3))
            True
        """
        return Bag.new(map((lambda e:evaluate(e,expression)),self.theSet))
    def sortedBy(self,expression):
        # FIXME: should return a OrderedSet
        return \
            Seq.new(sorted(self.theSet,key=(lambda e:evaluate(e,expression))))
    def asSet(self):
        return self
    def asBag(self):
        return asBag(self.theSet)
    def asSeq(self):
        return asSeq(self.theSet)
    def __str__(self):
        """
        Return a string representation of the set where elements are
        separated by ", ".
        The result is non-deterministic as there is no ordering between
        elements.
        :return: A string.
        :rtype: str
        Examples:
            >>> str(Set())
            'Set()'
            >>> str(Set(3))
            'Set(3)'
            >>> str(Set(3,2)).startswith('Set(')
            True
        """
        body = ", ".join(map(str,self.theSet))
        return "Set(%s)" % body
    def __repr__(self):
        return self.__str__()
    def __eq__(self,value):
        """
        Return true if the value given is a Set and has exactly the same
        elements.
        :param value: Any value, but succeed only for sets.
        :type value: any
        :return: True if "value" is a set with the same elements.
        :rtype: bool
        Examples:
            >>> Set() == []
            False
            >>> Set() == Set()
            True
            >>> Set(2,3,3) == Set(3,2)
            True
            >>> Set(2,"3",4) == Set(2,4,3)
            False
            >>> Set("hello") == Set("hello")
            True
            >>> Set(Set(1)) == Set(Set(1))
            True
            >>> Set(Set(1),Set(2,1)) == Set(Set(1,2),Set(1))
            True
        """
        if not isinstance(value,Set):
            return False
        return self.theSet == value.theSet
    def __ne__(self,value):
        return not self.__eq__(value)
    def __hash__(self):
        return hash(frozenset(self.theSet))
    def __iter__(self):
        """ Make Sets iterable for pythonic usage.
        :return: the iterator for this Set
        """
        return self.theSet.__iter__()
    def __contains__(self,item):
        return item in self.theSet
#------------------------------------------------------------------------------
#   OCL Bags
#------------------------------------------------------------------------------
def asBag(anyCollection):
    """
    Convert the given collection to a Bag.
    :param anyCollection: Any collection.
    :return: The collection converted to a Bag.
    :rtype: Bag
    """
    try:
        return anyCollection.asBag()
    except AttributeError:
        return Bag.new(anyCollection)
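# Quick illustrative sketch (comments only, mirroring the Bag doctests below)
# of how asBag behaves on plain Python collections:
#   asBag([1, 1, 2]) == Bag(1, 1, 2)     -> True
#   asBag(Set(1, 2)) == Bag(1, 2)        -> True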
from collections import Counter
class Bag(Collection):
    def __init__(self,*args):
        """
        Create a bag from some elements.
        Examples:
           >>> Bag(10,"a",3,10,10) == Bag(10,10,"a",3,10)
           True
           >>> Bag(2) <> Bag(2,2,2)
           True
           >>> Bag(3,3,4) == Bag(3,4,3)
           True
           >>> Bag(2,3) == Bag(3,2)
           True
           >>> Bag(Set(2,3),Set(3,2)).size()
           2
        """
        super(Bag,self).__init__()
        # args is a plain tuple, never a Counter, so list() is safe here (see listAll).
        self.theCounter = Counter(list(args))
    @classmethod
    def new(cls,anyCollection=()):
        newBag = Bag()
        if isinstance(anyCollection,Counter):
            newBag.theCounter = anyCollection.copy()
            # Remove the 0 and negative counts from the counter. This
            # trick is suggested in the Python documentation for Counter.
            newBag.theCounter += Counter()
        elif isinstance(anyCollection,Bag):
            newBag.theCounter = anyCollection.theCounter.copy()
        else:
            newBag.theCounter = Counter(listAll(anyCollection))
        return newBag
    def emptyCollection(self):
        return Bag.new()
    def size(self):
        """
        Return the total number of elements in the bag.
        :rtype: int
        Examples:
           >>> Bag(10,"a",3,3,10,10).size()
           6
           >>> Bag(2).size()
           1
           >>> Bag().size()
           0
        """
        return sum(self.theCounter.values())
    def count(self,value):
        """
        Return the number of occurrences of a given value within the bag.
        Examples:
           >>> Bag(10,"a",3,3,10,10).count(10)
           3
           >>> Bag(2).count(5)
           0
           >>> Bag().count(2)
           0
           >>> Bag(Set(1),Set(1)).count(Set(1))
           2
        """
        return self.theCounter[value]
#    def __getitem__(self,key):
#        return self.theCounter[key]
    def including(self,value):
        """
        Add a value into the bag.
        :param value: The value to be added.
        :type: any
        :return: The bag with one more occurrence of the value.
        :rtype: Bag
        Examples:
            >>> Bag(10,10,2,10).including(10) == Bag(10,10,10,10,2)
            True
            >>> Bag(10,10,2,10).including("a") == Bag(10,10,10,2,'a')
            True
            >>> Bag().including(34) == Bag(34)
            True
        """
        fresh = self.theCounter.copy()
        fresh[value] += 1
        return Bag.new(fresh)
    def excluding(self,value):
        """
        Remove *all* elements corresponding to the given value from the bag.
        :param value: Any value within the bag or not.
        :type: any
        :return: The bag without any occurrence of 'value'.
        :rtype: Bag
        Examples:
            >>> Bag(10,10,2,10).excluding(10) == Bag(2)
            True
            >>> Bag(10,10,2,10).excluding("a") == Bag(10,10,10,2)
            True
            >>> Bag().excluding(34) == Bag()
            True
        """
        fresh = self.theCounter.copy()
        del fresh[value]
        return Bag.new(fresh)
    def union(self,anyCollection):
        """
        Add to the bag all values in the collection given as a parameter.
        Examples:
           >>> Bag(10,"a",3,3,10,10).union(Bag(10,10,"b")) \
                    == Bag("b","a",3,3,10,10,10,10,10)
           True
           >>> Bag(2,4).union([2,4]) == Bag(2,2,4,4)
           True
           >>> Bag().union([1]) == Bag(1)
           True
           >>> Bag(3,3) | Set(3,3,3,2) == Bag(3,3,3,2)
           True
           >>> Bag(2,3,1) | Bag(3,3,2,4) == Bag(3,3,3,2,2,1,4)
           True
           >>> Bag(2,3,1) | Counter([3,3,2,4]) == Bag(3,3,3,2,2,1,4)
           True
        """
        assert isCollection(anyCollection), \
            'Any collection expected, but found %s' % anyCollection
        fresh = self.theCounter.copy()
        fresh.update(listAll(anyCollection))
        return Bag.new(fresh)
    def intersection(self,anyCollection):
        """
        Retain only elements that are in common with the given collection.
        Examples:
            >>> Bag(10,"a",3,3,10,10).intersection(Bag(10,10,"b")) == Bag(10,10)
            True
            >>> Bag(2,4).intersection(Bag(2,4)) == Bag(2,4)
            True
            >>> Bag() & [1] == Bag()
            True
            >>> Bag(3,3) & Set(3,3,3,2) == Bag(3)
            True
        """
        assert isCollection(anyCollection), \
            'Any collection expected, but found %s' % anyCollection
        return Bag.new(self.theCounter & Counter(list(anyCollection)))
    def __and__(self,anyCollection):
        return self.intersection(anyCollection)
    def sum(self):
        """
        Return the sum of all elements in a bag including duplicates.
        :return: The sum of all elements.
        :rtype: int
        Examples:
            >>> Bag().sum()
            0
            >>> Bag(3,3,2,3).sum()
            11
        """
        return sum([e * n for (e,n) in self.theCounter.items()])
    def flatten(self):
        """
        If the bag is a bag of collections, then return the bag-union of all
        its elements.
        :return: The flattened bag.
        :rtype: Bag
        Examples:
            >>> Bag(Bag(2),Bag(3,3)).flatten() == Bag(2,3,3)
            True
            >>> Bag(Bag(),Bag(),Bag(3,2),Set(3)).flatten()  == Bag(3,2,3)
            True
        """
        counter = Counter()
        for (e,n) in self.theCounter.items():
            if isCollection(e):
                coll = e.flatten()
            else:
                coll = [e]
            for x in coll:
                counter[x] += n
        self.theCounter = counter
        return self
    def select(self,predicate):
        """
        Retain in the bag only the elements that satisfy the predicate.
        :param predicate: A predicate, that is a function returning a boolean.
        :return: The bag with only the selected elements.
        :rtype Bag:
        Examples:
            >>> Bag(2,3,2,3,-1,-2).select(lambda e:e>=0) == Bag(2,2,3,3)
            True
            >>> Bag().select(lambda e:True) == Bag()
            True
        """
        fresh = \
            Counter(dict([(e,n) for (e,n) in self.theCounter.items()
                          if evaluatePredicate(e,predicate)]))
        return Bag.new(fresh)
    def collectNested(self,expression):
        """
        Return a bag of values resulting from the evaluation of the given
        expression on all elements of the bag.
        It is assumed that the expression has no side effect; the
        expression is not called for each occurrence but only once for a
        given value. This is an optimisation for bags.
        :param expression: A function returning any kind of value.
        :type expression: X -> Y
        :return: The bag of values produced.
        :rtype Bag[Y]:
        Examples:
            >>> Bag(2,2,3,5,-5).collectNested(lambda e:e*e) == Bag(4,4,9,25,25)
            True
            >>> Bag(2,2).collectNested(lambda e:Bag(e,e)) \
                    == Bag(Bag(2,2),Bag(2,2))
            True
        """
        results = [(evaluate(e,expression),n)
                   for (e,n) in self.theCounter.items()]
        fresh = Counter()
        for (r,n) in results:
            fresh[r] += n
        return Bag.new(fresh)
    def hasDuplicates(self):
        """
        Return True if this bag has at least one element with more than one
        occurrence.
        This is not an OCL operation. It is provided here just for convenience.
        :return: True if there are some duplicates in the bag.
        :rtype: bool
        Examples:
            >>> Bag().hasDuplicates()
            False
            >>> Bag(2,3).hasDuplicates()
            False
            >>> Bag(2,2,1,3,3).hasDuplicates()
            True
        """
        for n in self.theCounter.values():
            if n > 1:
                return True
        return False
    def duplicates(self):
        """
            >>> Bag().duplicates() == Bag()
            True
            >>> Bag(2,3).duplicates() == Bag()
            True
            >>> Bag(2,2,1,3,3).duplicates()  == Bag(2,2,3,3)
            True
            """
        new_counter = \
            Counter(dict([(e,n) for (e,n) in self.theCounter.items() if n>=2]))
        return Bag.new(new_counter)
    def selectWithCount(self, number):
        """ Select in the bag only the elements that have exactly the
        exact number of element specified.
        :param number:
        :type number:
        :return:
        :rtype:
            >>> Bag().selectWithCount(2) == Bag()
            True
            >>> Bag(2,3).selectWithCount(2) == Bag()
            True
            >>> Bag(2,2,1,3,3).selectWithCount(2)  == Bag(2,2,3,3)
            True
            >>> Bag(2,2,9,3,3).selectWithCount(1)  == Bag(9)
            True
            """
        new_counter = \
            Counter(
                dict([(e, n) for (e, n) in self.theCounter.items()
                      if n == number]))
        return Bag.new(new_counter)
    def sortedBy(self,expression):
        r = []
        s = sorted(self.theCounter.keys(),key=lambda e:evaluate(e,expression))
        for key in s:
            r += [key] * self.theCounter[key]
        # FIXME: Should be an ordered set
        return Seq.new(r)
    def asSet(self):
        return Set.new(self.theCounter.keys())
    def asBag(self):
        return self
    def asSeq(self):
        # A list with duplicates is wanted, so use elements().
        return Seq.new(list(self.theCounter.elements()))
    def __str__(self):
        def show_element((value,count)):
            return str(value)+('' if count==1 else '*'+str(count))
        body = ", ".join(map(
            show_element,
            self.theCounter.items()))
        return 'Bag(%s)' % str(body)
    def __repr__(self):
        return self.__str__()
    def __eq__(self,value):
        """
        Return True only if the value is a Bag with the same elements and
        number of occurrences.
        :param value: Any value.
        :type value: any
        :return: True if the value is equals to this bag.
        :rtype: bool
        Examples:
            >>> Bag(1,2,2) == Bag(2,2,1)
            True
            >>> Bag() == Bag()
            True
            >>> Bag(Bag(2,2)) == Bag(2,2)
            False
            >>> Bag(Set(2))==Bag(Set(2))
            True
        """
        if not isinstance(value,Bag):
            return False
        return self.theCounter == value.theCounter
    def __ne__(self,value):
        return not self.__eq__(value)
    def __hash__(self):
        return hash(frozenset(self.theCounter.items()))
    def __iter__(self):
        """ Make Bags iterable for pythonic usage.
        :return: the iterator for this Bag
        :rtype: iterator
        Examples:
            >>> list(Bag())
            []
            >>> sorted(list(Bag(1,1,"a","b",1)))
            [1, 1, 1, 'a', 'b']
        """
        return self.theCounter.elements().__iter__()
    def __contains__(self,value):
        return self.theCounter[value] > 0
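# Note (illustrative sketch, follows from the code above): unlike Set.flatten(),
# Bag.flatten() rebuilds the receiver in place and returns self, e.g.
#   b = Bag(Bag(2), Bag(3, 3))
#   b.flatten() is b          # True; b now equals Bag(2, 3, 3)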
#------------------------------------------------------------------------------
#   OCL Sequences
#------------------------------------------------------------------------------
def asSeq(anyCollection):
    """
    Convert the given collection to a Seq
    :param anyCollection: Any collection.
    :return: The collection converted to a Seq.
    :rtype: Seq
    """
    try:
        return anyCollection.asSeq()
    except AttributeError:
        return Seq.new(anyCollection)
class Seq(Collection):
    def __init__(self,*args):
        """
        Create a Seq from some elements.
        Examples:
           >>> Seq(10,"a",3,10,10) == Seq(10,10,"a",3,10)
           False
           >>> Seq(2) <> Seq(2,2,2)
           True
           >>> Seq(3,3,4) == Seq(3,4,3)
           False
           >>> Seq() == Seq()
           True
           >>> Seq() == Set()
           False
           >>> Seq(Seq(1,2)) == Seq(Seq(1),Seq(2))
           False
        """
        super(Seq,self).__init__()
        # no worry with args being a Counter
        self.theList = list(args)
    @classmethod
    def new(cls,anyCollection=()):
        newSeq = Seq()
        newSeq.theList = listAll(anyCollection)
        return newSeq
    def emptyCollection(self):
        return Seq.new()
    def size(self):
        return len(self.theList)
    def isEmpty(self):
        return not self.theList
    def count(self,element):
        return self.theList.count(element)
    def includes(self,element):
        return element in self.theList
    def including(self,value):
        self.theList.append(value)
        return self
    def excluding(self,value):
        """
        Remove all occurrences of the value from the sequence (if present).
        :param value: The value to remove.
        :type value: any
        :return: A sequence without any occurrence of the value.
        :rtype: Seq
        Examples:
            >>> Seq(1,3,"a").excluding("3") == Seq(1,3,"a")
            True
            >>> Seq(1,3,3,2,3).excluding(3) == Seq(1,2)
            True
            >>> Seq().excluding(23) == Seq()
            True
        """
        return Seq.new([e for e in self.theList if e != value])
    def select(self,predicate):
        return Seq.new([e for e in self.theList
                        if evaluatePredicate(e,predicate)])
    def hasDuplicates(self):
        """
        Indicate whether there are duplicated elements in the sequence.
        This method is an extension to OCL.
        :return: True if at least one element occurs more than once.
        :rtype: bool
        """
        return Bag.new(self).hasDuplicates()
    def duplicates(self):
        return Bag.new(self).duplicates()
    def selectWithCount(self,number):
        """
        Select only the elements that have exactly the number of occurrences
        specified, and return these elements in their original order.
        :param number: The exact number of occurrences to select.
        :type number: int
        :return: The sequence restricted to elements occurring 'number' times.
        :rtype: Seq
            >>> Seq(1,2,2,1,1,5,3,2,5).selectWithCount(1) == Seq(3)
            True
            >>> Seq(1,2,2,1,1,5,3,2,5).selectWithCount(3) == Seq(1,2,2,1,1,2)
            True
            >>> Seq(1,2,2,1,1,5,3,2,5).selectWithCount(2) == Seq(5,5)
            True
            >>> Seq().selectWithCount(3) == Seq()
            True
            >>> Seq(2).selectWithCount(0) == Seq()
            True
        """
        keep = Bag.new(self).selectWithCount(number)
        return Seq.new([e for e in self.theList if e in keep ])
    def flatten(self):
        r = []
        for e in self.theList:
            if isCollection(e):
                flat_list = listAll(flatten(e))
            else:
                flat_list = [e]
            r = r + flat_list
        self.theList = r
        return self
    def collectNested(self,expression):
        return Seq.new(map((lambda e:evaluate(e,expression)),self.theList))
    def sortedBy(self,expression):
        return \
            Seq.new(sorted(self.theList,key=(lambda e:evaluate(e,expression))))
    def union(self,anyCollection):
        assert isCollection(anyCollection), \
            'Any collection expected, but found %s' % anyCollection
        return Seq.new(self.theList + listAll(anyCollection))
    def __add__(self,anyCollection):
        return self.union(anyCollection)
    def append(self,value):
        fresh = list(self.theList)
        fresh.append(value)
        return Seq.new(fresh)
    def prepend(self,value):
        fresh = list(self.theList)
        fresh.insert(0,value)
        return Seq.new(fresh)
    def subSequence(self,lower,upper):
        try:
            return Seq.new(self.theList[lower - 1:upper])
        except:
            msg = ".subSequence(%s,%s) failed: No such element."
            raise Invalid(msg % (lower,upper))
    def at(self,index):
        """
        Return the nth element of the sequence starting from 1.
        Note: In OCL the 1st element is at index 1 while in Python it
        is at 0. Both the OCL 'at' and the Python [] operators can be used,
        but remember that they index elements differently.
        Examples:
            >>> Seq(1,2,3,4).at(1)
            1
            >>> Seq(1,2,3,4)[0]
            1
        :param index: The index of the element to return, starting at:
         * 1 for the OCL 'at' operator.
         * 0 for the [] python operator.
        :type: int
        :return: The element at that position.
        :rtype: any
        """
        try:
            return self.theList[index - 1]
        except:
            raise Invalid(".at(%s) failed: No such element." % index)
    def __getitem__(self,item):
        return self.theList[item]
    def asSet(self):
        return Set.new(self.theList)
    def asBag(self):
        return Bag.new(self.theList)
    def asSeq(self):
        return self
    def first(self):
        try:
            return self.theList[0]
        except:
            raise Invalid(".first() failed: No such element.")
    def last(self):
        try:
            return self.theList[-1]
        except:
            raise Invalid(".last() failed: No such element.")
    def __str__(self):
        body = ", ".join(map(str,self.theList))
        return 'Seq(%s)' % body
    def __repr__(self):
        return self.__str__()
    def __eq__(self,value):
        if not isinstance(value,Seq):
            return False
        return self.theList == value.theList
    def __hash__(self):
        return hash(tuple(self.theList))
    def __contains__(self,item):
        return item in self.theList
    def __iter__(self):
        return self.theList.__iter__()
#==============================================================================
#     Conversions
#==============================================================================
import collections
class ConversionRule(object):
    def __init__(self,language,sourceType,collectionType):
        self.language = language
        self.sourceType = sourceType
        self.collectionType = collectionType
    def accept(self,value):
        return isinstance(value,self.sourceType)
    def asCollection(self,value):
        return self.collectionType.new(value)
    def emptyCollection(self):
        return self.collectionType.new()
from collections import OrderedDict
class Converter(object):
    def __init__(self):
        self.rules = OrderedDict()
        self.language_collections = OrderedDict()
        self.all_collections = []
    def registerConversionRules(self,language,conversionList):
        for (source,target) in conversionList:
            rule = ConversionRule(language,source,target)
            self.rules[source] = rule
            if language not in self.language_collections:
                self.language_collections[language] = []
            self.language_collections[language].append(source)
            self.all_collections.append(source)
    def _registerActualTypeRule(self,source,rule):
        self.registerConversionRules(
            rule.language,[(source,rule.collectionType)])
        return self.rules[source]
    def isCollection(self,value,language=None):
        if isinstance(value,basestring):
            return False
        if language == None:
            collections = self.all_collections
        else:
            collections = self.language_collections[language]
        return isinstance(value,tuple(collections))
    def findRule(self,value):
        """
        Return the conversion rule corresponding to the type of the given
        value.
        Raise an exception if no conversion is possible for that type.
        :param value: The value whose type determines the rule.
        :type value: any
        :return: The conversion rule to apply.
        :rtype: ConversionRule
        :raise: ValueError if no correspondence is possible.
        """
        valueType = type(value)
        # Fast path: the type is already registered.
        if valueType in self.rules:
            return self.rules[valueType]
        else:
            # Otherwise check whether the value is an instance of a registered type.
            for rule in self.rules.values():
                if rule.accept(value):
                    return self._registerActualTypeRule(valueType,rule)
            msg = "getConversionRule(): Can't convert a value of type %s"
            raise ValueError(msg % valueType)
    def asCollection(self,value):
        try:
            return value.asCollection()
        except AttributeError:
            return self.findRule(value).asCollection(value)
    def emptyCollection(self,value):
        try:
            return value.emptyCollection()
        except AttributeError:
            return self.findRule(value).emptyCollection()
    def listAll(self,value):
        """
        Return all the elements of the collection as a list.
        This takes into account the Counter specificity: instead of using
        list() and standard enumeration on the collection, this function
        uses the "elements()" method; otherwise occurrences would be lost.
        """
        if isinstance(value,collections.Counter):
            return list(value.elements())
        else:
            return list(value)
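# Illustrative sketch of the Counter special case handled by listAll()
# (comments only; values follow from the code above):
#   CONVERTER.listAll(Counter({'a': 2}))  ->  ['a', 'a']   (occurrences kept)
#   list(Counter({'a': 2}))               ->  ['a']        (occurrences lost)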
CONVERTER = Converter()
pythonConversionRules = [   # Order is very important
    (set,Set),
    (frozenset,Set),
    (collections.Counter,Bag),
    (list,Seq),
    (tuple,Seq),
    (collections.deque,Seq),
    (collections.Iterable,Seq),
]
CONVERTER.registerConversionRules('python',pythonConversionRules)
oclConversionRules = [
    (Set,Set),
    (Bag,Bag),
    (Seq,Seq),
]
CONVERTER.registerConversionRules('ocl',oclConversionRules)
def asCollection(anyCollection):
    """
    Convert any collection into the proper (OCL) collection.
    :param anyCollection: A python, java or ocl collection.
    :return: The OCL collection
    :rtype: Collection
    Examples:
        >>> asCollection({2,3}) == Set(3,2)
        True
        >>> asCollection(frozenset({1,5,1})) == Set(1,5)
        True
        >>> asCollection(Counter([1,1,3,1])) == Bag(1,1,1,3)
        True
        >>> asCollection(Counter({'hello':2,-1:0})) == Bag('hello','hello')
        True
        >>> asCollection([1,2,3,4]) == Seq(1,2,3,4)
        True
        >>> asCollection((1,2,3,4)) == Seq(1,2,3,4)
        True
        >>> asCollection(deque([1,2,3,4])) == Seq(1,2,3,4)
        True
    """
    return CONVERTER.asCollection(anyCollection)
def emptyCollection(anyCollection):
    return CONVERTER.emptyCollection(anyCollection)
def listAll(collection):
    return CONVERTER.listAll(collection)
def isCollection(value,language=None):
    """
    :param value: The value to test.
    :param language: Optionally restrict the check to one language's
        collection types.
    :return: True if the value is recognised as a collection.
        >>> isCollection((2,3))
        True
        >>> isCollection([])
        True
        >>> isCollection(12)
        False
        >>> isCollection(Counter())
        True
        >>> isCollection("text")
        False
    """
    return CONVERTER.isCollection(value,language=language)
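# End-to-end sketch (illustrative only, consistent with the doctests above):
#   asCollection([1, 2, 2, 3]).select(lambda e: e > 1) == Seq(2, 2, 3)   # True
#   Set(1, 2, 3).collectNested(lambda e: e % 2) == Bag(1, 0, 1)          # True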
 | 
| 
	import os
import dj_database_url
import raven
from raven.exceptions import InvalidGitRepository
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
DEBUG = False
if 'SECRET_KEY' in os.environ:
    SECRET_KEY = os.environ['SECRET_KEY']
if 'ALLOWED_HOSTS' in os.environ:
    ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(',')
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'django.contrib.sites',
    'rest_framework',
    'timetracker.accounts',
    'timetracker.activities',
    'timetracker.api',
    'timetracker.projects',
    'timetracker.sheets.apps.SheetsConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django_referrer_policy.middleware.ReferrerPolicyMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sites.middleware.CurrentSiteMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.RemoteUserMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'timetracker.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(PROJECT_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'timetracker.wsgi.application'
DATABASES = {'default': dj_database_url.config(conn_max_age=600)}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME':
        'django.contrib.auth.password_validation.'
        'UserAttributeSimilarityValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
LOGIN_REDIRECT_URL = 'home'
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = os.environ.get('STATIC_DIR', os.path.join(BASE_DIR, 'static'))
STATIC_URL = os.environ.get('STATIC_URL', '/static/')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static_compiled'),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_ROOT = os.environ.get('MEDIA_DIR', os.path.join(BASE_DIR, 'media'))
MEDIA_URL = os.environ.get('MEDIA_URL', '/media/')
if os.environ.get('SECURE_SSL_REDIRECT', 'true').strip().lower() == 'true':
    SECURE_SSL_REDIRECT = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
if 'SECURE_HSTS_SECONDS' in os.environ:
    SECURE_HSTS_SECONDS = int(os.environ['SECURE_HSTS_SECONDS'])
if os.environ.get('SECURE_BROWSER_XSS_FILTER',
                  'true').lower().strip() == 'true':
    SECURE_BROWSER_XSS_FILTER = True
if os.environ.get('SECURE_CONTENT_TYPE_NOSNIFF',
                  'true').lower().strip() == 'true':
    SECURE_CONTENT_TYPE_NOSNIFF = True
REFERRER_POLICY = os.environ.get('SECURE_REFERRER_POLICY',
                                 'strict-origin').strip()
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'sentry': {
            'level': 'ERROR',
            'class':
            'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
    },
    'formatters': {
        'verbose': {
            'format':
            '[%(asctime)s][%(process)d][%(levelname)s][%(name)s] %(message)s'
        }
    },
    'loggers': {
        'timetracker': {
            'handlers': ['console', 'sentry'],
            'level': 'INFO',
            'propagate': False,
        },
        'django.request': {
            'handlers': ['console', 'sentry'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.security': {
            'handlers': ['console', 'sentry'],
            'level': 'WARNING',
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'handlers': ['console'],
            'propagate': False,
        },
    },
}
if 'AWS_STORAGE_BUCKET_NAME' in os.environ:
    INSTALLED_APPS.append('storages')
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
    AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
    AWS_DEFAULT_ACL = None
    AWS_S3_FILE_OVERWRITE = False
    AWS_QUERYSTRING_AUTH = True
    AWS_QUERYSTRING_EXPIRE = 120
    AWS_S3_URL_PROTOCOL = os.environ.get('AWS_S3_URL_PROTOCOL', 'https:')
    AWS_S3_SIGNATURE_VERSION = os.environ.get('AWS_S3_SIGNATURE_VERSION',
                                              's3v4')
    AWS_S3_REGION_NAME = os.environ.get('AWS_S3_REGION_NAME')
if 'CELERY_BROKER_URL' in os.environ:
    CELERY_BROKER_URL = os.environ['CELERY_BROKER_URL']
if 'EMAIL_HOST' in os.environ:
    EMAIL_HOST = os.environ['EMAIL_HOST']
if 'EMAIL_PORT' in os.environ:
    try:
        EMAIL_PORT = int(os.environ['EMAIL_PORT'])
    except ValueError:
        pass
if 'EMAIL_HOST_USER' in os.environ:
    EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
if 'EMAIL_HOST_PASSWORD' in os.environ:
    EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
if os.environ.get('EMAIL_USE_TLS', 'false').lower().strip() == 'true':
    EMAIL_USE_TLS = True
if os.environ.get('EMAIL_USE_SSL', 'false').lower().strip() == 'true':
    EMAIL_USE_SSL = True
if 'EMAIL_SUBJECT_PREFIX' in os.environ:
    EMAIL_SUBJECT_PREFIX = os.environ['EMAIL_SUBJECT_PREFIX']
if 'SERVER_EMAIL' in os.environ:
    SERVER_EMAIL = DEFAULT_FROM_EMAIL = os.environ['SERVER_EMAIL']
if 'SENTRY_DSN' in os.environ:
    INSTALLED_APPS.append('raven.contrib.django.raven_compat')
    RAVEN_CONFIG = {
        'dsn': os.environ['SENTRY_DSN'],
        'tags': {},
    }
    RAVEN_CONFIG['tags']['lang'] = 'python'
    try:
        RAVEN_CONFIG['release'] = raven.fetch_git_sha(BASE_DIR)
    except InvalidGitRepository:
        pass
AUTH_USER_MODEL = 'accounts.User'
SITE_ID = 1
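# Illustrative sketch of the environment this settings module reads at runtime
# (placeholder values, not real configuration):
#   SECRET_KEY=change-me
#   ALLOWED_HOSTS=example.com,www.example.com
#   DATABASE_URL=postgres://user:pass@localhost:5432/timetracker   # consumed by dj_database_url
#   STATIC_DIR=/srv/static  MEDIA_DIR=/srv/media
#   EMAIL_HOST=smtp.example.com  EMAIL_PORT=587  EMAIL_USE_TLS=true
#   SENTRY_DSN=https://<key>@sentry.example.com/<project>
#   AWS_STORAGE_BUCKET_NAME=my-bucket  CELERY_BROKER_URL=redis://localhost:6379/0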
 | 
| 
	'''
Given an array of integers, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
'''
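# Worked example of the hash-map approach used below (illustrative):
#   nums = [2, 7, 11, 15], target = 9
#   `pool` maps each value to its list of indices; scanning, 9 - 2 = 7 is
#   found in `pool` at index 1, so the 1-based answer is (1, 2).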
class Solution:
    # @param {integer[]} nums
    # @param {integer} target
    # @return {integer[]}
    
    def twoSum(self, nums, target):
        '''
        # naive approach:  Time Limit Exceeded
        for i in range(len(nums)):
            for j in range(i, len(nums)):
                if i != j and nums[i] + nums[j] == target:
                    if nums[i] < nums[j]:
                        return i, j
                    else:
                        return j, i
        '''
        pool = {}
        for i in range(len(nums)):
            if nums[i] in pool:
                pool[nums[i]].append(i)
            else:
                pool[nums[i]] = [i]
        
        for i in range(len(nums)):
            if target - nums[i] in pool:
                for j in pool[target - nums[i]]:
                    if i < j:
                        return i+1, j+1
                    elif j < i:
                        return j+1, i+1
        return -1 | 
| 
	import sys
from pathlib import Path
import warnings
from io import StringIO
import mypythontools
mypythontools.tests.setup_tests()
import mylogging
from help_file import info_outside, warn_outside, traceback_outside, warn_to_be_filtered
from conftest import logs_stream, setup_tests
setup_tests()
def display_logs(output: str):
    """If want to display check of logs (not tested).
    Also log images from readme and example log are generated here.
    Args:
        output (str, optional): "console" or "example.log". Defaults to "console".
    """
    if output == "console":
        mylogging.config.OUTPUT = "console"
    if output == "example":
        Path("example.log").unlink
        mylogging.config.OUTPUT = "example.log"
    mylogging.warn("I am interesting warning.")
    mylogging.print("No details about me.")
    try:
        print(10 / 0)
    except Exception:
        mylogging.traceback("Maybe try to use something different than 0")
    mylogging.fatal("This is fatal", caption="You can use captions")
def get_stdout_and_stderr(func, args=[], kwargs={}):
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    my_stdout = StringIO()
    my_stderr = StringIO()
    sys.stdout = my_stdout
    sys.stderr = my_stderr
    func(*args, **kwargs)
    output = my_stdout.getvalue() + my_stderr.getvalue() + logs_stream.getvalue()
    logs_stream.truncate(0)
    my_stdout.close()
    my_stderr.close()
    sys.stdout = old_stdout
    sys.stderr = old_stderr
    return output
def test_return_str():
    try:
        raise Exception(mylogging.return_str("asdas", caption="User"))
    except Exception:
        # TODO test output
        pass
def test_logs():
    mylogging.config.LEVEL = "DEBUG"
    mylogging.config.FILTER = "always"
    mylogging.config.OUTPUT = "tests/delete.log"
    def check_log():
        with open("tests/delete.log", "r") as log:
            log_content = log.read()
        # Clear content before next run
        # To generate example log comment it out
        open("tests/delete.log", "w").close()
        if log_content:
            return True
        else:
            return False
    mylogging.info(
        "Hessian matrix copmputation failed for example", caption="RuntimeError on model x",
    )
    # Info not created
    assert check_log()
    mylogging.warn(
        "Hessian matrix copmputation failed for example", caption="RuntimeError on model x",
    )
    mylogging.warn("Second")
    # Warning not created
    assert check_log()
    try:
        print(10 / 0)
    except Exception:
        mylogging.traceback("Maybe try to use something different than 0")
    # Traceback not created
    assert check_log()
    for i in [info_outside, warn_outside, traceback_outside]:
        i("Message")
        # Outside function not working
        assert check_log()
    for handler in mylogging.my_logger.logger.handlers:
        handler.close()
    Path("tests/delete.log").unlink()
def test_warnings_filter():
    ################
    ### Debug = 0 - show not
    ################
    mylogging.config.FILTER = "ignore"
    # Debug 0. Printed, but should not.
    assert not get_stdout_and_stderr(mylogging.warn, ["Asdasd"])
    try:
        print(10 / 0)
    except Exception:
        # Debug = 0 - traceback. Printed, but should not.
        assert not get_stdout_and_stderr(mylogging.traceback, ["Maybe try to use something different than 0"])
    ################
    ### Debug = 1 - show once
    ################
    mylogging.config.FILTER = "once"
    # Debug 1. Not printed, but should.
    assert get_stdout_and_stderr(mylogging.info, ["Hello unique"])
    # Debug 1. Printed, but should not.
    assert not get_stdout_and_stderr(mylogging.info, ["Hello unique"])
    ################
    ### Debug = 2 - show always
    ################
    mylogging.config.FILTER = "always"
    # Debug 2. Not printed, but should.
    assert get_stdout_and_stderr(mylogging.warn, ["Asdasd"])
    # Debug 2. Not printed, but should.
    assert get_stdout_and_stderr(mylogging.warn, ["Asdasd"])
    # Test outer file
    mylogging.config.FILTER = "once"
    # Outside info not working
    assert get_stdout_and_stderr(info_outside, ["Info outside"])
def warn_mode():
    mylogging.config._console_log_or_warn = "warn"
    with warnings.catch_warnings(record=True) as w5:
        warn_outside("Warn outside")
        traceback_outside("Traceback outside")
        # Warn from other file not working
        assert len(w5) == 2
def test_blacklist():
    mylogging.config.BLACKLIST = ["Test blacklist one"]
    assert not get_stdout_and_stderr(mylogging.warn, ["Test blacklist one"])
    assert get_stdout_and_stderr(mylogging.warn, ["Test not blacklisted"])
def test_outer_filters():
    errors = []
    mylogging.config.FILTER = "always"
    warnings.filterwarnings("always")
    ignored_warnings = ["mean of empty slice"]
    # Sometimes only message does not work, then ignore it with class and warning type
    ignored_warnings_class_type = [
        ("TestError", FutureWarning),
    ]
    with warnings.catch_warnings(record=True) as fo:
        mylogging.outer_warnings_filter(ignored_warnings, ignored_warnings_class_type)
        warn_to_be_filtered()
    if fo:
        errors.append("Doesn't filter.")
    with warnings.catch_warnings(record=True) as fo2:
        warn_to_be_filtered()
    if not fo2:
        errors.append("Filter but should not.")
    mylogging.outer_warnings_filter(ignored_warnings, ignored_warnings_class_type)
    with warnings.catch_warnings(record=True) as w6:
        warn_to_be_filtered()
        if w6:
            errors.append("Doesn't filter.")
    mylogging.reset_outer_warnings_filter()
    with warnings.catch_warnings(record=True) as w7:
        warn_to_be_filtered()
    if not w7:
        errors.append("Doesn't filter.")
    assert not errors
def test_warnings_levels():
    errors = []
    # Logging to file is already tested above, because level filtering occurs before the split between console and file output
    mylogging.config.FILTER = "always"
    all_levels_print_functions = [
        mylogging.debug,
        mylogging.info,
    ]
    all_levels_warnings_functions = [
        mylogging.warn,
        mylogging.error,
        mylogging.critical,
    ]
    message_number_should_pass = 1
    for i in ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]:
        mylogging.config.LEVEL = i
        with warnings.catch_warnings(record=True) as wl:
            for i in all_levels_warnings_functions:
                i("Message")
        for j in all_levels_print_functions:
            if get_stdout_and_stderr(j, ["Message"]):
                wl.append("Message")
    if not len(wl) != message_number_should_pass:
        errors.append("DEBUG level not correct.")
        message_number_should_pass = message_number_should_pass + 1
    with warnings.catch_warnings(record=True) as wl2:
        mylogging.fatal("This is fatal.")
    if not len(wl2) != message_number_should_pass:
        errors.append("Fatal not working")
# def test_settings():
#     # TODO
#     # Test color and debug
#     pass
def test_readme_configs():
    import mylogging
    mylogging.config.COLORIZE = False  # Turn off colorization on all functions to get rid of weird symbols
    mylogging.info("Not color")
def test_STREAM():
    stream = StringIO()
    mylogging.config.STREAM = stream
    mylogging.warn("Another warning")
    assert stream.getvalue()
def test_redirect_TO_LIST_and_log():
    warnings.filterwarnings("always")
    logs_list = []
    warnings_list = []
    redirect = mylogging.redirect_logs_and_warnings_to_lists(logs_list, warnings_list)
    with warnings.catch_warnings(record=True):  # as warnings_not:
        warnings.warn("Warnings warning.")
        # Not simple to verify
        # assert not warnings_not
    assert not get_stdout_and_stderr(mylogging.warn, ["A warning."])
    redirect.close_redirect()
    with warnings.catch_warnings(record=True) as warnings_captured:
        assert get_stdout_and_stderr(mylogging.my_logger.log_and_warn_from_lists, [logs_list, warnings_list])
        # assert warnings_captured
    assert get_stdout_and_stderr(mylogging.warn, ["Should be printed again."])
    with warnings.catch_warnings(record=True) as warnings_again:
        warnings.warn("Warnings warning.")
    assert warnings_again
if __name__ == "__main__":
    # test_return_str()
    # test_logs()
    # test_warnings_filter()
    # test_outer_filters()
    # test_warnings_levels()
    mylogging.config.COLORIZE = True
    mylogging.config.LEVEL = "DEBUG"
    mylogging.config.FILTER = "always"
    display_logs(output="console")
    display_logs(output="example")
    pass
 | 
| 
	#!/usr/bin/python
# -*- coding: utf-8 -*-
#                                                 
# Exception classes of agla
#
                                                 
#
# This file is part of agla
#
#
# Copyright (c) 2019 Holger Böttcher  [email protected]
#
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License
# You may obtain a copy of the License at
#  
#      http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#  
class AglaError(BaseException):
    """Die Ausnahme wird durch Klassen des agla-Paketes geworfen"""
    pass
 | 
| 
	import json
def test_add_project_check_with_soap(app, data_project):
    project = data_project
    old_projects = app.soap.get_project_list(app.config["webadmin"]["username"], app.config["webadmin"]["password"])
    app.session.ensure_login("webadmin")
    app.project.add(project)
    new_projects = app.soap.get_project_list(app.config["webadmin"]["username"], app.config["webadmin"]["password"])
    assert len(old_projects) + 1 == len(new_projects)
    old_projects.append(project)
    assert sorted(old_projects, key=lambda proj: proj.name.lower().replace(' ', '')) == new_projects
 | 
| 
#!/usr/bin/env python3
############################################################################################
#                                                                                          #
#       Program purpose: Counts characters at same position in a given string (lower and   #
#                        uppercase characters) as in English alphabet.                     #
#       Program Author : Happi Yvan <[email protected]>                             #
#       Creation Date  : November 5, 2019                                                  #
#                                                                                          #
############################################################################################
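# Worked example (illustrative): for the input "ABcdZ" the characters
# 'A', 'B', 'c', 'd' sit at alphabet positions 0, 1, 2, 3 and therefore match
# their string positions, while 'Z' (position 25) does not, so the count is 4.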
def obtain_user_input(input_mess: str) -> str:
    is_valid, user_data = False, ''
    while is_valid is False:
        try:
            user_data = input(input_mess)
            if len(user_data) == 0:
                raise ValueError('Oops! Data needed')
            is_valid = True
        except ValueError as ve:
            print(f'[ERROR]: {ve}')
    return user_data
def count_char_position(str1: str) -> int:
    count_chars = 0
    for i in range(len(str1)):
        if (i == ord(str1[i]) - ord('A')) or (i == ord(str1[i]) - ord('a')):
            count_chars += 1
    return count_chars
if __name__ == "__main__":
    user_input = obtain_user_input(input_mess='Enter a string data: ')
    print(f'Number of characters of the said string at same position as in English alphabet: '
          f'{count_char_position(str1=user_input)}') | 
| 
	'''
Author: Jason.Parks
Created: April 19, 2012
Module: common.perforce.__init__
Purpose: to import perforce
'''
	
	
print "common.perforce.__init__ imported"  | 
| 
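# The fragment below assumes a `Behave` base class providing `name`, `once`
# and `repeat`; a minimal hypothetical sketch (Python 2 style, matching the
# `print` statement used below) could be:
class Behave(object):
    def __init__(self, name):
        # Remember the instance name so subclasses can display it.
        self.name = name
    def once(self):
        print 'Hello, %s!' % self.name
    def repeat(self, n):
        # Call once() n times.
        for _ in range(n):
            self.once()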
	class Subclass(Behave):
    def once(self): print '(%s)' % self.name
subInstance = Subclass("Queen Bee")
subInstance.repeat(3)
 | 
| 
	# _*_ coding:utf-8 _*_
# __author__ = 'JiangKui'
# __date__ = '2020/11/23 9:51 AM'
"""This file holds the entry-point function."""
from yaml import load, Loader
def load_access_conf(conf=""):
    """
    Read the configuration file.
    :param conf: Path to the configuration file.
    :return: The parsed configuration as a dict.
    """
    with open(conf) as file:
        return load(file, Loader=Loader)
# Pass in an absolute path
config_dict = load_access_conf(conf="/Users/MRJ/PycharmProjects/qingcloud-cli-mini/access_key_qingyun.csv")
qy_access_key_id = config_dict['qy_access_key_id']
qy_secret_access_key = config_dict['qy_secret_access_key']
base_url = 'https://api.qingcloud.com/iaas/?'
signature_method = 'HmacSHA1'
zone = 'sh1a'
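# Illustrative sketch of the expected config file contents (placeholders only;
# the loader above simply reads these two keys):
#   qy_access_key_id: "YOUR_ACCESS_KEY_ID"
#   qy_secret_access_key: "YOUR_SECRET_ACCESS_KEY"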
 | 
| 
	from googletrans import Translator
import sys
translator = Translator()
translation = translator.translate(sys.argv[1], dest=sys.argv[2])
text = translation.text.encode('utf8')
sys.stdout.buffer.write(text)
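# Illustrative invocation (the script name is hypothetical):
#   python translate.py "Bonjour le monde" en
# argv[1] is the text to translate, argv[2] the destination language code.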
 | 
| 
	from util import database, toolchain, bitdiff
with database.transact() as db:
    for device_name, device in db.items():
        package, pinout = next(iter(device['pins'].items()))
        for macrocell_name, macrocell in device['macrocells'].items():
            if macrocell['pad'] not in pinout:
                print(f"Skipping {macrocell_name} on {device_name} because it is not bonded out")
                continue
            def run(**kwargs):
                return toolchain.run(
                    f"module top(output O); assign O = 1'b0; endmodule",
                    {'O': pinout[macrocell['pad']]},
                    f"{device_name}-{package}",
                    strategy=kwargs)
            f_out  = run()
            f_fast = run(output_fast='O')
            f_oc   = run(open_collector='O')
            if device_name.endswith('AS'):
                f_lp   = run(MC_power='O')
            if device_name.endswith('BE'):
                f_hyst = run(schmitt_trigger='O')
                f_pu   = run(pull_up='O')
                f_pk   = run(pin_keep='O')
            macrocell.update({
                'slow_output':
                    bitdiff.describe(1, {'off': f_fast, 'on': f_out}),
                'open_collector':
                    bitdiff.describe(1, {'off': f_out, 'on': f_oc}),
            })
            if device_name.endswith('AS'):
                macrocell.update({
                    'low_power':
                        bitdiff.describe(1, {'off': f_out, 'on': f_lp}),
                })
            if device_name.endswith('BE'):
                macrocell.update({
                    'pull_up':
                        bitdiff.describe(1, {'off': f_out, 'on': f_pu}),
                    'schmitt_trigger':
                        bitdiff.describe(1, {'off': f_out, 'on': f_hyst}),
                    'bus_keeper':
                        bitdiff.describe(1, {'off': f_pu, 'on': f_pk}), # pk implies pu
                })
 | 
| 
	from django.urls import path, re_path
from django.contrib.auth.decorators import login_required
from apps.user.views import RegisterView, ActiveView, LoginView, CenterInfoView, CenterOrderView, CenterSiteView, LogoutView
urlpatterns = [
    path('register', RegisterView.as_view(), name='register'),  # User registration
    re_path('^active/(?P<token>.*)$', ActiveView.as_view(), name='active'),  # Account activation
    path('login', LoginView.as_view(), name='login'),  # Log in
    path('logout', LogoutView.as_view(), name='logout'),  # Log out
    path('',  CenterInfoView.as_view(), name='user'),  # User center: info
    path('order', CenterOrderView.as_view(), name='order'),  # User center: orders
    path('address', CenterSiteView.as_view(), name='address'),  # User center: addresses
]
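# Illustrative sketch (hypothetical project-level urls.py) of how these routes
# might be mounted; the actual prefix lives in the project URLconf, not here:
#   path('user/', include(('apps.user.urls', 'user'), namespace='user')),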
 | 
| 
	# -*- coding: utf-8 -*-
###############################################################################
#
# CSVToXLS
# Converts a CSV formatted file to Base64 encoded Excel data.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CSVToXLS(Choreography):
    def __init__(self, temboo_session):
        """
        Create a new instance of the CSVToXLS Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CSVToXLS, self).__init__(temboo_session, '/Library/Utilities/DataConversions/CSVToXLS')
    def new_input_set(self):
        return CSVToXLSInputSet()
    def _make_result_set(self, result, path):
        return CSVToXLSResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return CSVToXLSChoreographyExecution(session, exec_id, path)
class CSVToXLSInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CSVToXLS
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_CSV(self, value):
        """
        Set the value of the CSV input for this Choreo. ((conditional, multiline) The CSV data you want to convert to XLS format. Required unless using the VaultFile input alias (an advanced option used when running Choreos in the Temboo Designer).)
        """
        super(CSVToXLSInputSet, self)._set_input('CSV', value)
class CSVToXLSResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CSVToXLS Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        return json.loads(str)
    def get_XLS(self):
        """
        Retrieve the value for the "XLS" output from this Choreo execution. ((string) The Base64 encoded Excel data.)
        """
        return self._output.get('XLS', None)
class CSVToXLSChoreographyExecution(ChoreographyExecution):
    def _make_result_set(self, response, path):
        return CSVToXLSResultSet(response, path)
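# A minimal usage sketch (illustrative; assumes a valid TembooSession named
# `session` is available, and the exact execution call may differ between
# Temboo SDK versions, so it is left abstract here):
#   choreo = CSVToXLS(session)
#   inputs = choreo.new_input_set()
#   inputs.set_CSV("name,qty\nwidget,3")
#   result = <execute `choreo` with `inputs` via the session>
#   xls_b64 = result.get_XLS()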
 | 
| 
	import setuptools
def _requirements():
    return ["http","urllib"]
def _test_requirements():
    return ["http","urllib"]
 
setuptools.setup(
    name="chatbotweb",
    version="0.2.4",
    author="Yoshiki Ohira",
    author_email="[email protected]",
    description="Automatic generation of web interface for user-defined chatbot",
    long_description="An interface that allows you to chat on the web using the chatbot engine of the question and answer type you created",
    long_description_content_type="text/markdown",
    url="",
    packages=setuptools.find_packages(),
    package_data={
        'chatbotweb': ['chat_server.py'],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ]
) | 
| 
	#imports 
import numpy as np
import skimage.draw as draw
import math
import matplotlib.pyplot as plt
import random
import imageio
import skvideo
skvideo.setFFmpegPath("C:/ffmpeg/bin")
import skvideo.io
class FaceCreator(object):    
    #initialize the variables 
    def __init__(self, ImageSize, FaceSize = 3, SubareaSize = 7, BlackBorder = False):
        #TODO: Work with even numbers. They currently go out of range!
        #helper function to create subshapes for the faces
        def createSubshapes(SubareaSize):
            #create an empty array for the subshapes 
            shapeArray = np.zeros((5,SubareaSize,SubareaSize),dtype=np.uint8)
            #add the circle
            if BlackBorder:
                rr,cc = draw.circle_perimeter(math.floor(SubareaSize/2.),math.floor(SubareaSize/2.),math.floor(SubareaSize/2.)-1)
            else:
                rr,cc = draw.circle_perimeter(math.floor(SubareaSize/2.),math.floor(SubareaSize/2.),math.floor(SubareaSize/2.))
            shapeArray[0,rr,cc] = 255
            #draw the cross
            if BlackBorder:
                #first line from top left to bottom right
                rr,cc = draw.line(1,1,SubareaSize-2,SubareaSize-2)
                shapeArray[1,rr,cc] = 255
                #draw the second line from top right to bottom left
                rr,cc = draw.line(SubareaSize-2,1,1,SubareaSize-2)    
            else:
                #first line from top left to bottom right
                rr,cc = draw.line(0,0,SubareaSize-1,SubareaSize-1)
                shapeArray[1,rr,cc] = 255
                #draw the second line from top right to bottom left
                rr,cc = draw.line(SubareaSize-1,0,0,SubareaSize-1)
            shapeArray[1,rr,cc] = 255
            #create the half circle open bottom
            circleOffset = math.floor(SubareaSize/3.)
            if BlackBorder:
                #create all indexes for the circle with the radius at the bottom
                rr,cc = draw.circle_perimeter(SubareaSize-circleOffset,math.floor(SubareaSize/2.),math.floor(SubareaSize/2.)-1)
            else:
                rr,cc = draw.circle_perimeter(SubareaSize-circleOffset,math.floor(SubareaSize/2.),math.floor(SubareaSize/2.))
            #get the positions of all indices that are inside
            #the subarea size
            insideImageValues = rr < SubareaSize-circleOffset
            #only select the indices that are inside the subarea size
            cc = cc[insideImageValues]
            rr = rr[insideImageValues]               
            shapeArray[2,rr,cc] = 255
            #create the half circle open top
            if BlackBorder:
                #create all indexes for the circle with the radius at the top
                rr,cc = draw.circle_perimeter(circleOffset-1,math.floor(SubareaSize/2.),math.floor(SubareaSize/2.)-1)
            else:
                rr,cc = draw.circle_perimeter(circleOffset-1,math.floor(SubareaSize/2.),math.floor(SubareaSize/2.))
            #get the positions of all indices that are inside the subarea size
            insideImageValues = rr >= circleOffset
            #only select the indices that are inside the subarea size
            cc = cc[insideImageValues]
            rr = rr[insideImageValues]
            shapeArray[3,rr,cc] = 255
            #create a line in the middle with 1 pxl space 
            if BlackBorder:
                rr,cc = draw.line(2,math.floor(SubareaSize/2.),SubareaSize-3,math.floor(SubareaSize/2.))
            else:
                rr,cc = draw.line(1,math.floor(SubareaSize/2.),SubareaSize-2,math.floor(SubareaSize/2.))
            shapeArray[4,rr,cc] = 255
            return shapeArray
        #create an empty canvas for painting the images. As we want to have some space to draw the faces
        #at the border, we add some extra space on the outside
        self._canvas        = np.zeros((ImageSize[0]+6*SubareaSize,ImageSize[1]+6*SubareaSize,3))    
        #create an empty list for the faces
        self._faces         = []
        
        #store the variables 
        self._imageSize     = ImageSize
        self._faceSize      = FaceSize
        self._subareaSize   = SubareaSize
        self._subShapes     = createSubshapes(SubareaSize)
    #creates a numpy array for the face, fills it with the desired type and returns it as record together with the position and velocity 
    def createFace(self, type, x, y, vx, vy):
        faceMap = np.zeros((self._subareaSize*3,self._subareaSize*3),dtype=np.uint8)
        
        #add the nose as it is the same in all versions 
        faceMap[self._subareaSize:2*self._subareaSize,self._subareaSize:2*self._subareaSize] = self._subShapes[4]
        #normal face 
        if type == 0:
            #add the left eye
            faceMap[:self._subareaSize,:self._subareaSize] = self._subShapes[0]
            #add the right eye
            faceMap[:self._subareaSize,2*self._subareaSize:3*self._subareaSize] = self._subShapes[0]           
            #add the mouth
            faceMap[2*self._subareaSize:3*self._subareaSize,self._subareaSize:2*self._subareaSize] = self._subShapes[3]
        #sad face
        elif type == 1:
            #add the left eye
            faceMap[:self._subareaSize,:self._subareaSize] = self._subShapes[0]
            #add the right eye
            faceMap[:self._subareaSize,2*self._subareaSize:3*self._subareaSize] = self._subShapes[0]           
            #add the mouth
            faceMap[2*self._subareaSize:3*self._subareaSize,self._subareaSize:2*self._subareaSize] = self._subShapes[2]
        #winky face 
        elif type == 2:
            #add the left eye
            faceMap[:self._subareaSize,:self._subareaSize] = self._subShapes[3]
            #add the right eye
            faceMap[:self._subareaSize,2*self._subareaSize:3*self._subareaSize] = self._subShapes[0]           
            #add the mouth
            faceMap[2*self._subareaSize:3*self._subareaSize,self._subareaSize:2*self._subareaSize] = self._subShapes[3]         
        #dead face 
        elif type == 3:
            #add the left eye
            faceMap[:self._subareaSize,:self._subareaSize] = self._subShapes[1]
            #add the right eye
            faceMap[:self._subareaSize,2*self._subareaSize:3*self._subareaSize] = self._subShapes[1]           
            #add the mouth
            faceMap[2*self._subareaSize:3*self._subareaSize,self._subareaSize:2*self._subareaSize] = self._subShapes[2]
        #random faces
        elif type == 4:
            selectionArray = np.zeros((2,self._subareaSize,self._subareaSize),dtype=np.uint8)
            selectionArray[1] = np.ones((self._subareaSize,self._subareaSize),dtype=np.uint8)
            faceMap = selectionArray[np.random.choice([0,1],size=(3,3),p=[0.8,0.2])].transpose(0,2,1,3).reshape(faceMap.shape) * \
                        self._subShapes[np.random.randint(0,len(self._subShapes),size=(3,3))].transpose(0,2,1,3).reshape(faceMap.shape)
        return {'pixels':faceMap,'x':x,'y':y,'v_x':vx,'v_y':vy}
    def fillCanvasWithRandomImages(self, FaceCount):
        #calculate the maximum speed in pixels/frame for the movement of the faces.
        #The divisor of 300 means the fastest face needs at least 300 frames (10 s @ 30 FPS) to travel the entire canvas.
        maxSpeed = self._imageSize[0]/300.
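        #Worked example (illustrative, assuming ImageSize=(720,1280) as used in main()):
        #maxSpeed = 720/300 = 2.4 px/frame, so a face at full speed needs about
        #720/2.4 = 300 frames, i.e. 10 s at 30 FPS, to cross the canvas vertically.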
        for i in range(FaceCount):
            #get a random face type
            faceType = random.randint(0,4)
            #get a random position 
            x = random.randint(0,self._imageSize[0]-6*self._subareaSize)
            y = random.randint(0,self._imageSize[1]-6*self._subareaSize)
            #get a random velocity in pixels per frame; random.random() returns a float in 0..1, so map it to -maxSpeed..maxSpeed
            vx = round(random.random() * 2*maxSpeed - maxSpeed)
            vy = round(random.random() * 2*maxSpeed - maxSpeed)
            #add the face to the list
            self._faces.append(self.createFace(faceType,x,y,vx,vy))
    def updateCanvas(self):
        #clear the canvas of the previous frame 
        canvasShape = self._canvas.shape
        self._canvas = np.zeros(canvasShape)
        #create the offset for the selection
        offset = math.floor(self._subareaSize/2.)+self._subareaSize
        sideOffset = 3*self._subareaSize
        #empty list for faces to be kept
        keepFaces = []
        #add the individual faces to the canvas
        for face in self._faces:
            #write the current face to the list 
            #should never have an error here. Just in case something goes the position will get written to console
            try:
                self._canvas[face['x']-offset+sideOffset:face['x']+offset+1+sideOffset,face['y']-offset+sideOffset:face['y']+offset+1+sideOffset][face['pixels'] != 0] = 255
            except:
                print(self._canvas.shape,face['x']-offset+sideOffset, face['x']+offset+1+sideOffset, face['y']-offset+sideOffset, face['y']+offset+1+sideOffset)
            #update the position for the next step
            face['x'] += face['v_x']
            face['y'] += face['v_y']
            #calculate the threshold at which a face counts as outside the border
            threshold = math.ceil(self._subareaSize*3/2.)
            #keep the face only if it is still inside the border; dropped faces are replaced below
            if not (face['x'] <= threshold or face['x'] >= canvasShape[0]-threshold-sideOffset or face['y'] <= threshold or face['y'] >= canvasShape[1]-threshold-sideOffset):
                keepFaces.append(face)
            
        facesRemoved = len(self._faces)-len(keepFaces)   
        self._faces = keepFaces
        if facesRemoved > 0:
            self.fillCanvasWithRandomImages(facesRemoved)
        
    def saveCanvasAsFrame(self, Path, Invert):
        self.updateCanvas()
        if Invert:
            imageio.imwrite(Path, 255-self._canvas[3*self._subareaSize:-3*self._subareaSize,3*self._subareaSize:-3*self._subareaSize].astype(np.uint8))
        else:
            imageio.imwrite(Path, self._canvas[3*self._subareaSize:-3*self._subareaSize,3*self._subareaSize:-3*self._subareaSize].astype(np.uint8))
    def createVideo(self, frameCount, Path, NoiseSteps, Inverse):
        #sideoffset of the canvas
        sideOffset = 3*self._subareaSize
        #allocate memory for the videos
        video = np.zeros((frameCount,self._imageSize[0],self._imageSize[1],3),dtype=np.uint8)
        #just for easier access of the canvas shape
        canvasShape = self._canvas.shape
        for i in range(frameCount):
            #draw a new canvas and update the positions
            self.updateCanvas()
            #take the current canvas and store it into the frame, ignoring the side offset
            video[i] = self._canvas[sideOffset:canvasShape[0]-sideOffset,sideOffset:canvasShape[1]-sideOffset]
            #create noise map and values and apply it to the video frame 
            for step in NoiseSteps:
                if step["startframe"] <= i and step["stopframe"] > i:
                    print(i,step["startframe"],step["stopframe"],step["noisePercentage"])
                    #create a boolean map deciding whether each pixel gets noise or not
                    noisePattern    = np.random.choice([0,1], (video[i].shape[0],video[i].shape[1]), p=[1-step["noisePercentage"],step["noisePercentage"]]).astype(bool)
                    #create a map with a magnitude of noise for each pixel
                    noiseValue      = np.random.randint(0,255,size=(video[i].shape[0],video[i].shape[1]))
                    #as the video frame has 3 channels (r,g,b) we need to stack the noise for each channel and add it onto the final picture.
                    #if noise + video > 255 it overflows and thus reduces the brightness in that area.
                    video[i]        += np.stack((noiseValue*noisePattern,noiseValue*noisePattern,noiseValue*noisePattern),axis=2).astype(np.uint8)
           
        #if wanted inverse the video: White Background with black image
        if Inverse:
            video = 255-video
        #export the video
        skvideo.io.vwrite(Path+"\\Faces.mp4",video)
    def exportSubshapesForTraining(self, Count, MixedPictures, MoveRadius, NoisePercentage, Invert, Path):
        
        if not MixedPictures:
            if MoveRadius > 0:
                #calculate the amount of different classes, +2 because we need an empty and an invalid class as well
                countPerShape       = int(Count/(self._subShapes.shape[0]+2))
                differentShapes     = self._subShapes.shape[0]+2
            else:
                #one extra class is needed for the background
                countPerShape       = int(Count/(self._subShapes.shape[0]+1))
                differentShapes     = self._subShapes.shape[0]+1
        else:
            #mixed pictures add one more class on top of the background and invalid classes
            countPerShape       = int(Count/(self._subShapes.shape[0]+3))
            differentShapes     = self._subShapes.shape[0]+3
        #empty array for the subshapes
        subshapesArray = np.zeros((differentShapes*countPerShape,self._subareaSize,self._subareaSize),dtype=np.uint8)
        #create the label list
        labels = np.zeros(differentShapes*countPerShape,dtype=np.uint8)
        #go through all the shapes 
        for index, shape in enumerate(self._subShapes):
            #set the labels to the current subshape, plus one as we want the background to be class 0
            labels[index*countPerShape:(index+1)*countPerShape] = index+1
            for i in range(countPerShape):
                #if we want to have moved pictures
                if MoveRadius > 0:
                    #get random offsets; we need values from 0 to 2*MoveRadius as we cannot index with a negative value
                    x_offset = random.randint(-MoveRadius,MoveRadius) + MoveRadius
                    y_offset = random.randint(-MoveRadius,MoveRadius) + MoveRadius
                    #empty selection area
                    selectionArea = np.zeros((self._subareaSize+2*MoveRadius,self._subareaSize+2*MoveRadius),dtype=np.uint8)
                    #add the shape in the middle 
                    selectionArea[MoveRadius:self._subareaSize+MoveRadius,MoveRadius:self._subareaSize+MoveRadius] = shape
                    #add the subshape to the list
                    subshapesArray[index*countPerShape+i] = selectionArea[x_offset:x_offset+self._subareaSize,y_offset:y_offset+self._subareaSize].astype(np.uint8)
                else: 
                    #if we do not want to move it, just add the shape
                    subshapesArray[index*countPerShape+i] = shape.astype(np.uint8)
        #add the moved pictures that are outside the allowed move radius
        if MoveRadius > 0:
            for i in range(countPerShape*2):
                #create an offset but this time we need to go outside the subarea size
                x_offset = random.randint(MoveRadius+1,math.ceil(self._subareaSize/2)) 
                y_offset = random.randint(MoveRadius+1,math.ceil(self._subareaSize/2))
                #we need to have both positive and negative offsets so we multiply by either -1 or 1
                direction  = random.choice([0,1])
                x_offset *= random.choice([1,-1]) * direction 
                y_offset *= random.choice([1,-1]) * (1-direction)
                #as we need a positive number to index the array we need to add the left offset
                x_offset += self._subareaSize + (1-direction) * random.randint(-MoveRadius,MoveRadius)
                y_offset += self._subareaSize + direction * random.randint(-MoveRadius,MoveRadius)
                #empty selection area, this time bigger as we want to create invalid pictures and thus have to move it further
                selectionArea = np.zeros((3*self._subareaSize,3*self._subareaSize),dtype=np.uint8)
                #add a random shape in the middle, -1 as len(self._subShapes) is always one higher than the highest index as python is 0 indexed
                selectionArea[self._subareaSize:2*self._subareaSize,self._subareaSize:2*self._subareaSize] = self._subShapes[random.randint(0,len(self._subShapes)-1)]
                #we do not need to add +1, same reason as above
                subshapesArray[len(self._subShapes)*countPerShape+i] = selectionArea[x_offset:x_offset+self._subareaSize,y_offset:y_offset+self._subareaSize].astype(np.uint8)
            #add the class for the moved pictures
            labels[len(self._subShapes)*countPerShape:(len(self._subShapes)+2)*countPerShape] =  len(self._subShapes)+1
        #add the pictures that have multiple shapes in the picture
        if MixedPictures:
            for i in range(countPerShape):
                #create an offset, only until half the size since we want to have parts of multiple shapes inside 
                x_offset = random.randint(MoveRadius+1,math.ceil(self._subareaSize/2.)) 
                y_offset = random.randint(MoveRadius+1,math.ceil(self._subareaSize/2.))
                #we need to have both positive and negative offsets so we multiply by either -1 or 1
                x_offset *= random.choice([1,-1])
                y_offset *= random.choice([1,-1])
                #as we need a positive number to index the array we need to add the left offset
                x_offset += self._subareaSize
                y_offset += self._subareaSize             
                #create a grid of 3x3 random values to select the shape in the field 
                shapeSelection  = np.random.randint(0, high = len(self._subShapes),size = (3,3), dtype=np.uint8)
                #create the selection grid from the subshapes and the selection. If we just index the subshapes with the 3x3 grid we get an array of dimension
                #(3,3,7,7). In order to get a (21,21) shape we have to reshape it. Before reshaping the transpose is required otherwise reshape creates the wrong
                #grid
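                #Shape walk-through (assuming the default SubareaSize of 7):
                #  self._subShapes[shapeSelection]  -> (3, 3, 7, 7)
                #  .transpose(0, 2, 1, 3)           -> (3, 7, 3, 7)
                #  .reshape(21, 21)                 -> each 7x7 tile lands in its own cell of the 3x3 grid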
                selectionArea   = self._subShapes[shapeSelection].transpose(0,2,1,3).reshape(3*self._subareaSize,3*self._subareaSize)
                #add the selection to the list
                subshapesArray[(len(self._subShapes)+1)*countPerShape+i] = selectionArea[x_offset:x_offset+self._subareaSize,y_offset:y_offset+self._subareaSize].astype(np.uint8)
            #add the class for the mixed pictures
            labels[(len(self._subShapes)+1)*countPerShape:(len(self._subShapes)+2)*countPerShape] =  len(self._subShapes)+2
        #add noise if desired
        if NoisePercentage > 0:
            #create a boolean map deciding whether each pixel gets noise or not
            noisePattern    = np.random.choice([0,1], subshapesArray.shape, p=[1-NoisePercentage,NoisePercentage]).astype(bool)
            #create a map with a magnitude of noise for each pixel
            noiseValue      = np.random.randint(0,255,size=subshapesArray.shape)
            #the subshapes are single-channel and only used for training, so no stacking into r,g,b is needed
            subshapesArray +=  (noiseValue*noisePattern).astype(np.uint8)
        #if we want to invert the image we have to subtract it from the max value, so max -> 0 and 0 -> max
        if Invert:
            subshapesArray = 255-subshapesArray.astype(np.uint8)
        #save the images
        with open(Path + '\\' + 'images_'+str(NoisePercentage)+'.npy',"wb") as f :
            np.save(f,subshapesArray)
        #save the labels 
        with open(Path + '\\' + 'labels_'+str(NoisePercentage)+'.npy',"wb") as f :
            np.save(f,labels)
    def exportSubshapesForValidationAsImage(self, Type, OutsideShift, NoisePercentage, Invert, Path):
        #create an empty image 
        validationImg = np.zeros((self._subareaSize+2*OutsideShift,self._subareaSize+2*OutsideShift),dtype=np.uint8)
        #add the subshape to the center if it is a valid selection
        if Type > 0 and Type < len(self._subShapes):
            validationImg[OutsideShift:OutsideShift+self._subareaSize,OutsideShift:OutsideShift+self._subareaSize] = self._subShapes[Type]
            validationImg[OutsideShift+self._subareaSize:OutsideShift+2*self._subareaSize,OutsideShift:OutsideShift+self._subareaSize] = self._subShapes[random.randint(0,4)]
        #if required add noise
        if NoisePercentage > 0:
            #create a boolean map deciding whether each pixel gets noise or not
            noisePattern    = np.random.choice([0,1], validationImg.shape, p=[1-NoisePercentage,NoisePercentage]).astype(bool)
            #create a map with a magnitude of noise for each pixel
            noiseValue      = np.random.randint(0,255,size=validationImg.shape)
            #the validation image is single-channel, so no stacking into r,g,b is needed
            validationImg +=  (noiseValue*noisePattern).astype(np.uint8)
        #if we want to invert the image we have to subtract it from the max value, so max -> 0 and 0 -> max
        if Invert:
            validationImg = 255-validationImg.astype(np.uint8)
            
        imageio.imsave(Path+"\\validationImg.bmp",validationImg)    
   
    def exportSubshapesForValidation(self, Count, OutsideShift, NoisePercentage, Invert, Path):
        #calculate the amount of different classes, +1 because we need an empty (background) class as well
        countPerShape       = int(Count/(self._subShapes.shape[0]+1))
        differentShapes     = self._subShapes.shape[0]+1
        #empty array for the validation images
        validationList = np.zeros((countPerShape*differentShapes,self._subareaSize+2*OutsideShift,self._subareaSize+2*OutsideShift),dtype=np.uint8)
        for index, shape in enumerate(self._subShapes):
            for i in range(countPerShape):
                validationList[i+index*countPerShape,OutsideShift:OutsideShift+self._subareaSize,OutsideShift:OutsideShift+self._subareaSize] = shape
        #add noise if desired
        if NoisePercentage > 0:
            #create a boolean map deciding whether each pixel gets noise or not
            noisePattern    = np.random.choice([0,1], validationList.shape, p=[1-NoisePercentage,NoisePercentage]).astype(bool)
            #create a map with a magnitude of noise for each pixel
            noiseValue      = np.random.randint(0,255,size=validationList.shape)
            #the validation images are single-channel, so no stacking into r,g,b is needed
            validationList +=  (noiseValue*noisePattern).astype(np.uint8)
        #if we want to invert the image we have to subtract it from the max value, so max -> 0 and 0 -> max
        if Invert:
            validationList = 255-validationList.astype(np.uint8)
            
        #save the images
        with open(Path + '\\' + 'validation.npy',"wb") as f :
            np.save(f,validationList)
    def exportFacesForTraining(self, Count, CreateInvalid, MoveRadius, NoisePercentage, Invert, Path):
        #we need one extra class because we also have an empty (background) class
        differentClasses = 4 + 1
        if CreateInvalid and MoveRadius > 0:
            differentClasses += 1
        #create a zero-initialized array for the labels. We need to make sure that labels and faces have the same length,
        #so we also have to consider differences caused by rounding
        labels = np.zeros((differentClasses*int(Count/differentClasses),),dtype=np.uint8)
        #add the background labels
        labels[:int(Count/differentClasses)] = 0
        faceShape = self._faceSize*self._subareaSize
        #create an empty array to store the faces 
        faces = np.zeros((differentClasses*int(Count/differentClasses),faceShape,faceShape))
        for i in range(4):
            #add 1/5th of the required counts 
            for j in range(int(Count/differentClasses)):
                if MoveRadius > 0: 
                    #create an empty canvas 
                    canvas = np.zeros((3*faceShape,3*faceShape))
                    #we do not care about speed or velocity so we just take the numpy array from the dictionary and place it in the middle of the canvas
                    canvas[faceShape:2*faceShape,faceShape:2*faceShape] = \
                                self.createFace(i,0,0,0,0,)['pixels']
                    #calculate the offset 
                    x_offset = random.randint(-MoveRadius,MoveRadius) + faceShape
                    y_offset = random.randint(-MoveRadius,MoveRadius) + faceShape
                    faces[(i+1) * int(Count/differentClasses) + j] = canvas[x_offset:x_offset+faceShape,y_offset:y_offset+faceShape]
                else: 
                    faces[(i+1) * int(Count/differentClasses) + j] = self.createFace(i,0,0,0,0,)['pixels']
            #now we add the labels for the other classes, +1 as we have 0 as background
            labels[int(Count/differentClasses)*(i+1):int(Count/differentClasses)*(i+2)] = i+1
        if CreateInvalid and MoveRadius > 0:
            for j in range(int(Count/differentClasses)):
                #create an empty canvas
                canvas = np.zeros((3*faceShape,3*faceShape))
                #we do not care about speed or velocity, so we just take the numpy array from the dictionary and place it in the middle of the canvas
                canvas[faceShape:2*faceShape,faceShape:2*faceShape] = \
                            self.createFace(random.randint(0,3),0,0,0,0,)['pixels']
                #create an offset, but this time we need to go outside the subarea size
                x_offset = random.randint(MoveRadius+1,self._subareaSize)
                y_offset = random.randint(MoveRadius+1,self._subareaSize)
                #we need to have both positive and negative offsets so we multiply by either -1 or 1
                direction  = random.choice([0,1])
                x_offset *= random.choice([1,-1]) * direction
                y_offset *= random.choice([1,-1]) * (1-direction)
                x_offset += faceShape
                y_offset += faceShape
                faces[5 * int(Count/differentClasses) + j] = canvas[x_offset:x_offset+faceShape,y_offset:y_offset+faceShape]
            labels[int(Count/differentClasses)*(5):] = differentClasses-1
        #create noise map and values and apply it to the video frame 
        if NoisePercentage > 0:
            #create a boolean map deciding whether each pixel gets noise or not
            noisePattern    = np.random.choice([0,1], faces.shape, p=[1-NoisePercentage,NoisePercentage]).astype(bool)
            #create a map with a magnitude of noise for each pixel
            noiseValue      = np.random.randint(0,255,size=faces.shape)
            #the faces are single-channel and only used for training, so no stacking into r,g,b is needed
            faces +=  noiseValue*noisePattern
        if Invert:
            faces = 255-faces.astype(np.uint8)
        faces = faces.astype(np.uint8)    
        #save the images
        with open(Path + '\\' + 'images_'+str(NoisePercentage)+'.npy',"wb") as f :
            np.save(f,faces)
        #save the labels 
        with open(Path + '\\' + 'labels_'+str(NoisePercentage)+'.npy',"wb") as f :
            np.save(f,labels)
    
def main():
    face = FaceCreator(ImageSize=(720,1280))
    path = "C:\\Users\\Unknown\\Documents\\Master-Convolution\\Python\\Subshapes"
    #face.exportFacesForTraining(10000,True, 1, 0.0,False,path)
    #face.exportFacesForTraining(10000,True, 1, 0.1,False,path)
    #face.exportFacesForTraining(10000,True, 1, 0.2,False,path)
    #face.exportFacesForTraining(10000,True, 1, 0.3,False,path)
    #face.exportSubshapesForTraining(100000,False,1,0,False,path)
    #face.exportSubshapesForTraining(100000,False,1,0.1,False,path)
    #face.exportSubshapesForTraining(100000,False,1,0.2,False,path)
    #face.exportSubshapesForTraining(100000,False,1,0.3,False,path)
    #face.exportSubshapesForValidation(10000,5,0.05,False,path)
    #face.exportSubshapesForValidationAsImage(random.randint(0,5),20,0.02,False,path)
    face.fillCanvasWithRandomImages(50)
    # face.saveCanvasAsFrame(path+"\\test.jpg",False)
    face.createVideo(450,path,[ {"startframe":100,"stopframe":200,"noisePercentage":0.05},{"startframe":200,"stopframe":300,"noisePercentage":0.125}
                                ,{"startframe":300,"stopframe":450,"noisePercentage":0.25}],False)
if __name__ == "__main__":
    main() | 
| 
	import numpy as np
import re
import string
import pandas as pd
corpus = """
Simple example with Cats and Mouse
Another simple example with dogs and cats
Another simple example with mouse and cheese
""".split("\n")[1:-1]
# cleaning and tokenizing
l_A = corpus[0].lower().split()
l_B = corpus[1].lower().split()
l_C = corpus[2].lower().split()
# Calculating bag of words
word_set = set(l_A).union(set(l_B)).union(set(l_C))
word_dict_A = dict.fromkeys(word_set, 0)
word_dict_B = dict.fromkeys(word_set, 0)
word_dict_C = dict.fromkeys(word_set, 0)
for word in l_A:
	word_dict_A[word] += 1
for word in l_B:
	word_dict_B[word] += 1
for word in l_C:
	word_dict_C[word] += 1
print('bag of word')
print(word_dict_C)
# compute term frequency
def compute_tf(word_dict, l):
	tf = {}
	sum_nk = len(l)
	for word, count in word_dict.items():
		tf[word] = count/sum_nk
	return tf
  
  
tf_A = compute_tf(word_dict_A, l_A)
tf_B = compute_tf(word_dict_B, l_B)
tf_C = compute_tf(word_dict_C, l_C)
# compute idf
def compute_idf(strings_list):
    n = len(strings_list)
    idf = dict.fromkeys(strings_list[0].keys(), 0)
    for l in strings_list:
        for word, count in l.items():
            if count > 0:
                idf[word] += 1
    
    for word, v in idf.items():
        idf[word] = np.log(n / float(v))
    return idf
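# Worked example (illustrative): "cats" appears in documents A and B but not in C,
# so its document frequency is 2 and idf = ln(3/2) ~= 0.405, while a word that
# appears in all three documents (e.g. "simple") gets idf = ln(3/3) = 0.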
    
idf = compute_idf([word_dict_A, word_dict_B, word_dict_C])
def compute_tf_idf(tf, idf):
    tf_idf = dict.fromkeys(tf.keys(), 0)
    for word, v in tf.items():
        tf_idf[word] = v * idf[word]
    return tf_idf
    
tf_idf_A = compute_tf_idf(tf_A, idf)
tf_idf_B = compute_tf_idf(tf_B, idf)
tf_idf_C = compute_tf_idf(tf_C, idf)
print('with tf idf')
tf_idf_simple=pd.DataFrame([tf_idf_A, tf_idf_B, tf_idf_C])
print(tf_idf_simple.head())
# now serious example
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
all_text  =  """"
Google and Facebook are strangling the free press to death. Democracy is the loserGoogle an
Your 60-second guide to security stuff Google touted today at Next '18
A Guide to Using Android Without Selling Your Soul to Google
Review: Lenovo’s Google Smart Display is pretty and intelligent
Google Maps user spots mysterious object submerged off the coast of Greece - and no-one knows what it is
Android is better than IOS
In information retrieval, tf–idf or TFIDF, short for term frequency–inverse document frequency
is a numerical statistic that is intended to reflect
how important a word is to a document in a collection or corpus.
It is often used as a weighting factor in searches of information retrieval
text mining, and user modeling. The tf-idf value increases proportionally
to the number of times a word appears in the document
and is offset by the frequency of the word in the corpus
""".split("\n")[1:-1]
# Preprocessing and tokenizing
def preprocessing(line):
    line = line.lower()
    line = re.sub(r"[{}]".format(string.punctuation), " ", line)
    return line
tfidf_vectorizer = TfidfVectorizer(preprocessor=preprocessing)
tfidf = tfidf_vectorizer.fit_transform(all_text)
kmeans = KMeans(n_clusters=2).fit(tfidf)
lines_for_predicting = ["tf and idf is awesome!"]
print(tfidf_vectorizer.transform(corpus))
# kmeans.predict(tfidf_vectorizer.transform(lines_for_predicting))
 | 
| 
	# 6-1 Adjusting imbalanced data with undersampling
# Can be achieved with the same approach as sections 4 and 5.
# To get an unbiased random sample, cluster the data beforehand and then
# sample from each of the resulting clusters (see the sketch at the end of this file).
# Since this throws away precious samples, avoid it whenever possible.
# 6-2 Adjusting imbalanced data with oversampling
# When there is an extreme difference in volume between the True/False classes, it needs to be balanced.
# Implementing SMOTE -> using the imblearn library is the simplest and most convenient approach.
from preprocess.load_data.data_loader import load_production
from imblearn.over_sampling import SMOTE
production_tb = load_production()
# ratio="auto"部分が動作しない…
sm = SMOTE(ratio="auto", k_neighbors=5, random_state=71)
balance_data, balance_target = \
    sm.fit_sample(production_tb[["length", "thickness"]],
                  production_tb["fault_fig"])
 | 
| 
	import timeit
import numpy as np
from bingo.SymbolicRegression.AGraph.AGraphCrossover import AGraphCrossover
from bingo.SymbolicRegression.AGraph.AGraphMutation import AGraphMutation
from bingo.SymbolicRegression.AGraph.AGraphGenerator import AGraphGenerator
from bingo.SymbolicRegression.AGraph.ComponentGenerator \
    import ComponentGenerator
from bingo.SymbolicRegression.ExplicitRegression import ExplicitRegression, \
                                                        ExplicitTrainingData
from bingo.Base.AgeFitnessEA import AgeFitnessEA
from bingo.Base.Evaluation import Evaluation
from bingo.Base.Island import Island
from bingo.Base.ContinuousLocalOptimization import ContinuousLocalOptimization
from performance_benchmarks import StatsPrinter
POP_SIZE = 128
STACK_SIZE = 64
MUTATION_PROBABILITY = 0.4
CROSSOVER_PROBABILITY = 0.4
NUM_POINTS = 100
START = -10
STOP = 10
ERROR_TOLERANCE = 10e-9
SEED = 20
def init_x_vals(start, stop, num_points):
    return np.linspace(start, stop, num_points).reshape([-1, 1])
def equation_eval(x):
    return x**2 + 3.5*x**3
def init_island():
    np.random.seed(15)
    x = init_x_vals(START, STOP, NUM_POINTS)
    y = equation_eval(x)
    training_data = ExplicitTrainingData(x, y)
    component_generator = ComponentGenerator(x.shape[1])
    component_generator.add_operator(2)
    component_generator.add_operator(3)
    component_generator.add_operator(4)
    crossover = AGraphCrossover()
    mutation = AGraphMutation(component_generator)
    agraph_generator = AGraphGenerator(STACK_SIZE, component_generator)
    fitness = ExplicitRegression(training_data=training_data)
    local_opt_fitness = ContinuousLocalOptimization(fitness, algorithm='lm')
    evaluator = Evaluation(local_opt_fitness)
    ea_algorithm = AgeFitnessEA(evaluator, agraph_generator, crossover,
                                mutation, MUTATION_PROBABILITY,
                                CROSSOVER_PROBABILITY, POP_SIZE)
    island = Island(ea_algorithm, agraph_generator, POP_SIZE)
    return island
TEST_ISLAND = init_island()
class IslandStatsPrinter(StatsPrinter):
    def __init__(self):
        super().__init__()
        self._output = ["-"*24+":::: REGRESSION BENCHMARKS ::::" + "-"*23,
                        self._header_format_string.format("NAME", "MEAN",
                                                          "STD", "MIN", "MAX"),
                        "-"*78]
def explicit_regression_benchmark():
    island = init_island()
    while island.best_individual().fitness > ERROR_TOLERANCE:
        island.execute_generational_step()
def do_benchmarking():
    printer = IslandStatsPrinter()
    printer.add_stats("Explicit Regression",
                      timeit.repeat(explicit_regression_benchmark,
                                    number=10,
                                    repeat=10))
    printer.print()
if __name__ == "__main__":
    do_benchmarking() | 
| 
	# coding: utf-8
from __future__ import unicode_literals
import pytest
def test_ml_tokenizer_handles_long_text(ml_tokenizer):
    text = """അനാവശ്യമായി കണ്ണിലും മൂക്കിലും വായിലും സ്പർശിക്കാതിരിക്കുക"""
    tokens = ml_tokenizer(text)
    assert len(tokens) == 5
@pytest.mark.parametrize("text,length", [("എന്നാൽ അച്ചടിയുടെ ആവിർഭാവം ലിപിയിൽ കാര്യമായ മാറ്റങ്ങൾ വരുത്തിയത് കൂട്ടക്ഷരങ്ങളെ അണുഅക്ഷരങ്ങളായി പിരിച്ചുകൊണ്ടായിരുന്നു", 10), ("പരമ്പരാഗതമായി മലയാളം ഇടത്തുനിന്ന് വലത്തോട്ടാണ് എഴുതുന്നത്", 5)])
def test_ml_tokenizer_handles_cnts(ml_tokenizer, text, length):
    tokens = ml_tokenizer(text)
    assert len(tokens) == length
 | 
| 
	"""
Script that scans a list of IPs to see which support TCP Fast Open.
Done using scapy and by looking for the fast open option to be sent in the SYN ACK from the server.
Requires sudo to run scapy.
"""
__author__ = "Jacob Davis as part of research at imaal.byu.edu"
from scapy.all import sr1
from scapy.layers.inet import IP, TCP
from scapy.layers.inet6 import IPv6
import argparse
import multiprocessing as mp
from tqdm import tqdm
import os
import json
port = 53
ip6_src = None
TARGET = "target"
RESULT = "result"
json_keys = [TARGET, RESULT]
def query(ip):
    """
    Queries an IP to see if the TCP Fast Open option is set in the SYN ACK.
    :param ip: the IP to query. Uses the module-level `port` constant as the destination port
    :return: a dict with the target IP and the result: True if TFO was offered, False if not,
             "Timeout" if no response was received, or "Can't resolve" on error
    """
    ip = ip.strip('\n')
    json_response = {key: None for key in json_keys}
    json_response[TARGET] = ip
    # sr1 - get single response, flags="S" - send SYN, options TFO - set fast open in options
    try:
        ip_layer = IP(dst=ip) if ":" not in ip else IPv6(dst=ip, src=ip6_src)
        # ip_layer.show()
        res = sr1(ip_layer / TCP(dport=port, flags="S", options=[('TFO', '')]), timeout=5, verbose=False)
        # res.show()
        if res is None:
            json_response[RESULT] = "Timeout"
        else:
            json_response[RESULT] = ('TFO' in dict(res[1].options))  # check if TFO is set in TCP response options
    except Exception as e:
        print(e)
        print(ip)
        json_response[RESULT] = "Can't resolve"
    finally:
        return json_response
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Running a series of scapy scans on a list of IPs to look for TFO")
    parser.add_argument('input', help="Input file containing a list of IPs")
    parser.add_argument('output', help="File to write results to", default='TFO_output.txt')
    parser.add_argument('-p', '--port', help="The port to run the scans on", default=53, type=int)
    parser.add_argument('-n', '--num-threads', help="Number of threads to execute queries", default=64, type=int)
    parser.add_argument('-6', '--ip6_src', help="Specifies the source address for ipv6 since scapy doesn't autofill")
    args = parser.parse_args()
    ip_file = open(args.input)
    ips = ip_file.readlines()
    # drop a header row if the first line does not start with a digit
    # (note: an IPv6 address starting with a hex letter would also be skipped here)
    if not ips[0][0].isdecimal():
        ips = ips[1:]
    ip_file.close()
    threads = min(args.num_threads, len(ips))
    port = args.port
    ip6_src = args.ip6_src
    print("Beginning the {} queries using {} threads. ".format(len(ips), threads))
    with open(args.output, 'w') as output_file:
        with mp.Pool(processes=threads) as p:
            try:
                for result in tqdm(p.imap_unordered(query, ips), total=len(ips)):
                    output_file.write(json.dumps(result) + '\n')
            except KeyboardInterrupt:
                p.terminate()
                p.join()
                print("Exiting early from queries. Current results will still be written")
    print("Queries finished. Writing results")
    os.chmod(args.output, 0o777)  # since script runs privileged, change file to be user writeable
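# Example invocation (illustrative file names; scapy needs raw-socket privileges):
#   sudo python3 tfo_scan.py ips.txt TFO_output.txt -p 53 -n 64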
 | 
| 
	"""Manage AWS policies."""
import copy
import json
import math
import pathlib
import re
import uuid
from typing import Dict, Generator, List, Union
from xdg import xdg_cache_home
from wonk import aws, exceptions, optimizer
from wonk.constants import ACTION_KEYS, JSON_ARGS, MAX_MANAGED_POLICY_SIZE, PolicyKey
from wonk.models import InternalStatement, Policy, Statement, which_type
POLICY_CACHE_DIR = xdg_cache_home() / "com.amino.wonk" / "policies"
def grouped_statements(policies: List[Policy]) -> Dict[str, InternalStatement]:
    """Merge the policies' statements by their zone of effect."""
    statement_sets: Dict[str, InternalStatement] = {}
    for policy in policies:
        # According to the policy language grammar (see
        # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_grammar.html) the
        # Statement key should have a list of statements, and indeed that's almost always the case.
        # Some of Amazon's own policies (see AWSCertificateManagerReadOnly) have a Statement key
        # that points to a dict instead of a list of dicts. This ensures that we're always dealing
        # with a list of statements.
        statements = policy[PolicyKey.STATEMENT]
        if isinstance(statements, dict):
            statements = [statements]
        for statement in statements:
            internal_statement = InternalStatement(statement)
            group = internal_statement.grouping_key()
            try:
                existing_item = statement_sets[group]
            except KeyError:
                statement_sets[group] = internal_statement
            else:
                existing_item.action_value |= internal_statement.action_value
    return statement_sets
def blank_policy() -> Policy:
    """Return the skeleton of a policy with no statments."""
    return {
        PolicyKey.VERSION: "2012-10-17",
        PolicyKey.ID: uuid.uuid4().hex,
        PolicyKey.STATEMENT: [],
    }
def render(statement_sets: Dict[str, InternalStatement]) -> Policy:
    """Turn the contents of the statement sets into a valid AWS policy."""
    policy = blank_policy()
    # Sort everything that can be sorted. This ensures that separate runs of the program generate
    # the same outputs, which 1) makes `git diff` happy, and 2) lets us later check to see if we're
    # actually updating a policy that we've written out, and if so, skip writing it again (with a
    # new `Id` key).
    for internal_statement in sorted(statement_sets.values(), key=lambda obj: obj.sorting_key()):
        policy[PolicyKey.STATEMENT].append(internal_statement.render())
    return policy
def packed_json(data: Policy, max_size: int) -> str:
    """Return the most aesthetic representation of the data that fits in the size."""
    for args in JSON_ARGS:
        packed = json.dumps(data, sort_keys=True, **args)
        if len(packed) <= max_size:
            return packed
    raise exceptions.UnshrinkablePolicyError(
        f"Unable to shrink the data into into {max_size} characters: {data!r}"
    )
def tiniest_json(data: Union[Policy, Statement]) -> str:
    """Return the smallest representation of the data."""
    return json.dumps(data, sort_keys=True, **JSON_ARGS[-1])
def split_statement(
    statement: Statement, max_statement_size: int
) -> Generator[Statement, None, None]:
    """Split the original statement into a series of chunks that are below the size limit."""
    statement_action = which_type(statement, ACTION_KEYS)
    actions = statement[statement_action]
    # Why .45? If we need to break a statement up, we may as well make the resulting parts small
    # enough that the solver can easily pack them with others. A bad outcome here would be to end
    # up with 20 statements that were each 60% of the maximum size so that no two could be packed
    # together. However, there _is_ a little bit of overhead in splitting them because each
    # statement is wrapped in a dict that may have several keys in it. In the end, "a little
    # smaller than half the maximum" seemed about right.
    chunks = math.ceil(len(tiniest_json(statement)) / (max_statement_size * 0.45))
    chunk_size = math.ceil(len(actions) / chunks)
    for base in range(0, len(actions), chunk_size):
        sub_statement = {key: value for key, value in statement.items() if key != statement_action}
        sub_statement[statement_action] = actions[base : base + chunk_size]  # noqa: E203
        yield sub_statement
def combine(policies: List[Policy]) -> List[Policy]:
    """Combine policy files into the smallest possible set of outputs."""
    new_policy = render(grouped_statements(policies))
    # Simplest case: we're able to squeeze everything into a single file. This is the ideal.
    try:
        packed_json(new_policy, MAX_MANAGED_POLICY_SIZE)
    except exceptions.UnshrinkablePolicyError:
        pass
    else:
        return [new_policy]
    # Well, that didn't work. Now we need to split the policy into several documents. Subtract the
    # length of the tightest packaging of the policy "envelope" from the maximum size, then
    # subtract the number of statements[1] (because we might have to glue the results together
    # with commas). This is how much room we have to pack statements into.
    #
    # [1] Why "len(statements) - 2"? Because you can glue n statements together with n-1 commas,
    # and it's guaranteed that we can fit at most n-1 statements into a single document because if
    # we could fit all n then we wouldn't have made it to this point in the program. And yes, this
    # is exactly the part of the program where we start caring about every byte.
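    # For example (illustrative numbers only): if the blank policy envelope packs down to
    # 60 characters and there are 12 statements, then max_number_of_commas is 10 and each
    # document has MAX_MANAGED_POLICY_SIZE - 60 - 10 characters left for packed statements.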
    statements = new_policy[PolicyKey.STATEMENT]
    minimum_possible_policy_size = len(tiniest_json(blank_policy()))
    max_number_of_commas = len(statements) - 2
    max_statement_size = (
        MAX_MANAGED_POLICY_SIZE - minimum_possible_policy_size - max_number_of_commas
    )
    packed_list = []
    for statement in statements:
        packed = tiniest_json(statement)
        if len(packed) > max_statement_size:
            for splitted in split_statement(statement, max_statement_size):
                packed_list.append(tiniest_json(splitted))
        else:
            packed_list.append(packed)
    statement_sets = optimizer.pack_statements(packed_list, max_statement_size, 10)
    policies = []
    for statement_set in statement_sets:
        # The splitting process above might have resulted in this policy having multiple statements
        # that could be merged back together. The easiest way to handle this is to create a new
        # policy as-is, then group its statements together into *another* new, optimized policy,
        # and emit that one.
        unmerged_policy = blank_policy()
        unmerged_policy[PolicyKey.STATEMENT] = [
            json.loads(statement) for statement in statement_set
        ]
        merged_policy = render(grouped_statements([unmerged_policy]))
        policies.append(merged_policy)
    return policies
def policy_set_pattern(policy_set: str) -> re.Pattern:
    """Return a regexp matching the policy set's name."""
    final = policy_set.rsplit("/", maxsplit=1)[-1]
    return re.compile(rf"^{final}_\d+$")
def write_policy_set(output_dir: pathlib.Path, base_name: str, policies: List[Policy]):
    """Write the packed sets, return the names of the files written, and collect garbage."""
    # Get the list of existing files for this policy set so that we can delete them later. First,
    # get a list of candidates with Path.glob() because that's faster and easier than getting a
    # list of _every_ file and filtering it with Python. Then use a regular expression to match
    # each candidate so that policy set "foo" doesn't unintentionally delete policy set "foo_bar"'s
    # files.
    pattern = policy_set_pattern(base_name)
    cleanup = {
        candidate
        for candidate in output_dir.glob(f"{base_name}_*")
        if pattern.match(candidate.stem)
    }
    if len(cleanup) > 10:
        # Wonk only creates at most 10 policies for a policy set. If we've found more than 10
        # matches then something's gone awry, like the policy set is "*" or such. Either way, pull
        # the plug and refuse to delete them.
        raise exceptions.TooManyPoliciesError(base_name, len(cleanup))
    # Write each of the files that go into this policy set, and create a list of the filenames
    # we've written.
    output_filenames = []
    for i, policy in enumerate(policies, 1):
        output_path = output_dir / f"{base_name}_{i}.json"
        output_filenames.append(str(output_path))
        # Don't delete a file right after we create it.
        cleanup.discard(output_path)
        # Check if the on-disk file is identical to this one. If so, leave it alone so that we
        # don't have unnecessary churn in Git, Terraform, etc.
        #
        # We minimize churn by sorting collections whenever possible so that they're always output
        # in the same order if the original filenames change.
        try:
            on_disk_policy = json.loads(output_path.read_text())
        except FileNotFoundError:
            pass
        else:
            if policies_are_identical(on_disk_policy, policy):
                continue
        output_path.write_text(packed_json(policy, MAX_MANAGED_POLICY_SIZE))
    # Delete all of the pre-existing files, minus the ones we visited above.
    for old in cleanup:
        old.unlink()
    return output_filenames
def policies_are_identical(old_policy: Policy, new_policy: Policy) -> bool:
    """Return True if the old and new policies are identical other than their IDs."""
    old_policy, new_policy = copy.deepcopy(old_policy), copy.deepcopy(new_policy)
    try:
        # If the on-disk policy is missing the `Id` key, then the policy's been altered and we know
        # it's no longer identical to the new one.
        del old_policy[PolicyKey.ID]
    except KeyError:
        return False
    new_policy.pop(PolicyKey.ID, None)
    # We minimize churn by sorting collections whenever possible so that they're always output in
    # the same order if the input policies haven't changed. That's better (and in the long run,
    # easier) than implementing an order-insensitive comparison here because it also keeps the
    # on-disk policy stable between runs. This makes git happy.
    return old_policy == new_policy
def make_cache_file(name: str, version: str) -> pathlib.Path:
    """Return the path to the document's cache file."""
    cache_dir = POLICY_CACHE_DIR / name
    cache_dir.mkdir(parents=True, exist_ok=True)
    return cache_dir / f"{version}.json"
def fetch(client, arn: str, force: bool = False) -> str:
    """Return the contents of the policy."""
    current_version = aws.get_policy_version(client, arn)
    cache_file = make_cache_file(aws.name_for(arn), current_version)
    policy_doc = None
    try:
        if not force:
            policy_doc = cache_file.read_text()
    except FileNotFoundError:
        pass
    if policy_doc is None:
        policy_doc = aws.get_policy(client, arn, current_version)
        cache_file.write_text(policy_doc)
    return policy_doc
 | 
| 
	from pymongo import MongoClient
from datetime import datetime
client = MongoClient("mongodb+srv://doodko:[email protected]/PyLab?retryWrites=true&w=majority")
db = client['PyLab']
friends = db['friends']
def add_new_friend(name):
    friend = {'name': name, 'date': datetime.now()}
    friends.insert_one(friend)
def check_if_met_before(name):
    if friends.find_one({'name': name}) is None:
        add_new_friend(name)
        return f"Приємно познайомитись, {name}."
    return f"Здається, ми вже бачились, {name}."
def find_all_friends():
    friends_list = [fr["name"] for fr in friends.find()]
    return friends_list  
 | 
| 
	def add(w,y):
    """Ripple-carry addition of two 32-bit binary strings (index 31 is the least
    significant bit). The final carry is discarded, i.e. the sum wraps mod 2**32."""
    w=list(w)
    y=list(y)
    k=[0]*32  # carry bits
    c=[0]*32  # sum bits
    if w[31]=='1' and y[31]=='1':
        c[31]='0'
        k[30]='1'
    elif w[31]=='0'and y[31]=='0':
        c[31]='0'
        k[30]='0'
    else:
        c[31]='1'
        k[30]='0'
    for i in range(31):
        if w[30-i]=='1' and y[30-i]=='1' and k[30-i]=='1':
            c[30-i]='1'
            k[29-i]='1'
        elif w[30-i]=='0' and y[30-i]=='0' and k[30-i]=='1':
            c[30-i]='1'
            k[29-i]='0'
        elif w[30 - i] == '0' and y[30 - i] == '0' and k[30 - i] == '0':
            c[30 - i] = '0'
            k[29 - i] = '0'
        elif w[30-i]=='1' and y[30-i]=='1' and k[30-i]=='0':
            c[30-i]='0'
            k[29-i]='1'
        elif w[30-i]=='1' and y[30-i]=='0' and k[30-i]=='0' or w[30-i]=='0' and y[30-i]=='1' and k[30-i]=='0':
            c[30-i]='1'
            k[29-i]='0'
        elif w[30-i]=='1' and y[30-i]=='0' and k[30-i]=='1' or w[30-i]=='0' and y[30-i]=='1' and k[30-i]=='1':
            c[30-i]='0'
            k[29-i]='1'
    c=''.join(c)
    return c
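# Hypothetical self-check (not part of the original): compare the string adder
# against Python's integer addition, discarding the final carry.
if __name__ == '__main__':
    a, b = 0b0101, 0b0111
    assert add(format(a, '032b'), format(b, '032b')) == format((a + b) & 0xFFFFFFFF, '032b')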
 | 
| 
	# -*- coding: UTF-8 -*-
import json
from jsonpath import jsonpath
import config
from awesome.utils.get_content import send_request
from awesome.utils.operation_to_mysql import OsuMySQL
def get_osu_id(user_id):
    """
    根据qq号查询osu的id
    :param user_id: qq号
    :return: osu_id, mode
    """
    sql_str = "select osu_id, mode from {} where qq_id='{}'".format(config.MYSQL_TBNAME, user_id)
    o = OsuMySQL()
    try:
        result = o.query_data(sql_str)
        o.close()
    except Exception as e:
        o.close()
        return None
    if result:
        return {"osu_id": result[0][0], "mode": result[0][1]}
    return None
async def get_user_best(user, mode, limit_num):
    """
    查询用户最好成绩
    :param user:        osuid
    :param mode:        模式
    :param play_time:   限制显示的数量和
    :return:            显示的成绩dict
    """
    bp_resp = await send_request(part_url=config.GET_USER_BEST_URL, u=user, m=mode, limit=limit_num)
    bp_dict = json.loads(bp_resp)  # the response is a list of dicts
    # return None if the query came back empty or with an error
    if len(bp_dict) == 0 or "error" in bp_dict[0].keys():
        return None
    return bp_dict
 | 
| 
	# -*- coding: utf-8 -*-
import datetime
import urllib
import scrapy
from scrapy.spiders.init import InitSpider
from scrapy import Selector
from scrapy_splash import SplashRequest
from spider.consts import DOWNLOADER_MIDDLEWARES_HTTP_PROXY_OFF, MYSQL_ITEM_PIPELINES
from spider.items import SpiderLoaderItem, NovelNoberuItem, NovelChapterItem
class SFAcgSpider(scrapy.Spider):
    name = "sfacg"
    custom_settings = {
        'ITEM_PIPELINES': MYSQL_ITEM_PIPELINES,
        'SHOW_SQL': False,
        'DOWNLOADER_MIDDLEWARES': DOWNLOADER_MIDDLEWARES_HTTP_PROXY_OFF,
        #'DOWNLOAD_DELAY': 3
    }
    allowed_domains = ['www.sfacg.com']
    batch_date = datetime.datetime.now().date()
    default_data = {
    }
    default_data = urllib.parse.urlencode(default_data)
    default_headers = {
    }
    def start_requests(self):
        yield scrapy.Request(
            url='http://book.sfacg.com/List/?tid=-1', headers=self.default_headers, body=self.default_data, callback=self.get_final_url, dont_filter=True)
    def get_final_url(self, response):
        total_page = response.xpath('//ul[@class="nav pagebar"]/li[4]/a/text()').extract()[0]
        pages = int(total_page)
        for page in range(1,pages+1):
            yield scrapy.Request(
                url='http://book.sfacg.com/List/default.aspx?tid=-1&PageIndex='+str(page),
                headers=self.default_headers, body=self.default_data, callback=self.parse_basic_info, dont_filter=True)
    def parse_basic_info(self, response):
        contents=response.xpath('//div[@class="comic_cover Blue_link3"]/ul[@class="Comic_Pic_List"]').extract()
        for content in contents:
            img = Selector(text=content).xpath('//ul[@class="Comic_Pic_List"]/li[@class="Conjunction"]/a/img/@src').extract()[0]
            url='http://book.sfacg.com'+Selector(text=content).xpath('//ul[@class="Comic_Pic_List"]/li[@class="Conjunction"]/a/@href').extract()[0]
            name = Selector(text=content).xpath('//ul[@class="Comic_Pic_List"]/li[@class="Conjunction"]/a/img/@alt').extract()[0]
            yield scrapy.Request(url=url, meta={'img':img,'name':name}, callback=self.parse_detail_info, dont_filter=True)
    def parse_detail_info(self, response):
        url=response.url
        img=response.meta['img']
        name = response.meta['name']
        spans=response.xpath('//div[@class="count-detail"]//span').extract()
        author_names=response.xpath('//div[@class="author-name"]/span/text()').extract()[0]
        size=Selector(text=spans[1]).xpath('//span/text()').extract()[0].split(':')[1].split('[')[0]
        status_part1=Selector(text=spans[1]).xpath('//span/text()').extract()[0].split(':')[1].split('[')[1][0:-1]
        status_part2=''
        if len(response.xpath('//div[@class="chapter-info"]//h3[@class="chapter-title"]/a[@class="link"]/text()').extract())>0:
            status_part2 =  response.xpath('//div[@class="chapter-info"]//h3[@class="chapter-title"]/a[@class="link"]/text()').extract()[0]
        status=status_part1+status_part2
        introduction = response.xpath('//p[@class="introduce"]').extract()[0]
        update_time=Selector(text=spans[3]).xpath('//span/text()').extract()[0][3:]
        type=Selector(text=spans[0]).xpath('//span/text()').extract()[0].split(':')[1]
        item_noberu = SpiderLoaderItem(item=NovelNoberuItem(image_urls=[img]), response=response)
        item_noberu.add_value('batch_date', self.batch_date)
        item_noberu.add_value('host', self.allowed_domains[0])
        item_noberu.add_value('url', url)
        item_noberu.add_value('img', img)
        item_noberu.add_value('name', name)
        item_noberu.add_value('author_names', author_names)
        item_noberu.add_value('status', status)
        item_noberu.add_value('type', type)
        item_noberu.add_value('size', size)
        item_noberu.add_value('category', '轻小说')
        item_noberu.add_value('introduction', introduction)
        item_noberu.add_value('update_time', update_time)
        item_noberu.add_value('table_name', 'novel_noberu')
        yield item_noberu.load_item()
        tmp_url = ''
        if len(response.xpath('//div[@id="BasicOperation"]/a[1]/@href').extract()) > 0:
            tmp_url = 'http://book.sfacg.com'+response.xpath('//div[@id="BasicOperation"]/a[1]/@href').extract()[0]
            if 'javascript' not in tmp_url:  # skip javascript: pseudo-links
                yield scrapy.Request(url=tmp_url, meta={'noberu_url': url}, callback=self.parse_chapter_info, dont_filter=True)
    def parse_chapter_info(self, response):
        noberu_url=response.meta['noberu_url']
        chapters=response.xpath('//div[@class="story-catalog"]').extract()
        for chapter in chapters:
            part_name=Selector(text=chapter).xpath('//div[@class="story-catalog"]/div[@class="catalog-hd"]/h3[@class="catalog-title"]/text()').extract()[0]
            subchapters=Selector(text=chapter).xpath('//div[@class="story-catalog"]/div[@class="catalog-list"]/ul/li').extract()
            for subchapter in subchapters:
                chapter_name = part_name  # fall back to the part name if the chapter has no title
                if len(Selector(text=subchapter).xpath('//li/a/@title').extract())>0:
                    chapter_name = part_name+Selector(text=subchapter).xpath('//li/a/@title').extract()[0]
                chapter_url = ''
                if len(Selector(text=subchapter).xpath('//li/a/@href').extract())>0:
                    chapter_url = 'http://book.sfacg.com'+Selector(text=subchapter).xpath('//li/a/@href').extract()[0]
                item_chapter = SpiderLoaderItem(item=NovelChapterItem(), response=response)
                item_chapter.add_value('batch_date', self.batch_date)
                item_chapter.add_value('noberu_url', noberu_url)
                item_chapter.add_value('chapter_name', chapter_name)
                item_chapter.add_value('chapter_url', chapter_url)
                item_chapter.add_value('table_name', 'novel_chapter')
                yield item_chapter.load_item() | 
| 
	import spacy
from spacy.tokens import Token
nlp = spacy.blank("en")
# Define the getter function that takes a token and returns its reversed text
def get_reversed(token):
    return token.text[::-1]
# Register the Token property extension "reversed" with the getter get_reversed
Token.set_extension("reversed", getter=get_reversed)
# Process the text and print the reversed attribute for each token
doc = nlp("All generalizations are false, including this one.")
for token in doc:
    print("reversed:", token._.reversed)
 | 
| 
	import pickle
import socket
import _thread
from scripts.multiplayer import game, board, tetriminos
server = "192.168.29.144"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.bind((server, port))
except socket.error as e:
    print(e)
s.listen()
print("Waiting for connection")
connected = set()
games = {}
idCount = 0
def threaded_client(conn, p, gameId):
    global idCount
    conn.send(str.encode(str(p)))
    reply = ""
    while True:
        try:
            data = conn.recv(4096).decode()
            if gameId in games:
                current_game = games[gameId]  # avoid shadowing the imported `game` module
                if not data:
                    break
                else:
                    current_game.update(p, data)
                    reply = current_game
                    conn.sendall(pickle.dumps(reply))
            else:
                break
        except Exception:
            break
    print("Lost Connection!")
    try:
        del games[gameId]
        print("Closing Game", gameId)
    except KeyError:
        pass
    idCount -= 1
    conn.close()
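# --- Hedged client-side sketch (assumption: not part of the original server) ---
# A matching client would first read its player id as a plain string, then send
# commands and unpickle the Game object the server replies with. The "get"
# command below is illustrative; anything game.update() accepts would work.
def example_client(host=server, client_port=port):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, client_port))
    player = int(client.recv(2048).decode())   # server sends str(p) on connect
    client.send(str.encode("get"))             # request the current game state
    state = pickle.loads(client.recv(4096))    # server replies with a pickled Game
    client.close()
    return player, state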
while True:
    conn, addr = s.accept()
    print("Connected to: ", addr)
    idCount += 1
    p = 0
    game_id = (idCount - 1) // 2
    if idCount % 2 == 1:
        games[game_id] = game.Game((0, 0, 0), None, board)
    else:
        games[game_id].ready = True
        p = 1
    _thread.start_new_thread(threaded_client, (conn, p, game_id))
 | 
| 
	import warnings
import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
from mmdet.ops import RoIAlign, RoIPool
def init_detector(config, checkpoint=None, device='cuda:0'):
    """Initialize a detector from config file.
    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): Device to place the constructed detector on, e.g. 'cuda:0'.
    Returns:
        nn.Module: The constructed detector.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    config.model.pretrained = None
    model = build_detector(config.model, test_cfg=config.test_cfg)
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint)
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn('Class names are not saved in the checkpoint\'s '
                          'meta data, use COCO classes by default.')
            model.CLASSES = get_classes('coco')
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
class LoadImage(object):
    """A simple pipeline to load image"""
    def __call__(self, results):
        """Call function to load images into results
        Args:
            results (dict): A result dict contains the file name
                of the image to be read.
        Returns:
            dict: ``results`` will be returned containing loaded image.
        """
        if isinstance(results['img'], str):
            results['filename'] = results['img']
            results['ori_filename'] = results['img']
        else:
            results['filename'] = None
            results['ori_filename'] = None
        img = mmcv.imread(results['img'])
        results['img'] = img
        results['img_fields'] = ['img']
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results
def inference_detector(model, img):
    """Inference image(s) with the detector.
    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file path or a loaded image.
    Returns:
        The detection results for the given image.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        # Use torchvision ops for CPU mode instead
        for m in model.modules():
            if isinstance(m, (RoIPool, RoIAlign)):
                if not m.aligned:
                    # aligned=False is not implemented on CPU
                    # set use_torchvision on-the-fly
                    m.use_torchvision = True
        warnings.warn('We set use_torchvision=True in CPU mode.')
        # just get the actual data from DataContainer
        data['img_metas'] = data['img_metas'][0].data
    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
async def async_inference_detector(model, img):
    """Async inference image(s) with the detector.
    Args:
        model (nn.Module): The loaded detector.
        img (str or ndarray): Either an image file path or a loaded image.
    Returns:
        Awaitable detection results.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    result = await model.aforward_test(rescale=True, **data)
    return result
def show_result_pyplot(model, savepath, img, result, score_thr=0.3, fig_size=(15, 10)):
    """Visualize the detection results on the image.
    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        fig_size (tuple): Figure size of the pyplot figure.
    """
    if hasattr(model, 'module'):
        model = model.module
    img = model.show_result(img, result, score_thr=score_thr, show=False)
    #plt.figure(figsize=fig_size)
    import cv2
    cv2.imwrite(savepath, img)
    #plt.imshow(mmcv.bgr2rgb(img))
    #plt.show()
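# --- Hedged usage sketch (not part of the original module) ---
# Typical single-image flow; the config, checkpoint and image paths below are
# placeholders and must point at real files.
if __name__ == '__main__':
    detector = init_detector('configs/example_config.py',
                             'checkpoints/example_checkpoint.pth',
                             device='cuda:0')
    detections = inference_detector(detector, 'demo/demo.jpg')
    show_result_pyplot(detector, 'demo/demo_result.jpg', 'demo/demo.jpg',
                       detections, score_thr=0.3)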
 | 
| 
	from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Tuple, Optional, ClassVar
@dataclass(eq=False, repr=False, frozen=True)
class StringTreeNode(ABC):
  """
  An abstract class that can be inherited to build delimited strings.
  By subclassing StringTreeNode, one can design a DSL describing a tree,
  where each node is a string drawn from a predetermined set of possible values.
  For instance, root.[foo.[here|there]|bar.[one|two]] is such a DSL.
  If a tree node, foo for instance, is marked as a variable, then we surround it with "{}",
  and the resulting string might look like: root.{foo}.here
  Later we can substitute the variable foo with a runtime value, for instance "314565",
  and the string will be converted to root.314565.here
  """
  _path_to_parent: Tuple[str, ...] = ()
  _value: Optional[str] = None
  # Concrete subclass of StringTreeNode, if not a leaf, must override the class field _children
  # with all possible string values of its children nodes.
  _children: ClassVar[Tuple[str, ...]] = ()
  _is_variable: ClassVar[bool] = False
  _variable_mark_left: ClassVar[str] = "{"
  _variable_mark_right: ClassVar[str] = "}"
  def path(self) -> Tuple[str, ...]:
    if self._value is None:  # only possible when this node is the builder
      return ()
    # surround this node's value with variable marks to flag it as a variable
    if self.__class__._is_variable:
      value = self.__class__._variable_mark_left + self._value + self.__class__._variable_mark_right
    else:
      value = self._value
    if self._path_to_parent:
      return self._path_to_parent + (value,)
    else:
      return value,
  @abstractmethod
  def _get_class(self, item: str):
    """
    The subclass must have the following implementation of this method
    def _get_class(self, item: str):
      return globals()[item]
    globals() only contains the objects in the same module where self.__class__ is defined
    """
    pass
  def __getattribute__(self, item):
    children = object.__getattribute__(self, '_children')
    if item in children:
      # load the class with class name 'item'
      class_whose_name_is_item = object.__getattribute__(self, '_get_class')(item)
      # load the path of the current node
      path = object.__getattribute__(self, 'path')()
      # create and return the new tree node, an instance of class 'item'
      return class_whose_name_is_item(_path_to_parent=path, _value=item)
    else:  # if attribute 'item' not registered as a child, just treat as a normal attribute
      return object.__getattribute__(self, item)
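# --- Hedged illustration (not part of the original module) ---
# A minimal concrete tree for the DSL sketched in the docstring above. Class
# names double as the node strings, and Foo is marked as a variable, so
# Root(_value='root').Foo.Here.path() yields ('root', '{Foo}', 'Here').
class Here(StringTreeNode):
  def _get_class(self, item: str):
    return globals()[item]
class Foo(StringTreeNode):
  _children: ClassVar[Tuple[str, ...]] = ('Here',)
  _is_variable: ClassVar[bool] = True
  def _get_class(self, item: str):
    return globals()[item]
class Root(StringTreeNode):
  _children: ClassVar[Tuple[str, ...]] = ('Foo',)
  def _get_class(self, item: str):
    return globals()[item]
if __name__ == '__main__':
  print(Root(_value='root').Foo.Here.path())  # -> ('root', '{Foo}', 'Here')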
 | 
| 
	from confluent_kafka import TopicPartition
from confluent_kafka.admin import (
    BrokerMetadata,
    GroupMember,
    GroupMetadata,
    PartitionMetadata,
    TopicMetadata,
)
from kaskade.kafka.models import Broker, Cluster, Group, Partition, Topic
from tests import faker
def random_broker(id=faker.pyint()):
    return Broker(id=id, host=faker.hostname(), port=faker.port_number())
def random_brokers(nb_elements=10, variable_nb_elements=True):
    id_list = faker.pylist(
        nb_elements=nb_elements,
        variable_nb_elements=variable_nb_elements,
        value_types=int,
    )
    return [random_broker(id) for id in id_list]
def random_group(id=faker.pystr()):
    return Group(
        id=id,
        broker=random_broker(),
        state=faker.random.choice(["stable", "empty"]),
        members=faker.pyint(),
    )
def random_groups(nb_elements=10, variable_nb_elements=True):
    id_list = faker.pylist(
        nb_elements=nb_elements,
        variable_nb_elements=variable_nb_elements,
        value_types=str,
    )
    return [random_group(id) for id in id_list]
def random_partition(id=faker.pyint()):
    return Partition(
        id=id,
        leader=faker.pyint(),
        replicas=faker.pylist(value_types=int),
        isrs=faker.pylist(value_types=int),
    )
def random_partitions(nb_elements=10, variable_nb_elements=True):
    id_list = faker.pylist(
        nb_elements=nb_elements,
        variable_nb_elements=variable_nb_elements,
        value_types=int,
    )
    return [random_partition(id) for id in id_list]
def random_topic(name=faker.pystr()):
    return Topic(name=name, partitions=random_partitions(), groups=random_groups())
def random_topics(nb_elements=10, variable_nb_elements=True):
    id_list = faker.pylist(
        nb_elements=nb_elements,
        variable_nb_elements=variable_nb_elements,
        value_types=str,
    )
    return [random_topic(id) for id in id_list]
def random_cluster():
    return Cluster(
        brokers=random_brokers(),
        version=faker.bothify("#.#.#"),
        has_schemas=faker.pybool(),
        protocol=faker.random.choice(["plain", "ssl"]),
    )
def random_broker_metadata():
    broker_metadata = BrokerMetadata()
    broker_metadata.id = faker.pyint()
    broker_metadata.host = faker.hostname()
    broker_metadata.port = faker.port_number()
    return broker_metadata
def random_group_member_metadata():
    group_member = GroupMember()
    group_member.id = faker.pystr()
    group_member.client_host = faker.hostname()
    group_member.client_id = faker.pystr()
    return group_member
def random_group_metadata():
    group_metadata = GroupMetadata()
    group_metadata.broker = random_broker_metadata()
    group_metadata.id = faker.pystr()
    group_metadata.state = faker.random.choice(["Stable", "Empty"])
    group_metadata.members = [
        random_group_member_metadata() for _ in range(faker.pyint(max_value=10))
    ]
    return group_metadata
def random_topic_partition_metadata():
    topic_partition = TopicPartition(faker.pystr(), faker.pyint())
    topic_partition.offset = faker.pyint()
    return topic_partition
def random_partition_metadata():
    partition_metadata = PartitionMetadata()
    partition_metadata.id = faker.pyint()
    partition_metadata.leader = faker.pyint()
    partition_metadata.replicas = faker.pylist(value_types=int)
    partition_metadata.isrs = faker.pylist(value_types=int)
    return partition_metadata
def random_topic_metadata():
    topic_metadata = TopicMetadata()
    topic_metadata.topic = faker.pystr()
    topic_metadata.partitions = {
        i: random_partition_metadata() for i in range(faker.pyint(max_value=10))
    }
    return topic_metadata
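# --- Hedged usage sketch (not part of the original helpers) ---
# How these factories could back a unit test; the attribute names mirror the
# constructor keywords used above, which is an assumption about the models.
def example_fixture_usage():
    cluster = random_cluster()
    assert isinstance(cluster.brokers, list)
    topic = random_topic()
    assert all(partition.id is not None for partition in topic.partitions)
    metadata = random_topic_metadata()
    assert isinstance(metadata.partitions, dict)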
 | 
| 
	import sys
import subprocess
import importlib
import argparse
import torch
def parse_args():
    a = argparse.ArgumentParser()
    a.add_argument('--batch-dim', type=int, default=8)
    a.add_argument('--height-dim', type=int, default=300)
    a.add_argument('--width-dim', type=int, default=300)
    a.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
    a.add_argument('--ssd-module-name', type=str)
    a.add_argument('--trt-module-name', type=str)
    a.add_argument('--output-names', default='out')
    return a.parse_args()
if __name__ == '__main__':
    args = parse_args()
    args.output_names = args.output_names.split(',')
    ds_ssd300 = importlib.import_module(args.ssd_module_name)
    ds_trt = importlib.import_module(args.trt_module_name)
    dest_path = f'checkpoints/{args.trt_module_name}.engine'
    onnx_path = f'checkpoints/{args.trt_module_name}.onnx'
    device = torch.device(args.device)
    threshold = 0.4
    model_precision = 'fp16'
    image_nchw = (torch.randn((args.batch_dim, 3, args.height_dim, args.width_dim)) * 255).to(device, torch.float32)
    tensorrt_model = ds_trt.TensorRTPart(ds_ssd300.SSD300(threshold, model_precision, args.batch_dim)).to(device)
    # sanity test
    result = tensorrt_model(image_nchw)
    num_result_dims = len(result[0].size())
    result_includes_dummy = num_result_dims == 5
    batch_dim_num = 1 if result_includes_dummy else 0
    torch.onnx.export(
        tensorrt_model,
        image_nchw,
        onnx_path,
        input_names=['image_nchw'],
        output_names=args.output_names,
        dynamic_axes={'image_nchw': {0: 'batch_dim'}, **{o: {batch_dim_num: 'batch_dim'} for o in args.output_names}},
        opset_version=11
    )
    trt_output = subprocess.run([
            'trtexec',
            f'--onnx={onnx_path}',
            f'--saveEngine={dest_path}',
            '--fp16',
            '--explicitBatch',
            f'--minShapes=image_nchw:1x3x{args.height_dim}x{args.width_dim}',
            f'--optShapes=image_nchw:{args.batch_dim}x3x{args.height_dim}x{args.width_dim}',
            f'--maxShapes=image_nchw:{args.batch_dim}x3x{args.height_dim}x{args.width_dim}',
            '--buildOnly'
        ],
        shell=False,
        check=True,
        # capture_output=True,
        text=True
    )
    print(trt_output.args)
    print(trt_output.stdout)
    print(trt_output.stderr)
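    # --- Hedged follow-up sketch: optional sanity check of the serialized engine.
    # Assumes TensorRT's Python bindings are importable; skipped otherwise.
    try:
        import tensorrt as trt
    except ImportError:
        trt = None
    if trt is not None:
        trt_logger = trt.Logger(trt.Logger.WARNING)
        with open(dest_path, 'rb') as engine_file, trt.Runtime(trt_logger) as runtime:
            engine = runtime.deserialize_cuda_engine(engine_file.read())
            print('Engine deserialized:', engine is not None)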
 | 
| 
# BOJ 3079 입국심사 (Immigration Inspection)
import sys
sys.stdin = open("../input.txt", "r")
input = sys.stdin.readline
def check(mid):
    cnt = 0
    for i in range(n):
        cnt += mid // arr[i]
    return cnt >= m
n, m = map(int, input().split())
arr = [int(input()) for _ in range(n)]
arr.sort()
start = 1
end = arr[-1] * m
answer = 0
while start <= end:
    mid = (start + end) // 2
    if check(mid):
        answer = mid
        end = mid - 1
    else:
        start = mid + 1
print(answer) | 
| 
	# Copyright 2017 Aaron Barany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .reg import *
from .FunctionInfo import *
class ElementInfo:
	def __init__(self, text, require):
		self.text = text
		self.require = require
class FeatureInfo:
	def __init__(self, interface):
		self.name = interface.get('name')
		self.extension = interface.tag == 'extension'
		self.types = []
		self.enums = []
		self.functions = []
class GLHeaderGenerator(OutputGenerator):
	def __init__(self, errFile = sys.stderr, warnFile = sys.stderr, diagFile = sys.stdout):
		OutputGenerator.__init__(self, errFile, warnFile, diagFile)
		self.lastTypeRequire = None
		self.glesOnlyTypes = ['khrplatform']
		self.glOnlyTypes = \
			[
				'stddef',
				'inttypes',
				'GLfloat',
				'GLclampf',
			]
		self.curFeature = None
		self.features = []
	def newLine(self):
		write('', file = self.outFile)
	def write(self, *args):
		write(*args, file = self.outFile)
	def beginFile(self, genOpts):
		OutputGenerator.beginFile(self, genOpts)
		if self.genOpts.filename:
			headerSym = '__AnyGL_' + re.sub('\.h', '_h_', os.path.basename(self.genOpts.filename))
			self.write('#pragma once')
			self.write('#ifndef', headerSym)
			self.write('#define', headerSym, '1')
			self.newLine()
		self.write('#include "AnyGLConfig.h"')
		self.newLine()
		self.write('/* Generated by AnyGL. */')
		self.newLine()
		self.write('#ifdef __cplusplus')
		self.write('extern "C" {')
		self.write('#endif')
		self.newLine()
		self.write('#if defined(__clang__)')
		self.write('#pragma GCC diagnostic push')
		self.write('#pragma GCC diagnostic ignored "-Wmacro-redefined"')
		self.write('#elif defined(_MSC_VER)')
		self.write('#pragma warning(push)')
		self.write('#pragma warning(disable: 4005)')
		self.write('#endif')
		self.newLine()
		self.write('#define ANYGL_SUPPORTED(func) (AnyGL_ ## func != 0)')
		self.newLine()
		self.write('ANYGL_EXPORT void AnyGL_setLastCallsite(const char* file, ' \
			'const char* function, unsigned int line);')
		self.write('#if ANYGL_ALLOW_DEBUG')
		self.write('#define ANYGL_CALL(func) (AnyGL_setLastCallsite(__FILE__, __FUNCTION__,' \
			'__LINE__), (func))')
		self.write('#else')
		self.write('#define ANYGL_CALL(func) (func)')
		self.write('#endif')
		self.newLine()
	def endFile(self):
		self.write('/* handle include for standard gl.h */')
		for feature in self.features:
			self.write('#ifndef', feature.name)
			self.write('#define', feature.name, '1')
			self.write('#define ANY' + feature.name, '1')
			self.write('#endif')
			if feature.extension:
				self.write('ANYGL_EXPORT extern int Any' + feature.name + ';')
			self.newLine()
		self.write('/* Type declarations */')
		# Basic types are now use khrplatform with the latest spec, which is only guaranteed to be
		# available on GLES platforms. Need to manually declare them for desktop platforms.
		self.write('#if !ANYGL_GLES')
		self.write('#include <stddef.h>')
		self.write('#include <stdint.h>')
		self.write('typedef int8_t GLbyte;')
		self.write('typedef uint8_t GLubyte;')
		self.write('typedef int16_t GLshort;')
		self.write('typedef uint16_t GLushort;')
		self.write('typedef float GLfloat;')
		self.write('typedef float GLclampf;')
		self.write('typedef uint16_t GLhalf;')
		self.write('typedef int32_t GLfixed;')
		self.write('typedef ptrdiff_t GLintptr;')
		self.write('typedef ptrdiff_t GLsizeiptr;')
		self.write('typedef int64_t GLint64;')
		self.write('typedef uint64_t GLuint64;')
		self.write('#endif')
		self.newLine()
		for feature in self.features:
			if not feature.types:
				continue
			# These features have overlap between the types.
			if feature.name == 'GL_VERSION_1_0'  or feature.name == 'GL_ES_VERSION_2_0':
				self.write('#if defined(ANYGL_VERSION_1_0) || defined(ANYGL_ES_VERSION_2_0)')
			else:
				self.write('#ifdef ANY' + feature.name)
			curRequire = None
			for elem in feature.types:
				if elem.require != curRequire:
					if curRequire:
						self.write('#endif')
					if elem.require:
						self.write('#if', elem.require)
					curRequire = elem.require
				self.write(elem.text)
			if curRequire:
				self.write('#endif')
			self.write('#endif /*', feature.name, '*/')
			self.newLine()
		self.write('/* Enum definitions */')
		self.write('#ifndef ANYGL_NO_DEFINES')
		for feature in self.features:
			if not feature.enums:
				continue
			self.write('#ifdef ANY' + feature.name)
			curRequire = None
			for elem in feature.enums:
				if elem.require != curRequire:
					if curRequire:
						self.write('#endif')
					if elem.require:
						self.write('#if', elem.require)
					curRequire = elem.require
				self.write(elem.text)
			if curRequire:
				self.write('#endif')
			self.write('#endif /*', feature.name, '*/')
			self.newLine()
		self.write('#endif /* ANYGL_NO_DEFINES */')
		self.write('/* Workaround for GL_HALF_FLOAT_OES */')
		self.write('ANYGL_EXPORT extern GLenum AnyGL_HALF_FLOAT;')
		self.newLine()
		self.write('/* Function declarations */')
		for feature in self.features:
			self.write('/*', feature.name, '*/')
			for function in feature.functions:
				if not function.alias:
					self.write(function.getTypeDecl())
			self.newLine()
			for function in feature.functions:
				if not function.alias:
					self.write('ANYGL_EXPORT extern', function.type,
						'AnyGL_' + function.name + ';')
			self.newLine()
			self.write('#ifndef ANYGL_NO_DEFINES')
			for function in feature.functions:
				params = '('
				paramList = function.getParamList()
				for param in paramList:
					if param != paramList[0]:
						params += ', '
					params += param.name
				params += ')'
				if function.alias:
					self.write('#define', function.name + params, 'ANYGL_CALL(AnyGL_' + \
						function.alias + ')' + params)
				else:
					self.write('#define', function.name + params, 'ANYGL_CALL(AnyGL_' + \
						function.name + ')' + params)
			self.write('#endif /* ANYGL_NO_DEFINES */')
			self.newLine()
		self.write('#if defined(__clang__)')
		self.write('#pragma GCC diagnostic pop')
		self.write('#elif defined(_MSC_VER)')
		self.write('#pragma warning(pop)')
		self.write('#endif')
		self.newLine()
		self.write('#ifdef __cplusplus')
		self.write('}')
		self.write('#endif')
		if self.genOpts.filename:
			self.newLine()
			self.write('#endif')
		OutputGenerator.endFile(self)
	def beginFeature(self, interface, emit):
		OutputGenerator.beginFeature(self, interface, emit)
		if emit:
			self.curFeature = FeatureInfo(interface)
	def endFeature(self):
		self.features.append(self.curFeature)
		self.curFeature = None
	def genType(self, typeinfo, name):
		OutputGenerator.genType(self, typeinfo, name)
		# Types are declared differently between GLES and desktop GL.
		typeElem = typeinfo.elem
		require = None
		# Special cases.
		name = typeElem.get('name')
		if name == 'GLuint64EXT':
			typeElem.text = 'typedef GLuint64 '
		else:
			requires = typeElem.get('requires')
			if requires == 'khrplatform':
				require = 'ANYGL_GLES'
			elif requires in self.glOnlyTypes:
				require = '!ANYGL_GLES'
			else:
				if name in self.glesOnlyTypes:
					require = 'ANYGL_GLES'
				elif name in self.glOnlyTypes:
					require = '!ANYGL_GLES'
		s = noneStr(typeElem.text)
		for elem in typeElem:
			if (elem.tag == 'apientry'):
				s += 'APIENTRY' + noneStr(elem.tail)
			else:
				s += noneStr(elem.text) + noneStr(elem.tail)
		if (len(s) > 0):
			self.curFeature.types.append(ElementInfo(s, require))
	def genEnum(self, enuminfo, name):
		OutputGenerator.genEnum(self, enuminfo, name)
		s = '#define ' + name.ljust(33) + ' ' + enuminfo.elem.get('value')
		# Handle non-integer 'type' fields by using it as the C value suffix
		t = enuminfo.elem.get('type')
		if (t != '' and t != 'i'):
			s += enuminfo.type
		self.curFeature.enums.append(ElementInfo(s, None))
	def genCmd(self, cmdinfo, name):
		OutputGenerator.genCmd(self, cmdinfo, name)
		self.curFeature.functions.append(FunctionInfo(cmdinfo.elem, self.curFeature.name))
 | 
| 
	
import time
words = input('Please input the words you want to say!:')
# Example: words = "Dear lili, Happy Valentine's Day! Lyon Will Always Love You Till The End! ? Forever!  ?"
for item in words.split():
    # To print spaces between the characters, add here: item = item + ' '
    letterlist = []  # letterlist collects every printed row; it holds one list_X per y value
    for y in range(12, -12, -1):
        list_X = []  # list_X holds this row's characters as a single string (letters)
        letters = ''  # letters is the string stored in list_X: all characters printed on this row
        for x in range(-30, 30):  # * is multiplication, ** is exponentiation
            expression = ((x*0.05)**2+(y*0.1)**2-1)**3-(x*0.05)**2*(y*0.1)**3
            if expression <= 0:
                letters += item[(x-y) % len(item)]
            else:
                letters += ' '
        list_X.append(letters)
        letterlist += list_X
    print('\n'.join(letterlist))
    time.sleep(1.5)
 | 
| 
	import logging
from logging.config import dictConfig
from configs import StaticConfigs
from configs.configs import base_url,ds_contribution_endpoint,model_bm_contribution_endpoint,ds_search_list_endpoint
from utils.notifierutils import NotifierUtils
from repository import NotifierRepo
log     =   logging.getLogger('file')
utils   =   NotifierUtils()
repo    =   NotifierRepo()
class NotifierEvent:
    def __init__(self,userID):
        query           =   {"userID":userID}
        exclude         =   {"_id":0}
        user_details    =   repo.search(query,exclude,None,None)
        self.user_email      =   user_details[0]["email"] 
        self.user_name       =   user_details[0]["firstName"]
    
    #dumping errors onto redis store
    def data_submission_notifier(self, data):
        log.info(f'Request for notifying data submission updates for entityID:{data["entityID"]}')
        log.info(data)
        try:
            status  =   (data["event"].split('-'))[-1]
            if      status      == "completed":
                template        =   'ds_submit_success.html'
                subject         =   StaticConfigs.DS_SUBMIT_SUCCESS.value
            elif    status      == "failed":
                template        =   'ds_submit_failed.html'
                subject         =   StaticConfigs.DS_SUBMIT_FAILED.value
            else:
                # Unknown status: skip instead of hitting a NameError below
                log.info(f'Unrecognised event status "{status}"; skipping notification')
                return False
            link                =   f'{base_url}{ds_contribution_endpoint}{data["entityID"]}'
            template_vars       =   {"firstname":self.user_name,"activity_link":link,"datasetName":data["details"]["datasetName"],"datasetType":None,"modelName":None}
            receiver_list       =   [self.user_email]
            utils.generate_email_notification(template,template_vars,receiver_list,subject)
            
        except Exception as e:
            log.exception(f'Exception while writing errors: {e}')
            return False
    def data_search_notifier(self, data):
        log.info(f'Request for notifying data search updates for entityID:{data["entityID"]}')
        log.info(data)
        try:
            status  =   (data["event"].split('-'))[-1]
            if      status      == "completed":
                template        =   'search_success.html'
                subject         =   StaticConfigs.DS_SEARCH_COMPLETE.value
            else:
                # Only "completed" search events trigger an email
                log.info(f'Unrecognised event status "{status}"; skipping notification')
                return False
            types               =   {"parallel-corpus":"Parallel Dataset","monolingual-corpus":"Monolingual Dataset","asr-corpus":"ASR/TTS Dataset",
                                        "asr-unlabeled-corpus":"ASR Unlabeled Dataset","ocr-corpus":"OCR Dataset","document-layout-corpus":"Document Layout Dataset"}
            dtype               =   types.get(data["details"]["datasetType"])
            link                =   f'{base_url}{ds_search_list_endpoint}{data["entityID"]}'
            template_vars       =   {"firstname":self.user_name,"activity_link":link,"datasetType":dtype,"modelName":None,"datasetName":None}
            receiver_list       =   [self.user_email]
            utils.generate_email_notification(template,template_vars,receiver_list,subject)
        except Exception as e:
            log.exception(f'Exception while writing errors: {e}')
            return False
    
    def benchmark_submission_notifier(self, data):
        log.info(f'Request for notifying benchmark submission updates for entityID:{data["entityID"]}')
        log.info(data)
        try:
            status  =   (data["event"].split('-'))[-1]
            if      status      == "completed":
                template        =   'bm_run_success.html'
                subject         =   StaticConfigs.BM_RUN_SUCCESS.value
            elif    status      == "failed":
                template        =   'bm_run_failed.html'
                subject         =   StaticConfigs.BM_RUN_FAILED.value
            else:
                # Unknown status: skip instead of hitting a NameError below
                log.info(f'Unrecognised event status "{status}"; skipping notification')
                return False
            link                =   f'{base_url}{model_bm_contribution_endpoint}{data["entityID"]}'
            template_vars       =   {"firstname":self.user_name,"activity_link":link,"datasetType":None,"datasetName":None,"modelName":data["details"]["modelName"]}#
            receiver_list       =   [self.user_email]
            utils.generate_email_notification(template,template_vars,receiver_list,subject)
        except Exception as e:
            log.exception(f'Exception while writing errors: {e}')
            return False
 
# Log config
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
    }},
    'handlers': {
        'info': {
            'class': 'logging.FileHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'filename': 'info.log'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'stream': 'ext://sys.stdout',
        }
    },
    'loggers': {
        'file': {
            'level': 'DEBUG',
            'handlers': ['info', 'console'],
            'propagate': False
        }
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ['info', 'console']
    }
}) | 
| 
	import collections, os, sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.special import erf
import scipy.interpolate
fontsize = 11/1.4
latex_preamble = r'''
    \usepackage{lmodern}
    \usepackage{amsmath}
    \usepackage{amsfonts}
    \usepackage{mathtools}
    \usepackage{bm}
'''
matplotlib.rcParams.update({
    'text.usetex'        : True,
    'font.family'        : 'serif',
    'font.serif'         : 'cmr10',
    'font.size'          : fontsize,
    'mathtext.fontset'   : 'cm',
    'axes.formatter.use_mathtext': True,
    'text.latex.preamble': latex_preamble,
})
fig, ax = plt.subplots(1, 1, figsize=(8.44, 3.9))
def get_aspect(ax=None):
    if ax is None:
        ax = plt.gca()
    fig = ax.figure
    ll, ur = ax.get_position() * fig.get_size_inches()
    width, height = ur - ll
    axes_ratio = height / width
    aspect = axes_ratio / ax.get_data_ratio()
    return aspect
def draw_arrow(x, y, dir, color='k', rot=None, sync=False, zorder=None):
    if zorder is None:
        zorder = -15
    text = (r'$\bm{\uparrow}$' if dir == 'up' else r'$\bm{\downarrow}$')
    va = ('top' if dir == 'up' else 'bottom')
    fontsize = 19
    if sync:
        fontsize = 14.9
    if rot is not None:
        v = [np.cos(rot*np.pi/180), np.sin(rot*np.pi/180)/get_aspect()]
        t = -0.034 #-0.053
        dy = -0.001
        plt.text(
            x + t*v[0], y + t*v[1] + dy,
            r'$\bm{\rightarrow}$',
            va='center', ha='center', fontsize=fontsize,
            zorder=zorder, color=color, rotation=rot,
        )
        # Hide stalk
        if not sync:
            for dt in (-0.0056, ):
                plt.text(
                    x + (t + dt)*v[0], y + (t + dt)*v[1] + dy,
                    r'$\bm{-}$',
                    va='center', ha='center', fontsize=22,
                    zorder=zorder+1, color='w', rotation=rot,
                )
            for dt in (-0.036, ):
                plt.text(
                    x + (t + dt)*v[0], y + (t + dt)*v[1] + dy,
                    r'$\bm{-}$',
                    va='center', ha='center', fontsize=36,
                    zorder=zorder+1, color='w', rotation=rot,
                )
        return
    # Not rotated
    plt.text(
        x, y, text,
        va=va, ha='center', fontsize=fontsize,
        zorder=zorder, color=color,
    )
    # Hide stalk
    if not sync:
        dx = 0.010
        dy = 0.192
        dY = (-0.145 if dir == 'up' else +0.145)
        plt.fill(
            [x - 0.5*dx, x + 0.5*dx, x + 0.5*dx, x - 0.5*dx, x - 0.5*dx],
            np.array([y + 0.5*dy, y + 0.5*dy, y - 0.5*dy, y - 0.5*dy, y + 0.5*dy]) + dY,
            'w', ec='none', zorder=zorder+1,
        )
        dY += 0.1*dY
        dx *= 1.3
        plt.fill(
            [x - 0.5*dx, x + 0.5*dx, x + 0.5*dx, x - 0.5*dx, x - 0.5*dx],
            np.array([y + 0.5*dy, y + 0.5*dy, y - 0.5*dy, y - 0.5*dy, y + 0.5*dy]) + dY,
            'w', ec='none', zorder=zorder+1,
        )
theta = np.linspace(np.pi, 0, 201)
def step(bgn, end, offset_y, dir, color, colors=None, jump_up=False, jump_down=False):
    global y_jump_up_last, y_jump_down_last
    arrow_offset = 0.04
    jump_up_height = 0.10 #0.0925 #0.135
    if offset_y == offset_y0:
        jump_down_height = 0.79 - 0.05
    else:
        jump_down_height = 0.614 + 0.018 - 0.05
    if offset_y == offset_y2:
        jump_up_height += 0.013 #0.008
    x = bgn + ((end - bgn)/2)*(1 + np.cos(theta))
    if dir == 'up':
        y = (height/2)*np.sin(theta)
    elif dir == 'down':
        y = -(height/2)*np.sin(theta)
    else:
        print(f'Unrecognized dir="{dir}"', file=sys.stderr, flush=True)
        sys.exit(1)
    y += offset_y
    if colors:
        color0, color1 = colors
        color0 = np.asarray(matplotlib.colors.ColorConverter().to_rgb(color0), dtype=float)
        color1 = np.asarray(matplotlib.colors.ColorConverter().to_rgb(color1), dtype=float)
        mid = (x.size - 1)/2
        for i in range(x.size - 1):
            w = (1 + erf(1.8*(i - mid)/mid))/2
            color = (1 - w)*color0 + w*color1
            plt.plot([x[i], x[i + 1]], [y[i], y[i + 1]], '-', color=color, lw=1.2)
            # Arrow
            if i == int((x.size - 1)*0.30):
                dy = (y[i+1] - y[i-1])/2*get_aspect()
                dx = (x[i+1] - x[i-1])/2
                draw_arrow(x[i], y[i], 'up', color, rot=180/np.pi*np.arctan2(dy, dx))
        el_skip = 16
        if jump_up:
            if jump_up is True:
                y_jump = np.array(
                      list(y[:len(y)//2])
                    + list(offset_y + np.linspace(
                          height/2,
                          height/2 + jump_up_height,
                          len(y) - len(y)//2,
                      ))
                )
                X = bgn + (end - bgn)/2
                x_jump = np.array(list(x[:len(x)//2]) + [X]*(len(x) - len(x)//2))
                mid = (y_jump.size - 1)/2
                random_fac = 1.22  # because I can't do the math, apparently
                mid *= random_fac
                for i in range(len(y)//2 + el_skip, y_jump.size - 1):
                    w = (1 + erf(1.95*(i - mid)/mid))/2
                    color = (1 - w)*color0 + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow
                draw_arrow(x_jump[i+1], y_jump[i+1] + arrow_offset, 'up', color1)
            else:
                X1 = bgn + (jump_up - bgn)/2
                index1 = np.argmin((X1 - x)**2)
                x_jump = np.array([X1]*(len(x)//2))
                y_jump = np.linspace(
                    offset_y + height/2 + 1e-3,
                    y_jump_up_last[-1],  #offset_y + height/2 + jump_up_height,
                    x_jump.size,
                 )
                mid = (y_jump.size - 1)/2
                random_fac = 1.22  # because I can't do the math, apparently
                for i in range(y_jump.size - 1):
                    w = (1 + erf(1.95*(i - mid)/mid))/2
                    color = (1 - w)*(color0/(1 + random_fac*index1/len(x_jump))) + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow
                draw_arrow(x_jump[i+1], y_jump[i+1] + arrow_offset, 'up', color1)
            y_jump_up_last = y_jump
        if jump_down:
            if jump_down is True:
                X = bgn + (end - bgn)*3/4
                x_jump = np.array(list(x[:3*len(x)//4]) + [X]*(len(x) - 3*len(x)//4))
                Y = np.interp(X, x, y)
                y_jump = np.array(
                      list(y[:3*len(y)//4])
                    + list(np.linspace(
                          Y - 2e-3,
                          Y - jump_down_height,
                          len(y) - 3*len(y)//4,
                      ))
                )
                mid = (y_jump.size - 1)/2
                for i in range(3*len(y)//4, y_jump.size - 1):
                    w = (1 + erf(1.4*(i - mid)/mid))/2
                    color = (1 - w)*color0 + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow
                draw_arrow(x_jump[i+1], y_jump[i+1] - arrow_offset, 'down', color1)
            else:
                X1 = bgn + 3*(jump_down - bgn)/4
                Y = np.interp(X1, x, y)
                index1 = np.argmin((X1 - x)**2)
                x_jump = np.array([X1]*(1*len(x)//2))
                y_jump = np.linspace(Y - 2e-3, y_jump_down_last[-1], len(x_jump))
                mid = (y_jump.size - 1)/2
                random_fac = 3.70  # because I can't do the math, apparently
                for i in range(y_jump.size - 1):
                    w = (1 + erf(1.4*(i - mid)/mid))/2
                    color = (1 - w)*(color0/(1 + random_fac*index1/len(x_jump))) + w*color1
                    plt.plot([x_jump[i], x_jump[i+1]], [y_jump[i], y_jump[i + 1]],
                        '-', color=color, lw=1.2)
                # Arrow
                draw_arrow(x_jump[i+1], y_jump[i+1] - arrow_offset, 'down', color1)
            y_jump_down_last = y_jump
    else:
        plt.plot(x, y, '-', color=color, lw=1.2)
        # Arrow
        i = int((x.size - 1)*0.33)
        dy = (y[i+1] - y[i])*get_aspect()
        dx = (x[i+1] - x[i])
        draw_arrow(x[i], y[i], 'down', color, rot=180/np.pi*np.arctan2(dy, dx))
y_jump_up_last = None
y_jump_down_last = None
# Specs
height = 0.615  #0.68
rung_offset = 0.75
rung0_final_step = 0.5 #0.21 #0.457
offset_y0 = 0
offset_y1 = -1.102*rung_offset
offset_y2 = -2*rung_offset
offset_ydrift = -2.73*rung_offset
end_sync = 1/2 + 1 + 1 + rung0_final_step
particle_scatter_size = 98
particle_vert_offset = 0.0135*np.sqrt(particle_scatter_size)
dy_vert = 0.085 #0.079
dy_vert_fac = 1.2
dx_rung0 = 0.0567 # 0.0507
dx_rung1 = 0.033 #0.0295
colors = ['C0', 'C1', 'C2', 'C3']
# Curve through blue points
lw_fat = 14.5
alpha_fat = 0.154
def draw_fat_blue_curve(x_offset):
    dX_up = 0.017 #-0.015 #0.036
    dX_down = -0.006
    dY_up = 0.1 #0.22
    dY_down = 0.1
    X = [
        1.0*dX_down + 1 - 0.015,
        1 + 0.4*dX_down,
        #
        1,
        1 + 1/8,
        0.2*(2*(1 + 1/4) + 3*(1 + 1/4 - dx_rung1)),
        0.2*(2*(1 + 1/2) + 3*(1 + 1/2 - dx_rung0)),
        #
        #(1 + 1/2),
        #(1 + 1/2),
        dX_up + (1 + 1/2),
    ]
    X = np.array(X) + x_offset
    Y = [
        -1.0*dY_down + offset_ydrift + 0.0,
        -0.4*dY_down + offset_ydrift + 0.03,
        #
        0.05 + 0.2*(2*(offset_ydrift) + 3*(offset_ydrift + dy_vert_fac*dy_vert)) + 0.03,
        0.2*(2*(offset_y2) + 3*(offset_y2 - dy_vert_fac*dy_vert)) + 0.03,
        0.2*(2*(offset_y1) + 3*(offset_y1 - dy_vert_fac*dy_vert)),
        0.2*(2*(offset_y0) + 3*(offset_y0 - dy_vert*(1 + dy_vert_fac))),
        #
        #offset_y0,
        #0.4*dY_up + offset_y0,
        1.0*dY_up + offset_y0,
    ]
    tck, u = scipy.interpolate.splprep([X, Y], s=1.58e-3, k=2)
    unew = np.arange(0, 1.01, 0.01)
    out = scipy.interpolate.splev(unew, tck)
    color_C0 = np.asarray(matplotlib.colors.ColorConverter().to_rgb('C0'), dtype=float)
    color_c = np.asarray(matplotlib.colors.ColorConverter().to_rgb('c'), dtype=float)
    w = 0.66
    color = w*color_C0 + (1 - w)*color_c
    plt.plot(out[0], out[1], '-', color=color, lw=lw_fat, alpha=alpha_fat, zorder=-12.9, solid_capstyle='round')
draw_fat_blue_curve(0)
draw_fat_blue_curve(1)
# Black curves
plt.plot([0, 0], [offset_ydrift - 0.1, offset_y0 + 0.1],
    'k', lw=lw_fat, alpha=alpha_fat, zorder=-12.9, solid_capstyle='round')
plt.plot([end_sync, end_sync], [offset_ydrift - 0.1, offset_y0 + 0.1],
    'k', lw=lw_fat, alpha=alpha_fat, zorder=-12.9, solid_capstyle='round')
# Labels
x = -0.085
dy = 0.123
fontsize = 11
plt.text(x, offset_y0 - dy, 'rung 0',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x - 0.067, offset_y0 - dy, 'long-range,',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x, offset_y1 - dy, 'rung 1',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x, offset_y2 - dy, 'rung 2',
    va='bottom', ha='right', fontsize=fontsize, rotation=90)
plt.text(x, offset_ydrift + dy, 'drift',
    va='top', ha='right', fontsize=fontsize, rotation=90)
# Delta t
y = 0.529
space = r'\,'*736
plt.text(0.5, y,
    rf'$\overbrace{{{space}}}^{{}}$',
    fontsize=1, ha='center', va='center', rotation=0, color='k', zorder=np.inf)
plt.text(0.5, y + 0.140, r'initial, $\Delta t$',
    fontsize=fontsize, ha='center', va='center', color='k', zorder=np.inf)
plt.text(1.5, y,
    rf'$\overbrace{{{space}}}^{{}}$',
    fontsize=1, ha='center', va='center', rotation=0, color='k', zorder=np.inf)
plt.text(1.5, y + 0.140, r'repeatable, $\Delta t$',
    fontsize=fontsize, ha='center', va='center', color='k', zorder=np.inf)
space = r'\,'*int(round(len(space)/2*(end_sync - 2)/1) - 1)
plt.text(0.5*(2 + end_sync), y,
    rf'$\overbrace{{{space}}}^{{}}$',
    fontsize=1, ha='center', va='center', rotation=0, color='k', zorder=np.inf)
plt.text(0.5*(2 + end_sync), y + 0.140, r'synchronisation, $\leq\Delta t$',
    fontsize=fontsize, ha='center', va='center', color='k', zorder=np.inf)
# Time step
y = -2.47
plt.text(0, y, r'$t_0$', fontsize=fontsize, ha='center', va='top')
plt.text(1, y, r'$t_1$', fontsize=fontsize, ha='center', va='top')
plt.text(2, y, r'$t_2$', fontsize=fontsize, ha='center', va='top')
plt.text(end_sync, y, r'$t_{\text{sync}}$', fontsize=fontsize, ha='center', va='top')
# For testing for ellipticity
"""
THETA = np.linspace(0, 2*np.pi, 200)
end = 0
for i in range(int(((1/2 + 1 + 1 + rung0_final_step) - 0)//(1/4))):
    bgn, end = end, end + 1/4  # full step
    if i == 3:
        step(bgn, end, offset_ydrift, 'down', 'k')
        R = 3.5
        x = bgn + R*((end - bgn)/2)*(1 + np.cos(THETA))
        y = -1 -R*(height/2)*np.sin(THETA)
        plt.plot(x, y, 'r-', lw=0.5, zorder=np.inf)
        break
"""
# Set axis
def set_axis():
    xlim = -0.06, end_sync + 0.06
    ylim = -2.8, 0.8 # -2.4, 0.55
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.axis('off')
    plt.tight_layout()
    plt.xlim(xlim)
    plt.ylim(ylim)
set_axis()
# Rung 0
bgn = 0
end = bgn + 1/2
step(bgn, end, offset_y0, 'up', 'k', ('k', colors[0]))  # init
for i in range(2):
    bgn, end = end, end + 1  # full step
    step(bgn, end, offset_y0, 'up', 'k', (colors[3], colors[0]),
        jump_down=True)
bgn, end = end, end + rung0_final_step  # sync step
step(bgn, end, offset_y0, 'up', 'k', (colors[3], 'k'))
# Rung 1
bgn = 0
end = bgn + 1/4
step(bgn, end, offset_y1, 'up', 'k', ('k', colors[0]))  # init
for i in range(int(((1/2 + 1 + 1 + rung0_final_step) - 1/4)//(1/2))):
    bgn, end = end, end + 1/2  # full step
    step(bgn, end, offset_y1, 'up', 'k',
        (colors[3], colors[0]) if i%2 else (colors[1], colors[2]),
        jump_up=(not i%2),
        jump_down=True,
    )
bgn, end = end, end_sync  # sync step
#step(bgn, end, offset_y1, 'up', 'k', (colors[1], colors[2]),
#    jump_up=(bgn + 1/2), jump_down=(bgn + 1/2))
step(bgn, end, offset_y1, 'up', 'k', (colors[3], 'k'))
# Rung 2
bgn = 0
end = bgn + 1/8
step(bgn, end, offset_y2, 'up', 'k', ('k', colors[0]))  # init
for i in range(int(((1/2 + 1 + 1 + rung0_final_step) - 1/8)//(1/4))):
    bgn, end = end, end + 1/4  # full step
    step(bgn, end, offset_y2, 'up', 'k', (colors[i%4], colors[(i+1)%4]),
        jump_up=(not i%2))
bgn, end = end, end_sync  # sync step
step(bgn, end, offset_y2, 'up', 'k', (colors[3], 'k'))
# Drifts
end = 0
for i in range(int(((1/2 + 1 + 1 + rung0_final_step) - 0)//(1/4))):
    bgn, end = end, end + 1/4  # full step
    step(bgn, end, offset_ydrift, 'down', 'k')
#bgn, end = end, end_sync  # sync step
#step(bgn, end, offset_ydrift, 'down', 'k')
# Vertical lines
color_vert = [0.47]*3  # 'grey'
lw_vert = 1.0
# Sync lines
for x in (0, end_sync):
    plt.plot([x]*2, [-2.33 - 0.102 + 0.02, 0.34 + 0.102], '-', color=color_vert, lw=lw_vert, zorder=-16)
# Fixups due to hiding of arrow stalks
plt.plot([0]*2, [0.1, 0.3], '-', color=color_vert, lw=lw_vert, zorder=-13)
plt.plot([0]*2, [-0.8, -0.5], '-', color=color_vert, lw=lw_vert, zorder=-13)
plt.plot([0]*2, [-1.4, -1.26], '-', color=color_vert, lw=lw_vert, zorder=-13)
plt.plot([0]*2, [-2.3, -2.1], '-', color=color_vert, lw=lw_vert, zorder=-13)
# Full time step indicaters
for i in range(1, 3):
    plt.plot([i]*2, [-2.33 - 0.102 + 0.02, 0.34 + 0.102], '--', color=color_vert,
        lw=lw_vert, zorder=-13)
# Horizontal separator between kicks and drifts
dots = np.linspace(0, end_sync, 108)[1:-1]
plt.plot(dots, [0.5*(offset_y2 + offset_ydrift)]*len(dots), '.',
    color=color_vert, zorder=-13, ms=2.0, lw=0,  markeredgecolor='none')
# Vertical black arrows
"""
blackarrow_dy = 0.153
#
y1 = offset_ydrift + dy_vert_fac*dy_vert
y2 = offset_y2 - dy_vert_fac*dy_vert
plt.plot([0, 0], [y1, y2], 'k', lw=lw_vert, zorder=-10)
y1 += blackarrow_dy
y2 -= blackarrow_dy
blackarrow_dy_between = y2 - y1
draw_arrow(0, y1, 'up', color='k', sync=True)
draw_arrow(0, y2, 'down', color='k', sync=True)
#
y1 = offset_y2 - dy_vert_fac*dy_vert
y2 = offset_y1 - dy_vert_fac*dy_vert
y3 = 0.5*(y1 + y2) - 0.5*blackarrow_dy_between
y4 = 0.5*(y1 + y2) + 0.5*blackarrow_dy_between
draw_arrow(0, y3, 'up', color='k', sync=True, zorder=-13.9)
draw_arrow(0, y4, 'down', color='k', sync=True)
plt.plot([0, 0], [y1, y2], 'k', lw=lw_vert, zorder=-10)
#
y1 = offset_y1 - dy_vert_fac*dy_vert
y2 = offset_y0 - dy_vert_fac*dy_vert
y3 = 0.5*(y1 + y2) - 0.5*blackarrow_dy_between
y4 = 0.5*(y1 + y2) + 0.5*blackarrow_dy_between
draw_arrow(0, y3, 'up', color='k', sync=True, zorder=-13.9)
draw_arrow(0, y4, 'down', color='k', sync=True)
plt.plot([0, 0], [y1, y2], 'k', lw=lw_vert, zorder=-10)
"""
# Particles
bank = collections.Counter()
#for step in range(1, 4):
#    bank[0, step] = 4 - 1
#for step in range(1, 7):
#    bank[1, step] = 2 - 1
def draw_particle(rung, step, color, hatch=None):
    lw = 0.135*np.sqrt(particle_scatter_size)
    x = 0
    y = 0
    y += particle_vert_offset*bank[rung, step]
    if rung == 0:
        y -= particle_vert_offset*bank[rung, step]
        dx = dx_rung0
        y -= dy_vert_fac*dy_vert
        if bank[rung, step] == 0:
            if 0 < step < 4 and step != 2.5:
                x -= dx
                y -= dy_vert
        elif bank[rung, step] == 1:
            pass
        elif bank[rung, step] == 2:
            y -= 2*dy_vert
        elif bank[rung, step] == 3:
            x += dx
            y -= dy_vert
    elif rung == 1:
        y -= particle_vert_offset*bank[rung, step]
        dx = dx_rung1
        y -= dy_vert_fac*dy_vert
        if bank[rung, step] == 0 and step > 0:
            x -= dx
        elif bank[rung, step] == 1:
            x += dx
    elif rung == 2:
        y -= particle_vert_offset*bank[rung, step]
        y -= dy_vert_fac*dy_vert
    elif rung == 'drift':
        y -= particle_vert_offset*bank[rung, step]
        y += dy_vert_fac*dy_vert
    #bank[rung, step] -= 1
    bank[rung, step] += 1
    ec = 0.90*np.asarray(matplotlib.colors.ColorConverter().to_rgb(color), dtype=float)
    if rung == 0:
        y += offset_y0
    elif rung == 1:
        y += offset_y1
    elif rung == 2:
        y += offset_y2
    elif rung == 'drift':
        y += offset_ydrift
    else:
        print(f'Could not understand rung = {rung}', file=sys.stderr, flush=True)
        sys.exit(1)
    if rung == 'drift':
        x += 1/4*step
    else:
        if step > 0:
            x += 1/2**(rung + 1)
        if step > 1:
            x += 1/2**rung*(step - 1)
    if x > end_sync:
        x = end_sync
    marker = 'o'
    if rung == 'drift':
        marker = 'h'
    plt.scatter(x, y, particle_scatter_size, c='w', marker=marker,
        edgecolors='w', lw=lw, zorder=10)
    alpha = 0.65
    plt.scatter(x, y, particle_scatter_size, c=color, marker=marker,
        alpha=alpha, edgecolors='None', zorder=10)
    if hatch is not None:
        theta_hatch = np.linspace(0, 2*np.pi, 50)
        r_hatch = 0.025
        aspect = get_aspect()
        matplotlib.rcParams['hatch.linewidth'] = 0.93
        for hatch_color, hatch_alpha in [('w', 1), (hatch, alpha)]:
            plt.fill(
                x + r_hatch*np.cos(theta_hatch),
                y + r_hatch/aspect*np.sin(theta_hatch),
                color='none', edgecolor=hatch_color, zorder=10.1, hatch='/'*8,
                fill=False, lw=0, alpha=hatch_alpha,
            )
        # Manual hatch as dotted hatching apparently
        # does not work properly with PDF.
        """
        r_hatch = 0.025
        n_hatch = 6
        for hatch_color, hatch_alpha in [('w', 1), (hatch, alpha)]:
            X = np.linspace(-2.3*r_hatch, +2*r_hatch, 2*n_hatch)
            Y = np.linspace(-2.3*r_hatch/aspect, +2*r_hatch/aspect, 2*n_hatch)
            Y -= 0.015
            X += 0.0025
            for xx in X:
                for j, yy in enumerate(Y):
                    x_offset = 0
                    if j%2:
                        x_offset = 0.5*(X[1] - X[0])
                    xxx = xx + x_offset
                    if xxx**2 + (yy*aspect)**2 > (0.98*r_hatch)**2:
                        continue
                    plt.scatter(x + xxx, y + yy, 0.015*particle_scatter_size,
                        c=hatch_color, edgecolors='r', lw=0, zorder=10.1,
                        alpha=hatch_alpha)
        """
    plt.scatter(x, y, particle_scatter_size, marker=marker,
        facecolors='none', edgecolors=ec, lw=lw, zorder=10.2)
########################
# Particle "positions" #
########################
# At initial point
draw_particle(0,       0, 'k')
draw_particle(1,       0, 'k')
draw_particle(2,       0, 'k')
draw_particle('drift', 0, 'k', hatch=colors[0])
# Init step
draw_particle(0,       1, colors[0])
draw_particle(1,       1, colors[0])
draw_particle(2,       1, colors[0])
draw_particle('drift', 1, colors[1])
# Rung 2 step + drift
draw_particle(0,       1, colors[1])
draw_particle(1,       1, colors[1])
draw_particle(2,       2, colors[1])
draw_particle('drift', 2, colors[2])
# Rung 2+1 step + drift
draw_particle(0,       1, colors[2])
draw_particle(1,       2, colors[2])
draw_particle(2,       3, colors[2])
draw_particle('drift', 3, colors[3])
# Rung 2 step + drift
draw_particle(0,       1, colors[3])
draw_particle(1,       2, colors[3])
draw_particle(2,       4, colors[3])
draw_particle('drift', 4, colors[0])
# Rung 2+1+0 step + drift
draw_particle(0,       2, colors[0])
draw_particle(1,       3, colors[0])
draw_particle(2,       5, colors[0])
draw_particle('drift', 5, colors[1])
# Rung 2 step + drift
draw_particle(0,       2, colors[1])
draw_particle(1,       3, colors[1])
draw_particle(2,       6, colors[1])
draw_particle('drift', 6, colors[2])
# Rung 2+1 step + drift
draw_particle(0,       2, colors[2])
draw_particle(1,       4, colors[2])
draw_particle(2,       7, colors[2])
draw_particle('drift', 7, colors[3])
# Rung 2 step + drift
draw_particle(0,       2, colors[3])
draw_particle(1,       4, colors[3])
draw_particle(2,       8, colors[3])
draw_particle('drift', 8, colors[0])
# Rung 2+1+0 step + drift
draw_particle(0,       3, colors[0])
draw_particle(1,       5, colors[0])
draw_particle(2,       9, colors[0])
draw_particle('drift', 9, colors[1])
# Rung 2 step + drift
draw_particle(0,        3, colors[1])
draw_particle(1,        5, colors[1])
draw_particle(2,       10, colors[1])
draw_particle('drift', 10, colors[2])
# Rung 2+1 step + drift
draw_particle(0,        3, colors[2])
draw_particle(1,        6, colors[2])
draw_particle(2,       11, colors[2])
draw_particle('drift', 11, colors[3])
# Rung 2 step + drift
draw_particle(0,        3, colors[3])
draw_particle(1,        6, colors[3])
draw_particle(2,       12, colors[3])
draw_particle('drift', 12, 'k')
# Rung 2+1+0 step
draw_particle(0,        4, 'k')
draw_particle(1,        7, 'k')
draw_particle(2,       13, 'k')
# Sync point
"""
dx_rung0_bak = dx_rung0; dx_rung0 = 0
draw_particle(0, 2.5, 'k')
dx_rung0 = dx_rung0_bak
dx_rung1_bak = dx_rung1; dx_rung1 = 0
draw_particle(1, 4.5, 'k')
dx_rung1 = dx_rung1_bak
draw_particle(2, 8.5, 'k')
draw_particle('drift', 8, 'k', hatch=colors[0])
"""
# Save figure
set_axis()
plt.savefig('../figure/.timestepping.pdf')
os.system('cd ../figure && pdfcrop --margins 0.5 .timestepping.pdf timestepping.pdf >/dev/null && rm -f .timestepping.pdf')
 | 
| 
	import gspread
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('../python-write-test-325506-04f791ddf6a9.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open('python 書き込みテスト').sheet1
wks.update_acell('A1', 'Hello World!')
print(wks.acell('A1'))
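# --- Hedged follow-up sketch (not in the original snippet) ---
# A couple of other stable gspread calls: write a second cell and dump the
# sheet contents; the cell address and text are illustrative.
wks.update_acell('A2', 'Written from Python')
print(wks.get_all_values())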
 | 