Dataset columns: id (string, length 1-8), text (string, length 6-1.05M), dataset_id (string class, 1 value)
/h2o_pysparkling_2.3-3.42.0.2.post1.tar.gz/h2o_pysparkling_2.3-3.42.0.2.post1/ai/h2o/sparkling/ml/algos/H2OAutoML.py
from warnings import warn

from pyspark import keyword_only

from ai.h2o.sparkling.Initializer import Initializer
from ai.h2o.sparkling.ml.Utils import Utils
from ai.h2o.sparkling.ml.algos.H2OSupervisedAlgorithmWithFoldColumn import H2OSupervisedAlgorithmWithFoldColumn
from ai.h2o.sparkling.ml.params.H2OAutoMLParams import H2OAutoMLParams
from ai.h2o.sparkling.ml.algos.H2OAutoMLExtras import H2OAutoMLExtras


class H2OAutoML(H2OAutoMLParams, H2OSupervisedAlgorithmWithFoldColumn, H2OAutoMLExtras):

    @keyword_only
    def __init__(self,
                 ignoredCols=None, leaderboardDataFrame=None, blendingDataFrame=None, columnsToCategorical=[],
                 keepBinaryModels=False, withContributions=False,
                 dataFrameSerializer="ai.h2o.sparkling.utils.JSONDataFrameSerializer",
                 withLeafNodeAssignments=False, convertInvalidNumbersToNa=False,
                 detailedPredictionCol="detailed_prediction", validationDataFrame=None, featuresCols=[],
                 predictionCol="prediction", convertUnknownCategoricalLevelsToNa=False, splitRatio=1.0,
                 monotoneConstraints={}, withStageResults=False, projectName=None, nfolds=-1,
                 balanceClasses=False, classSamplingFactors=None, maxAfterBalanceSize=5.0,
                 keepCrossValidationPredictions=False, keepCrossValidationModels=False,
                 keepCrossValidationFoldAssignment=False, exportCheckpointsDir=None, distribution="AUTO",
                 tweediePower=1.5, quantileAlpha=0.5, huberAlpha=0.9, customDistributionFunc=None,
                 labelCol="label", foldCol=None, weightCol=None, sortMetric="AUTO", seed=-1,
                 maxModels=0, maxRuntimeSecs=0.0, maxRuntimeSecsPerModel=0.0, stoppingRounds=3,
                 stoppingMetric="AUTO", stoppingTolerance=-1.0, excludeAlgos=None,
                 includeAlgos=["GLM", "DRF", "GBM", "DeepLearning", "StackedEnsemble", "XGBoost"],
                 exploitationRatio=-1.0):
        Initializer.load_sparkling_jar()
        super(H2OAutoML, self).__init__()
        self._java_obj = self._new_java_obj("ai.h2o.sparkling.ml.algos.H2OAutoML", self.uid)
        self._setDefaultValuesFromJava()
        kwargs = Utils.getInputKwargs(self)
        kwargs = self._updateInitKwargs(kwargs)
        if 'interactionPairs' in kwargs:
            warn("Interaction pairs are not supported!")
        self._set(**kwargs)
        self._transfer_params_to_java()
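A minimal usage sketch for the estimator above, assuming a running Spark session with the Sparkling Water backend attached and hypothetical DataFrames train_df/test_df whose response column is named "label". The keyword arguments come from the __init__ signature above; fit() and transform() are the standard Spark ML estimator/model methods this class inherits.

# Sketch only: `train_df` and `test_df` are hypothetical Spark DataFrames, and an
# H2OContext is assumed to be running so that fit() can reach an H2O backend.
from ai.h2o.sparkling.ml.algos.H2OAutoML import H2OAutoML

automl = H2OAutoML(
    labelCol="label",               # response column in train_df
    maxModels=10,                   # cap on the number of models AutoML trains
    maxRuntimeSecs=600.0,           # overall time budget in seconds
    excludeAlgos=["DeepLearning"],  # optionally skip selected algorithms
    seed=42,                        # reproducible leaderboard
)

model = automl.fit(train_df)        # train AutoML and wrap the resulting leader model
scored = model.transform(test_df)   # add the "prediction" column to test_df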
PypiClean
/hardware-control-3.0.1.tar.gz/hardware-control-3.0.1/hardware_control/instruments/trinity_power/tpi.py
from dataclasses import dataclass import logging import time import numpy as np from ...base import Instrument, StopBits from ...base.hooks import call_hooks logger = logging.getLogger(__name__) @dataclass(frozen=True) class Command: command: bytes parameter_length: int = 0 parameter_type: np.dtype = int respond_length: int = 0 def validate_frequency(value: int): """Value is in kHz.""" if value < 35_000: return False if value > 4_400_000: return False return True class TPI(Instrument): """TPI Signal Generator .. image:: /images/TPI.jpg :height: 200 TPI-1001, TPI-1002, & TPI-1005. Implements its own binary driver. """ def __init__(self, instrument_name: str, connection_addr: str): super().__init__(instrument_name, connection_addr) self.manufacturer = "Trinity Power" self.model = "TDI Signal Generator" self.add_parameter( "FREQUENCY", read_command=Command(b"\x07\x09", 4, np.dtype("<i4")), set_command=Command(b"\x08\x09", 4, np.dtype("<i4")), post_hooks=[lambda x: str(int(x))], ) self.add_parameter( "OUTPUT_LEVEL", read_command=Command(b"\x07\x0A", 1, np.dtype("i1")), set_command=Command(b"\x08\x0A", 1, np.dtype("i1")), post_hooks=[lambda x: str(int(x))], ) self.add_parameter( "OUTPUT_ON_OFF", read_command=Command(b"\x07\x0B", 1, np.dtype(bool)), set_command=Command(b"\x08\x0B", 1, np.dtype(bool)), post_hooks=[lambda x: str(bool(x))], ) READ_ONLY_COMMANDS = { "USER_CONTROL_STATUS": Command(b"\x07\x01", 1, np.dtype(bool)), "READ_MODEL_NUMBER": Command(b"\x07\x02", 16, np.dtype("a16")), "READ_SERIAL_NUMBER": Command(b"\x07\x03", 16, np.dtype("a16")), "READ_HARDWARE_VERSION": Command(b"\x07\x04", 16, np.dtype("a16")), "READ_FIRMWARE_VERSION": Command(b"\x07\x05", 16, np.dtype("a16")), "READ_TPI_LINK_VERSION": Command(b"\x07\x06", 8, np.dtype("<i2")), "READ_SUPPLY_VOLTAGE": Command(b"\x07\x07", 24, np.dtype("<f4")), "READ_CURRENT_STATE": Command(b"\x07\x08", 2, np.dtype("u1")), } for k, v in READ_ONLY_COMMANDS.items(): self.add_parameter(k, v) COMMANDS = { "USER_CONTROL": Command(b"\x08\x01"), "SCAN_STOP": Command(b"\x08\x08\x04\x00"), "SCAN_START": Command(b"\x08\x08\x04\x01"), "SCAN_PAUSE": Command(b"\x08\x08\x04\x02"), "SCRIPT_STOP": Command(b"\x08\x08\x05\x00"), "SCRIPT_START": Command(b"\x08\x08\x05\x01"), "SCRIPT_CONTINUE": Command(b"\x08\x08\x05\x02"), } for k, v in COMMANDS.items(): self.add_command(k, v) self.add_command("OUTPUT_ON", self.output_on) self.add_command("OUTPUT_OFF", self.output_off) def output_on(self): self["OUTPUT_ON_OFF"] = 1 def output_off(self): self["OUTPUT_ON_OFF"] = 0 def config(self): """Needs to be called when coming online.""" self.config_serial( baud_rate=3_000_000, stop_bits=StopBits.one, data_bits=8, parity=None, ) self.device.write_termination = None self.device.read_termination = None self.device.end_input = False self.device.end_output = False self.delay = 0.01 def try_connect(self) -> bool: """Overwrite try_connect to always call config when getting online.""" ret = super().try_connect() if self._online: self.config() self.command("USER_CONTROL") return ret def read_package(self): """Read a full package from the instrument. Reads a package and checks the checksum. 
A package consists of: 0xAA, 0x55, L1, L2, body bytes, checksum with: L1 the high order bytes of a 16 bit integer L2 the low order bytes of a 16 bit integer that is L = 256*L1 + L2 body bytes are L bytes checksum is 0xFF - sum(all bytes ignoreing the first two) """ header = self.device.read_bytes(2) if header[0] != 0xAA: print("Error") if header[1] != 0x55: print("Error") length = self.device.read_bytes(2) L = length[0] * 256 + length[1] body = self.device.read_bytes(L) checksum = self.device.read_bytes(1) # calculate package checksum tmp = 0xFF - (sum(length + body) % 256) if checksum != tmp.to_bytes(1, byteorder="little"): print( f"Error in checksum: got {checksum}, expected {tmp}. bytes: {length+body}." ) return body def write_package(self, command: bytes): """Low level write binary data to the instrument. Builds a package and sends it. A package consists of: 0xAA, 0x55, L1, L2, body bytes, checksum with: L1 the high order bytes of a 16 bit integer L2 the low order bytes of a 16 bit integer that is L = 256*L1 + L2 body bytes are L bytes checksum is 0xFF - sum(all bytes ignoreing the first two) """ L = len(command) L1 = L // 256 L2 = L % 256 out = ( L1.to_bytes(1, byteorder="little") + L2.to_bytes(1, byteorder="little") + command ) checksum = 0xFF - (sum(out) % 256) package = ( 0xAA.to_bytes(1, byteorder="little") + 0x55.to_bytes(1, byteorder="little") + out + checksum.to_bytes(1, byteorder="little") ) self.device.write_raw(package) def query_serial(self, command, delay): self.write_package(command.command) time.sleep(delay) reply = self.read_package() reply = reply[2:] reply = np.frombuffer(reply, dtype=command.parameter_type)[0] return reply def write(self, command): self.write_package(command.command) self.read_package() def set_value(self, parameter: str, value) -> None: if parameter in self.set_commands: if parameter in self.pre_hooks: hooks = self.pre_hooks[parameter] value = call_hooks(hooks, value) if value is None: return # if we get strings for values and expect bool, we need to convert manually if ( self.set_commands[parameter].parameter_type == np.dtype(bool) and type(value) == str ): value = value == "True" value = np.array(value, dtype=self.set_commands[parameter].parameter_type) msg = self.set_commands[parameter].command + value.tobytes() self.write_package(msg) reply = self.read_package() return logger.error( f"When calling 'Instrumentset_value': '{parameter}' not available in instrument '{self.name}'" )
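For clarity, here is a standalone sketch of the framing described in the read_package()/write_package() docstrings above: the 0xAA 0x55 header, the high-byte/low-byte 16-bit length, and the 0xFF - sum checksum that skips the header. It needs no hardware; the frequency value used at the end is only an illustration.

# Standalone sketch of the TPI-link framing used by write_package()/read_package().

def build_package(body: bytes) -> bytes:
    """Frame `body` as 0xAA 0x55 <L1> <L2> <body> <checksum>."""
    length = len(body)
    l1, l2 = length // 256, length % 256      # 16-bit length, high byte first
    payload = bytes([l1, l2]) + body
    checksum = 0xFF - (sum(payload) % 256)    # checksum skips the 0xAA 0x55 header
    return bytes([0xAA, 0x55]) + payload + bytes([checksum])


def parse_package(package: bytes) -> bytes:
    """Inverse of build_package(); raises on a bad header or checksum."""
    if package[:2] != b"\xaa\x55":
        raise ValueError("bad header")
    length = package[2] * 256 + package[3]
    body = package[4:4 + length]
    expected = 0xFF - (sum(package[2:4 + length]) % 256)
    if package[4 + length] != expected:
        raise ValueError("bad checksum")
    return body


# Example: the "set frequency" command 0x08 0x09 followed by a little-endian
# 32-bit value in kHz (1_000_000 kHz = 1 GHz), matching the FREQUENCY set_command above.
frame = build_package(b"\x08\x09" + (1_000_000).to_bytes(4, "little"))
assert parse_package(frame) == b"\x08\x09" + (1_000_000).to_bytes(4, "little")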
PypiClean
/signalsnap-0.1.13.tar.gz/signalsnap-0.1.13/docs/_static/underscore-1.3.1.js
(function() { // Baseline setup // -------------- // Establish the root object, `window` in the browser, or `global` on the server. var root = this; // Save the previous value of the `_` variable. var previousUnderscore = root._; // Establish the object that gets returned to break out of a loop iteration. var breaker = {}; // Save bytes in the minified (but not gzipped) version: var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype; // Create quick reference variables for speed access to core prototypes. var slice = ArrayProto.slice, unshift = ArrayProto.unshift, toString = ObjProto.toString, hasOwnProperty = ObjProto.hasOwnProperty; // All **ECMAScript 5** native function implementations that we hope to use // are declared here. var nativeForEach = ArrayProto.forEach, nativeMap = ArrayProto.map, nativeReduce = ArrayProto.reduce, nativeReduceRight = ArrayProto.reduceRight, nativeFilter = ArrayProto.filter, nativeEvery = ArrayProto.every, nativeSome = ArrayProto.some, nativeIndexOf = ArrayProto.indexOf, nativeLastIndexOf = ArrayProto.lastIndexOf, nativeIsArray = Array.isArray, nativeKeys = Object.keys, nativeBind = FuncProto.bind; // Create a safe reference to the Underscore object for use below. var _ = function(obj) { return new wrapper(obj); }; // Export the Underscore object for **Node.js**, with // backwards-compatibility for the old `require()` API. If we're in // the browser, add `_` as a global object via a string identifier, // for Closure Compiler "advanced" mode. if (typeof exports !== 'undefined') { if (typeof module !== 'undefined' && module.exports) { exports = module.exports = _; } exports._ = _; } else { root['_'] = _; } // Current version. _.VERSION = '1.3.1'; // Collection Functions // -------------------- // The cornerstone, an `each` implementation, aka `forEach`. // Handles objects with the built-in `forEach`, arrays, and raw objects. // Delegates to **ECMAScript 5**'s native `forEach` if available. var each = _.each = _.forEach = function(obj, iterator, context) { if (obj == null) return; if (nativeForEach && obj.forEach === nativeForEach) { obj.forEach(iterator, context); } else if (obj.length === +obj.length) { for (var i = 0, l = obj.length; i < l; i++) { if (i in obj && iterator.call(context, obj[i], i, obj) === breaker) return; } } else { for (var key in obj) { if (_.has(obj, key)) { if (iterator.call(context, obj[key], key, obj) === breaker) return; } } } }; // Return the results of applying the iterator to each element. // Delegates to **ECMAScript 5**'s native `map` if available. _.map = _.collect = function(obj, iterator, context) { var results = []; if (obj == null) return results; if (nativeMap && obj.map === nativeMap) return obj.map(iterator, context); each(obj, function(value, index, list) { results[results.length] = iterator.call(context, value, index, list); }); if (obj.length === +obj.length) results.length = obj.length; return results; }; // **Reduce** builds up a single result from a list of values, aka `inject`, // or `foldl`. Delegates to **ECMAScript 5**'s native `reduce` if available. _.reduce = _.foldl = _.inject = function(obj, iterator, memo, context) { var initial = arguments.length > 2; if (obj == null) obj = []; if (nativeReduce && obj.reduce === nativeReduce) { if (context) iterator = _.bind(iterator, context); return initial ? 
obj.reduce(iterator, memo) : obj.reduce(iterator); } each(obj, function(value, index, list) { if (!initial) { memo = value; initial = true; } else { memo = iterator.call(context, memo, value, index, list); } }); if (!initial) throw new TypeError('Reduce of empty array with no initial value'); return memo; }; // The right-associative version of reduce, also known as `foldr`. // Delegates to **ECMAScript 5**'s native `reduceRight` if available. _.reduceRight = _.foldr = function(obj, iterator, memo, context) { var initial = arguments.length > 2; if (obj == null) obj = []; if (nativeReduceRight && obj.reduceRight === nativeReduceRight) { if (context) iterator = _.bind(iterator, context); return initial ? obj.reduceRight(iterator, memo) : obj.reduceRight(iterator); } var reversed = _.toArray(obj).reverse(); if (context && !initial) iterator = _.bind(iterator, context); return initial ? _.reduce(reversed, iterator, memo, context) : _.reduce(reversed, iterator); }; // Return the first value which passes a truth test. Aliased as `detect`. _.find = _.detect = function(obj, iterator, context) { var result; any(obj, function(value, index, list) { if (iterator.call(context, value, index, list)) { result = value; return true; } }); return result; }; // Return all the elements that pass a truth test. // Delegates to **ECMAScript 5**'s native `filter` if available. // Aliased as `select`. _.filter = _.select = function(obj, iterator, context) { var results = []; if (obj == null) return results; if (nativeFilter && obj.filter === nativeFilter) return obj.filter(iterator, context); each(obj, function(value, index, list) { if (iterator.call(context, value, index, list)) results[results.length] = value; }); return results; }; // Return all the elements for which a truth test fails. _.reject = function(obj, iterator, context) { var results = []; if (obj == null) return results; each(obj, function(value, index, list) { if (!iterator.call(context, value, index, list)) results[results.length] = value; }); return results; }; // Determine whether all of the elements match a truth test. // Delegates to **ECMAScript 5**'s native `every` if available. // Aliased as `all`. _.every = _.all = function(obj, iterator, context) { var result = true; if (obj == null) return result; if (nativeEvery && obj.every === nativeEvery) return obj.every(iterator, context); each(obj, function(value, index, list) { if (!(result = result && iterator.call(context, value, index, list))) return breaker; }); return result; }; // Determine if at least one element in the object matches a truth test. // Delegates to **ECMAScript 5**'s native `some` if available. // Aliased as `any`. var any = _.some = _.any = function(obj, iterator, context) { iterator || (iterator = _.identity); var result = false; if (obj == null) return result; if (nativeSome && obj.some === nativeSome) return obj.some(iterator, context); each(obj, function(value, index, list) { if (result || (result = iterator.call(context, value, index, list))) return breaker; }); return !!result; }; // Determine if a given value is included in the array or object using `===`. // Aliased as `contains`. _.include = _.contains = function(obj, target) { var found = false; if (obj == null) return found; if (nativeIndexOf && obj.indexOf === nativeIndexOf) return obj.indexOf(target) != -1; found = any(obj, function(value) { return value === target; }); return found; }; // Invoke a method (with arguments) on every item in a collection. 
_.invoke = function(obj, method) { var args = slice.call(arguments, 2); return _.map(obj, function(value) { return (_.isFunction(method) ? method || value : value[method]).apply(value, args); }); }; // Convenience version of a common use case of `map`: fetching a property. _.pluck = function(obj, key) { return _.map(obj, function(value){ return value[key]; }); }; // Return the maximum element or (element-based computation). _.max = function(obj, iterator, context) { if (!iterator && _.isArray(obj)) return Math.max.apply(Math, obj); if (!iterator && _.isEmpty(obj)) return -Infinity; var result = {computed : -Infinity}; each(obj, function(value, index, list) { var computed = iterator ? iterator.call(context, value, index, list) : value; computed >= result.computed && (result = {value : value, computed : computed}); }); return result.value; }; // Return the minimum element (or element-based computation). _.min = function(obj, iterator, context) { if (!iterator && _.isArray(obj)) return Math.min.apply(Math, obj); if (!iterator && _.isEmpty(obj)) return Infinity; var result = {computed : Infinity}; each(obj, function(value, index, list) { var computed = iterator ? iterator.call(context, value, index, list) : value; computed < result.computed && (result = {value : value, computed : computed}); }); return result.value; }; // Shuffle an array. _.shuffle = function(obj) { var shuffled = [], rand; each(obj, function(value, index, list) { if (index == 0) { shuffled[0] = value; } else { rand = Math.floor(Math.random() * (index + 1)); shuffled[index] = shuffled[rand]; shuffled[rand] = value; } }); return shuffled; }; // Sort the object's values by a criterion produced by an iterator. _.sortBy = function(obj, iterator, context) { return _.pluck(_.map(obj, function(value, index, list) { return { value : value, criteria : iterator.call(context, value, index, list) }; }).sort(function(left, right) { var a = left.criteria, b = right.criteria; return a < b ? -1 : a > b ? 1 : 0; }), 'value'); }; // Groups the object's values by a criterion. Pass either a string attribute // to group by, or a function that returns the criterion. _.groupBy = function(obj, val) { var result = {}; var iterator = _.isFunction(val) ? val : function(obj) { return obj[val]; }; each(obj, function(value, index) { var key = iterator(value, index); (result[key] || (result[key] = [])).push(value); }); return result; }; // Use a comparator function to figure out at what index an object should // be inserted so as to maintain order. Uses binary search. _.sortedIndex = function(array, obj, iterator) { iterator || (iterator = _.identity); var low = 0, high = array.length; while (low < high) { var mid = (low + high) >> 1; iterator(array[mid]) < iterator(obj) ? low = mid + 1 : high = mid; } return low; }; // Safely convert anything iterable into a real, live array. _.toArray = function(iterable) { if (!iterable) return []; if (iterable.toArray) return iterable.toArray(); if (_.isArray(iterable)) return slice.call(iterable); if (_.isArguments(iterable)) return slice.call(iterable); return _.values(iterable); }; // Return the number of elements in an object. _.size = function(obj) { return _.toArray(obj).length; }; // Array Functions // --------------- // Get the first element of an array. Passing **n** will return the first N // values in the array. Aliased as `head`. The **guard** check allows it to work // with `_.map`. _.first = _.head = function(array, n, guard) { return (n != null) && !guard ? 
slice.call(array, 0, n) : array[0]; }; // Returns everything but the last entry of the array. Especcialy useful on // the arguments object. Passing **n** will return all the values in // the array, excluding the last N. The **guard** check allows it to work with // `_.map`. _.initial = function(array, n, guard) { return slice.call(array, 0, array.length - ((n == null) || guard ? 1 : n)); }; // Get the last element of an array. Passing **n** will return the last N // values in the array. The **guard** check allows it to work with `_.map`. _.last = function(array, n, guard) { if ((n != null) && !guard) { return slice.call(array, Math.max(array.length - n, 0)); } else { return array[array.length - 1]; } }; // Returns everything but the first entry of the array. Aliased as `tail`. // Especially useful on the arguments object. Passing an **index** will return // the rest of the values in the array from that index onward. The **guard** // check allows it to work with `_.map`. _.rest = _.tail = function(array, index, guard) { return slice.call(array, (index == null) || guard ? 1 : index); }; // Trim out all falsy values from an array. _.compact = function(array) { return _.filter(array, function(value){ return !!value; }); }; // Return a completely flattened version of an array. _.flatten = function(array, shallow) { return _.reduce(array, function(memo, value) { if (_.isArray(value)) return memo.concat(shallow ? value : _.flatten(value)); memo[memo.length] = value; return memo; }, []); }; // Return a version of the array that does not contain the specified value(s). _.without = function(array) { return _.difference(array, slice.call(arguments, 1)); }; // Produce a duplicate-free version of the array. If the array has already // been sorted, you have the option of using a faster algorithm. // Aliased as `unique`. _.uniq = _.unique = function(array, isSorted, iterator) { var initial = iterator ? _.map(array, iterator) : array; var result = []; _.reduce(initial, function(memo, el, i) { if (0 == i || (isSorted === true ? _.last(memo) != el : !_.include(memo, el))) { memo[memo.length] = el; result[result.length] = array[i]; } return memo; }, []); return result; }; // Produce an array that contains the union: each distinct element from all of // the passed-in arrays. _.union = function() { return _.uniq(_.flatten(arguments, true)); }; // Produce an array that contains every item shared between all the // passed-in arrays. (Aliased as "intersect" for back-compat.) _.intersection = _.intersect = function(array) { var rest = slice.call(arguments, 1); return _.filter(_.uniq(array), function(item) { return _.every(rest, function(other) { return _.indexOf(other, item) >= 0; }); }); }; // Take the difference between one array and a number of other arrays. // Only the elements present in just the first array will remain. _.difference = function(array) { var rest = _.flatten(slice.call(arguments, 1)); return _.filter(array, function(value){ return !_.include(rest, value); }); }; // Zip together multiple lists into a single array -- elements that share // an index go together. _.zip = function() { var args = slice.call(arguments); var length = _.max(_.pluck(args, 'length')); var results = new Array(length); for (var i = 0; i < length; i++) results[i] = _.pluck(args, "" + i); return results; }; // If the browser doesn't supply us with indexOf (I'm looking at you, **MSIE**), // we need this function. 
Return the position of the first occurrence of an // item in an array, or -1 if the item is not included in the array. // Delegates to **ECMAScript 5**'s native `indexOf` if available. // If the array is large and already in sort order, pass `true` // for **isSorted** to use binary search. _.indexOf = function(array, item, isSorted) { if (array == null) return -1; var i, l; if (isSorted) { i = _.sortedIndex(array, item); return array[i] === item ? i : -1; } if (nativeIndexOf && array.indexOf === nativeIndexOf) return array.indexOf(item); for (i = 0, l = array.length; i < l; i++) if (i in array && array[i] === item) return i; return -1; }; // Delegates to **ECMAScript 5**'s native `lastIndexOf` if available. _.lastIndexOf = function(array, item) { if (array == null) return -1; if (nativeLastIndexOf && array.lastIndexOf === nativeLastIndexOf) return array.lastIndexOf(item); var i = array.length; while (i--) if (i in array && array[i] === item) return i; return -1; }; // Generate an integer Array containing an arithmetic progression. A port of // the native Python `range()` function. See // [the Python documentation](http://docs.python.org/library/functions.html#range). _.range = function(start, stop, step) { if (arguments.length <= 1) { stop = start || 0; start = 0; } step = arguments[2] || 1; var len = Math.max(Math.ceil((stop - start) / step), 0); var idx = 0; var range = new Array(len); while(idx < len) { range[idx++] = start; start += step; } return range; }; // Function (ahem) Functions // ------------------ // Reusable constructor function for prototype setting. var ctor = function(){}; // Create a function bound to a given object (assigning `this`, and arguments, // optionally). Binding with arguments is also known as `curry`. // Delegates to **ECMAScript 5**'s native `Function.bind` if available. // We check for `func.bind` first, to fail fast when `func` is undefined. _.bind = function bind(func, context) { var bound, args; if (func.bind === nativeBind && nativeBind) return nativeBind.apply(func, slice.call(arguments, 1)); if (!_.isFunction(func)) throw new TypeError; args = slice.call(arguments, 2); return bound = function() { if (!(this instanceof bound)) return func.apply(context, args.concat(slice.call(arguments))); ctor.prototype = func.prototype; var self = new ctor; var result = func.apply(self, args.concat(slice.call(arguments))); if (Object(result) === result) return result; return self; }; }; // Bind all of an object's methods to that object. Useful for ensuring that // all callbacks defined on an object belong to it. _.bindAll = function(obj) { var funcs = slice.call(arguments, 1); if (funcs.length == 0) funcs = _.functions(obj); each(funcs, function(f) { obj[f] = _.bind(obj[f], obj); }); return obj; }; // Memoize an expensive function by storing its results. _.memoize = function(func, hasher) { var memo = {}; hasher || (hasher = _.identity); return function() { var key = hasher.apply(this, arguments); return _.has(memo, key) ? memo[key] : (memo[key] = func.apply(this, arguments)); }; }; // Delays a function for the given number of milliseconds, and then calls // it with the arguments supplied. _.delay = function(func, wait) { var args = slice.call(arguments, 2); return setTimeout(function(){ return func.apply(func, args); }, wait); }; // Defers a function, scheduling it to run after the current call stack has // cleared. 
_.defer = function(func) { return _.delay.apply(_, [func, 1].concat(slice.call(arguments, 1))); }; // Returns a function, that, when invoked, will only be triggered at most once // during a given window of time. _.throttle = function(func, wait) { var context, args, timeout, throttling, more; var whenDone = _.debounce(function(){ more = throttling = false; }, wait); return function() { context = this; args = arguments; var later = function() { timeout = null; if (more) func.apply(context, args); whenDone(); }; if (!timeout) timeout = setTimeout(later, wait); if (throttling) { more = true; } else { func.apply(context, args); } whenDone(); throttling = true; }; }; // Returns a function, that, as long as it continues to be invoked, will not // be triggered. The function will be called after it stops being called for // N milliseconds. _.debounce = function(func, wait) { var timeout; return function() { var context = this, args = arguments; var later = function() { timeout = null; func.apply(context, args); }; clearTimeout(timeout); timeout = setTimeout(later, wait); }; }; // Returns a function that will be executed at most one time, no matter how // often you call it. Useful for lazy initialization. _.once = function(func) { var ran = false, memo; return function() { if (ran) return memo; ran = true; return memo = func.apply(this, arguments); }; }; // Returns the first function passed as an argument to the second, // allowing you to adjust arguments, run code before and after, and // conditionally execute the original function. _.wrap = function(func, wrapper) { return function() { var args = [func].concat(slice.call(arguments, 0)); return wrapper.apply(this, args); }; }; // Returns a function that is the composition of a list of functions, each // consuming the return value of the function that follows. _.compose = function() { var funcs = arguments; return function() { var args = arguments; for (var i = funcs.length - 1; i >= 0; i--) { args = [funcs[i].apply(this, args)]; } return args[0]; }; }; // Returns a function that will only be executed after being called N times. _.after = function(times, func) { if (times <= 0) return func(); return function() { if (--times < 1) { return func.apply(this, arguments); } }; }; // Object Functions // ---------------- // Retrieve the names of an object's properties. // Delegates to **ECMAScript 5**'s native `Object.keys` _.keys = nativeKeys || function(obj) { if (obj !== Object(obj)) throw new TypeError('Invalid object'); var keys = []; for (var key in obj) if (_.has(obj, key)) keys[keys.length] = key; return keys; }; // Retrieve the values of an object's properties. _.values = function(obj) { return _.map(obj, _.identity); }; // Return a sorted list of the function names available on the object. // Aliased as `methods` _.functions = _.methods = function(obj) { var names = []; for (var key in obj) { if (_.isFunction(obj[key])) names.push(key); } return names.sort(); }; // Extend a given object with all the properties in passed-in object(s). _.extend = function(obj) { each(slice.call(arguments, 1), function(source) { for (var prop in source) { obj[prop] = source[prop]; } }); return obj; }; // Fill in a given object with default properties. _.defaults = function(obj) { each(slice.call(arguments, 1), function(source) { for (var prop in source) { if (obj[prop] == null) obj[prop] = source[prop]; } }); return obj; }; // Create a (shallow-cloned) duplicate of an object. _.clone = function(obj) { if (!_.isObject(obj)) return obj; return _.isArray(obj) ? 
obj.slice() : _.extend({}, obj); }; // Invokes interceptor with the obj, and then returns obj. // The primary purpose of this method is to "tap into" a method chain, in // order to perform operations on intermediate results within the chain. _.tap = function(obj, interceptor) { interceptor(obj); return obj; }; // Internal recursive comparison function. function eq(a, b, stack) { // Identical objects are equal. `0 === -0`, but they aren't identical. // See the Harmony `egal` proposal: http://wiki.ecmascript.org/doku.php?id=harmony:egal. if (a === b) return a !== 0 || 1 / a == 1 / b; // A strict comparison is necessary because `null == undefined`. if (a == null || b == null) return a === b; // Unwrap any wrapped objects. if (a._chain) a = a._wrapped; if (b._chain) b = b._wrapped; // Invoke a custom `isEqual` method if one is provided. if (a.isEqual && _.isFunction(a.isEqual)) return a.isEqual(b); if (b.isEqual && _.isFunction(b.isEqual)) return b.isEqual(a); // Compare `[[Class]]` names. var className = toString.call(a); if (className != toString.call(b)) return false; switch (className) { // Strings, numbers, dates, and booleans are compared by value. case '[object String]': // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is // equivalent to `new String("5")`. return a == String(b); case '[object Number]': // `NaN`s are equivalent, but non-reflexive. An `egal` comparison is performed for // other numeric values. return a != +a ? b != +b : (a == 0 ? 1 / a == 1 / b : a == +b); case '[object Date]': case '[object Boolean]': // Coerce dates and booleans to numeric primitive values. Dates are compared by their // millisecond representations. Note that invalid dates with millisecond representations // of `NaN` are not equivalent. return +a == +b; // RegExps are compared by their source patterns and flags. case '[object RegExp]': return a.source == b.source && a.global == b.global && a.multiline == b.multiline && a.ignoreCase == b.ignoreCase; } if (typeof a != 'object' || typeof b != 'object') return false; // Assume equality for cyclic structures. The algorithm for detecting cyclic // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`. var length = stack.length; while (length--) { // Linear search. Performance is inversely proportional to the number of // unique nested structures. if (stack[length] == a) return true; } // Add the first object to the stack of traversed objects. stack.push(a); var size = 0, result = true; // Recursively compare objects and arrays. if (className == '[object Array]') { // Compare array lengths to determine if a deep comparison is necessary. size = a.length; result = size == b.length; if (result) { // Deep compare the contents, ignoring non-numeric properties. while (size--) { // Ensure commutative equality for sparse arrays. if (!(result = size in a == size in b && eq(a[size], b[size], stack))) break; } } } else { // Objects with different constructors are not equivalent. if ('constructor' in a != 'constructor' in b || a.constructor != b.constructor) return false; // Deep compare objects. for (var key in a) { if (_.has(a, key)) { // Count the expected number of properties. size++; // Deep compare each member. if (!(result = _.has(b, key) && eq(a[key], b[key], stack))) break; } } // Ensure that both objects contain the same number of properties. if (result) { for (key in b) { if (_.has(b, key) && !(size--)) break; } result = !size; } } // Remove the first object from the stack of traversed objects. 
stack.pop(); return result; } // Perform a deep comparison to check if two objects are equal. _.isEqual = function(a, b) { return eq(a, b, []); }; // Is a given array, string, or object empty? // An "empty" object has no enumerable own-properties. _.isEmpty = function(obj) { if (_.isArray(obj) || _.isString(obj)) return obj.length === 0; for (var key in obj) if (_.has(obj, key)) return false; return true; }; // Is a given value a DOM element? _.isElement = function(obj) { return !!(obj && obj.nodeType == 1); }; // Is a given value an array? // Delegates to ECMA5's native Array.isArray _.isArray = nativeIsArray || function(obj) { return toString.call(obj) == '[object Array]'; }; // Is a given variable an object? _.isObject = function(obj) { return obj === Object(obj); }; // Is a given variable an arguments object? _.isArguments = function(obj) { return toString.call(obj) == '[object Arguments]'; }; if (!_.isArguments(arguments)) { _.isArguments = function(obj) { return !!(obj && _.has(obj, 'callee')); }; } // Is a given value a function? _.isFunction = function(obj) { return toString.call(obj) == '[object Function]'; }; // Is a given value a string? _.isString = function(obj) { return toString.call(obj) == '[object String]'; }; // Is a given value a number? _.isNumber = function(obj) { return toString.call(obj) == '[object Number]'; }; // Is the given value `NaN`? _.isNaN = function(obj) { // `NaN` is the only value for which `===` is not reflexive. return obj !== obj; }; // Is a given value a boolean? _.isBoolean = function(obj) { return obj === true || obj === false || toString.call(obj) == '[object Boolean]'; }; // Is a given value a date? _.isDate = function(obj) { return toString.call(obj) == '[object Date]'; }; // Is the given value a regular expression? _.isRegExp = function(obj) { return toString.call(obj) == '[object RegExp]'; }; // Is a given value equal to null? _.isNull = function(obj) { return obj === null; }; // Is a given variable undefined? _.isUndefined = function(obj) { return obj === void 0; }; // Has own property? _.has = function(obj, key) { return hasOwnProperty.call(obj, key); }; // Utility Functions // ----------------- // Run Underscore.js in *noConflict* mode, returning the `_` variable to its // previous owner. Returns a reference to the Underscore object. _.noConflict = function() { root._ = previousUnderscore; return this; }; // Keep the identity function around for default iterators. _.identity = function(value) { return value; }; // Run a function **n** times. _.times = function (n, iterator, context) { for (var i = 0; i < n; i++) iterator.call(context, i); }; // Escape a string for HTML interpolation. _.escape = function(string) { return (''+string).replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;').replace(/'/g, '&#x27;').replace(/\//g,'&#x2F;'); }; // Add your own custom functions to the Underscore object, ensuring that // they're correctly added to the OOP wrapper as well. _.mixin = function(obj) { each(_.functions(obj), function(name){ addToWrapper(name, _[name] = obj[name]); }); }; // Generate a unique integer id (unique within the entire client session). // Useful for temporary DOM ids. var idCounter = 0; _.uniqueId = function(prefix) { var id = idCounter++; return prefix ? prefix + id : id; }; // By default, Underscore uses ERB-style template delimiters, change the // following template settings to use alternative delimiters. 
_.templateSettings = { evaluate : /<%([\s\S]+?)%>/g, interpolate : /<%=([\s\S]+?)%>/g, escape : /<%-([\s\S]+?)%>/g }; // When customizing `templateSettings`, if you don't want to define an // interpolation, evaluation or escaping regex, we need one that is // guaranteed not to match. var noMatch = /.^/; // Within an interpolation, evaluation, or escaping, remove HTML escaping // that had been previously added. var unescape = function(code) { return code.replace(/\\\\/g, '\\').replace(/\\'/g, "'"); }; // JavaScript micro-templating, similar to John Resig's implementation. // Underscore templating handles arbitrary delimiters, preserves whitespace, // and correctly escapes quotes within interpolated code. _.template = function(str, data) { var c = _.templateSettings; var tmpl = 'var __p=[],print=function(){__p.push.apply(__p,arguments);};' + 'with(obj||{}){__p.push(\'' + str.replace(/\\/g, '\\\\') .replace(/'/g, "\\'") .replace(c.escape || noMatch, function(match, code) { return "',_.escape(" + unescape(code) + "),'"; }) .replace(c.interpolate || noMatch, function(match, code) { return "'," + unescape(code) + ",'"; }) .replace(c.evaluate || noMatch, function(match, code) { return "');" + unescape(code).replace(/[\r\n\t]/g, ' ') + ";__p.push('"; }) .replace(/\r/g, '\\r') .replace(/\n/g, '\\n') .replace(/\t/g, '\\t') + "');}return __p.join('');"; var func = new Function('obj', '_', tmpl); if (data) return func(data, _); return function(data) { return func.call(this, data, _); }; }; // Add a "chain" function, which will delegate to the wrapper. _.chain = function(obj) { return _(obj).chain(); }; // The OOP Wrapper // --------------- // If Underscore is called as a function, it returns a wrapped object that // can be used OO-style. This wrapper holds altered versions of all the // underscore functions. Wrapped objects may be chained. var wrapper = function(obj) { this._wrapped = obj; }; // Expose `wrapper.prototype` as `_.prototype` _.prototype = wrapper.prototype; // Helper function to continue chaining intermediate results. var result = function(obj, chain) { return chain ? _(obj).chain() : obj; }; // A method to easily add functions to the OOP wrapper. var addToWrapper = function(name, func) { wrapper.prototype[name] = function() { var args = slice.call(arguments); unshift.call(args, this._wrapped); return result(func.apply(_, args), this._chain); }; }; // Add all of the Underscore functions to the wrapper object. _.mixin(_); // Add all mutator Array functions to the wrapper. each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) { var method = ArrayProto[name]; wrapper.prototype[name] = function() { var wrapped = this._wrapped; method.apply(wrapped, arguments); var length = wrapped.length; if ((name == 'shift' || name == 'splice') && length === 0) delete wrapped[0]; return result(wrapped, this._chain); }; }); // Add all accessor Array functions to the wrapper. each(['concat', 'join', 'slice'], function(name) { var method = ArrayProto[name]; wrapper.prototype[name] = function() { return result(method.apply(this._wrapped, arguments), this._chain); }; }); // Start chaining a wrapped Underscore object. wrapper.prototype.chain = function() { this._chain = true; return this; }; // Extracts the result from a wrapped and chained object. wrapper.prototype.value = function() { return this._wrapped; }; }).call(this);
PypiClean
/thrift-unofficial-0.14.1rc0.tar.gz/thrift-unofficial-0.14.1rc0/src/transport/sslcompat.py
import logging import sys from thrift.transport.TTransport import TTransportException logger = logging.getLogger(__name__) def legacy_validate_callback(cert, hostname): """legacy method to validate the peer's SSL certificate, and to check the commonName of the certificate to ensure it matches the hostname we used to make this connection. Does not support subjectAltName records in certificates. raises TTransportException if the certificate fails validation. """ if 'subject' not in cert: raise TTransportException( TTransportException.NOT_OPEN, 'No SSL certificate found from %s' % hostname) fields = cert['subject'] for field in fields: # ensure structure we get back is what we expect if not isinstance(field, tuple): continue cert_pair = field[0] if len(cert_pair) < 2: continue cert_key, cert_value = cert_pair[0:2] if cert_key != 'commonName': continue certhost = cert_value # this check should be performed by some sort of Access Manager if certhost == hostname: # success, cert commonName matches desired hostname return else: raise TTransportException( TTransportException.UNKNOWN, 'Hostname we connected to "%s" doesn\'t match certificate ' 'provided commonName "%s"' % (hostname, certhost)) raise TTransportException( TTransportException.UNKNOWN, 'Could not validate SSL certificate from host "%s". Cert=%s' % (hostname, cert)) def _optional_dependencies(): try: import ipaddress # noqa logger.debug('ipaddress module is available') ipaddr = True except ImportError: logger.warn('ipaddress module is unavailable') ipaddr = False if sys.hexversion < 0x030500F0: try: from backports.ssl_match_hostname import match_hostname, __version__ as ver ver = list(map(int, ver.split('.'))) logger.debug('backports.ssl_match_hostname module is available') match = match_hostname if ver[0] * 10 + ver[1] >= 35: return ipaddr, match else: logger.warn('backports.ssl_match_hostname module is too old') ipaddr = False except ImportError: logger.warn('backports.ssl_match_hostname is unavailable') ipaddr = False try: from ssl import match_hostname logger.debug('ssl.match_hostname is available') match = match_hostname except ImportError: logger.warn('using legacy validation callback') match = legacy_validate_callback return ipaddr, match _match_has_ipaddress, _match_hostname = _optional_dependencies()
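An illustrative call of the legacy fallback above, using a certificate dict shaped like ssl's getpeercert() output. The hostnames are made up, and the import path is an assumption based on this file's location inside the thrift package.

from thrift.transport.sslcompat import legacy_validate_callback
from thrift.transport.TTransport import TTransportException

# Minimal peer-certificate dict: subject is a tuple of RDNs, each a tuple of (key, value) pairs.
cert = {"subject": ((("commonName", "thrift.example.org"),),)}

legacy_validate_callback(cert, "thrift.example.org")      # commonName matches: returns None

try:
    legacy_validate_callback(cert, "other.example.org")   # mismatch: raises
except TTransportException as exc:
    print(exc)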
PypiClean
/starry_process-0.9.8.tar.gz/starry_process-0.9.8/docs/index.rst
.. raw:: html <div align="center"> <img src="https://github.com/rodluger/starry_process/blob/master/starry_process.gif?raw=true" width="450px"> </img> <br/> </div> <br/><br/> Documentation ============= Welcome to the :py:mod:`starry_process` documentation. The :py:mod:`starry_process` code is an implementation of an **interpretable Gaussian process (GP) for stellar light curves.** This means that the hyperparameters of the GP are actual physical properties of the stellar surface, such as the size, position, contrast, and number of star spots. The primary application of :py:mod:`starry_process` is to model stellar light curves with the goal of inferring their spot parameters. For more information, check out the `JOSS paper <https://ui.adsabs.harvard.edu/abs/2021arXiv210201774L>`_, the Mapping Stellar Surfaces paper series (`Paper I <https://ui.adsabs.harvard.edu/abs/2021arXiv210200007L>`_, `Paper II <https://ui.adsabs.harvard.edu/abs/2021arXiv210201697L>`_), as well as this `interactive live demo <http://starry-process.flatironinstitute.org>`_. .. toctree:: :maxdepth: 1 :caption: Contents: Installation <install> Examples <examples> API <api> Live demo <http://starry-process.flatironinstitute.org> GitHub <https://github.com/rodluger/starry_process> Submit an issue <https://github.com/rodluger/starry_process/issues> Read the JOSS paper <https://ui.adsabs.harvard.edu/abs/2021arXiv210201774L> Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
PypiClean
/thomas-jupyter-widget-0.1.2.tar.gz/thomas-jupyter-widget-0.1.2/js/lib/widget.js
var widgets = require('@jupyter-widgets/base'); var _ = require('lodash'); var Konva = require('konva'); const { v1: uuid1 } = require('uuid'); // See widget.py for the kernel counterpart to this file. // Custom Model. Custom widgets models must at least provide default values // for model attributes, including // // - `_view_name` // - `_view_module` // - `_view_module_version` // // - `_model_name` // - `_model_module` // - `_model_module_version` // // when different from the base class. // When serializing the entire widget state for embedding, only values that // differ from the defaults will be specified. var Model = widgets.DOMWidgetModel.extend({ defaults: _.extend(widgets.DOMWidgetModel.prototype.defaults(), { _view_name : 'View', _model_name : 'Model', _view_module : 'thomas-jupyter-widget', _model_module : 'thomas-jupyter-widget', _model_module_version : '0.1.0', _view_module_version : '0.1.0', value : {}, marginals_and_evidence : {}, evidence_sink: '', height: 300, }) }); function intersect(x1, y1, x2, y2, x3, y3, x4, y4) { // Check if none of the lines are of length 0 if ((x1 === x2 && y1 === y2) || (x3 === x4 && y3 === y4)) { console.warn('Found line of length 0'); return false } var denominator = ((y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)) // Lines are parallel if (denominator === 0) { console.warn('Denominator is zero 0'); return false } let ua = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / denominator let ub = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / denominator // is the intersection along the segments if (ua < 0 || ua > 1 || ub < 0 || ub > 1) { // console.debug('Intersection outside segments'); return false } // Return an object with the x and y coordinates of the intersection let x = x1 + ua * (x2 - x1) let y = y1 + ua * (y2 - y1) return {x, y} } function compute_intersection(corners, line) { const points = [ ['tl', 'tr'], ['tl', 'bl'], ['tr', 'br'], ['bl', 'br'], ]; var intersection = false; for (const p of points) { const key1 = p[0], key2 = p[1]; intersection = intersect( corners[key1].x, corners[key1].y, corners[key2].x, corners[key2].y, line.src.x, line.src.y, line.dst.x, line.dst.y, ); if (intersection) { break; } } if (!intersection) { console.warn('Could not determine intersection'); } return intersection; } class Node extends Konva.Group { /** * Create a new edge between two nodes. * * Args: * node (object): part of the JSON that defines a Node. 
* marginals (dict): dict of marginals, indexed by state */ constructor(node, marginals, evidence, eventhandlers) { // First things first super({ x: node.position[0], y: node.position[1], draggable: true, }); const { onDragMove, onDragEnd, onStateSelected, } = eventhandlers; this.RV = node.RV; this.name = node.name; this.node = node; this.edges = []; this._title_height = 15; this._state_offset = 8; this._state_height = 14; this._state_padding = 2; this._width = 180 this._height = this.computeHeight(); this.width(this._width); this.height(this._height); this.createBackground(); this.createTitle(); this.createStates(marginals, evidence, onStateSelected); this.on('dragmove', () => onDragMove(this)); this.on('dragend', () => onDragEnd(this)); } addEdge(edge) { this.edges.push(edge); } getCenter() { return { x: this.x() + this.width() / 2, y: this.y() + this.height() / 2 } } getCorners() { return { 'tl': {x: this.x(), y: this.y()}, 'tr': {x: this.x() + this.width(), y: this.y()}, 'bl': {x: this.x(), y: this.y() + this.height()}, 'br': {x: this.x() + this.width(), y: this.y() + this.height()}, } } /** * Compute the node's height based on the number of states. */ computeHeight() { const { node } = this; return ( node.states.length * this._state_height + 2 * this._state_offset + this._title_height ) } /** * Create an opaque background. */ createBackground() { this.add( new Konva.Rect({ fill: '#efefef', width: this._width, height: this._height, cornerRadius: 5, shadowBlur: 5, }) ); } /** * Create a label to display the RV. */ createTitle() { const { node } = this; // Node's RV in the top-left const label = new Konva.Label(); label.add( new Konva.Text({ text: node.RV, padding: 4, fontSize: this._title_height, fontStyle: "bold", width: this._width, }) ); // If full name differs from RV, display it behind the RV if (node.RV != node.name) { label.add( new Konva.Text({ x: 20, text: `| ${node.name}`, padding: 4, fontSize: this._title_height - 2, fontStyle: "normal", verticalAlign: "bottom", width: this._width, }) ); } this.add(label); } /** * Create a Group to hold a state's shapes. * * Args: * state (str): name/identifier of the state. 
* idx (int): position in the list of states * marginals (dict): dict of marginals, indexed by state */ createState(state, idx, marginals, evidence, onStateSelected) { const y = ( this._title_height + this._state_offset + idx * this._state_height ) const label_width = 70, marginal_width = 55; const remaining_width = this._width - label_width - marginal_width; var marginal = '...'; var bar_width = 0; var bar_color = '#003366'; if (evidence && evidence === state) { bar_color = '#00BCCC'; } if (marginals) { marginal = (100 * marginals[state]).toFixed(2) + '%'; bar_width = 1 + remaining_width * marginals[state] } // Create the Group const group = new Konva.Group({y: y}); // State label group.add( new Konva.Label().add( new Konva.Text({ text: state, padding: this._state_padding, fontSize: this._state_height - this._state_padding, wrap: 'none', ellipsis: 'ellipsis', width: this._width, }) ) ); // State bar group.add( new Konva.Rect({ x: label_width, y: 1, width: bar_width, height: this._state_height - 2, fill: bar_color, }) ); // State marginal group.add( new Konva.Label({ x: this._width - marginal_width }).add( new Konva.Text({ text: marginal, padding: this._state_padding, fontSize: this._state_height - this._state_padding, align: "right", wrap: "none", width: marginal_width, }) ) ); group.on('dblclick', () => onStateSelected(this.RV, state)) this.add(group); } createStates(marginals, evidence, onStateSelected) { const { states } = this.node; states.map((state, idx) => { this.createState(state, idx, marginals, evidence, onStateSelected) }); } } class Edge extends Konva.Arrow { /** * Create a new edge between two nodes. * * Args: * src (Node): src * dst (Node): dst */ constructor(src, dst) { super({ x: 0, y: 0, // points: [src_i.x, src_i.y, dst_i.x, dst_i.y], pointerLength: 10, pointerWidth: 10, fill: 'black', stroke: 'black', strokeWidth: 2, }); this.src = src; this.dst = dst; this.recomputePoints(); src.addEdge(this); dst.addEdge(this); } /** * Recompute the arrow src -> dst. */ recomputePoints() { const { src, dst } = this; const src_center = src.getCenter(); const dst_center = dst.getCenter(); const src_i = compute_intersection( src.getCorners(), {src: src_center, dst: dst_center} ); const dst_i = compute_intersection( dst.getCorners(), {src: src_center, dst: dst_center} ); this.points([src_i.x, src_i.y, dst_i.x, dst_i.y]); } /** * Called by View when a Node moved. */ onNodeMoving() { this.recomputePoints(); } } // Custom View. Renders the widget model. var View = widgets.DOMWidgetView.extend({ // Defines how the widget gets rendered into the DOM render: function() { // this.model refers to the *Python* model associated with this widget. var height = this.model.get('height'); // console.log("And everyday I'm rendering", height); this.container_id = `konva-container-${uuid1()}` this.node_title_height = 15; this.node_state_offset = 8; this.node_state_height = 14; this.node_state_padding = 2; this.node_width = 180; this.nodes = []; this.edges = []; this.map = {}; this.el.innerHTML = ` <div> <!-- <div style="padding: 10px; background-color: #336699"> <button id="save">Save as image</button> </div> --> <div id="${this.container_id}" style="background-color: #336699" > </div> </div> `; // Run this *after* the above <div> has rendered. 
setTimeout(() => { // console.log('Setting up Konva ...'); this.stage = new Konva.Stage({ container: this.container_id, width: 2048, height: height }); // Create a Layer to hold all shapes this.layer = new Konva.Layer(); this.model.on('change:value', this.value_changed, this); this.model.on('change:marginals_and_evidence', this.value_changed, this); /* document.getElementById('save').addEventListener( 'click', () => { // var dataURL = this.layer.toDataURL({ pixelRatio: 3 }); var dataURL = this.layer.toDataURL(); this.downloadURI(dataURL, 'stage.png'); }, false ); */ this.value_changed(); }, 0) }, downloadURI: function(uri, name) { var link = document.createElement('a'); link.download = name; link.href = uri; document.body.appendChild(link); link.click(); document.body.removeChild(link); delete link; }, /** * Called once when this.model.get('value') changes. * This should trigger a complete re-render of the canvas. */ value_changed: function() { // console.log('value_changed()'); // value holds the output of BayesianNetwork.as_dict() var value = this.model.get('value'); var { marginals, evidence } = this.model.get('marginals_and_evidence'); if (value.type !== 'BayesianNetwork') { return } // console.log('marginals:', marginals); // console.log('evidence:', evidence); // Clear the layer this.layer.removeChildren(); // Create nodes & mapping (indexed by RV) this.map = {}; var n; this.nodes = value.nodes.map(node => { n = new Node( node, marginals[node.RV], evidence[node.RV], { onDragMove: (n) => this.on_node_moving(n), onDragEnd: (n) => this.on_node_moved(n), onStateSelected: (RV, state) => this.on_state_selected(RV, state), } ); this.map[node.RV] = n; return n; }); // Create edges this.edges = value.edges.map(e => { const src = this.map[e[0]], dst = this.map[e[1]]; return new Edge(src, dst); }) // Add nodes & edges to the layer to the stage. this.edges.forEach(i => this.layer.add(i)); this.nodes.forEach(i => this.layer.add(i)); this.stage.add(this.layer); this.layer.draw(); }, on_node_moving(node) { node.edges.forEach(e => e.onNodeMoving()); this.layer.draw(); }, on_node_moved(node) { console.log(`node ${node.RV} moved!`); // node.node contains a reference to the node's JSON definition node.node.position = [node.x(), node.y()]; var value = this.model.get('value'); // For some reason it is necessary to set value twice. this.model.set('value', 'null'); this.model.set('value', Object.assign({}, value)); this.touch(); }, on_state_selected(RV, state) { // console.log(`on_state_selected("${RV}", "${state}")`); const { marginals, evidence } = this.model.get('marginals_and_evidence'); // console.log('evidence: ', evidence); const e = Object.assign({}, evidence); if (e[RV] && e[RV] === state) { // console.log(' disabling state ...') e[RV] = '' } else { // console.log(' setting evidence ...') e[RV] = state; } // console.log('e: ', e); this.model.set('evidence_sink', e); this.touch(); } }); module.exports = { Model: Model, View: View };
PypiClean
/tf-transformers-2.0.0.tar.gz/tf-transformers-2.0.0/src/tf_transformers/models/mt5/configuration_mt5.py
""" MT5 model configuration """
from tf_transformers.core import TransformerConfig


class MT5Config(TransformerConfig):
    r"""
    This is the configuration class to store the configuration of a :class:`~tf_transformers.models.MT5Model`.
    It is used to instantiate an MT5 model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a configuration similar to that of the mT5
    `small <https://huggingface.co/google/mt5-small>`__ architecture.

    Configuration objects inherit from :class:`~tf_transformers.models.TransformerConfig` and can be used to control
    the model outputs. Read the documentation from :class:`~tf_transformers.models.TransformerConfig` for more
    information.

    Args:
        vocab_size (:obj:`int`, `optional`, defaults to 250112):
            Vocabulary size of the MT5 model. Defines the number of different tokens that can be represented by the
            :obj:`inputs_ids` passed when calling :class:`~tf_transformers.model.MT5Model` or
            :class:`~tf_transformers.models.MT5Encoder`.
        embedding_size (:obj:`int`, `optional`, defaults to 512):
            Dimensionality of vocabulary embeddings.
        num_hidden_layers (:obj:`int`, `optional`, defaults to 8):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (:obj:`int`, `optional`, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        attention_head_size (:obj:`int`, `optional`, defaults to 64):
            Size of the attention heads in each layer. Often, but not necessarily, equal to
            ``embedding_size // num_attention_heads``.
        intermediate_size (:obj:`int`, `optional`, defaults to 1024):
            The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and several others are supported.
        intermediate_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function used in the intermediate (feed-forward) layer.
        hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability applied to the attention probabilities.
        max_position_embeddings (:obj:`int`, `optional`, defaults to -1):
            The maximum sequence length for absolute position embeddings. MT5 uses relative position embeddings,
            so this is disabled (-1) by default.
        type_vocab_size (:obj:`int`, `optional`, defaults to -1):
            The vocabulary size of the :obj:`token_type_ids`. MT5 does not use token type embeddings, so this is
            disabled (-1) by default.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"relative"`):
            Type of position embedding. MT5 uses T5-style relative position embeddings (:obj:`"relative"`);
            :obj:`"absolute"` selects learned absolute position embeddings instead. For background on relative
            position representations, see `Self-Attention with Relative Position Representations (Shaw et al.)
            <https://arxiv.org/abs/1803.02155>`__.
        bidirectional (:obj:`bool`, `optional`, defaults to True):
            For relative positional embeddings, the Encoder uses :obj:`bidirectional=True`, while the Decoder uses
            :obj:`bidirectional=False`.
        positional_buckets (:obj:`int`, `optional`, defaults to 32):
            The number of buckets to use for each attention layer. For relative positional embeddings.
        decoder_start_token_id (:obj:`int`, `optional`, defaults to 0):
            The id of the token used to start decoding.

    Examples::

        >>> from tf_transformers.models import MT5Config, MT5Model
        >>> # Initializing a default (mT5-small style) configuration
        >>> configuration = MT5Config()

        >>> # Initializing an MT5 configuration with custom dimensions
        >>> configuration_new = MT5Config(
        ...     embedding_size=768,
        ...     num_attention_heads=12,
        ...     intermediate_size=3072,
        ... )

        >>> # Initializing a model from the configuration
        >>> model = MT5Model.from_config(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model._config_dict # This has more details than the original configuration
    """

    def __init__(
        self,
        vocab_size=250112,
        embedding_size=512,
        num_hidden_layers=8,
        num_attention_heads=6,
        attention_head_size=64,
        intermediate_size=1024,
        hidden_act="gelu",
        intermediate_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=-1,
        type_vocab_size=-1,
        initializer_range=0.02,
        layer_norm_epsilon=1e-6,
        position_embedding_type="relative",
        bidirectional=True,
        positional_buckets=32,
        decoder_start_token_id=0,
    ):
        super().__init__(
            vocab_size=vocab_size,
            embedding_size=embedding_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            attention_head_size=attention_head_size,
            hidden_act=hidden_act,
            intermediate_act=intermediate_act,
            intermediate_size=intermediate_size,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_epsilon=layer_norm_epsilon,
            position_embedding_type=position_embedding_type,
            bidirectional=bidirectional,
            positional_buckets=positional_buckets,
            decoder_start_token_id=decoder_start_token_id,
        )
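
# --- Illustrative usage sketch (not part of the original file) ---
# Shows the encoder/decoder split of the `bidirectional` flag described in the
# docstring above. It assumes `tf_transformers` is installed and that MT5Config
# is importable as in the docstring example; nothing here is executed on import
# of the real module.
from tf_transformers.models import MT5Config

# Encoder-style config: relative position embeddings attend in both directions.
encoder_config = MT5Config(position_embedding_type="relative", bidirectional=True)

# Decoder-style config: same bucket count, but causal (unidirectional) relative positions.
decoder_config = MT5Config(
    position_embedding_type="relative",
    bidirectional=False,
    positional_buckets=32,
)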
PypiClean
/ftw.book-4.1.11.tar.gz/ftw.book-4.1.11/ftw/book/table/tablepart.py
class TablePart(object): """ A Tablepart object stores different attributes used to create tablerows """ def __init__( self, rows, parent, begin_at, column_names, first_column_a_header, border_layout, ): self.rows = rows self.begin_at = begin_at self.parent = parent self.column_names = column_names self.first_column_a_header = first_column_a_header self.row_node = None self.is_first_cell = False self.border_layout = border_layout def is_first_column_a_header(self): """ Should the first column be a header-row """ return self.first_column_a_header def is_cell_header_cell(self): """ Are we on the first cell and should the first column be a header-row """ if self.is_first_cell and self.first_column_a_header: return True return False def get_row_type(self): """ Return the type of the row. Can be tr or th' """ return 'tr' def get_cell_type(self): """ Type of the cell, Can be td or th... """ if self.is_cell_header_cell(): return 'th' return 'td' def set_row_node(self, row): """ Set the actual row-node we create cells """ self.row_node = row def get_row_node(self): """ Get the actual row-node we create cells """ return self.row_node def set_is_first_cell(self, is_first_cell): """ Set true if we are on the first cell """ self.is_first_cell = is_first_cell def get_css(self, css, row_num, col_name): """ Return default and additional css classes in a list """ css = css if self.border_layout in ['fancy_listing', 'grid']: css.append('border-bottom') if self.border_layout in ['grid']: css.append('border-top') css.append('border-left') css.append('border-right') css = self.get_additional_css(css, row_num, col_name) css = self.cleanup_css(css) return css def get_additional_attrs(self, row_num, col_name): """ Return additional attrs """ return {} def get_additional_css(self, css, row_num, col_name): """ Get special css classes """ return css def wrap_text_in_attr(self, text): """ Wrap a text into an attr """ return text def cleanup_css(self, css): """ Cleanup the given css. 
Remove double entries and different other cleanups """ if 'noborders' in css: for css_class in ['border-bottom', 'border-top', 'noborders']: if css_class in css: self._remove_css_class(css, css_class) if 'scriptsize' in css and 'bold' in css: self._remove_css_class(css, 'bold') return set(css) def _is_last_column(self, col_name): """ Ist the given column name the last of all available columns """ return col_name != self.column_names[-1] and True or False def _remove_css_class(self, css, css_class): """ Try to remove a css class from the given list of css-classes """ try: css.remove(css_class) except ValueError: pass return css class TablePartHeader(TablePart): """ Used for the Head """ def __init__( self, rows, parent, begin_at, column_names, first_column_a_header, border_layout, ): super(TablePartHeader, self).__init__( rows, parent, begin_at, column_names, first_column_a_header, border_layout, ) def get_cell_type(self): return 'th' def get_additional_attrs(self, row_num, col_name): attrs = {'align': 'left'} if row_num <= 0: attrs['id'] = col_name return attrs def is_last_row(self, row_num): """ Is it the last row """ return len(self.rows) <= row_num + 1 class TablePartFooter(TablePart): """ Used for the footer """ def __init__( self, rows, parent, begin_at, column_names, first_column_a_header, border_layout, footer_is_bold, ): super(TablePartFooter, self).__init__( rows, parent, begin_at, column_names, first_column_a_header, border_layout, ) self.footer_is_bold = footer_is_bold def get_css(self, css, row_num, col_name): classes = super(TablePartFooter, self).get_css(css, row_num, col_name) if self.footer_is_bold and 'bold' not in classes: classes.add('bold') return classes class TablePartBody(TablePart): """ Used for the Body """ def get_additional_attrs(self, row_num, col_name): attrs = {} if self.is_cell_header_cell(): attrs = {'id': 'row%i' % row_num} elif self.is_first_column_a_header(): attrs = {'headers': '%s row%i' % (col_name, row_num)} else: attrs = {'headers': '%s' % col_name} return attrs
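
# --- Illustrative usage sketch (not part of the package) ---
# Exercises TablePartBody directly with dummy arguments to show how cell types,
# header attributes and CSS classes are derived. The constructor values and the
# import path are assumptions based on the module above.
from ftw.book.table.tablepart import TablePartBody

body = TablePartBody(
    rows=[['a', 'b'], ['c', 'd']],        # raw row data (content is irrelevant here)
    parent=None,                          # normally the rendering table object
    begin_at=0,
    column_names=['column_0', 'column_1'],
    first_column_a_header=True,
    border_layout='grid',
)
body.set_is_first_cell(True)

print(body.get_cell_type())                       # 'th' - the first column acts as a header
print(body.get_additional_attrs(0, 'column_0'))   # {'id': 'row0'}
print(sorted(body.get_css([], 0, 'column_0')))    # border classes added by the 'grid' layout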
PypiClean
/scgpm_seqresults_dnanexus-0.5.0-py3-none-any.whl/scgpm_seqresults_dnanexus-0.5.0.data/scripts/add_standard_props_to_projects.py
### # © 2018 The Board of Trustees of the Leland Stanford Junior University # Nathaniel Watson # [email protected] # 2016-11-17 ### """ To fill in. """ import argparse import re import dxpy import scgpm_lims lane_reg = re.compile("_L(\d)_") def get_parser(): parser = argparse.ArgumentParser(description=__doc__,formatter_class=argparse.RawTextHelpFormatter) parser.add_argument("-b","--billing-account",required=True,help=""" The name of the DNAnexus billing account that the project belongs to. This will only be used to restrict the search of projects.""") return parser def main(): parser = get_parser() uhts_conn = scgpm_lims.Connection() args = parser.parse_args() billing_account = args.billing_account if not billing_account.startswith("org-"): billing_account = "org-" + billing_account projects = list(dxpy.find_projects(billed_to=billing_account)) #projects = [{u'permissionSources': [u'user-nathankw'], u'public': False, u'id': u'project-BvxVV1Q092QgFKk9Qv2bKj6Z', u'level': u'ADMINISTER'}] for p in projects: dx_proj = dxpy.DXProject(p["id"]) dx_proj_name = dx_proj.name if not dx_proj_name.startswith("16"): continue #not a sequencing results project if dx_proj_name == "160715_HEK-ZNF_Controls": continue print(dx_proj_name) uhts_run_name,lane,rest = lane_reg.split(dx_proj_name) runinfo = uhts_conn.getruninfo(run=uhts_run_name)["run_info"] laneinfo = runinfo["lanes"][lane] merge_props = {} merge_props["seq_run_name"] = uhts_run_name merge_props["seq_lane_index"] = lane merge_props["seq_instrument"] = runinfo["sequencing_instrument"] merge_props["paired_end"] = str(runinfo["paired_end"]) merge_props["sequencer_type"] = runinfo["platform_name"] merge_props["lab"] = laneinfo["lab"] merge_props["queue"] = laneinfo["queue"] merge_props["library_name"] = laneinfo["sample_name"].split()[0] #take first whitespace separated element. dx_properties = dx_proj.describe(input_params={"properties": True})["properties"] #check for empty prop vals and delete them: pop_attrs = [] for dx_prop_name in dx_properties: val = dx_properties[dx_prop_name].strip() if not val: pop_attrs.append(dx_prop_name) for pa in pop_attrs: dx_properties.pop(pa) dx_properties.update(merge_props) dxpy.api.project_set_properties(object_id=dx_proj.id,input_params={"properties": dx_properties}) if __name__ == "__main__": main()
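
# --- Illustrative snippet (not part of the original script) ---
# Shows how the `lane_reg` pattern above splits a sequencing-results project name
# into the UHTS run name, the lane number, and the remainder. The project name
# below is made up for demonstration purposes only.
import re

lane_reg = re.compile(r"_L(\d)_")

run_name, lane, rest = lane_reg.split("160715_D00125_0246_BC9JR2ANXX_L3_pAB123")
print(run_name)  # '160715_D00125_0246_BC9JR2ANXX'
print(lane)      # '3'
print(rest)      # 'pAB123'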
PypiClean
/mianalyzer-0.3.1.tar.gz/mianalyzer-0.3.1/mia/utils/Observer.py
from PyQt5.QtCore import * from abc import ABC, abstractmethod import traceback import sys class Observer(ABC): @abstractmethod def Error(self, exctype, value, traceback): pass @abstractmethod def Finished(self): pass @abstractmethod def Result(self,result): pass @abstractmethod def Progress(self,i): pass @abstractmethod def Started(self): pass class InterfaceSignals(QObject): finished = pyqtSignal() error = pyqtSignal(tuple) result = pyqtSignal(object) progress = pyqtSignal() start = pyqtSignal() epoch_end = pyqtSignal(int) class dlObservable(): def __init__(self): self.observers = [] def attachObserver(self,o): self.observers.append(o) def detachObserver(self,o): self.observers.remove(o) def notifyError(self): for o in self.observers: exctype, value, tb = sys.exc_info() o.Error(exctype, value, tb) def notifyPredictionFinished(self, prediction): for o in self.observers: o.Result(prediction) def notifyTrainingStarted(self): for o in self.observers: o.Started() def notifyTrainingFinished(self): for o in self.observers: o.Finished() def notifyTrainingResult(self, result): for o in self.observers: o.Result(result) def notifyTrainingProgress(self): for o in self.observers: o.Progress() def notifyEpochEnd(self, epoch): for o in self.observers: o.EpochEnd(epoch) class QtObserver(Observer): def __init__(self): self.signals = InterfaceSignals() def Error(self,exctype, value, traceback): self.signals.error.emit((exctype, value, traceback)) def Finished(self): self.signals.finished.emit() def Result(self, result): self.signals.result.emit(result) def Progress(self): self.signals.progress.emit() def Started(self): self.signals.start.emit() def EpochEnd(self, epoch): self.signals.epoch_end.emit(epoch)
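
# --- Minimal non-Qt sketch (not part of the package) ---
# Shows how an Observer subclass attaches to a dlObservable. The class names
# PrintObserver and DemoWorker are invented, and the import path is an assumption
# based on the file location above. Note that the abstract Progress() takes an
# index while notifyTrainingProgress() calls it without one, so this demo avoids
# that call.
from mia.utils.Observer import Observer, dlObservable

class PrintObserver(Observer):
    """Console observer used only for this illustration."""
    def Error(self, exctype, value, traceback):
        print("error:", exctype.__name__, value)
    def Finished(self):
        print("training finished")
    def Result(self, result):
        print("result:", result)
    def Progress(self, i):
        print("progress")
    def Started(self):
        print("training started")

class DemoWorker(dlObservable):
    """Pretends to train and notifies its observers."""
    def run(self):
        self.notifyTrainingStarted()
        self.notifyTrainingResult({"loss": 0.1})
        self.notifyTrainingFinished()

worker = DemoWorker()
worker.attachObserver(PrintObserver())
worker.run()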
PypiClean
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/regenerate-unicode-properties/Binary_Property/Case_Ignorable.js
const set = require('regenerate')(0x27, 0x2E, 0x3A, 0x5E, 0x60, 0xA8, 0xAD, 0xAF, 0xB4, 0x37A, 0x387, 0x559, 0x5BF, 0x5C7, 0x5F4, 0x61C, 0x640, 0x670, 0x70F, 0x711, 0x7FA, 0x7FD, 0x93A, 0x93C, 0x94D, 0x971, 0x981, 0x9BC, 0x9CD, 0x9FE, 0xA3C, 0xA51, 0xA75, 0xABC, 0xACD, 0xB01, 0xB3C, 0xB3F, 0xB4D, 0xB56, 0xB82, 0xBC0, 0xBCD, 0xC00, 0xC04, 0xC81, 0xCBC, 0xCBF, 0xCC6, 0xD4D, 0xDCA, 0xDD6, 0xE31, 0xEB1, 0xEC6, 0xF35, 0xF37, 0xF39, 0xFC6, 0x1082, 0x108D, 0x109D, 0x10FC, 0x17C6, 0x17D7, 0x17DD, 0x1843, 0x18A9, 0x1932, 0x1A1B, 0x1A56, 0x1A60, 0x1A62, 0x1A7F, 0x1AA7, 0x1B34, 0x1B3C, 0x1B42, 0x1BE6, 0x1BED, 0x1CED, 0x1CF4, 0x1D78, 0x1FBD, 0x2024, 0x2027, 0x2071, 0x207F, 0x2D6F, 0x2D7F, 0x2E2F, 0x3005, 0x303B, 0xA015, 0xA60C, 0xA67F, 0xA770, 0xA802, 0xA806, 0xA80B, 0xA8FF, 0xA9B3, 0xA9CF, 0xAA43, 0xAA4C, 0xAA70, 0xAA7C, 0xAAB0, 0xAAC1, 0xAADD, 0xAAF6, 0xABE5, 0xABE8, 0xABED, 0xFB1E, 0xFE13, 0xFE52, 0xFE55, 0xFEFF, 0xFF07, 0xFF0E, 0xFF1A, 0xFF3E, 0xFF40, 0xFF70, 0xFFE3, 0x101FD, 0x102E0, 0x10A3F, 0x11001, 0x110BD, 0x110CD, 0x11173, 0x11234, 0x1123E, 0x112DF, 0x11340, 0x11446, 0x1145E, 0x114BA, 0x1163D, 0x116AB, 0x116AD, 0x116B7, 0x119E0, 0x11A47, 0x11C3F, 0x11D3A, 0x11D47, 0x11D95, 0x11D97, 0x16F4F, 0x16FE3, 0x1DA75, 0x1DA84, 0xE0001); set.addRange(0xB7, 0xB8).addRange(0x2B0, 0x36F).addRange(0x374, 0x375).addRange(0x384, 0x385).addRange(0x483, 0x489).addRange(0x591, 0x5BD).addRange(0x5C1, 0x5C2).addRange(0x5C4, 0x5C5).addRange(0x600, 0x605).addRange(0x610, 0x61A).addRange(0x64B, 0x65F).addRange(0x6D6, 0x6DD).addRange(0x6DF, 0x6E8).addRange(0x6EA, 0x6ED).addRange(0x730, 0x74A).addRange(0x7A6, 0x7B0).addRange(0x7EB, 0x7F5).addRange(0x816, 0x82D).addRange(0x859, 0x85B).addRange(0x8D3, 0x902).addRange(0x941, 0x948).addRange(0x951, 0x957).addRange(0x962, 0x963).addRange(0x9C1, 0x9C4).addRange(0x9E2, 0x9E3).addRange(0xA01, 0xA02).addRange(0xA41, 0xA42).addRange(0xA47, 0xA48).addRange(0xA4B, 0xA4D).addRange(0xA70, 0xA71).addRange(0xA81, 0xA82).addRange(0xAC1, 0xAC5).addRange(0xAC7, 0xAC8).addRange(0xAE2, 0xAE3).addRange(0xAFA, 0xAFF).addRange(0xB41, 0xB44).addRange(0xB62, 0xB63).addRange(0xC3E, 0xC40).addRange(0xC46, 0xC48).addRange(0xC4A, 0xC4D).addRange(0xC55, 0xC56).addRange(0xC62, 0xC63).addRange(0xCCC, 0xCCD).addRange(0xCE2, 0xCE3).addRange(0xD00, 0xD01).addRange(0xD3B, 0xD3C).addRange(0xD41, 0xD44).addRange(0xD62, 0xD63).addRange(0xDD2, 0xDD4).addRange(0xE34, 0xE3A).addRange(0xE46, 0xE4E); set.addRange(0xEB4, 0xEBC).addRange(0xEC8, 0xECD).addRange(0xF18, 0xF19).addRange(0xF71, 0xF7E).addRange(0xF80, 0xF84).addRange(0xF86, 0xF87).addRange(0xF8D, 0xF97).addRange(0xF99, 0xFBC).addRange(0x102D, 0x1030).addRange(0x1032, 0x1037).addRange(0x1039, 0x103A).addRange(0x103D, 0x103E).addRange(0x1058, 0x1059).addRange(0x105E, 0x1060).addRange(0x1071, 0x1074).addRange(0x1085, 0x1086).addRange(0x135D, 0x135F).addRange(0x1712, 0x1714).addRange(0x1732, 0x1734).addRange(0x1752, 0x1753).addRange(0x1772, 0x1773).addRange(0x17B4, 0x17B5).addRange(0x17B7, 0x17BD).addRange(0x17C9, 0x17D3).addRange(0x180B, 0x180E).addRange(0x1885, 0x1886).addRange(0x1920, 0x1922).addRange(0x1927, 0x1928).addRange(0x1939, 0x193B).addRange(0x1A17, 0x1A18).addRange(0x1A58, 0x1A5E).addRange(0x1A65, 0x1A6C).addRange(0x1A73, 0x1A7C).addRange(0x1AB0, 0x1ABE).addRange(0x1B00, 0x1B03).addRange(0x1B36, 0x1B3A).addRange(0x1B6B, 0x1B73).addRange(0x1B80, 0x1B81).addRange(0x1BA2, 0x1BA5).addRange(0x1BA8, 0x1BA9).addRange(0x1BAB, 0x1BAD).addRange(0x1BE8, 0x1BE9).addRange(0x1BEF, 0x1BF1).addRange(0x1C2C, 0x1C33).addRange(0x1C36, 0x1C37).addRange(0x1C78, 
0x1C7D).addRange(0x1CD0, 0x1CD2).addRange(0x1CD4, 0x1CE0).addRange(0x1CE2, 0x1CE8).addRange(0x1CF8, 0x1CF9).addRange(0x1D2C, 0x1D6A); set.addRange(0x1D9B, 0x1DF9).addRange(0x1DFB, 0x1DFF).addRange(0x1FBF, 0x1FC1).addRange(0x1FCD, 0x1FCF).addRange(0x1FDD, 0x1FDF).addRange(0x1FED, 0x1FEF).addRange(0x1FFD, 0x1FFE).addRange(0x200B, 0x200F).addRange(0x2018, 0x2019).addRange(0x202A, 0x202E).addRange(0x2060, 0x2064).addRange(0x2066, 0x206F).addRange(0x2090, 0x209C).addRange(0x20D0, 0x20F0).addRange(0x2C7C, 0x2C7D).addRange(0x2CEF, 0x2CF1).addRange(0x2DE0, 0x2DFF).addRange(0x302A, 0x302D).addRange(0x3031, 0x3035).addRange(0x3099, 0x309E).addRange(0x30FC, 0x30FE).addRange(0xA4F8, 0xA4FD).addRange(0xA66F, 0xA672).addRange(0xA674, 0xA67D).addRange(0xA69C, 0xA69F).addRange(0xA6F0, 0xA6F1).addRange(0xA700, 0xA721).addRange(0xA788, 0xA78A).addRange(0xA7F8, 0xA7F9).addRange(0xA825, 0xA826).addRange(0xA8C4, 0xA8C5).addRange(0xA8E0, 0xA8F1).addRange(0xA926, 0xA92D).addRange(0xA947, 0xA951).addRange(0xA980, 0xA982).addRange(0xA9B6, 0xA9B9).addRange(0xA9BC, 0xA9BD).addRange(0xA9E5, 0xA9E6).addRange(0xAA29, 0xAA2E).addRange(0xAA31, 0xAA32).addRange(0xAA35, 0xAA36).addRange(0xAAB2, 0xAAB4).addRange(0xAAB7, 0xAAB8).addRange(0xAABE, 0xAABF).addRange(0xAAEC, 0xAAED).addRange(0xAAF3, 0xAAF4).addRange(0xAB5B, 0xAB5F).addRange(0xFBB2, 0xFBC1).addRange(0xFE00, 0xFE0F).addRange(0xFE20, 0xFE2F).addRange(0xFF9E, 0xFF9F); set.addRange(0xFFF9, 0xFFFB).addRange(0x10376, 0x1037A).addRange(0x10A01, 0x10A03).addRange(0x10A05, 0x10A06).addRange(0x10A0C, 0x10A0F).addRange(0x10A38, 0x10A3A).addRange(0x10AE5, 0x10AE6).addRange(0x10D24, 0x10D27).addRange(0x10F46, 0x10F50).addRange(0x11038, 0x11046).addRange(0x1107F, 0x11081).addRange(0x110B3, 0x110B6).addRange(0x110B9, 0x110BA).addRange(0x11100, 0x11102).addRange(0x11127, 0x1112B).addRange(0x1112D, 0x11134).addRange(0x11180, 0x11181).addRange(0x111B6, 0x111BE).addRange(0x111C9, 0x111CC).addRange(0x1122F, 0x11231).addRange(0x11236, 0x11237).addRange(0x112E3, 0x112EA).addRange(0x11300, 0x11301).addRange(0x1133B, 0x1133C).addRange(0x11366, 0x1136C).addRange(0x11370, 0x11374).addRange(0x11438, 0x1143F).addRange(0x11442, 0x11444).addRange(0x114B3, 0x114B8).addRange(0x114BF, 0x114C0).addRange(0x114C2, 0x114C3).addRange(0x115B2, 0x115B5).addRange(0x115BC, 0x115BD).addRange(0x115BF, 0x115C0).addRange(0x115DC, 0x115DD).addRange(0x11633, 0x1163A).addRange(0x1163F, 0x11640).addRange(0x116B0, 0x116B5).addRange(0x1171D, 0x1171F).addRange(0x11722, 0x11725).addRange(0x11727, 0x1172B).addRange(0x1182F, 0x11837).addRange(0x11839, 0x1183A).addRange(0x119D4, 0x119D7).addRange(0x119DA, 0x119DB).addRange(0x11A01, 0x11A0A).addRange(0x11A33, 0x11A38).addRange(0x11A3B, 0x11A3E).addRange(0x11A51, 0x11A56).addRange(0x11A59, 0x11A5B).addRange(0x11A8A, 0x11A96); set.addRange(0x11A98, 0x11A99).addRange(0x11C30, 0x11C36).addRange(0x11C38, 0x11C3D).addRange(0x11C92, 0x11CA7).addRange(0x11CAA, 0x11CB0).addRange(0x11CB2, 0x11CB3).addRange(0x11CB5, 0x11CB6).addRange(0x11D31, 0x11D36).addRange(0x11D3C, 0x11D3D).addRange(0x11D3F, 0x11D45).addRange(0x11D90, 0x11D91).addRange(0x11EF3, 0x11EF4).addRange(0x13430, 0x13438).addRange(0x16AF0, 0x16AF4).addRange(0x16B30, 0x16B36).addRange(0x16B40, 0x16B43).addRange(0x16F8F, 0x16F9F).addRange(0x16FE0, 0x16FE1).addRange(0x1BC9D, 0x1BC9E).addRange(0x1BCA0, 0x1BCA3).addRange(0x1D167, 0x1D169).addRange(0x1D173, 0x1D182).addRange(0x1D185, 0x1D18B).addRange(0x1D1AA, 0x1D1AD).addRange(0x1D242, 0x1D244).addRange(0x1DA00, 0x1DA36).addRange(0x1DA3B, 0x1DA6C).addRange(0x1DA9B, 
0x1DA9F).addRange(0x1DAA1, 0x1DAAF).addRange(0x1E000, 0x1E006).addRange(0x1E008, 0x1E018).addRange(0x1E01B, 0x1E021).addRange(0x1E023, 0x1E024).addRange(0x1E026, 0x1E02A).addRange(0x1E130, 0x1E13D).addRange(0x1E2EC, 0x1E2EF).addRange(0x1E8D0, 0x1E8D6).addRange(0x1E944, 0x1E94B).addRange(0x1F3FB, 0x1F3FF).addRange(0xE0020, 0xE007F).addRange(0xE0100, 0xE01EF); module.exports = set;
PypiClean
/py_base_layer-1.0.0-py3-none-any.whl/pymongo/uri_parser.py
import re import sys import warnings from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union from urllib.parse import unquote_plus from pymongo.client_options import _parse_ssl_options from pymongo.common import ( INTERNAL_URI_OPTION_NAME_MAP, SRV_SERVICE_NAME, URI_OPTIONS_DEPRECATION_MAP, _CaseInsensitiveDictionary, get_validated_options, ) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver from pymongo.typings import _Address SCHEME = "mongodb://" SCHEME_LEN = len(SCHEME) SRV_SCHEME = "mongodb+srv://" SRV_SCHEME_LEN = len(SRV_SCHEME) DEFAULT_PORT = 27017 def _unquoted_percent(s): """Check for unescaped percent signs. :Paramaters: - `s`: A string. `s` can have things like '%25', '%2525', and '%E2%85%A8' but cannot have unquoted percent like '%foo'. """ for i in range(len(s)): if s[i] == "%": sub = s[i : i + 3] # If unquoting yields the same string this means there was an # unquoted %. if unquote_plus(sub) == sub: return True return False def parse_userinfo(userinfo: str) -> Tuple[str, str]: """Validates the format of user information in a MongoDB URI. Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", "]", "@") as per RFC 3986 must be escaped. Returns a 2-tuple containing the unescaped username followed by the unescaped password. :Paramaters: - `userinfo`: A string of the form <username>:<password> """ if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): raise InvalidURI( "Username and password must be escaped according to " "RFC 3986, use urllib.parse.quote_plus" ) user, _, passwd = userinfo.partition(":") # No password is expected with GSSAPI authentication. if not user: raise InvalidURI("The empty string is not valid username.") return unquote_plus(user), unquote_plus(passwd) def parse_ipv6_literal_host( entity: str, default_port: Optional[int] ) -> Tuple[str, Optional[Union[str, int]]]: """Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where port is default_port if it wasn't specified in entity. :Parameters: - `entity`: A string that represents an IPv6 literal enclosed in braces (e.g. '[::1]' or '[::1]:27017'). - `default_port`: The port number to use when one wasn't specified in entity. """ if entity.find("]") == -1: raise ValueError( "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." ) i = entity.find("]:") if i == -1: return entity[1:-1], default_port return entity[1:i], entity[i + 2 :] def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: """Validates a host string Returns a 2-tuple of host followed by port where port is default_port if it wasn't specified in the string. :Parameters: - `entity`: A host or host:port string where host could be a hostname or IP address. - `default_port`: The port number to use when one wasn't specified in entity. """ host = entity port: Optional[Union[str, int]] = default_port if entity[0] == "[": host, port = parse_ipv6_literal_host(entity, default_port) elif entity.endswith(".sock"): return entity, default_port elif entity.find(":") != -1: if entity.count(":") > 1: raise ValueError( "Reserved characters such as ':' must be " "escaped according RFC 2396. An IPv6 " "address literal must be enclosed in '[' " "and ']' according to RFC 2732." 
) host, port = host.split(":", 1) if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: raise ValueError("Port must be an integer between 0 and 65535: %r" % (port,)) port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but # "FOO.com" is in the hello response. return host.lower(), port # Options whose values are implicitly determined by tlsInsecure. _IMPLICIT_TLSINSECURE_OPTS = { "tlsallowinvalidcertificates", "tlsallowinvalidhostnames", "tlsdisableocspendpointcheck", } def _parse_options(opts, delim): """Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ readpreferencetags portion, and the use of a unicode options string.""" options = _CaseInsensitiveDictionary() for uriopt in opts.split(delim): key, value = uriopt.split("=") if key.lower() == "readpreferencetags": options.setdefault(key, []).append(value) else: if key in options: warnings.warn("Duplicate URI option '%s'." % (key,)) if key.lower() == "authmechanismproperties": val = value else: val = unquote_plus(value) options[key] = val return options def _handle_security_options(options): """Raise appropriate errors when conflicting TLS options are present in the options dictionary. :Parameters: - `options`: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ # Implicitly defined options must not be explicitly specified. tlsinsecure = options.get("tlsinsecure") if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: if opt in options: err_msg = "URI options %s and %s cannot be specified simultaneously." raise InvalidURI( err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) ) # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") if tlsallowinvalidcerts is not None: if "tlsdisableocspendpointcheck" in options: err_msg = "URI options %s and %s cannot be specified simultaneously." raise InvalidURI( err_msg % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) ) if tlsallowinvalidcerts is True: options["tlsdisableocspendpointcheck"] = True # Handle co-occurence of CRL and OCSP-related options. tlscrlfile = options.get("tlscrlfile") if tlscrlfile is not None: for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): if options.get(opt) is True: err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." raise InvalidURI(err_msg % (opt,)) if "ssl" in options and "tls" in options: def truth_value(val): if val in ("true", "false"): return val == "true" if isinstance(val, bool): return val return val if truth_value(options.get("ssl")) != truth_value(options.get("tls")): err_msg = "Can not specify conflicting values for URI options %s and %s." raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) return options def _handle_option_deprecations(options): """Issue appropriate warnings when deprecated options are present in the options dictionary. Removes deprecated option key, value pairs if the options dictionary is found to also have the renamed option. :Parameters: - `options`: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. 
""" for optname in list(options): if optname in URI_OPTIONS_DEPRECATION_MAP: mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] if mode == "renamed": newoptname = message if newoptname in options: warn_msg = "Deprecated option '%s' ignored in favor of '%s'." warnings.warn( warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), DeprecationWarning, stacklevel=2, ) options.pop(optname) continue warn_msg = "Option '%s' is deprecated, use '%s' instead." warnings.warn( warn_msg % (options.cased_key(optname), newoptname), DeprecationWarning, stacklevel=2, ) elif mode == "removed": warn_msg = "Option '%s' is deprecated. %s." warnings.warn( warn_msg % (options.cased_key(optname), message), DeprecationWarning, stacklevel=2, ) return options def _normalize_options(options): """Normalizes option names in the options dictionary by converting them to their internally-used names. :Parameters: - `options`: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ # Expand the tlsInsecure option. tlsinsecure = options.get("tlsinsecure") if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: # Implicit options are logically the same as tlsInsecure. options[opt] = tlsinsecure for optname in list(options): intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) if intname is not None: options[intname] = options.pop(optname) return options def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: """Validates and normalizes options passed in a MongoDB URI. Returns a new dictionary of validated and normalized options. If warn is False then errors will be thrown for invalid options, otherwise they will be ignored and a warning will be issued. :Parameters: - `opts`: A dict of MongoDB URI options. - `warn` (optional): If ``True`` then warnings will be logged and invalid options will be ignored. Otherwise invalid options will cause errors. """ return get_validated_options(opts, warn) def split_options( opts: str, validate: bool = True, warn: bool = False, normalize: bool = True ) -> MutableMapping[str, Any]: """Takes the options portion of a MongoDB URI, validates each option and returns the options in a dictionary. :Parameters: - `opt`: A string representing MongoDB URI options. - `validate`: If ``True`` (the default), validate and normalize all options. - `warn`: If ``False`` (the default), suppress all warnings raised during validation of options. - `normalize`: If ``True`` (the default), renames all options to their internally-used names. """ and_idx = opts.find("&") semi_idx = opts.find(";") try: if and_idx >= 0 and semi_idx >= 0: raise InvalidURI("Can not mix '&' and ';' for option separators.") elif and_idx >= 0: options = _parse_options(opts, "&") elif semi_idx >= 0: options = _parse_options(opts, ";") elif opts.find("=") != -1: options = _parse_options(opts, None) else: raise ValueError except ValueError: raise InvalidURI("MongoDB URI options are key=value pairs.") options = _handle_security_options(options) options = _handle_option_deprecations(options) if normalize: options = _normalize_options(options) if validate: options = validate_options(options, warn) if options.get("authsource") == "": raise InvalidURI("the authSource database cannot be an empty string") return options def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[_Address]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. 
Returns a set of 2-tuples containing the host name (or IP) followed by port number. :Parameters: - `hosts`: A string of the form host1[:port],host2[:port],... - `default_port`: The port number to use when one wasn't specified for a host. """ nodes = [] for entity in hosts.split(","): if not entity: raise ConfigurationError("Empty host (or extra comma in host list).") port = default_port # Unix socket entities don't have ports if entity.endswith(".sock"): port = None nodes.append(parse_host(entity, port)) return nodes # Prohibited characters in database name. DB names also can't have ".", but for # backward-compat we allow "db.collection" in URI. _BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") _ALLOWED_TXT_OPTS = frozenset( ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] ) def _check_options(nodes, options): # Ensure directConnection was not True if there are multiple seeds. if len(nodes) > 1 and options.get("directconnection"): raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") if options.get("loadbalanced"): if len(nodes) > 1: raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") if options.get("directconnection"): raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") if options.get("replicaset"): raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") def parse_uri( uri: str, default_port: Optional[int] = DEFAULT_PORT, validate: bool = True, warn: bool = False, normalize: bool = True, connect_timeout: Optional[float] = None, srv_service_name: Optional[str] = None, srv_max_hosts: Optional[int] = None, ) -> Dict[str, Any]: """Parse and validate a MongoDB URI. Returns a dict of the form:: { 'nodelist': <list of (host, port) tuples>, 'username': <username> or None, 'password': <password> or None, 'database': <database name> or None, 'collection': <collection name> or None, 'options': <dict of MongoDB URI options>, 'fqdn': <fqdn of the MongoDB+SRV URI> or None } If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done to build nodelist and options. :Parameters: - `uri`: The MongoDB URI to parse. - `default_port`: The port number to use when one wasn't specified for a host in the URI. - `validate` (optional): If ``True`` (the default), validate and normalize all options. Default: ``True``. - `warn` (optional): When validating, if ``True`` then will warn the user then ignore any invalid options or values. If ``False``, validation will error when options are unsupported or values are invalid. Default: ``False``. - `normalize` (optional): If ``True``, convert names of URI options to their internally-used names. Default: ``True``. - `connect_timeout` (optional): The maximum time in milliseconds to wait for a response from the DNS server. - 'srv_service_name` (optional): A custom SRV service name .. versionchanged:: 4.0 To better follow RFC 3986, unquoted percent signs ("%") are no longer supported. .. versionchanged:: 3.9 Added the ``normalize`` parameter. .. versionchanged:: 3.6 Added support for mongodb+srv:// URIs. .. versionchanged:: 3.5 Return the original value of the ``readPreference`` MongoDB URI option instead of the validated read preference mode. .. versionchanged:: 3.1 ``warn`` added so invalid options can be ignored. 
""" if uri.startswith(SCHEME): is_srv = False scheme_free = uri[SCHEME_LEN:] elif uri.startswith(SRV_SCHEME): if not _HAVE_DNSPYTHON: python_path = sys.executable or "python" raise ConfigurationError( 'The "dnspython" module must be ' "installed to use mongodb+srv:// URIs. " "To fix this error install pymongo with the srv extra:\n " '%s -m pip install "pymongo[srv]"' % (python_path) ) is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] else: raise InvalidURI( "Invalid URI scheme: URI must begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) ) if not scheme_free: raise InvalidURI("Must provide at least one hostname or IP.") user = None passwd = None dbase = None collection = None options = _CaseInsensitiveDictionary() host_part, _, path_part = scheme_free.partition("/") if not host_part: host_part = path_part path_part = "" if not path_part and "?" in host_part: raise InvalidURI("A '/' is required between the host list and any options.") if path_part: dbase, _, opts = path_part.partition("?") if dbase: dbase = unquote_plus(dbase) if "." in dbase: dbase, collection = dbase.split(".", 1) if _BAD_DB_CHARS.search(dbase): raise InvalidURI('Bad database name "%s"' % dbase) else: dbase = None if opts: options.update(split_options(opts, validate, warn, normalize)) if srv_service_name is None: srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) if "@" in host_part: userinfo, _, hosts = host_part.rpartition("@") user, passwd = parse_userinfo(userinfo) else: hosts = host_part if "/" in hosts: raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) hosts = unquote_plus(hosts) fqdn = None srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") if is_srv: if options.get("directConnection"): raise ConfigurationError( "Cannot specify directConnection=true with %s URIs" % (SRV_SCHEME,) ) nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: raise InvalidURI("%s URIs must include one, and only one, hostname" % (SRV_SCHEME,)) fqdn, port = nodes[0] if port is not None: raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. 
connect_timeout = connect_timeout or options.get("connectTimeoutMS") dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) nodes = dns_resolver.get_hosts() dns_options = dns_resolver.get_options() if dns_options: parsed_dns_options = split_options(dns_options, validate, warn, normalize) if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: raise ConfigurationError( "Only authSource, replicaSet, and loadBalanced are supported from DNS" ) for opt, val in parsed_dns_options.items(): if opt not in options: options[opt] = val if options.get("loadBalanced") and srv_max_hosts: raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") if options.get("replicaSet") and srv_max_hosts: raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") if "tls" not in options and "ssl" not in options: options["tls"] = True if validate else "true" elif not is_srv and options.get("srvServiceName") is not None: raise ConfigurationError( "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" ) elif not is_srv and srv_max_hosts: raise ConfigurationError( "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" ) else: nodes = split_hosts(hosts, default_port=default_port) _check_options(nodes, options) return { "nodelist": nodes, "username": user, "password": passwd, "database": dbase, "collection": collection, "options": options, "fqdn": fqdn, } def _parse_kms_tls_options(kms_tls_options): """Parse KMS TLS connection options.""" if not kms_tls_options: return {} if not isinstance(kms_tls_options, dict): raise TypeError("kms_tls_options must be a dict") contexts = {} for provider, opts in kms_tls_options.items(): if not isinstance(opts, dict): raise TypeError(f'kms_tls_options["{provider}"] must be a dict') opts.setdefault("tls", True) opts = _CaseInsensitiveDictionary(opts) opts = _handle_security_options(opts) opts = _normalize_options(opts) opts = validate_options(opts) ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) if ssl_context is None: raise ConfigurationError("TLS is required for KMS providers") if allow_invalid_hostnames: raise ConfigurationError("Insecure TLS options prohibited") for n in [ "tlsInsecure", "tlsAllowInvalidCertificates", "tlsAllowInvalidHostnames", "tlsDisableOCSPEndpointCheck", "tlsDisableCertificateRevocationCheck", ]: if n in opts: raise ConfigurationError(f"Insecure TLS options prohibited: {n}") contexts[provider] = ssl_context return contexts if __name__ == "__main__": import pprint try: pprint.pprint(parse_uri(sys.argv[1])) except InvalidURI as exc: print(exc) sys.exit(0)
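
# --- Illustrative example (not part of the module) ---
# What parse_uri returns for a plain (non-SRV) connection string. The host names
# and credentials below are placeholders, not real servers.
from pymongo.uri_parser import parse_uri

parsed = parse_uri(
    "mongodb://user:pass%[email protected]:27017,db2.example.com/mydb?replicaSet=rs0"
)
print(parsed["nodelist"])  # [('db1.example.com', 27017), ('db2.example.com', 27017)]
print(parsed["username"])  # 'user'
print(parsed["password"])  # 'pass word' (percent-decoded)
print(parsed["database"])  # 'mydb'
print(parsed["options"])   # case-insensitive dict containing the replicaSet option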
PypiClean
/distributions_doppiavi-0.1.tar.gz/distributions_doppiavi-0.1/distributions_doppiavi/Gaussiandistribution.py
import math import matplotlib.pyplot as plt from .Generaldistribution import Distribution class Gaussian(Distribution): """ Gaussian distribution class for calculating and visualizing a Gaussian distribution. Attributes: mean (float) representing the mean value of the distribution stdev (float) representing the standard deviation of the distribution data_list (list of floats) a list of floats extracted from the data file """ def __init__(self, mu=0, sigma=1): Distribution.__init__(self, mu, sigma) def calculate_mean(self): """Function to calculate the mean of the data set. Args: None Returns: float: mean of the data set """ avg = 1.0 * sum(self.data) / len(self.data) self.mean = avg return self.mean def calculate_stdev(self, sample=True): """Function to calculate the standard deviation of the data set. Args: sample (bool): whether the data represents a sample or population Returns: float: standard deviation of the data set """ if sample: n = len(self.data) - 1 else: n = len(self.data) mean = self.calculate_mean() sigma = 0 for d in self.data: sigma += (d - mean) ** 2 sigma = math.sqrt(sigma / n) self.stdev = sigma return self.stdev def plot_histogram(self): """Function to output a histogram of the instance variable data using matplotlib pyplot library. Args: None Returns: None """ plt.hist(self.data) plt.title('Histogram of Data') plt.xlabel('data') plt.ylabel('count') def pdf(self, x): """Probability density function calculator for the gaussian distribution. Args: x (float): point for calculating the probability density function Returns: float: probability density function output """ return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2) def plot_histogram_pdf(self, n_spaces = 50): """Function to plot the normalized histogram of the data and a plot of the probability density function along the same range Args: n_spaces (int): number of data points Returns: list: x values for the pdf plot list: y values for the pdf plot """ mu = self.mean sigma = self.stdev min_range = min(self.data) max_range = max(self.data) # calculates the interval between x values interval = 1.0 * (max_range - min_range) / n_spaces x = [] y = [] # calculate the x values to visualize for i in range(n_spaces): tmp = min_range + interval*i x.append(tmp) y.append(self.pdf(tmp)) # make the plots fig, axes = plt.subplots(2,sharex=True) fig.subplots_adjust(hspace=.5) axes[0].hist(self.data, density=True) axes[0].set_title('Normed Histogram of Data') axes[0].set_ylabel('Density') axes[1].plot(x, y) axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation') axes[0].set_ylabel('Density') plt.show() return x, y def __add__(self, other): """Function to add together two Gaussian distributions Args: other (Gaussian): Gaussian instance Returns: Gaussian: Gaussian distribution """ result = Gaussian() result.mean = self.mean + other.mean result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2) return result def __repr__(self): """Function to output the characteristics of the Gaussian instance Args: None Returns: string: characteristics of the Gaussian """ return "mean {}, standard deviation {}".format(self.mean, self.stdev)
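
# --- Brief usage sketch (not part of the package) ---
# The data values are arbitrary and the import path is an assumption based on
# the file location above.
from distributions_doppiavi.Gaussiandistribution import Gaussian

gaussian = Gaussian()
gaussian.data = [1, 3, 99, 100, 120, 32, 330, 23, 76, 44, 31]
print(gaussian.calculate_mean())    # arithmetic mean of the list
print(gaussian.calculate_stdev())   # sample standard deviation (divides by n - 1)

# Adding two independent Gaussians: means add, variances add.
combined = Gaussian(25, 3) + Gaussian(30, 4)
print(combined)                     # mean 55, standard deviation 5.0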
PypiClean
/hcipy-0.5.0.tar.gz/hcipy-0.5.0/doc/tutorial_notebooks/VortexCoronagraph/VortexCoronagraph.ipynb
# Imaging with a vortex coronagraph

We will simulate on-axis and off-axis images of stars through a (ring-apodized) vortex coronagraph. We'll start by importing all relevant libraries and setting up our pupil and focal grids. We'll slightly oversize our pupil grid to more clearly see the effects of the vortex coronagraph in the Lyot plane.

```
from hcipy import *
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# For notebook animations
from matplotlib import animation
from IPython.display import HTML

mpl.rcParams['figure.dpi'] = 100

pupil_grid = make_pupil_grid(256, 1.5)
focal_grid = make_focal_grid(8, 12)

prop = FraunhoferPropagator(pupil_grid, focal_grid)
```

We start off by creating a circular aperture. A vortex coronagraph works perfectly for a circular aperture. We'll use supersampling to evaluate this aperture to partially suppress sampling artefacts. We'll also use a slightly-undersized circular Lyot stop.

```
aperture = evaluate_supersampled(circular_aperture(1), pupil_grid, 4)
lyot_mask = evaluate_supersampled(circular_aperture(0.95), pupil_grid, 4)

plt.subplot(1,2,1)
plt.title('Aperture')
imshow_field(aperture, cmap='gray')

plt.subplot(1,2,2)
plt.title('Lyot stop')
imshow_field(lyot_mask, cmap='gray')
plt.show()
```

We can perform non-coronagraphic imaging by just using the Fraunhofer propagation defined above to propagate the light from the pupil to the focal plane of our telescope.

```
wf = Wavefront(aperture)
img_ref = prop(wf).intensity

imshow_field(np.log10(img_ref / img_ref.max()), vmin=-5, cmap='inferno')
plt.show()
```

This shows the usual Airy pattern. We'll now generate the vortex coronagraph itself. It requires a pupil grid and the charge of the vortex. The vortex coronagraph object propagates light from the pupil plane to the Lyot plane.

```
charge = 2
coro = VortexCoronagraph(pupil_grid, charge)
lyot_stop = Apodizer(lyot_mask)
```

We can now propagate light through the vortex coronagraph. Internally the vortex coronagraph performs many propagations through the vortex with successively higher resolutions. This is done to adequately sample the vortex singularity.

```
wf = Wavefront(aperture)
lyot_plane = coro(wf)

imshow_field(lyot_plane.intensity, cmap='inferno')
plt.show()
```

We can now block this light with a Lyot stop.

```
post_lyot_mask = lyot_stop(lyot_plane)
img = prop(post_lyot_mask).intensity

imshow_field(np.log10(img / img_ref.max()), vmin=-5, vmax=0, cmap='inferno')
plt.show()
```

The star has been completely suppressed. We can see the star appear again when we look at an off-axis object:

```
wf = Wavefront(aperture * np.exp(2j * np.pi * pupil_grid.x * 1.5))
img = prop(lyot_stop(coro(wf))).intensity

imshow_field(np.log10(img / img_ref.max()), vmin=-5, vmax=0, cmap='inferno')
plt.show()
```

And the Lyot plane image looks totally different for an off-axis star:

```
lyot = coro(wf)

imshow_field(lyot.intensity, vmax=2, cmap='inferno')
plt.show()
```

Unintuitively, the light in the Lyot stop is offset in the vertical direction, while the star is offset in the horizontal direction. We can see this effect more clearly in an animation.
``` def create_offaxis_animation(coro): fig = plt.figure() plt.subplot(1,2,1) plt.title('Lyot plane') im1 = imshow_field(lyot_plane.intensity, vmax=1, cmap='inferno') plt.subplot(1,2,2) plt.title('Science image plane') im2 = imshow_field(np.log10(img / img_ref.max()), vmin=-5, vmax=0, cmap='inferno') plt.close(fig) def animate(angular_separation): wf = Wavefront(aperture * np.exp(2j * np.pi * pupil_grid.x * angular_separation)) lyot = coro(wf) img = prop(lyot_stop(lyot)) im1.set_data(*pupil_grid.separated_coords, lyot.intensity.shaped) im2.set_data(*focal_grid.separated_coords, np.log10(img.intensity.shaped / img_ref.max())) return [im1, im2] angular_separations = np.linspace(-5, 5, 51) anim = animation.FuncAnimation(fig, animate, angular_separations, interval=160, blit=True) return HTML(anim.to_html5_video()) create_offaxis_animation(coro) ``` We can also simulate vortex coronagraphs with other charges: ``` vortex4 = VortexCoronagraph(pupil_grid, charge=4) create_offaxis_animation(vortex4) ```
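
As a rough, illustrative extension (not part of the original tutorial), we can sketch the off-axis throughput of the charge-2 vortex by scanning the angular separation and recording the peak intensity relative to the non-coronagraphic image. This reuses only objects defined above.

```
separations = np.linspace(0, 5, 21)
relative_peak = []

for angular_separation in separations:
    wf = Wavefront(aperture * np.exp(2j * np.pi * pupil_grid.x * angular_separation))
    img = prop(lyot_stop(coro(wf))).intensity
    relative_peak.append(img.max() / img_ref.max())

plt.plot(separations, relative_peak)
plt.xlabel('Angular separation [$\lambda/D$]')
plt.ylabel('Relative peak intensity')
plt.show()
```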
PypiClean
/django_djmongo-0.7.9.6-py3-none-any.whl/djmongo/static/djmongo/console/modern-business/font-awesome-4.7.0/src/3.2.1/assets/js/prettify.min.js
var q=null;window.PR_SHOULD_USE_CONTINUATION=!0; (function(){function L(a){function m(a){var f=a.charCodeAt(0);if(f!==92)return f;var b=a.charAt(1);return(f=r[b])?f:"0"<=b&&b<="7"?parseInt(a.substring(1),8):b==="u"||b==="x"?parseInt(a.substring(2),16):a.charCodeAt(1)}function e(a){if(a<32)return(a<16?"\\x0":"\\x")+a.toString(16);a=String.fromCharCode(a);if(a==="\\"||a==="-"||a==="["||a==="]")a="\\"+a;return a}function h(a){for(var f=a.substring(1,a.length-1).match(/\\u[\dA-Fa-f]{4}|\\x[\dA-Fa-f]{2}|\\[0-3][0-7]{0,2}|\\[0-7]{1,2}|\\[\S\s]|[^\\]/g),a= [],b=[],o=f[0]==="^",c=o?1:0,i=f.length;c<i;++c){var j=f[c];if(/\\[bdsw]/i.test(j))a.push(j);else{var j=m(j),d;c+2<i&&"-"===f[c+1]?(d=m(f[c+2]),c+=2):d=j;b.push([j,d]);d<65||j>122||(d<65||j>90||b.push([Math.max(65,j)|32,Math.min(d,90)|32]),d<97||j>122||b.push([Math.max(97,j)&-33,Math.min(d,122)&-33]))}}b.sort(function(a,f){return a[0]-f[0]||f[1]-a[1]});f=[];j=[NaN,NaN];for(c=0;c<b.length;++c)i=b[c],i[0]<=j[1]+1?j[1]=Math.max(j[1],i[1]):f.push(j=i);b=["["];o&&b.push("^");b.push.apply(b,a);for(c=0;c< f.length;++c)i=f[c],b.push(e(i[0])),i[1]>i[0]&&(i[1]+1>i[0]&&b.push("-"),b.push(e(i[1])));b.push("]");return b.join("")}function y(a){for(var f=a.source.match(/\[(?:[^\\\]]|\\[\S\s])*]|\\u[\dA-Fa-f]{4}|\\x[\dA-Fa-f]{2}|\\\d+|\\[^\dux]|\(\?[!:=]|[()^]|[^()[\\^]+/g),b=f.length,d=[],c=0,i=0;c<b;++c){var j=f[c];j==="("?++i:"\\"===j.charAt(0)&&(j=+j.substring(1))&&j<=i&&(d[j]=-1)}for(c=1;c<d.length;++c)-1===d[c]&&(d[c]=++t);for(i=c=0;c<b;++c)j=f[c],j==="("?(++i,d[i]===void 0&&(f[c]="(?:")):"\\"===j.charAt(0)&& (j=+j.substring(1))&&j<=i&&(f[c]="\\"+d[i]);for(i=c=0;c<b;++c)"^"===f[c]&&"^"!==f[c+1]&&(f[c]="");if(a.ignoreCase&&s)for(c=0;c<b;++c)j=f[c],a=j.charAt(0),j.length>=2&&a==="["?f[c]=h(j):a!=="\\"&&(f[c]=j.replace(/[A-Za-z]/g,function(a){a=a.charCodeAt(0);return"["+String.fromCharCode(a&-33,a|32)+"]"}));return f.join("")}for(var t=0,s=!1,l=!1,p=0,d=a.length;p<d;++p){var g=a[p];if(g.ignoreCase)l=!0;else if(/[a-z]/i.test(g.source.replace(/\\u[\da-f]{4}|\\x[\da-f]{2}|\\[^UXux]/gi,""))){s=!0;l=!1;break}}for(var r= {b:8,t:9,n:10,v:11,f:12,r:13},n=[],p=0,d=a.length;p<d;++p){g=a[p];if(g.global||g.multiline)throw Error(""+g);n.push("(?:"+y(g)+")")}return RegExp(n.join("|"),l?"gi":"g")}function M(a){function m(a){switch(a.nodeType){case 1:if(e.test(a.className))break;for(var g=a.firstChild;g;g=g.nextSibling)m(g);g=a.nodeName;if("BR"===g||"LI"===g)h[s]="\n",t[s<<1]=y++,t[s++<<1|1]=a;break;case 3:case 4:g=a.nodeValue,g.length&&(g=p?g.replace(/\r\n?/g,"\n"):g.replace(/[\t\n\r ]+/g," "),h[s]=g,t[s<<1]=y,y+=g.length, t[s++<<1|1]=a)}}var e=/(?:^|\s)nocode(?:\s|$)/,h=[],y=0,t=[],s=0,l;a.currentStyle?l=a.currentStyle.whiteSpace:window.getComputedStyle&&(l=document.defaultView.getComputedStyle(a,q).getPropertyValue("white-space"));var p=l&&"pre"===l.substring(0,3);m(a);return{a:h.join("").replace(/\n$/,""),c:t}}function B(a,m,e,h){m&&(a={a:m,d:a},e(a),h.push.apply(h,a.e))}function x(a,m){function e(a){for(var l=a.d,p=[l,"pln"],d=0,g=a.a.match(y)||[],r={},n=0,z=g.length;n<z;++n){var f=g[n],b=r[f],o=void 0,c;if(typeof b=== "string")c=!1;else{var i=h[f.charAt(0)];if(i)o=f.match(i[1]),b=i[0];else{for(c=0;c<t;++c)if(i=m[c],o=f.match(i[1])){b=i[0];break}o||(b="pln")}if((c=b.length>=5&&"lang-"===b.substring(0,5))&&!(o&&typeof o[1]==="string"))c=!1,b="src";c||(r[f]=b)}i=d;d+=f.length;if(c){c=o[1];var j=f.indexOf(c),k=j+c.length;o[2]&&(k=f.length-o[2].length,j=k-c.length);b=b.substring(5);B(l+i,f.substring(0,j),e,p);B(l+i+j,c,C(b,c),p);B(l+i+k,f.substring(k),e,p)}else 
p.push(l+i,b)}a.e=p}var h={},y;(function(){for(var e=a.concat(m), l=[],p={},d=0,g=e.length;d<g;++d){var r=e[d],n=r[3];if(n)for(var k=n.length;--k>=0;)h[n.charAt(k)]=r;r=r[1];n=""+r;p.hasOwnProperty(n)||(l.push(r),p[n]=q)}l.push(/[\S\s]/);y=L(l)})();var t=m.length;return e}function u(a){var m=[],e=[];a.tripleQuotedStrings?m.push(["str",/^(?:'''(?:[^'\\]|\\[\S\s]|''?(?=[^']))*(?:'''|$)|"""(?:[^"\\]|\\[\S\s]|""?(?=[^"]))*(?:"""|$)|'(?:[^'\\]|\\[\S\s])*(?:'|$)|"(?:[^"\\]|\\[\S\s])*(?:"|$))/,q,"'\""]):a.multiLineStrings?m.push(["str",/^(?:'(?:[^'\\]|\\[\S\s])*(?:'|$)|"(?:[^"\\]|\\[\S\s])*(?:"|$)|`(?:[^\\`]|\\[\S\s])*(?:`|$))/, q,"'\"`"]):m.push(["str",/^(?:'(?:[^\n\r'\\]|\\.)*(?:'|$)|"(?:[^\n\r"\\]|\\.)*(?:"|$))/,q,"\"'"]);a.verbatimStrings&&e.push(["str",/^@"(?:[^"]|"")*(?:"|$)/,q]);var h=a.hashComments;h&&(a.cStyleComments?(h>1?m.push(["com",/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,q,"#"]):m.push(["com",/^#(?:(?:define|elif|else|endif|error|ifdef|include|ifndef|line|pragma|undef|warning)\b|[^\n\r]*)/,q,"#"]),e.push(["str",/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h|[a-z]\w*)>/,q])):m.push(["com",/^#[^\n\r]*/, q,"#"]));a.cStyleComments&&(e.push(["com",/^\/\/[^\n\r]*/,q]),e.push(["com",/^\/\*[\S\s]*?(?:\*\/|$)/,q]));a.regexLiterals&&e.push(["lang-regex",/^(?:^^\.?|[!+-]|!=|!==|#|%|%=|&|&&|&&=|&=|\(|\*|\*=|\+=|,|-=|->|\/|\/=|:|::|;|<|<<|<<=|<=|=|==|===|>|>=|>>|>>=|>>>|>>>=|[?@[^]|\^=|\^\^|\^\^=|{|\||\|=|\|\||\|\|=|~|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\s*(\/(?=[^*/])(?:[^/[\\]|\\[\S\s]|\[(?:[^\\\]]|\\[\S\s])*(?:]|$))+\/)/]);(h=a.types)&&e.push(["typ",h]);a=(""+a.keywords).replace(/^ | $/g, "");a.length&&e.push(["kwd",RegExp("^(?:"+a.replace(/[\s,]+/g,"|")+")\\b"),q]);m.push(["pln",/^\s+/,q," \r\n\t\xa0"]);e.push(["lit",/^@[$_a-z][\w$@]*/i,q],["typ",/^(?:[@_]?[A-Z]+[a-z][\w$@]*|\w+_t\b)/,q],["pln",/^[$_a-z][\w$@]*/i,q],["lit",/^(?:0x[\da-f]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+-]?\d+)?)[a-z]*/i,q,"0123456789"],["pln",/^\\[\S\s]?/,q],["pun",/^.[^\s\w"-$'./@\\`]*/,q]);return x(m,e)}function D(a,m){function e(a){switch(a.nodeType){case 1:if(k.test(a.className))break;if("BR"===a.nodeName)h(a), a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)e(a);break;case 3:case 4:if(p){var b=a.nodeValue,d=b.match(t);if(d){var c=b.substring(0,d.index);a.nodeValue=c;(b=b.substring(d.index+d[0].length))&&a.parentNode.insertBefore(s.createTextNode(b),a.nextSibling);h(a);c||a.parentNode.removeChild(a)}}}}function h(a){function b(a,d){var e=d?a.cloneNode(!1):a,f=a.parentNode;if(f){var f=b(f,1),g=a.nextSibling;f.appendChild(e);for(var h=g;h;h=g)g=h.nextSibling,f.appendChild(h)}return e} for(;!a.nextSibling;)if(a=a.parentNode,!a)return;for(var a=b(a.nextSibling,0),e;(e=a.parentNode)&&e.nodeType===1;)a=e;d.push(a)}var k=/(?:^|\s)nocode(?:\s|$)/,t=/\r\n?|\n/,s=a.ownerDocument,l;a.currentStyle?l=a.currentStyle.whiteSpace:window.getComputedStyle&&(l=s.defaultView.getComputedStyle(a,q).getPropertyValue("white-space"));var p=l&&"pre"===l.substring(0,3);for(l=s.createElement("LI");a.firstChild;)l.appendChild(a.firstChild);for(var d=[l],g=0;g<d.length;++g)e(d[g]);m===(m|0)&&d[0].setAttribute("value", m);var r=s.createElement("OL");r.className="linenums";for(var n=Math.max(0,m-1|0)||0,g=0,z=d.length;g<z;++g)l=d[g],l.className="L"+(g+n)%10,l.firstChild||l.appendChild(s.createTextNode("\xa0")),r.appendChild(l);a.appendChild(r)}function k(a,m){for(var e=m.length;--e>=0;){var h=m[e];A.hasOwnProperty(h)?window.console&&console.warn("cannot 
override language handler %s",h):A[h]=a}}function C(a,m){if(!a||!A.hasOwnProperty(a))a=/^\s*</.test(m)?"default-markup":"default-code";return A[a]}function E(a){var m= a.g;try{var e=M(a.h),h=e.a;a.a=h;a.c=e.c;a.d=0;C(m,h)(a);var k=/\bMSIE\b/.test(navigator.userAgent),m=/\n/g,t=a.a,s=t.length,e=0,l=a.c,p=l.length,h=0,d=a.e,g=d.length,a=0;d[g]=s;var r,n;for(n=r=0;n<g;)d[n]!==d[n+2]?(d[r++]=d[n++],d[r++]=d[n++]):n+=2;g=r;for(n=r=0;n<g;){for(var z=d[n],f=d[n+1],b=n+2;b+2<=g&&d[b+1]===f;)b+=2;d[r++]=z;d[r++]=f;n=b}for(d.length=r;h<p;){var o=l[h+2]||s,c=d[a+2]||s,b=Math.min(o,c),i=l[h+1],j;if(i.nodeType!==1&&(j=t.substring(e,b))){k&&(j=j.replace(m,"\r"));i.nodeValue= j;var u=i.ownerDocument,v=u.createElement("SPAN");v.className=d[a+1];var x=i.parentNode;x.replaceChild(v,i);v.appendChild(i);e<o&&(l[h+1]=i=u.createTextNode(t.substring(b,o)),x.insertBefore(i,v.nextSibling))}e=b;e>=o&&(h+=2);e>=c&&(a+=2)}}catch(w){"console"in window&&console.log(w&&w.stack?w.stack:w)}}var v=["break,continue,do,else,for,if,return,while"],w=[[v,"auto,case,char,const,default,double,enum,extern,float,goto,int,long,register,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"], "catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],F=[w,"alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,dynamic_cast,explicit,export,friend,inline,late_check,mutable,namespace,nullptr,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"],G=[w,"abstract,boolean,byte,extends,final,finally,implements,import,instanceof,null,native,package,strictfp,super,synchronized,throws,transient"], H=[G,"as,base,by,checked,decimal,delegate,descending,dynamic,event,fixed,foreach,from,group,implicit,in,interface,internal,into,is,lock,object,out,override,orderby,params,partial,readonly,ref,sbyte,sealed,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,var"],w=[w,"debugger,eval,export,function,get,null,set,undefined,var,with,Infinity,NaN"],I=[v,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"], J=[v,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],v=[v,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"],K=/^(DIR|FILE|vector|(de|priority_)?queue|list|stack|(const_)?iterator|(multi)?(set|map)|bitset|u?(int|float)\d*)/,N=/\S/,O=u({keywords:[F,H,w,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END"+ I,J,v],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),A={};k(O,["default-code"]);k(x([],[["pln",/^[^<?]+/],["dec",/^<!\w[^>]*(?:>|$)/],["com",/^<\!--[\S\s]*?(?:--\>|$)/],["lang-",/^<\?([\S\s]+?)(?:\?>|$)/],["lang-",/^<%([\S\s]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\S\s]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\S\s]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\S\s]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]), ["default-markup","htm","html","mxml","xhtml","xml","xsl"]);k(x([["pln",/^\s+/,q," 
\t\r\n"],["atv",/^(?:"[^"]*"?|'[^']*'?)/,q,"\"'"]],[["tag",/^^<\/?[a-z](?:[\w-.:]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^\s"'>]*(?:[^\s"'/>]|\/(?=\s)))/],["pun",/^[/<->]+/],["lang-js",/^on\w+\s*=\s*"([^"]+)"/i],["lang-js",/^on\w+\s*=\s*'([^']+)'/i],["lang-js",/^on\w+\s*=\s*([^\s"'>]+)/i],["lang-css",/^style\s*=\s*"([^"]+)"/i],["lang-css",/^style\s*=\s*'([^']+)'/i],["lang-css", /^style\s*=\s*([^\s"'>]+)/i]]),["in.tag"]);k(x([],[["atv",/^[\S\s]+/]]),["uq.val"]);k(u({keywords:F,hashComments:!0,cStyleComments:!0,types:K}),["c","cc","cpp","cxx","cyc","m"]);k(u({keywords:"null,true,false"}),["json"]);k(u({keywords:H,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:K}),["cs"]);k(u({keywords:G,cStyleComments:!0}),["java"]);k(u({keywords:v,hashComments:!0,multiLineStrings:!0}),["bsh","csh","sh"]);k(u({keywords:I,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}), ["cv","py"]);k(u({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["perl","pl","pm"]);k(u({keywords:J,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb"]);k(u({keywords:w,cStyleComments:!0,regexLiterals:!0}),["js"]);k(u({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,true,try,unless,until,when,while,yes", hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0,regexLiterals:!0}),["coffee"]);k(x([],[["str",/^[\S\s]+/]]),["regex"]);window.prettyPrintOne=function(a,m,e){var h=document.createElement("PRE");h.innerHTML=a;e&&D(h,e);E({g:m,i:e,h:h});return h.innerHTML};window.prettyPrint=function(a){function m(){for(var e=window.PR_SHOULD_USE_CONTINUATION?l.now()+250:Infinity;p<h.length&&l.now()<e;p++){var n=h[p],k=n.className;if(k.indexOf("prettyprint")>=0){var k=k.match(g),f,b;if(b= !k){b=n;for(var o=void 0,c=b.firstChild;c;c=c.nextSibling)var i=c.nodeType,o=i===1?o?b:c:i===3?N.test(c.nodeValue)?b:o:o;b=(f=o===b?void 0:o)&&"CODE"===f.tagName}b&&(k=f.className.match(g));k&&(k=k[1]);b=!1;for(o=n.parentNode;o;o=o.parentNode)if((o.tagName==="pre"||o.tagName==="code"||o.tagName==="xmp")&&o.className&&o.className.indexOf("prettyprint")>=0){b=!0;break}b||((b=(b=n.className.match(/\blinenums\b(?::(\d+))?/))?b[1]&&b[1].length?+b[1]:!0:!1)&&D(n,b),d={g:k,h:n,i:b},E(d))}}p<h.length?setTimeout(m, 250):a&&a()}for(var e=[document.getElementsByTagName("pre"),document.getElementsByTagName("code"),document.getElementsByTagName("xmp")],h=[],k=0;k<e.length;++k)for(var t=0,s=e[k].length;t<s;++t)h.push(e[k][t]);var e=q,l=Date;l.now||(l={now:function(){return+new Date}});var p=0,d,g=/\blang(?:uage)?-([\w.]+)(?!\S)/;m()};window.PR={createSimpleLexer:x,registerLangHandler:k,sourceDecorator:u,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit", PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ"}})();
PypiClean
/chainer-chemistry-0.7.1.tar.gz/chainer-chemistry-0.7.1/chainer_chemistry/functions/loss/mean_squared_error.py
import numpy from chainer import cuda from chainer import function_node import chainer.functions from chainer.utils import type_check class MeanSquaredError(function_node.FunctionNode): """Mean squared error (a.k.a. Euclidean loss) function.""" def __init__(self, ignore_nan=False): # TODO(mottodora): implement task weight calculation self.ignore_nan = ignore_nan def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) type_check.expect( in_types[0].dtype == numpy.float32, in_types[1].dtype == numpy.float32, in_types[0].shape == in_types[1].shape ) def forward_cpu(self, inputs): self.retain_inputs((0, 1)) diff = (inputs[0] - inputs[1]).ravel() # TODO(mottodora): add reduce option if self.ignore_nan: diff[numpy.isnan(diff)] = 0. return numpy.array(diff.dot(diff) / diff.size, dtype=diff.dtype), def forward_gpu(self, inputs): cupy = cuda.cupy self.retain_inputs((0, 1)) diff = (inputs[0] - inputs[1]).ravel() # TODO(mottodora): add reduce option if self.ignore_nan: diff[cupy.isnan(diff)] = 0. return diff.dot(diff) / diff.dtype.type(diff.size), def backward(self, indexes, gy): x0, x1 = self.get_retained_inputs() xp = cuda.get_array_module(x0) ret = [] diff = x0 - x1 if self.ignore_nan: diff = chainer.functions.where(xp.isnan(diff.array), xp.zeros_like(diff.array), diff) gy0 = chainer.functions.broadcast_to(gy[0], diff.shape) gx0 = gy0 * diff * (2. / diff.size) if 0 in indexes: ret.append(gx0) if 1 in indexes: ret.append(-gx0) return ret def mean_squared_error(x0, x1, ignore_nan=False): """Mean squared error function. This function computes mean squared error between two variables. The mean is taken over the minibatch. Note that the error is not scaled by 1/2. Args: x0 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \ :class:`cupy.ndarray`): Input variable. x1 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \ :class:`cupy.ndarray`): Input variable. ignore_nan (bool): If `True`, this function compute mean squared error ignoring NaNs. The arithmetic mean is the sum of the non-NaN elements along the axis divided by the number of whole elements. Returns: ~chainer.Variable: A variable holding an array representing the mean squared error of two inputs. """ return MeanSquaredError(ignore_nan).apply((x0, x1))[0]
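# --- Illustrative usage (added for clarity; not part of the original module) --
# A minimal, guarded sketch of calling ``mean_squared_error`` defined above on
# raw NumPy inputs. The shapes and values are made up for illustration, and the
# ``__main__`` guard keeps library imports unaffected.
if __name__ == "__main__":
    x0 = numpy.array([[1.0, 2.0], [3.0, numpy.nan]], dtype=numpy.float32)
    x1 = numpy.array([[1.5, 2.0], [2.0, 5.0]], dtype=numpy.float32)

    # With ignore_nan=True the NaN difference contributes 0 to the squared
    # error, while the divisor stays the total element count (see forward_cpu).
    loss = mean_squared_error(x0, x1, ignore_nan=True)
    print(loss.array)  # scalar float32 held in a chainer Variable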
PypiClean
/pypiemma-4.0.1.tar.gz/pypiemma-4.0.1/Emma/shared_libs/emma_helper.py
import os import re import json import base64 from pypiscout.SCout_Logger import Logger as sc import markdown import markdown.extensions.codehilite import markdown.extensions.fenced_code import markdown.extensions.toc import markdown.extensions.tables from Emma.shared_libs.stringConstants import * # pylint: disable=unused-wildcard-import,wildcard-import def checkIfFolderExists(folderName): """ Check whether a folder exists in current directory; If not exit with error message :param folderName: Project to check """ if not os.path.isdir(folderName): sc().error("Given directory (" + os.path.abspath(folderName) + ") does not exist; exiting...") def checkIfFileExists(filePath): """ Check whether a file exists; If not exit with error message :param filePath: File path to check """ if not os.path.exists(filePath): sc().error("Given file (" + filePath + ") does not exist; exiting...") def mkDirIfNeeded(path): """ Creates path and all intermediate directories until there :param path: Path to create """ if not os.path.isdir(path): os.makedirs(path) sc().info("Directory " + path + " created since not present") def readJson(jsonInFilePath): """ Function to read a JSON file :return: dict of JSON """ checkIfFileExists(os.path.abspath(jsonInFilePath)) # Absolute path for more readable error message try: with open(jsonInFilePath, "r") as fp: dictFromJson = json.load(fp) except FileNotFoundError: sc().error(f"The file `{os.path.abspath(jsonInFilePath)}` was not found!") except json.JSONDecodeError: sc().error(f"JSON syntax error in `{os.path.abspath(jsonInFilePath)}`!") return dictFromJson def writeJson(jsonOutFilePath, dictToWrite): """ Function to write a JSON file :return: dict of JSON """ with open(jsonOutFilePath, "w") as fp: json.dump(dictToWrite, fp, indent='\t') def unifyAddress(address): """ Convert hex or dec address and returns both (in this order) :param address: hex or dec address :return: [addressHex, addressDec) """ if isinstance(address, str) and address is not None: address = int(address, 16) addressHex = hex(address) elif isinstance(address, int) and address is not None: addressHex = hex(address) else: sc().error("unifyAddress(): Address must be either of type int or str!") raise TypeError return addressHex, address def getTimestampFromFilename(filename): """ Get the timestamp from a report file name. :param filename: Name of the report file. :return: The timestamp in string form if found in the filename, else None. 
""" result = None pattern = re.compile(r"\d{4}-\d{2}-\d{2}-\d{2}h\d{2}s\d{2}") # Matches timestamps of the following format: `2017-11-06-14h56s52` match = re.search(pattern, filename) if match: result = match.group() else: sc().error("Could not match the given filename:", filename) return result def getColourValFromString(inputString): """ Hashes an input string and returns an 6 digit hex string from it :param inputString: an arbitrary string to convert :return: 6 digit hex string """ import hashlib # FIXME: import in function since it is currently not used (just to be sure that it won't break if it will be used in the future) hashedString = hashlib.sha256(inputString.encode()) return hashedString.hexdigest()[len(hashedString.hexdigest())-6:] # FIXME: stripping anything except the first 6 characters might fail in some cases >> investigate this further (MSc) def lastModifiedFilesInDir(path, extension, subStringIdentifier=""): """ :param path: Directory the files are in :param extension: Only files with a specified extension are included :param subStringIdentifier: [str] Substring in file names to filter files; If an empty string is provided (= default) all files will be considered :return: Sorted list of modified files """ result = [] if os.path.isdir(path): directory = os.listdir(path) fileTimestamps = [] for file in directory: file = joinPath(path, file) if os.path.isfile(file) and file.endswith(extension) and subStringIdentifier in file: time = os.path.getmtime(file) fileTimestamps.append([time, file]) # Python sorts always by first element for nested lists; we only need the last element (last change) and only its filename (>> [1]) result = [item[1] for item in sorted(fileTimestamps)] return result def evalSummary(filename): """ Function to check whether current report file is section or object summary. :param filename: Filename of a report. :return: FILE_IDENTIFIER_SECTION_SUMMARY if it´s a section- or FILE_IDENTIFIER_OBJECT_SUMMARY if it´s an object report, else None. """ result = None if FILE_IDENTIFIER_SECTION_SUMMARY in filename: result = FILE_IDENTIFIER_SECTION_SUMMARY elif FILE_IDENTIFIER_OBJECT_SUMMARY in filename: result = FILE_IDENTIFIER_OBJECT_SUMMARY return result def projectNameFromPath(path): """ Derives the project name from path :param path: :return: """ return os.path.split(os.path.normpath(path))[-1] def joinPath(*paths): """ Join paths together maintaining one slash direction. This is especially important when using multiple operating systems (use forward slashes only). :param paths: [List(string)] List of paths which are going to be joined together :return: [string] The joined path """ # Removing the elements that are None because these can be optional path elements and they would cause an exception listOfReceivedPaths = [i for i in paths if i is not None] return os.path.normpath(os.path.join(*listOfReceivedPaths)) def changePictureLinksToEmbeddingInHtmlData(htmlData, sourceDataPath=""): """ The function looks for linked pictures in a html formatted string. Then it tries to open every picture file that was linked, encodes their content with base64.encodebytes and replaces the picture links with the encoded data. This function should be used whenever a portable .html file needs to be created that has all the pictures embedded into it. :param htmlData: Path of the .html file. :param sourceDataPath: This is the path of the file from which the htmlData comes from. It is needed during the search for the picture files. :return: The modified htmlData. 
""" listOfLinkedPictures = re.findall(r"<img src=\"([^\"]*)", htmlData) for linkedPicture in listOfLinkedPictures: # If the linkedPicture is not an absolute path it needs to be prepended with the sourceDataPath if os.path.isabs(linkedPicture): linkedPicturePath = linkedPicture else: linkedPicturePath = joinPath(os.path.dirname(sourceDataPath), linkedPicture) if not os.path.exists(linkedPicturePath): sc().warning("The file " + linkedPicturePath + " does not exist!") continue try: with open(linkedPicturePath, "rb") as fileObject: encodedPictureData = base64.encodebytes(fileObject.read()) except FileNotFoundError: sc().error(f"The file `{os.path.abspath(linkedPicturePath)}` was not found!") linkedPictureFileExtension = os.path.splitext(linkedPicture)[1][1:] replacementString = "data:image/" + linkedPictureFileExtension + ";base64," + encodedPictureData.decode() + "\" alt=\"" + linkedPicture htmlData = htmlData.replace(linkedPicture, replacementString) return htmlData def convertMarkdownDataToHtmlData(markdownData): """ Function to convert markdown formatted data to html formatted data. :param markdownData: The markdown formatted data that will be converted. :return: The created html formatted data. """ # For available extensions see here: https://github.com/Python-Markdown/markdown/blob/master/docs/extensions/index.md htmlData = markdown.markdown(markdownData, extensions=[markdown.extensions.codehilite.CodeHiliteExtension(), markdown.extensions.toc.TocExtension(), markdown.extensions.fenced_code.FencedCodeExtension(), markdown.extensions.tables.TableExtension()]) return htmlData def convertMarkdownFileToHtmlFile(markdownFilePath, htmlFilePath): """ Function to convert a .md file to a .html file. :param markdownFilePath: Path to the .md file. :param htmlFilePath: Path to the .html file. :return: nothing """ try: with open(markdownFilePath, "r") as fileObject: markdownData = fileObject.read() except FileNotFoundError: sc().error(f"The file `{os.path.abspath(markdownFilePath)}` was not found!") htmlData = convertMarkdownDataToHtmlData(markdownData) htmlData = changePictureLinksToEmbeddingInHtmlData(htmlData, markdownFilePath) htmlData = HTML_TEMPLATE.replace(HTML_TEMPLATE_BODY_PLACEHOLDER, htmlData) with open(htmlFilePath, "w") as fileObject: fileObject.write(htmlData) def findFilesInDir(searchDirectory, regexPattern=r".*", includingRoot=True): """ It looks recursively for files in the search_directory that are matching the regex_pattern. :param searchDirectory: The directory in which the search will be done. :param regexPattern: The regex patterns that the files will be matched against. :param includingRoot: If true, the search directory will be added to the path of the search results as well. :return: The paths of the files found. :rtype: list of str """ result = [] for (root, _, files) in os.walk(searchDirectory): for file in files: if re.search(regexPattern, file) is not None: if includingRoot: result.append(joinPath(root, file)) else: result.append(file) return result def saveMatplotlibPicture(pictureData, pathToSave, savefigFormat, savefigDpi, savefigTransparent): """ Function to save a matplotlib figure to disk. It ensures that the picture file is properly flushed. :param pictureData: A matplotlib Figure object that has a savefig method. :param pathToSave: The path where the picture will be saved to. :param savefigFormat: This value will be forwarded to the savefig method of the Figure object. 
(See savefig´s description for details) :param savefigDpi: This value will be forwarded to the savefig method of the Figure object. (See savefig´s description for details) :param savefigTransparent: This value will be forwarded to the savefig method of the Figure object. (See savefig´s description for details) :return: nothing """ with open(pathToSave, "wb") as fileObject: pictureData.savefig(fileObject, format=savefigFormat, dpi=savefigDpi, transparent=savefigTransparent) fileObject.flush() def toHumanReadable(num, suffix='B'): # pylint: disable=inconsistent-return-statements # Rationale: This code was copied from the source below, it will not be changed to keep it aligned with the original. # pylint: disable=invalid-name # Rationale: This code was copied from the source below, it will not be changed to keep it aligned with the original. """ Converts a number into a human readable format: humanReadableSize(168963795964) -> ' 157.36 GiB' Note: we use binary prefixes (-> 1kiB = 1024 Byte) We expect data type to be int (-> we do not expect "half" bytes) or reasonable convertible to int MIT License toHumanReadable Copyright (c) 2019,2020 Marcel Schmalzl, Steve Göring https://github.com/TeamFlowerPower/kb/wiki/humanReadable :param num: Number to convert :param suffix: The suffix that will be added to the quantifier :return: Formatted string """ count = 0 bit_10 = 10 num_tmp = num for prefix in UNIT_PREFIXES: if abs(num_tmp) > 1024: num_tmp >>= bit_10 count += 1 else: return "{: .2f} {}{}".format(num/2**(count*bit_10), prefix, suffix) class Prompt: # pylint: disable=too-few-public-methods # Rationale: This is legacy code, changing it into a function would require changes in other code parts. """ Class that contains functions that help handling of user prompts. """ @staticmethod def idx(): """ Prompt for an index [0, inf) and return it if in this range. :return: The index entered by the user, None otherwise. """ result = -1 text = input("> ") if text is not None and text != "": result = int(text) return result def parseGivenArgStrOrStdIn(arguments: str, parser): """ Either parse the arguments string if it is not empty or (the default case) parse the data from sys.argv :param arguments: [str] Argument string to parse (empty string if std-in should be parsed) :param parser: The set-up parser (i.e. after you added all arguments to it) :return: [Namespace] parsed arguments """ # if arguments == "": parsedArguments = parser.parse_args() else: # Arguments were passed to this function (e.g. for unit testing) parsedArguments = parser.parse_args(arguments) return parsedArguments
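# --- Illustrative usage (added for clarity; not part of the original module) --
# A small, guarded sketch exercising a few of the helpers defined above. The
# concrete paths and numbers are made up for illustration.
if __name__ == "__main__":
    # joinPath() keeps one slash direction via os.path.normpath
    print(joinPath("results", "project_x", "memStats"))

    # projectNameFromPath() returns the last component of the normalised path
    print(projectNameFromPath("/home/user/projects/project_x"))

    # toHumanReadable() uses binary prefixes (1 kiB = 1024 B); the value below
    # is the one from its docstring and should print roughly " 157.36 GiB"
    print(toHumanReadable(168963795964))

    # getColourValFromString() derives a stable 6-digit hex colour from a name
    print(getColourValFromString("section_summary"))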
PypiClean
/tablecheck-0.1.0.tar.gz/tablecheck-0.1.0/LICENSE.md
### GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. ### Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. ### TERMS AND CONDITIONS #### 0. 
Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. #### 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. #### 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. #### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. #### 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. #### 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - a) The work must carry prominent notices stating that you modified it, and giving a relevant date. - b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". - c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. - d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. #### 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. - b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. - c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. - d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. - e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. #### 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or - b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or - c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or - d) Limiting the use for publicity purposes of names of licensors or authors of the material; or - e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or - f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. #### 8. Termination. 
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. #### 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. #### 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. #### 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. #### 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. #### 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. #### 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. #### 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. #### 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. #### 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS ### How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands \`show w' and \`show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.
PypiClean
/pyMissingAHP-1.1.5.tar.gz/pyMissingAHP-1.1.5/README.md
# pyMissingAHP ## Introduction The multi-criteria technique Analytic Hierarchy Process (AHP) needs a complete Pairwise Comparison Matrix (PCM) to generate results. With an incomplete PCM, our algorithm can infer the best (continuous or discrete) values to complete the missing data. The values can be calculated based on the minimum inconsistency (f0), target rank preservation (f1), or both (f0_f1). The target rank preservation can be total (when all criteria are ranked) or partial (when only a set of criteria are ranked). We also allow ties in ranks (criteria with the same rank). For small problems with discrete scale, we offer a brute force method that can find all available solutions. It's worth noting that our implementation can deal with AHP and Fuzzy AHP. The Fuzzy AHP needs a fuzzy triangular scale to work, and although the user can define his scale, we have implemented a default fuzzy triangular scale that can be used in most problems: | Crisp Number | Fuzzy Number | |--------------|-----------------| | 1/9 | (1/9, 1/9, 1/9) | | 1/8 | (1/9, 1/8, 1/7) | | 1/7 | (1/8, 1/7, 1/6) | | 1/6 | (1/7, 1/6, 1/5) | | 1/5 | (1/6, 1/5, 1/4) | | 1/4 | (1/5, 1/4, 1/3) | | 1/3 | (1/4, 1/3, 1/2) | | 1/2 | (1/3, 1/2, 1) | | 1 | ( 1, 1, 1) | | 2 | ( 1, 2, 3) | | 3 | ( 2, 3, 4) | | 4 | ( 3, 4, 5) | | 5 | ( 4, 5, 6) | | 6 | ( 5, 6, 7) | | 7 | ( 6, 7, 8) | | 8 | ( 7, 8, 9) | | 9 | ( 9, 9, 9) | ## Usage 1. Install ```bash pip install pyMissingAHP ``` 2. Try it in **Colab**: Single Objective AHP: - Example 1a - (AHP; f0; Discrete): ([ Colab Demo ](https://colab.research.google.com/drive/11FoDq0i5WGY7IH1Kxf7FBboWGAk6Mw9A?usp=sharing)) - Example 1b - (AHP; f0; Continuous): ([ Colab Demo ](https://colab.research.google.com/drive/1Jebj8Dqzm96DAmabF_i1RrS-d_-Au_YI?usp=sharing)) - Example 1c - (AHP; f1; Discrete; Different Rank Positions): ([ Colab Demo ](https://colab.research.google.com/drive/1n9hcYCW85bK5qU_LpNyZcaTalSnvT-de?usp=sharing)) - Example 1d - (AHP; f1; Continuous; Different Rank Positions): ([ Colab Demo ](https://colab.research.google.com/drive/1kB3nJl4jlSWUoviKZXblqMgIJk8iz_VA?usp=sharing)) - Example 1e - (AHP; f1; Discrete; Same Rank Positions): ([ Colab Demo ](https://colab.research.google.com/drive/1D6ae7wgcZg-yNFr_gj5pmxEriL-oG09X?usp=sharing)) - Example 1f - (AHP; f1; Continuous; Same Rank Positions): ([ Colab Demo ](https://colab.research.google.com/drive/1-wMDIPN4ZRgWX3JpyltUjpI8xiR-BKlh?usp=sharing)) - Example 1g - (AHP; f1; Discrete; Partial Rank Positions): ([ Colab Demo ](https://colab.research.google.com/drive/1LScLnOoSFI4FMR5qMRuyykwIcnj_S2lU?usp=sharing)) - Example 1h - (AHP; f1; Continuous; Partial Rank Positions): ([ Colab Demo ](https://colab.research.google.com/drive/1QjqU3uo0pnW4CuyTTmnaEyElpRdfDiE6?usp=sharing)) - Example 1 - Brute Force - (AHP; Discrete): ([ Colab Demo ](https://colab.research.google.com/drive/1y1tycNbDFxFYiSb3_BrHmP2dUnOOHIqG?usp=sharing)) Single Objective Fuzzy AHP: - Example 2a - (Fuzzy AHP; f0; Discrete): ([ Colab Demo ](https://colab.research.google.com/drive/1aBEP7lYbSvpHJxJGxYrg4QS4na8Jk49f?usp=sharing)) - Example 2b - (Fuzzy AHP; f1; Discrete): ([ Colab Demo ](https://colab.research.google.com/drive/18aeD00Q2jmc_P6QSHGjuEKDUKeEoIiq4?usp=sharing)) - Example 2c - (Fuzzy AHP; f0; Discrete; Custom Fuzzy Scale): ([ Colab Demo ](https://colab.research.google.com/drive/1vPBq4CzNXS503W-ANdW8-WYacdDgSOVr?usp=sharing)) - Example 2d - (Fuzzy AHP; f1; Discrete; Custom Fuzzy Scale): ([ Colab Demo 
](https://colab.research.google.com/drive/1sfpmhM7U3xvSKfGlbRlVNKhSszN4vAA4?usp=sharing)) - Example 2 - Brute Force - (Fuzzy AHP; Discrete): ([ Colab Demo ](https://colab.research.google.com/drive/1FmhWnZw3SA7sCGxLYLK6-ISsrYX9kEZU?usp=sharing)) Multiobjective AHP: - Example 3a - (AHP; f0 & f1; Discrete): ([ Colab Demo ](https://colab.research.google.com/drive/1kDo5Ur0_xK2LzGmDOPd0kLjwQwuOKezE?usp=sharing)) - Example 3b - (AHP; f0 & f1; Continuous): ([ Colab Demo ](https://colab.research.google.com/drive/1IwRxyxHXMEAAdDPSTr6yy8otEkv7l3kW?usp=sharing)) Multiobjective Fuzzy AHP: - Example 4a - (Fuzzy AHP; f0 & f1; Discrete): ([ Colab Demo ](https://colab.research.google.com/drive/1_zRxMOmGgoEoiddF94383OHYq_ztF0nT?usp=sharing)) - Example 4b - (Fuzzy AHP; f0 & f1; Discrete; Custom Fuzzy Scale): ([ Colab Demo ](https://colab.research.google.com/drive/1Jn6KElsYwN6W9IXR4XbBDy2AW6JYoh9t?usp=sharing)) 3. Others - [pyDecision](https://github.com/Valdecy/pyDecision) - A library for many MCDA methods - [3MOAHP](https://github.com/Valdecy/Method_3MOAHP) - Inconsistency Reduction Technique for AHP and Fuzzy-AHP Methods - [ELECTRE-Tree](https://github.com/Valdecy/ELECTRE-Tree) - Algorithm to infer the ELECTRE Tri-B method parameters - [Ranking-Trees](https://github.com/Valdecy/Ranking-Trees) - Algorithm to infer the ELECTRE II, III, IV and PROMETHEE I, II, III, IV method parameters
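## Background sketch

The snippet below is **not** the pyMissingAHP API (see the Colab demos above for that). It is only a plain NumPy illustration, under the standard Saaty definitions, of what the f0 objective measures: for each candidate value filling the single missing pair of a 3x3 PCM, it computes the consistency ratio that the algorithm tries to minimise.

```python
import numpy as np

def consistency_ratio(pcm):
    # Saaty's consistency index CI = (lambda_max - n) / (n - 1),
    # normalised by the random index RI (0.58 for n = 3)
    n = pcm.shape[0]
    lambda_max = max(np.linalg.eigvals(pcm).real)
    ci = (lambda_max - n) / (n - 1)
    return ci / 0.58

# 3x3 PCM where the comparison between criterion 1 and criterion 3 is missing;
# the reciprocal entry is fixed to 1/candidate to keep the matrix reciprocal.
for candidate in (2, 4, 6, 8):
    pcm = np.array([[1.0,             2.0,             float(candidate)],
                    [1.0 / 2.0,       1.0,             3.0],
                    [1.0 / candidate, 1.0 / 3.0,       1.0]])
    print(candidate, round(consistency_ratio(pcm), 4))
```

The candidate with the lowest consistency ratio (here 6, since 2 x 3 = 6 makes the matrix perfectly consistent) is the kind of value f0 favours; f1 instead targets the desired rank of the criteria, and f0_f1 trades both objectives off.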
PypiClean
/sosse-1.5.0.tar.gz/sosse-1.5.0/se/stats.py
from datetime import date, timedelta import os from django.conf import settings from django.db import connection, models from django.shortcuts import render, redirect, reverse from django.utils.timezone import now from langdetect.detector_factory import PROFILES_DIRECTORY import pygal from .document import Document from .login import login_required from .models import CrawlerStats, DAILY, MINUTELY from .utils import get_unit, human_filesize from .views import get_context def datetime_graph(pygal_config, pygal_style, freq, data, col, _now): if freq == MINUTELY: start = _now - timedelta(hours=23) start = start.replace(minute=0, second=0, microsecond=0) timespan = timedelta(hours=24) dt = timedelta(hours=6) format_str = '%H:%M' x_title = 'UTC time' x_labels = [start] t = start while timespan.total_seconds() > 0: t += dt timespan -= dt if freq == DAILY: t = t.replace(day=1, hour=0, minute=0, second=0, microsecond=0) x_labels.append(t) cls = pygal.DateTimeLine else: start = _now - timedelta(days=364) start = start.replace(day=1, hour=0, minute=0, second=0, microsecond=0) x_labels = [start] t = start for i in range(1, 7): month = t.month + (i * 2) year = int((month - 1) / 12) month = ((month - 1) % 12) + 1 d = date(year=t.year + year, month=month, day=1) x_labels.append(d) format_str = '%b' x_title = None cls = pygal.DateLine g = cls(pygal_config, style=pygal_style, disable_xml_declaration=True, truncate_label=-1, show_legend=False, fill=True, x_value_formatter=lambda dt: dt.strftime(format_str), x_title=x_title, range=(0, None)) g.x_labels = x_labels stats_max = data.aggregate(m=models.Max(col)).get('m', 0) or 0 factor, unit = get_unit(stats_max) entries = [] for entry in data: val = getattr(entry, col) if val is not None: entries.append((entry.t.timestamp(), val / factor)) if entries == []: entries = [(start, 0), (_now, 0)] g.add('', entries) return g def crawler_stats(pygal_config, pygal_style, freq): _now = now() if freq == MINUTELY: dt = _now - timedelta(days=1) else: dt = _now - timedelta(days=365) data = CrawlerStats.objects.filter(t__gte=dt, freq=freq).order_by('t') if data.count() < 1: return {} pygal_style.colors = ('#c6dcff',) pygal_style.title_font_size = 30 # Doc count minutely doc_count = datetime_graph(pygal_config, pygal_style, freq, data, 'doc_count', _now) factor, unit = get_unit(data.aggregate(m=models.Max('doc_count')).get('m', 0) or 0) doc_count.title = 'Doc count' if unit: doc_count.title += ' (%s)' % unit doc_count = doc_count.render() # Processing speed minutely if freq == MINUTELY: idx_speed_data = data.annotate(speed=models.F('indexing_speed') / 60.0) factor, unit = get_unit(data.aggregate(m=models.Max('indexing_speed')).get('m', 0) or 0.0) else: idx_speed_data = data.annotate(speed=models.F('indexing_speed') / 60.0 / 60.0 / 24.0) factor, unit = get_unit(idx_speed_data.aggregate(m=models.Max('speed')).get('m', 0) or 0.0) idx_speed = datetime_graph(pygal_config, pygal_style, freq, idx_speed_data, 'speed', _now) if not unit: unit = 'doc' idx_speed.title = 'Processing speed (%s/s)' % unit idx_speed = idx_speed.render() # Url queued minutely url_queue = datetime_graph(pygal_config, pygal_style, freq, data, 'queued_url', _now) factor, unit = get_unit(data.aggregate(m=models.Max('queued_url')).get('m', 1)) url_queue.title = 'URL queued' if unit: url_queue.title += ' (%s)' % unit url_queue = url_queue.render() freq = freq.lower() return { '%s_doc_count' % freq: doc_count, '%s_idx_speed' % freq: idx_speed, '%s_url_queue' % freq: url_queue, } def dir_size(d): # 
https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python size = 0 for dirpath, dirnames, filenames in os.walk(d): for f in filenames: fp = os.path.join(dirpath, f) if not os.path.islink(fp): size += os.path.getsize(fp) return size @login_required def stats(request): if not request.user.is_staff and not request.user.is_superuser: return redirect(reverse('search')) pygal_config = pygal.Config() pygal_config.js = (settings.STATIC_URL + '/se/pygal-tooltips.min.js',) pygal_style = pygal.style.Style( background='transparent', plot_background='transparent', title_font_size=40, legend_font_size=30, label_font_size=30, major_label_font_size=30, value_font_size=30, value_label_font_size=30 ) with connection.cursor() as cursor: cursor.execute('SELECT pg_database_size(%s)', [settings.DATABASES['default']['NAME']]) db_size = cursor.fetchall()[0][0] doc_count = Document.objects.count() indexed_langs = Document.objects.exclude(lang_iso_639_1__isnull=True).values('lang_iso_639_1').annotate(count=models.Count('lang_iso_639_1')).order_by('-count') # Language chart lang_chart = None if indexed_langs: lang_chart = pygal.Bar(pygal_config, style=pygal_style, disable_xml_declaration=True, range=(0, None)) lang_chart.title = "Document's language" factor, unit = get_unit(indexed_langs[0]['count']) if unit: lang_chart.title += ' (%s)' % unit for lang in indexed_langs[:8]: lang_iso = lang['lang_iso_639_1'] lang_desc = settings.SOSSE_LANGDETECT_TO_POSTGRES.get(lang_iso, {}) title = lang_iso.title() if lang_desc.get('flag'): title = title + ' ' + lang_desc['flag'] percent = lang['count'] / factor lang_chart.add(title, percent) lang_chart = lang_chart.render() # HDD chart statvfs = os.statvfs('/var/lib') hdd_size = statvfs.f_frsize * statvfs.f_blocks hdd_free = statvfs.f_frsize * statvfs.f_bavail factor, unit = get_unit(hdd_size) screenshot_size = dir_size(settings.SOSSE_SCREENSHOTS_DIR) html_size = dir_size(settings.SOSSE_HTML_SNAPSHOT_DIR) hdd_other = hdd_size - hdd_free - db_size - screenshot_size - html_size hdd_pie = pygal.Pie(pygal_config, style=pygal_style, disable_xml_declaration=True) hdd_pie.title = 'HDD size (total %s)' % human_filesize(hdd_size) hdd_pie.add('DB(%s)' % human_filesize(db_size), db_size) hdd_pie.add('Screenshots(%s)' % human_filesize(screenshot_size), screenshot_size) hdd_pie.add('HTML(%s)' % human_filesize(html_size), html_size) hdd_pie.add('Other(%s)' % human_filesize(hdd_other), hdd_other) hdd_pie.add('Free(%s)' % human_filesize(hdd_free), hdd_free) # Crawler stats context = get_context({ 'title': 'Statistics', # index 'doc_count': doc_count, 'lang_count': len(indexed_langs), 'db_size': human_filesize(db_size), 'doc_size': 0 if doc_count == 0 else human_filesize(db_size / doc_count), 'lang_recognizable': len(os.listdir(PROFILES_DIRECTORY)), 'lang_parsable': [lang.title() for lang in sorted(Document.get_supported_langs())], 'lang_chart': lang_chart, 'hdd_pie': hdd_pie.render(), }) context.update(crawler_stats(pygal_config, pygal_style, MINUTELY)) context.update(crawler_stats(pygal_config, pygal_style, DAILY)) return render(request, 'se/stats.html', context)
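

# --- Hypothetical usage sketch (not part of the original sosse module) ---
# dir_size() walks a directory tree and sums the sizes of regular files while
# skipping symlinks; human_filesize() (imported above from .utils) renders the
# byte count as a readable string. The path below is illustrative only.
if __name__ == "__main__":
    snapshot_bytes = dir_size("/var/lib/sosse/screenshots")
    print(human_filesize(snapshot_bytes))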
PypiClean
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/documentdb/v20150401/database_account_sql_container.py
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['DatabaseAccountSqlContainer'] class DatabaseAccountSqlContainer(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_name: Optional[pulumi.Input[str]] = None, container_name: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, resource: Optional[pulumi.Input[pulumi.InputType['SqlContainerResourceArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ An Azure Cosmos DB container. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_name: Cosmos DB database account name. :param pulumi.Input[str] container_name: Cosmos DB container name. :param pulumi.Input[str] database_name: Cosmos DB database name. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request. :param pulumi.Input[pulumi.InputType['SqlContainerResourceArgs']] resource: The standard JSON format of a container :param pulumi.Input[str] resource_group_name: Name of an Azure resource group. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if account_name is None and not opts.urn: raise TypeError("Missing required property 'account_name'") __props__['account_name'] = account_name if container_name is None and not opts.urn: raise TypeError("Missing required property 'container_name'") __props__['container_name'] = container_name if database_name is None and not opts.urn: raise TypeError("Missing required property 'database_name'") __props__['database_name'] = database_name if options is None and not opts.urn: raise TypeError("Missing required property 'options'") __props__['options'] = options if resource is None and not opts.urn: raise TypeError("Missing required property 'resource'") __props__['resource'] = resource if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['conflict_resolution_policy'] = None __props__['default_ttl'] = None __props__['etag'] = None __props__['indexing_policy'] = None __props__['location'] = None __props__['name'] = None __props__['partition_key'] = None __props__['rid'] = None __props__['tags'] = None __props__['ts'] = None __props__['type'] = None __props__['unique_key_policy'] = None alias_opts = 
pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb:DatabaseAccountSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/latest:DatabaseAccountSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:DatabaseAccountSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:DatabaseAccountSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:DatabaseAccountSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:DatabaseAccountSqlContainer")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(DatabaseAccountSqlContainer, __self__).__init__( 'azure-nextgen:documentdb/v20150401:DatabaseAccountSqlContainer', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'DatabaseAccountSqlContainer': """ Get an existing DatabaseAccountSqlContainer resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return DatabaseAccountSqlContainer(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="conflictResolutionPolicy") def conflict_resolution_policy(self) -> pulumi.Output[Optional['outputs.ConflictResolutionPolicyResponse']]: """ The conflict resolution policy for the container. """ return pulumi.get(self, "conflict_resolution_policy") @property @pulumi.getter(name="defaultTtl") def default_ttl(self) -> pulumi.Output[Optional[int]]: """ Default time to live """ return pulumi.get(self, "default_ttl") @property @pulumi.getter def etag(self) -> pulumi.Output[Optional[str]]: """ A system generated property representing the resource etag required for optimistic concurrency control. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="indexingPolicy") def indexing_policy(self) -> pulumi.Output[Optional['outputs.IndexingPolicyResponse']]: """ The configuration of the indexing policy. By default, the indexing is automatic for all document paths within the container """ return pulumi.get(self, "indexing_policy") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ The location of the resource group to which the resource belongs. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the database account. """ return pulumi.get(self, "name") @property @pulumi.getter(name="partitionKey") def partition_key(self) -> pulumi.Output[Optional['outputs.ContainerPartitionKeyResponse']]: """ The configuration of the partition key to be used for partitioning data into multiple partitions """ return pulumi.get(self, "partition_key") @property @pulumi.getter def rid(self) -> pulumi.Output[Optional[str]]: """ A system generated property. A unique identifier. """ return pulumi.get(self, "rid") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. 
Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". """ return pulumi.get(self, "tags") @property @pulumi.getter def ts(self) -> pulumi.Output[Optional[Any]]: """ A system generated property that denotes the last updated timestamp of the resource. """ return pulumi.get(self, "ts") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of Azure resource. """ return pulumi.get(self, "type") @property @pulumi.getter(name="uniqueKeyPolicy") def unique_key_policy(self) -> pulumi.Output[Optional['outputs.UniqueKeyPolicyResponse']]: """ The unique key policy configuration for specifying uniqueness constraints on documents in the collection in the Azure Cosmos DB service. """ return pulumi.get(self, "unique_key_policy") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
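

# --- Hypothetical usage sketch (not part of the generated provider module) ---
# Shows how this resource class is typically instantiated from a Pulumi program.
# The resource group, account, database, and container names are illustrative
# only, and passing SqlContainerResourceArgs(id=...) for the ``resource``
# parameter is an assumption based on the parameter documentation in __init__.
from pulumi_azure_nextgen.documentdb import v20150401 as documentdb

orders_container = documentdb.DatabaseAccountSqlContainer(
    "orders-container",
    resource_group_name="example-rg",
    account_name="example-cosmos-account",
    database_name="example-db",
    container_name="orders",
    options={},
    resource=documentdb.SqlContainerResourceArgs(id="orders"),
)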
PypiClean
/django-formulaic-1.0.0.tar.gz/django-formulaic-1.0.0/formulaic/serializers.py
import pytz import six from tzlocal import get_localzone from formulaic import models from rest_framework import serializers class BooleanFieldSerializer(serializers.ModelSerializer): class Meta: model = models.BooleanField fields = ( 'id', 'display_name', 'data_name', 'slug', 'required', 'help_text', 'model_class', 'position', 'css_class', 'form', 'subtype', 'default_checked', ) class HiddenFieldSerializer(serializers.ModelSerializer): class Meta: model = models.HiddenField fields = ( 'id', 'display_name', 'data_name', 'slug', 'required', 'help_text', 'model_class', 'position', 'css_class', 'form', 'subtype', 'value', ) class JsonField(serializers.Field): def to_internal_value(self, data): types = [int, six.text_type, dict, list] if any(map(lambda t: isinstance(data, t), types)): return data else: msg = self.error_messages.get('invalid', "unknown error") raise serializers.ValidationError(msg) def to_representation(self, obj): return obj class DefaultOptionField(serializers.Field): def to_internal_value(self, data): if isinstance(data, six.text_type): return data else: msg = self.error_messages['invalid'] raise serializers.ValidationError(msg) def to_representation(self, obj): return obj class DefaultOptionsField(serializers.Field): def to_internal_value(self, data): if isinstance(data, list): return data else: msg = self.error_messages['invalid'] raise serializers.ValidationError(msg) def to_representation(self, obj): return obj class ChoiceFieldSerializer(serializers.ModelSerializer): default_option = DefaultOptionField(allow_null=True) default_options = DefaultOptionsField(allow_null=True) class Meta: model = models.ChoiceField fields = ( 'id', 'display_name', 'data_name', 'slug', 'required', 'help_text', 'model_class', 'position', 'css_class', 'form', #'multiselect', 'minimum_selections', 'maximum_selections', 'option_list', 'option_group', 'default_option', 'default_options', 'default_text', 'subtype', ) class TextFieldSerializer(serializers.ModelSerializer): class Meta: model = models.TextField fields = ( 'id', 'display_name', 'data_name', 'slug', 'required', 'help_text', 'model_class', 'position', 'css_class', 'form', 'subtype', ) class FieldSerializer(serializers.ModelSerializer): booleanfield = BooleanFieldSerializer() choicefield = ChoiceFieldSerializer() textfield = TextFieldSerializer() hiddenfield = HiddenFieldSerializer() class Meta: model = models.Field fields = ( 'id', 'display_name', 'data_name', 'slug', 'required', 'help_text', 'model_class', 'position', 'css_class', 'form', 'subtype', #'enabled', "textfield", "booleanfield", "choicefield", "hiddenfield", "content_type", ) class FormSerializer(serializers.ModelSerializer): #fields = FieldSerializer(source='field_set', many=True) class Meta: model = models.Form fields = ( 'id', 'name', 'slug', 'success_message', 'privacy_policy', #'fields', ) class PrivacyPolicySerializer(serializers.ModelSerializer): class Meta: model = models.PrivacyPolicy fields = ( 'id', 'name', 'text', ) class OptionSerializer(serializers.ModelSerializer): class Meta: model = models.Option fields = ( 'id', 'name', 'value', 'position', 'list', ) class OptionListSerializer(serializers.ModelSerializer): options = OptionSerializer(source='option_set', many=True) class Meta: model = models.OptionList fields = ( 'id', 'name', 'options', 'groups', ) class OptionGroupSerializer(serializers.ModelSerializer): options = OptionSerializer(many=True) class Meta: model = models.OptionGroup fields = ( 'id', 'name', 'position', 'options', 'list', ) class 
RuleResultSerializer(serializers.ModelSerializer): id = serializers.IntegerField(read_only=False, required=False) class Meta: model = models.RuleResult fields = ( 'id', 'action', 'field', 'rule', 'option_group', ) class RuleConditionSerializer(serializers.ModelSerializer): # id: override read_only default id = serializers.IntegerField(read_only=False, required=False) # value: handle property (not a Django field) value = JsonField(required=False, allow_null=True) class Meta: model = models.RuleCondition fields = ( 'id', 'position', 'rule', 'field', 'operator', 'value', ) class RuleSerializer(serializers.ModelSerializer): conditions = RuleConditionSerializer(many=True) results = RuleResultSerializer(many=True) class Meta: model = models.Rule fields = ( 'id', 'form', 'operator', 'position', 'conditions', 'results', ) def update(self, rule_obj, validated_data): conditions = validated_data.pop('conditions') results = validated_data.pop('results') # Update rule rule_obj.operator = validated_data.get('operator', rule_obj.operator) rule_obj.position = validated_data.get('position', rule_obj.position) rule_obj.save() # Update, create, or delete conditions condition_objs = list(rule_obj.conditions.all()) for condition in conditions: if condition.get('id'): # exists for condition_obj in condition_objs: if condition.get('id') == condition_obj.id: # update existing condition condition_obj.position = condition.get('position', condition_obj.position) condition_obj.field_id = condition.get('field', condition_obj.field_id) condition_obj.operator = condition.get('operator', condition_obj.operator) condition_obj.value = condition.get('value', condition_obj.value) # mark to avoid deletion condition_obj.keep_alive = True else: # create new condition condition['rule'] = rule_obj # condition['value'] = "Test" # TODO: remove hard coded value value = condition.pop('value') # Note: can't use `create` method; need access to `data` property condition_obj = models.RuleCondition(**condition) condition_obj.value = value condition_obj.save() # Carry out saves / deletes for condition_obj in condition_objs: if getattr(condition_obj, 'keep_alive', False): condition_obj.save() else: condition_obj.delete() # Update, create, or delete results result_objs = list(rule_obj.results.all()) for result in results: if result.get('id'): # exists for result_obj in result_objs: if result.get('id') == result_obj.id: # update existing result result_obj.action = result.get('action', result_obj.action) result_obj.field_id = result.get('field', result_obj.field_id) result_obj.option_group = result.get('option_group', result_obj.option_group) # mark to avoid deletion result_obj.keep_alive = True else: # create new rule result['rule'] = rule_obj models.RuleResult.objects.create(**result) # Carry out saves / deletes for result_obj in result_objs: if getattr(result_obj, 'keep_alive', False): result_obj.save() else: result_obj.delete() return rule_obj def create(self, validated_data): conditions = validated_data.pop('conditions') results = validated_data.pop('results') # Create rule rule = models.Rule.objects.create(**validated_data) # Create all conditions for condition in conditions: condition['rule'] = rule # condition['value'] = "Test" # TODO: remove hard coded value value = condition.pop('value', None) # Note: can't use `create` method; need access to `data` property condition_obj = models.RuleCondition(**condition) condition_obj.value = value condition_obj.save() # Create all results for result in results: result['rule'] = rule 
models.RuleResult.objects.create(**result) return rule class CustomDateTimeField(serializers.DateTimeField): def to_representation(self, obj): try: local_tz = get_localzone() obj_aware = pytz.timezone(local_tz.zone).localize(obj) except ValueError: obj_aware = obj return obj_aware.strftime('%m/%d/%Y %H:%M %Z') class SubmissionSerializer(serializers.ModelSerializer): # values: handle property (not a Django field) custom_data = serializers.ReadOnlyField() date_created = CustomDateTimeField() class Meta: model = models.Submission fields = ( 'id', 'date_created', 'form', 'custom_data', 'source', 'promo_source', )
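

# --- Hypothetical usage sketch (not part of the original module) ---
# RuleSerializer accepts nested 'conditions' and 'results' payloads and routes
# them through the custom create()/update() methods defined above. Every field
# value below (form/field ids, operator and action names) is an illustrative
# assumption; consult the formulaic models for the actual choice values.
payload = {
    "form": 1,
    "operator": "and",
    "position": 0,
    "conditions": [
        {"position": 0, "field": 10, "operator": "equals", "value": "yes"},
    ],
    "results": [
        {"action": "show", "field": 11, "option_group": None},
    ],
}
serializer = RuleSerializer(data=payload)
if serializer.is_valid():
    rule = serializer.save()  # no instance passed, so this dispatches to create()
else:
    print(serializer.errors)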
PypiClean
/PyCDSL-0.9.0.tar.gz/PyCDSL-0.9.0/pycdsl/utils.py
import re
import logging

from indic_transliteration.sanscript import SCHEMES, transliterate

from .constants import SEARCH_MODES

###############################################################################

LOGGER = logging.getLogger(__name__)

###############################################################################


def validate_search_mode(mode: str) -> str or None:
    """Validate the search mode

    Parameters
    ----------
    mode : str
        Search mode

    Returns
    -------
    str or None
        If mode is valid, mode.lower(); otherwise, None.
    """
    if mode is not None:
        _mode = mode.lower() if mode.lower() in SEARCH_MODES else None
        if _mode is None:
            LOGGER.warning(f"Invalid search mode '{mode}'.")
    else:
        _mode = None
    return _mode


def validate_scheme(scheme: str) -> str or None:
    """Validate the name of transliteration scheme

    Parameters
    ----------
    scheme : str
        Name of the transliteration scheme

    Returns
    -------
    str or None
        If scheme is valid, scheme.lower(); otherwise, None.
    """
    if scheme is not None:
        _scheme = scheme.lower() if scheme.lower() in SCHEMES else None
        if _scheme is None:
            LOGGER.warning(f"Invalid transliteration scheme '{scheme}'.")
    else:
        _scheme = None
    return _scheme


def transliterate_between(
    text: str,
    from_scheme: str,
    to_scheme: str,
    start_pattern: str,
    end_pattern: str
) -> str:
    """Transliterate the text appearing between two patterns

    Only the text appearing between patterns `start_pattern` and
    `end_pattern` is transliterated.

    `start_pattern` and `end_pattern` can appear multiple times in the full
    text, and for every occurrence, the text between them is transliterated.

    `from_scheme` and `to_scheme` should be compatible with scheme names
    from `indic-transliteration`

    Parameters
    ----------
    text : str
        Full text
    from_scheme : str
        Input transliteration scheme
    to_scheme : str
        Output transliteration scheme
    start_pattern : regexp
        Pattern describing the start tag
    end_pattern : regexp
        Pattern describing the end tag
    """
    if from_scheme == to_scheme:
        return text

    def transliterate_match(matchobj):
        target = matchobj.group(1)
        replacement = transliterate(target, from_scheme, to_scheme)
        return f"{start_pattern}{replacement}{end_pattern}"

    pattern = "%s(.*?)%s" % (re.escape(start_pattern), re.escape(end_pattern))
    return re.sub(pattern, transliterate_match, text, flags=re.DOTALL)

###############################################################################
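

# --- Hypothetical usage sketch (not part of the original module) ---
# transliterate_between() converts only the text enclosed by the start/end
# markers and leaves everything else untouched. The "<s>"/"</s>" markers and
# the "devanagari"/"iast" scheme names below are illustrative; any scheme
# names understood by indic-transliteration should work.
if __name__ == "__main__":
    sample = "The word <s>धर्म</s> and the word <s>कर्म</s> stay tagged."
    print(transliterate_between(sample, "devanagari", "iast", "<s>", "</s>"))
    # -> The word <s>dharma</s> and the word <s>karma</s> stay tagged.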
PypiClean
/omg-tools-0.1.3.tar.gz/omg-tools-0.1.3/omgtools/problems/multiframeproblem.py
from problem import Problem from point2point import Point2pointProblem from ..basics.optilayer import OptiFather, OptiChild from ..vehicles.fleet import get_fleet_vehicles from ..execution.plotlayer import PlotLayer from ..basics.spline_extra import definite_integral from ..basics.spline_extra import shift_spline, evalspline from ..basics.geometry import compute_rectangle_overlap_center from casadi import inf import numpy as np class MultiFrameProblem(Problem): # Problem consisting of multiple frames, connecting multiple spline segments def __init__(self, fleet, environment, n_frames, options=None): Problem.__init__(self, fleet, environment, options, label='multiframeproblem') self.n_frames = n_frames if self.n_frames > len(self.environment.room): raise RuntimeError('Number of frames you want to consider at once is larger' + 'than the amount of rooms provided in environment') self.init_time = None self.start_time = 0. self.objective = 0. def set_default_options(self): Problem.set_default_options(self) def set_options(self, options): Problem.set_options(self, options) # ======================================================================== # Optimization modelling related functions # ======================================================================== def construct(self): self.t = self.define_parameter('t') self.motion_times = [] # holds motion time through each frame for frame in range(self.n_frames): self.motion_times.append(self.define_variable('T'+str(frame), value=10)) # positivity contraint on motion time for motion_time in self.motion_times: self.define_constraint(-motion_time, -inf, 0.) # collision constraints # self.environment.init() for vehicle in self.vehicles: vehicle.init() # create splines with correct amount of segments, i.e. a segment per frame total_splines = vehicle.define_splines(n_seg=self.n_frames) for frame in range(self.n_frames): vehicle.define_trajectory_constraints(total_splines[frame], self.motion_times[frame]) self.environment.define_collision_constraints(vehicle, total_splines, self.motion_times) if len(self.vehicles) > 1 and self.options['inter_vehicle_avoidance']: self.environment.define_intervehicle_collision_constraints(self.vehicles) # constrain spline segments self.define_init_constraints() self.define_terminal_constraints() self.define_connection_constraints() # minimize total motion time obj = sum(self.motion_times) # add regularization on jerk to avoid nervous solutions if self.n_frames > 1: for frame in range(self.n_frames): for s in total_splines[frame]: dds = s.derivative(3) obj += definite_integral((0.01*dds)**2, 0., 1.) self.define_objective(obj) def define_init_constraints(self): # place initial constraints only on first spline segment for vehicle in self.vehicles: init_con = vehicle.get_initial_constraints(vehicle.splines[0], self.motion_times[0]) for con in init_con: spline, condition = con[0], con[1] # use dimensionless time for first segment self.define_constraint( evalspline(spline, self.t/self.motion_times[0]) - condition, 0., 0.) def define_terminal_constraints(self): # place final constraints only on last spline segment for vehicle in self.vehicles: term_con, term_con_der = vehicle.get_terminal_constraints( vehicle.splines[-1], horizon_time=self.motion_times[-1]) # select last spline segment if ('no_term_con_der' in self.options and self.options['no_term_con_der']): term_con_der = [] for con in (term_con + term_con_der): spline, condition = con[0], con[1] self.define_constraint(spline(1.) - condition, 0., 0.) 
def define_connection_constraints(self): # connect splines over different frames # only necessary when n_frames>1 for j in range(self.n_frames-1): for vehicle in self.vehicles: for spline1, spline2 in zip(vehicle.splines[j], vehicle.splines[j+1]): for d in range(spline1.basis.degree): # in connection point splines should be equal until derivative of order degree-1 # give dimensions by multplication with the motion time self.define_constraint( evalspline(spline1.derivative(d), 1)*self.motion_times[j+1]**d - evalspline(spline2.derivative(d), 0)*self.motion_times[j]**d, 0., 0.) def set_parameters(self, current_time): parameters = Problem.set_parameters(self, current_time) # current time is always 0 for FreeT problem, time axis always resets if self.init_time is None: parameters[self]['t'] = 0 else: parameters[self]['t'] = self.init_time return parameters # ======================================================================== # Deploying related functions # ======================================================================== def reinitialize(self, father=None): if father is None: father = self.father Problem.reinitialize(self) for vehicle in self.vehicles: # compute initial guess for all spline values subgoals = [] for k in range(self.n_frames-1): room1 = self.environment.room[k] room2 = self.environment.room[k+1] # subgoals is given as initial position, center of overlap of regions and overall goal # compute center of overlap region of rooms subgoals.append(compute_rectangle_overlap_center(room1['shape'], room1['position'], room2['shape'], room2['position'])) init = vehicle.get_init_spline_value(subgoals = subgoals) for k in range(self.n_frames): father.set_variables(init[k], vehicle, 'splines_seg'+str(k)) def store(self, current_time, update_time, sample_time): segment_times = [] # compute total remaining motion time for frame in range(self.n_frames): segment_times.append(self.father.get_variables(self, 'T'+str(frame))[0][0]) horizon_time = sum(segment_times) # total horizon time if self.init_time is None: rel_current_time = 0.0 else: rel_current_time = self.init_time if horizon_time < sample_time: # otherwise interp1d() crashes return # update vehicles for vehicle in self.vehicles: n_samp = int( round((horizon_time-rel_current_time)/sample_time, 6)) + 1 time_axis = np.linspace(rel_current_time, rel_current_time + (n_samp-1)*sample_time, n_samp) spline_segments = [self.father.get_variables(vehicle, 'splines_seg'+str(k)) for k in range(vehicle.n_seg)] vehicle.store(current_time, sample_time, spline_segments, segment_times, time_axis) def reset_init_time(self): self.init_time = None # ======================================================================== # Simulation related functions # ======================================================================== def simulate(self, current_time, simulation_time, sample_time): horizon_time = 0 # compute total remaining motion time for frame in range(self.n_frames): horizon_time += self.father.get_variables(self, 'T'+str(frame))[0][0] if self.init_time is None: rel_current_time = 0.0 else: rel_current_time = self.init_time if horizon_time < sample_time: # otherwise interp1d() crashes return if horizon_time < simulation_time: simulation_time = horizon_time if horizon_time - rel_current_time < simulation_time: simulation_time = horizon_time - rel_current_time self.compute_partial_objective(current_time+simulation_time-self.start_time) for vehicle in self.vehicles: vehicle.simulate(simulation_time, sample_time) 
self.environment.simulate(simulation_time, sample_time) self.fleet.update_plots() self.update_plots() def stop_criterium(self, current_time, update_time): T_tot = 0 # compute total remaining motion time for k in range(self.n_frames): T_tot += self.father.get_variables(self, 'T'+str(k))[0][0] if T_tot < update_time: return True stop = True for vehicle in self.vehicles: stop *= vehicle.check_terminal_conditions() return stop def final(self): self.reset_init_time() obj = self.compute_objective() if self.options['verbose'] >= 1: print '\nWe reached our target!' print '%-18s %6g' % ('Objective:', obj) print '%-18s %6g ms' % ('Max update time:', max(self.update_times)*1000.) print '%-18s %6g ms' % ('Av update time:', (sum(self.update_times)*1000. / len(self.update_times))) def init_step(self, current_time, update_time): if (current_time - self.start_time) > 0: # compute total remaining motion time T = 0 for frame in range(self.n_frames): T += self.father.get_variables(self, 'T'+str(frame))[0][0] # check if almost arrived, if so lower the update time if T < 2*update_time: update_time = T - update_time target_time = T else: target_time = T - update_time # create spline which starts from the position at update_time and goes # to goal position at target_time. Approximate/Represent this spline in # a new basis with new equidistant knots. # shifting spline is only required for first segment (index 0), so seg_shift=[0] self.father.transform_primal_splines( lambda coeffs, basis: shift_spline(coeffs, update_time/target_time, basis), seg_shift=[0]) T_0 = self.father.get_variables(self, 'T'+str(0))[0][0] # remaining motion time for first segment self.father.set_variables(T_0-update_time, self, 'T0') # only change time of first segment def compute_partial_objective(self, current_time): self.objective = current_time def compute_objective(self): return self.objective
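

# --- Hypothetical numeric check (not part of the original module) ---
# Illustrates the time scaling used in define_connection_constraints() above:
# every segment is evaluated on a dimensionless time tau in [0, 1], so the
# physical d-th derivative is s^(d)(tau) / T**d. Matching physical derivatives
# of two consecutive segments at the connection point therefore requires
#   s1^(d)(1) * T2**d == s2^(d)(0) * T1**d.
# The polynomials and motion times below are illustrative values only.
import numpy as np

T1, T2 = 2.0, 3.0                                   # motion times of segments 1 and 2
s1 = np.polynomial.Polynomial([0.0, 0.0, 1.0])      # segment 1: x(tau) = tau**2
# Segment 2 chosen so position and physical velocity match at the joint:
# s2(0) = s1(1) = 1 and s2'(0) / T2 = s1'(1) / T1  =>  s2'(0) = 2 * T2 / T1 = 3.
s2 = np.polynomial.Polynomial([1.0, 3.0])
d = 1
assert np.isclose(s1.deriv(d)(1.0) * T2**d, s2.deriv(d)(0.0) * T1**d)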
PypiClean
/azure-mgmt-eventgrid-10.3.0b1.zip/azure-mgmt-eventgrid-10.3.0b1/azure/mgmt/eventgrid/aio/operations/_partner_topic_event_subscriptions_operations.py
import sys from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload import urllib.parse from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, ResourceNotModifiedError, map_error, ) from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models from ..._vendor import _convert_request from ...operations._partner_topic_event_subscriptions_operations import ( build_create_or_update_request, build_delete_request, build_get_delivery_attributes_request, build_get_full_url_request, build_get_request, build_list_by_partner_topic_request, build_update_request, ) if sys.version_info >= (3, 8): from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports else: from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class PartnerTopicEventSubscriptionsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.mgmt.eventgrid.aio.EventGridManagementClient`'s :attr:`partner_topic_event_subscriptions` attribute. """ models = _models def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client = input_args.pop(0) if input_args else kwargs.pop("client") self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def get( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, **kwargs: Any ) -> _models.EventSubscription: """Get an event subscription of a partner topic. Get properties of an event subscription of a partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be found. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. 
:type event_subscription_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: EventSubscription or the result of cls(response) :rtype: ~azure.mgmt.eventgrid.models.EventSubscription :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None) request = build_get_request( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.get.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("EventSubscription", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}" } async def _create_or_update_initial( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_info: Union[_models.EventSubscription, IO], **kwargs: Any ) -> _models.EventSubscription: error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None _content = None if isinstance(event_subscription_info, (IO, bytes)): _content = event_subscription_info else: _json = self._serialize.body(event_subscription_info, "EventSubscription") request = build_create_or_update_request( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, subscription_id=self._config.subscription_id, api_version=api_version, content_type=content_type, json=_json, content=_content, template_url=self._create_or_update_initial.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access 
request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize("EventSubscription", pipeline_response) if response.status_code == 201: deserialized = self._deserialize("EventSubscription", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore _create_or_update_initial.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}" } @overload async def begin_create_or_update( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_info: _models.EventSubscription, *, content_type: str = "application/json", **kwargs: Any ) -> AsyncLROPoller[_models.EventSubscription]: """Create or update an event subscription of a partner topic. Asynchronously creates or updates an event subscription of a partner topic with the specified parameters. Existing event subscriptions will be updated with this API. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :param event_subscription_info: Event subscription properties containing the destination and filter information. Required. :type event_subscription_info: ~azure.mgmt.eventgrid.models.EventSubscription :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription] :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def begin_create_or_update( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_info: IO, *, content_type: str = "application/json", **kwargs: Any ) -> AsyncLROPoller[_models.EventSubscription]: """Create or update an event subscription of a partner topic. Asynchronously creates or updates an event subscription of a partner topic with the specified parameters. Existing event subscriptions will be updated with this API. 
:param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :param event_subscription_info: Event subscription properties containing the destination and filter information. Required. :type event_subscription_info: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription] :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def begin_create_or_update( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_info: Union[_models.EventSubscription, IO], **kwargs: Any ) -> AsyncLROPoller[_models.EventSubscription]: """Create or update an event subscription of a partner topic. Asynchronously creates or updates an event subscription of a partner topic with the specified parameters. Existing event subscriptions will be updated with this API. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :param event_subscription_info: Event subscription properties containing the destination and filter information. Is either a model type or a IO type. Required. :type event_subscription_info: ~azure.mgmt.eventgrid.models.EventSubscription or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either EventSubscription or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token: Optional[str] = kwargs.pop("continuation_token", None) if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, event_subscription_info=event_subscription_info, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs ) kwargs.pop("error_map", None) def get_long_running_output(pipeline_response): deserialized = self._deserialize("EventSubscription", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output, ) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore begin_create_or_update.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}" } async def _delete_initial( # pylint: disable=inconsistent-return-statements self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, **kwargs: Any ) -> None: error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) cls: ClsType[None] = kwargs.pop("cls", None) request = build_delete_request( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self._delete_initial.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}" } @distributed_trace_async async def begin_delete( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Delete an event subscription of a partner topic. Delete an existing event subscription of a partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) cls: ClsType[None] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token: Optional[str] = kwargs.pop("continuation_token", None) if cont_token is None: raw_result = await self._delete_initial( # type: ignore resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, api_version=api_version, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs ) kwargs.pop("error_map", None) def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output, ) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore begin_delete.metadata = { "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}" } async def _update_initial( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_update_parameters: Union[_models.EventSubscriptionUpdateParameters, IO], **kwargs: Any ) -> _models.EventSubscription: error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None) content_type = content_type or "application/json" _json = None _content = None if isinstance(event_subscription_update_parameters, (IO, bytes)): _content = event_subscription_update_parameters else: _json = self._serialize.body(event_subscription_update_parameters, "EventSubscriptionUpdateParameters") request = build_update_request( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, subscription_id=self._config.subscription_id, api_version=api_version, content_type=content_type, json=_json, content=_content, template_url=self._update_initial.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("EventSubscription", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_initial.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}" } @overload async def begin_update( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_update_parameters: _models.EventSubscriptionUpdateParameters, *, content_type: str = "application/json", **kwargs: Any ) -> AsyncLROPoller[_models.EventSubscription]: """Update event subscription of a partner topic. Update an existing event subscription of a partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :param event_subscription_update_parameters: Updated event subscription information. Required. 
:type event_subscription_update_parameters: ~azure.mgmt.eventgrid.models.EventSubscriptionUpdateParameters :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription] :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def begin_update( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_update_parameters: IO, *, content_type: str = "application/json", **kwargs: Any ) -> AsyncLROPoller[_models.EventSubscription]: """Update event subscription of a partner topic. Update an existing event subscription of a partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :param event_subscription_update_parameters: Updated event subscription information. Required. :type event_subscription_update_parameters: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription] :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def begin_update( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, event_subscription_update_parameters: Union[_models.EventSubscriptionUpdateParameters, IO], **kwargs: Any ) -> AsyncLROPoller[_models.EventSubscription]: """Update event subscription of a partner topic. Update an existing event subscription of a partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. 
:type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :param event_subscription_update_parameters: Updated event subscription information. Is either a model type or a IO type. Required. :type event_subscription_update_parameters: ~azure.mgmt.eventgrid.models.EventSubscriptionUpdateParameters or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either EventSubscription or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.EventSubscription] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.EventSubscription] = kwargs.pop("cls", None) polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token: Optional[str] = kwargs.pop("continuation_token", None) if cont_token is None: raw_result = await self._update_initial( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, event_subscription_update_parameters=event_subscription_update_parameters, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs ) kwargs.pop("error_map", None) def get_long_running_output(pipeline_response): deserialized = self._deserialize("EventSubscription", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output, ) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore begin_update.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}" } 
@distributed_trace_async async def get_full_url( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, **kwargs: Any ) -> _models.EventSubscriptionFullUrl: """Get full URL of an event subscription of a partner topic. Get the full endpoint URL for an event subscription of a partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: EventSubscriptionFullUrl or the result of cls(response) :rtype: ~azure.mgmt.eventgrid.models.EventSubscriptionFullUrl :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) cls: ClsType[_models.EventSubscriptionFullUrl] = kwargs.pop("cls", None) request = build_get_full_url_request( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.get_full_url.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("EventSubscriptionFullUrl", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_full_url.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}/getFullUrl" } @distributed_trace def list_by_partner_topic( self, resource_group_name: str, partner_topic_name: str, filter: Optional[str] = None, top: Optional[int] = None, **kwargs: Any ) -> AsyncIterable["_models.EventSubscription"]: """List event subscriptions of a partner topic. List event subscriptions that belong to a specific partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param filter: The query used to filter the search results using OData syntax. Filtering is permitted on the 'name' property only and with limited number of OData operations. 
These operations are: the 'contains' function as well as the following logical operations: not, and, or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'. The following is not a valid filter example: $filter=location eq 'westus'. Default value is None. :type filter: str :param top: The number of results to return per page for the list operation. Valid range for top parameter is 1 to 100. If not specified, the default number of results to be returned is 20 items per page. Default value is None. :type top: int :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either EventSubscription or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.eventgrid.models.EventSubscription] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) cls: ClsType[_models.EventSubscriptionsListResult] = kwargs.pop("cls", None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) def prepare_request(next_link=None): if not next_link: request = build_list_by_partner_topic_request( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, subscription_id=self._config.subscription_id, filter=filter, top=top, api_version=api_version, template_url=self.list_by_partner_topic.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("EventSubscriptionsListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data) list_by_partner_topic.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions" } @distributed_trace_async async def 
get_delivery_attributes( self, resource_group_name: str, partner_topic_name: str, event_subscription_name: str, **kwargs: Any ) -> _models.DeliveryAttributeListResult: """Get delivery attributes for an event subscription of a partner topic. Get all delivery attributes for an event subscription of a partner topic. :param resource_group_name: The name of the resource group within the user's subscription. Required. :type resource_group_name: str :param partner_topic_name: Name of the partner topic. Required. :type partner_topic_name: str :param event_subscription_name: Name of the event subscription to be created. Event subscription names must be between 3 and 100 characters in length and use alphanumeric letters only. Required. :type event_subscription_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: DeliveryAttributeListResult or the result of cls(response) :rtype: ~azure.mgmt.eventgrid.models.DeliveryAttributeListResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-06-15"] = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) cls: ClsType[_models.DeliveryAttributeListResult] = kwargs.pop("cls", None) request = build_get_delivery_attributes_request( resource_group_name=resource_group_name, partner_topic_name=partner_topic_name, event_subscription_name=event_subscription_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.get_delivery_attributes.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("DeliveryAttributeListResult", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_delivery_attributes.metadata = { "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerTopics/{partnerTopicName}/eventSubscriptions/{eventSubscriptionName}/getDeliveryAttributes" }
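

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated SDK). It shows how the
# begin_update long-running operation defined above might be driven through
# the async management client. The operations-group attribute name
# `partner_topic_event_subscriptions`, the resource names and the label value
# are assumptions for illustration only.
if __name__ == "__main__":
    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.eventgrid.aio import EventGridManagementClient
    from azure.mgmt.eventgrid.models import EventSubscriptionUpdateParameters

    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            async with EventGridManagementClient(credential, "<subscription-id>") as client:
                # Start the LRO and wait for the updated event subscription.
                poller = await client.partner_topic_event_subscriptions.begin_update(
                    resource_group_name="<resource-group>",
                    partner_topic_name="<partner-topic>",
                    event_subscription_name="<event-subscription>",
                    event_subscription_update_parameters=EventSubscriptionUpdateParameters(
                        labels=["example-label"],
                    ),
                )
                updated = await poller.result()
                print(updated.name, updated.provisioning_state)

    asyncio.run(main())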
PypiClean
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/reports/get_skype_for_business_activity_user_detail_with_date/get_skype_for_business_activity_user_detail_with_date_request_builder.py
from __future__ import annotations
from dataclasses import dataclass
from datetime import date
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union

from ...models.o_data_errors import o_data_error

class GetSkypeForBusinessActivityUserDetailWithDateRequestBuilder():
    """
    Provides operations to call the getSkypeForBusinessActivityUserDetail method.
    """
    def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None, date: Optional[date] = None) -> None:
        """
        Instantiates a new GetSkypeForBusinessActivityUserDetailWithDateRequestBuilder and sets the default values.
        Args:
            date: Usage: date={date}
            pathParameters: The raw url or the Url template parameters for the request.
            requestAdapter: The request adapter to use to execute the requests.
        """
        if path_parameters is None:
            raise Exception("path_parameters cannot be undefined")
        if request_adapter is None:
            raise Exception("request_adapter cannot be undefined")
        # Url template to use to build the URL for the current request builder
        self.url_template: str = "{+baseurl}/reports/microsoft.graph.getSkypeForBusinessActivityUserDetail(date={date})"

        url_tpl_params = get_path_parameters(path_parameters)
        # Register the function parameter under the name used by the URL template.
        url_tpl_params["date"] = date
        self.path_parameters = url_tpl_params

        self.request_adapter = request_adapter

    def create_get_request_information(self,request_configuration: Optional[GetSkypeForBusinessActivityUserDetailWithDateRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
        """
        Invoke function getSkypeForBusinessActivityUserDetail
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: RequestInformation
        """
        request_info = RequestInformation()
        request_info.url_template = self.url_template
        request_info.path_parameters = self.path_parameters
        request_info.http_method = Method.GET
        if request_configuration:
            request_info.add_request_headers(request_configuration.headers)
            request_info.add_request_options(request_configuration.options)
        return request_info

    async def get(self,request_configuration: Optional[GetSkypeForBusinessActivityUserDetailWithDateRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> bytes:
        """
        Invoke function getSkypeForBusinessActivityUserDetail
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service Returns: bytes """ request_info = self.create_get_request_information( request_configuration ) error_mapping: Dict[str, ParsableFactory] = { "4XX": o_data_error.ODataError, "5XX": o_data_error.ODataError, } if not self.request_adapter: raise Exception("Http core is null") return await self.request_adapter.send_primitive_async(request_info, "bytes", response_handler, error_mapping) @dataclass class GetSkypeForBusinessActivityUserDetailWithDateRequestBuilderGetRequestConfiguration(): """ Configuration for the request such as headers, query parameters, and middleware options. """ # Request headers headers: Optional[Dict[str, str]] = None # Request options options: Optional[List[RequestOption]] = None
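

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated SDK). It relies only on the
# request builder defined above; `request_adapter` stands in for an already
# configured kiota RequestAdapter (for example the adapter backing the Graph
# service client), and the base URL, path-parameter key and report date are
# illustrative assumptions rather than values taken from the SDK.
async def _example_get_report(request_adapter: RequestAdapter) -> bytes:
    builder = GetSkypeForBusinessActivityUserDetailWithDateRequestBuilder(
        request_adapter,
        {"baseurl": "https://graph.microsoft.com/v1.0"},
        date(2022, 9, 1),
    )
    # Issues GET .../reports/microsoft.graph.getSkypeForBusinessActivityUserDetail(date=...)
    # and returns the raw report bytes (typically CSV content).
    return await builder.get()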
PypiClean
/safe_etmp_py-0.0.4-py3-none-any.whl/gnosis/eth/contracts/__init__.py
import json import os import sys from typing import Any, Dict, Optional from eth_typing import ChecksumAddress from hexbytes import HexBytes from web3 import Web3 from web3.contract import Contract try: from functools import cache except ImportError: from functools import lru_cache cache = lru_cache(maxsize=None) def load_contract_interface(file_name): return _load_json_file(_abi_file_path(file_name)) def _abi_file_path(file): return os.path.abspath(os.path.join(os.path.dirname(__file__), file)) def _load_json_file(path): with open(path) as f: return json.load(f) current_module = sys.modules[__name__] contracts = { "safe_V1_3_0": "GnosisSafe_V1_3_0.json", "safe_V1_1_1": "GnosisSafe_V1_1_1.json", "safe_V1_0_0": "GnosisSafe_V1_0_0.json", "safe_V0_0_1": "GnosisSafe_V0_0_1.json", "compatibility_fallback_handler_V1_3_0": "CompatibilityFallbackHandler_V1_3_0.json", "erc20": "ERC20.json", "erc721": "ERC721.json", "erc1155": "ERC1155.json", "example_erc20": "ERC20TestToken.json", "delegate_constructor_proxy": "DelegateConstructorProxy.json", "multi_send": "MultiSend.json", "paying_proxy": "PayingProxy.json", "proxy_factory": "ProxyFactory_V1_3_0.json", "proxy_factory_V1_1_1": "ProxyFactory_V1_1_1.json", "proxy_factory_V1_0_0": "ProxyFactory_V1_0_0.json", "proxy": "Proxy_V1_1_1.json", "uniswap_exchange": "uniswap_exchange.json", "uniswap_factory": "uniswap_factory.json", "uniswap_v2_factory": "uniswap_v2_factory.json", "uniswap_v2_pair": "uniswap_v2_pair.json", "uniswap_v2_router": "uniswap_v2_router.json", # Router02 "kyber_network_proxy": "kyber_network_proxy.json", "cpk_factory": "CPKFactory.json", } def generate_contract_fn(contract: Dict[str, Any]): """ Dynamically generate functions to work with the contracts :param contract: :return: """ def fn(w3: Web3, address: Optional[ChecksumAddress] = None): return w3.eth.contract( address=address, abi=contract["abi"], bytecode=contract.get("bytecode") ) return fn # Anotate functions that will be generated later with `setattr` so typing does not complains def get_safe_contract(w3: Web3, address: Optional[str] = None) -> Contract: """ :param w3: :param address: :return: Latest available safe contract (v1.3.0) """ return get_safe_V1_3_0_contract(w3, address=address) def get_safe_V1_3_0_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_safe_V1_1_1_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_safe_V1_0_0_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_safe_V0_0_1_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_compatibility_fallback_handler_V1_3_0_contract( w3: Web3, address: Optional[str] = None ) -> Contract: pass def get_erc20_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_erc721_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_erc1155_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_example_erc20_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_delegate_constructor_proxy_contract( w3: Web3, address: Optional[str] = None ) -> Contract: pass def get_multi_send_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_paying_proxy_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_proxy_factory_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_proxy_factory_V1_1_1_contract( w3: Web3, address: Optional[str] = None ) -> Contract: pass def 
get_proxy_factory_V1_0_0_contract( w3: Web3, address: Optional[str] = None ) -> Contract: pass def get_proxy_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_uniswap_exchange_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_uniswap_factory_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_uniswap_v2_factory_contract( w3: Web3, address: Optional[str] = None ) -> Contract: pass def get_uniswap_v2_pair_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_uniswap_v2_router_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass def get_kyber_network_proxy_contract( w3: Web3, address: Optional[str] = None ) -> Contract: pass def get_cpk_factory_contract(w3: Web3, address: Optional[str] = None) -> Contract: pass @cache def get_proxy_1_3_0_deployed_bytecode() -> bytes: return HexBytes(load_contract_interface("Proxy_V1_3_0.json")["deployedBytecode"]) def get_proxy_1_1_1_mainnet_deployed_bytecode() -> bytes: """ Somehow it's different from the generated version compiling the contracts """ return HexBytes( "0x608060405273ffffffffffffffffffffffffffffffffffffffff600054167fa619486e0000000000000000000000000000000000000000000000000000000060003514156050578060005260206000f35b3660008037600080366000845af43d6000803e60008114156070573d6000fd5b3d6000f3fea265627a7a72315820d8a00dc4fe6bf675a9d7416fc2d00bb3433362aa8186b750f76c4027269667ff64736f6c634300050e0032" ) @cache def get_proxy_1_1_1_deployed_bytecode() -> bytes: return HexBytes(load_contract_interface("Proxy_V1_1_1.json")["deployedBytecode"]) @cache def get_proxy_1_0_0_deployed_bytecode() -> bytes: return HexBytes(load_contract_interface("Proxy_V1_0_0.json")["deployedBytecode"]) @cache def get_paying_proxy_deployed_bytecode() -> bytes: return HexBytes(load_contract_interface("PayingProxy.json")["deployedBytecode"]) for contract_name, json_contract_filename in contracts.items(): fn_name = "get_{}_contract".format(contract_name) contract_dict = load_contract_interface(json_contract_filename) if not contract_dict: raise ValueError(f"{contract_name} json cannot be empty") setattr(current_module, fn_name, generate_contract_fn(contract_dict))
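

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the package). After the loop above has
# generated the helpers, an ERC20 contract can be bound and queried through
# web3. The RPC endpoint and token address are illustrative placeholders
# (the address shown is the mainnet DAI token).
if __name__ == "__main__":
    w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))

    # Bind the bundled ERC20 ABI to a deployed token address.
    token = get_erc20_contract(w3, "0x6B175474E89094C44Da98b954EedeAC495271d0F")

    # Standard read-only ERC20 calls exposed through the ABI.
    print(token.functions.symbol().call())
    print(token.functions.decimals().call())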
PypiClean
/kodiksnlp-0.0.7.tar.gz/kodiksnlp-0.0.7/README.md
# Kodiks Turkish Natural Language Processing Python Library

## The official Python NLP library for Kodiks Turkish natural language processing

### Installation

**KodiksNLP** supports Python 3.5 or later. You can install KodiksNLP from [PyPI](https://pypi.org/project/kodiksnlp/) with pip.

```
pip install kodiksnlp
```

This also installs requests>=2.21.0, one of KodiksNLP's dependencies. If an earlier version of kodiksnlp is already installed, use:

```
pip install kodiksnlp -U
```

### Running

For detailed usage examples of the Python package, see the [documentation](http://yavuzkomecoglu.com/kodiksai/nlp/).
PypiClean
/Freya_alerce-0.2.1.tar.gz/Freya_alerce-0.2.1/Freya_alerce/core/utils.py
import numpy as np
from astropy.coordinates import SkyCoord
from astropy import units as u

"""
This class represents the generic helper methods.
"""
class Utils:

    def deg_to_hms(self,ra,dec):
        """
        Transform a point in degrees (ra,dec) into an ICRS point (hms/dms).
        Parameters
        ----------
        ra : float
        dec: float
        Return
        ---------
        Return the point as a string
        """
        coord_icrs = SkyCoord(ra=ra*u.degree, dec=dec*u.degree)
        return coord_icrs.to_string('hmsdms')

    def hms_to_deg(self,hms):
        """
        Transform an ICRS point (hms/dms) into a point in degrees (ra,dec).
        Parameters
        ----------
        hms : string
        Return
        ---------
        Return two float values: 'ra' and 'dec'
        """
        coord = SkyCoord(hms,frame='icrs')
        #transform coord
        ra = coord.ra.degree
        dec = coord.dec.degree
        return ra,dec

    def get_nearest(self,center_ra,center_dec,matrix_ra_dec):
        """
        Find the object with the smallest angular separation from the centre point (center_ra, center_dec).
        Parameters
        ----------
        center_ra : float
        center_dec: float
        matrix_ra_dec : numpy array 2d
            Two-dimensional matrix with columns ra, dec; each row represents one object.
        Return
        ----------
        Return the row index of the matrix with the minimum angle between the ra/dec
        centre point and the ra/dec of the object.
        """
        angle = []
        c1 = SkyCoord(ra=center_ra,dec=center_dec,unit=u.degree)
        for obj in matrix_ra_dec:
            c2 = SkyCoord(obj[0],obj[1],unit=u.degree)
            angle.append(c1.separation(c2))
        return angle.index(min(angle))

    def flux_to_mag(self,psfFlux):
        """
        Convert psfFlux (in janskys) to AB magnitude, using [-2.5*np.log10(psfFlux) + 8.90].
        Parameters
        ----------
        psfFlux : numpy array
            psfFlux in janskys
        Return
        ----------
        Return numpy array with mag
        """
        mag = (-2.5*np.log10(psfFlux))+8.90
        return mag

    def fluxerr_to_magerr(self,psfFluxerror,psfFlux):
        """
        Convert psfFluxerror and psfFlux (in janskys) to the AB magnitude error,
        using [1.08574*(psfFluxerror / psfFlux)].
        Parameters
        ----------
        psfFluxerror : numpy array
            psfFluxerror in janskys
        psfFlux : numpy array
            psfFlux in janskys
        Return
        ----------
        Return numpy array with magerr
        """
        magerr = 1.08574*(psfFluxerror/psfFlux)
        return magerr
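

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the package); the coordinates and fluxes
# below are arbitrary illustrative values.
if __name__ == "__main__":
    utils = Utils()

    # Degrees <-> ICRS sexagesimal (hms/dms) round trip.
    hms = utils.deg_to_hms(150.0, 2.5)
    ra, dec = utils.hms_to_deg(hms)
    print(hms, ra, dec)

    # Row index of the object closest to the centre point (smallest separation).
    objects = np.array([[150.1, 2.4], [151.0, 3.0]])
    print(utils.get_nearest(150.0, 2.5, objects))

    # AB magnitude and its error from a PSF flux given in janskys.
    flux = np.array([3.631e-6])      # 3.631 microjansky corresponds to AB mag ~22.5
    fluxerr = np.array([3.631e-7])
    print(utils.flux_to_mag(flux), utils.fluxerr_to_magerr(fluxerr, flux))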
PypiClean
/ansible-8.3.0-py3-none-any.whl/ansible_collections/fortinet/fortios/plugins/modules/fortios_vpn_ipsec_phase2_interface.py
from __future__ import absolute_import, division, print_function # Copyright: (c) 2022 Fortinet # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. __metaclass__ = type ANSIBLE_METADATA = { "status": ["preview"], "supported_by": "community", "metadata_version": "1.1", } DOCUMENTATION = """ --- module: fortios_vpn_ipsec_phase2_interface short_description: Configure VPN autokey tunnel in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify vpn_ipsec feature and phase2_interface category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.0 version_added: "2.0.0" author: - Link Zheng (@chillancezen) - Jie Xue (@JieX19) - Hongbin Lu (@fgtdev-hblu) - Frank Shen (@frankshen01) - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks requirements: - ansible>=2.9 options: access_token: description: - Token-based authentication. Generated from GUI of Fortigate. type: str required: false enable_log: description: - Enable/Disable logging for task. type: bool required: false default: false vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root member_path: type: str description: - Member attribute path to operate on. - Delimited by a slash character if there are more than one attribute. - Parameter marked with member_path is legitimate for doing member operation. member_state: type: str description: - Add or delete a member under specified attribute path. - When member_state is specified, the state option is ignored. choices: - 'present' - 'absent' state: description: - Indicates whether to create or remove the object. type: str required: true choices: - 'present' - 'absent' vpn_ipsec_phase2_interface: description: - Configure VPN autokey tunnel. default: null type: dict suboptions: add_route: description: - Enable/disable automatic route addition. type: str choices: - 'phase1' - 'enable' - 'disable' auto_discovery_forwarder: description: - Enable/disable forwarding short-cut messages. type: str choices: - 'phase1' - 'enable' - 'disable' auto_discovery_sender: description: - Enable/disable sending short-cut messages. type: str choices: - 'phase1' - 'enable' - 'disable' auto_negotiate: description: - Enable/disable IPsec SA auto-negotiation. type: str choices: - 'enable' - 'disable' comments: description: - Comment. type: str dhcp_ipsec: description: - Enable/disable DHCP-IPsec. type: str choices: - 'enable' - 'disable' dhgrp: description: - Phase2 DH group. type: list elements: str choices: - '1' - '2' - '5' - '14' - '15' - '16' - '17' - '18' - '19' - '20' - '21' - '27' - '28' - '29' - '30' - '31' - '32' diffserv: description: - Enable/disable applying DSCP value to the IPsec tunnel outer IP header. type: str choices: - 'enable' - 'disable' diffservcode: description: - DSCP value to be applied to the IPsec tunnel outer IP header. type: str dst_addr_type: description: - Remote proxy ID type. 
type: str choices: - 'subnet' - 'range' - 'ip' - 'name' - 'subnet6' - 'range6' - 'ip6' - 'name6' dst_end_ip: description: - Remote proxy ID IPv4 end. type: str dst_end_ip6: description: - Remote proxy ID IPv6 end. type: str dst_name: description: - Remote proxy ID name. Source firewall.address.name firewall.addrgrp.name. type: str dst_name6: description: - Remote proxy ID name. Source firewall.address6.name firewall.addrgrp6.name. type: str dst_port: description: - Quick mode destination port (1 - 65535 or 0 for all). type: int dst_start_ip: description: - Remote proxy ID IPv4 start. type: str dst_start_ip6: description: - Remote proxy ID IPv6 start. type: str dst_subnet: description: - Remote proxy ID IPv4 subnet. type: str dst_subnet6: description: - Remote proxy ID IPv6 subnet. type: str encapsulation: description: - ESP encapsulation mode. type: str choices: - 'tunnel-mode' - 'transport-mode' inbound_dscp_copy: description: - Enable/disable copying of the DSCP in the ESP header to the inner IP header. type: str choices: - 'phase1' - 'enable' - 'disable' initiator_ts_narrow: description: - Enable/disable traffic selector narrowing for IKEv2 initiator. type: str choices: - 'enable' - 'disable' ipv4_df: description: - Enable/disable setting and resetting of IPv4 "Don"t Fragment" bit. type: str choices: - 'enable' - 'disable' keepalive: description: - Enable/disable keep alive. type: str choices: - 'enable' - 'disable' keylife_type: description: - Keylife type. type: str choices: - 'seconds' - 'kbs' - 'both' keylifekbs: description: - Phase2 key life in number of kilobytes of traffic (5120 - 4294967295). type: int keylifeseconds: description: - Phase2 key life in time in seconds (120 - 172800). type: int l2tp: description: - Enable/disable L2TP over IPsec. type: str choices: - 'enable' - 'disable' name: description: - IPsec tunnel name. required: true type: str pfs: description: - Enable/disable PFS feature. type: str choices: - 'enable' - 'disable' phase1name: description: - Phase 1 determines the options required for phase 2. Source vpn.ipsec.phase1-interface.name. type: str proposal: description: - Phase2 proposal. type: list elements: str choices: - 'null-md5' - 'null-sha1' - 'null-sha256' - 'null-sha384' - 'null-sha512' - 'des-null' - 'des-md5' - 'des-sha1' - 'des-sha256' - 'des-sha384' - 'des-sha512' - '3des-null' - '3des-md5' - '3des-sha1' - '3des-sha256' - '3des-sha384' - '3des-sha512' - 'aes128-null' - 'aes128-md5' - 'aes128-sha1' - 'aes128-sha256' - 'aes128-sha384' - 'aes128-sha512' - 'aes128gcm' - 'aes192-null' - 'aes192-md5' - 'aes192-sha1' - 'aes192-sha256' - 'aes192-sha384' - 'aes192-sha512' - 'aes256-null' - 'aes256-md5' - 'aes256-sha1' - 'aes256-sha256' - 'aes256-sha384' - 'aes256-sha512' - 'aes256gcm' - 'chacha20poly1305' - 'aria128-null' - 'aria128-md5' - 'aria128-sha1' - 'aria128-sha256' - 'aria128-sha384' - 'aria128-sha512' - 'aria192-null' - 'aria192-md5' - 'aria192-sha1' - 'aria192-sha256' - 'aria192-sha384' - 'aria192-sha512' - 'aria256-null' - 'aria256-md5' - 'aria256-sha1' - 'aria256-sha256' - 'aria256-sha384' - 'aria256-sha512' - 'seed-null' - 'seed-md5' - 'seed-sha1' - 'seed-sha256' - 'seed-sha384' - 'seed-sha512' protocol: description: - Quick mode protocol selector (1 - 255 or 0 for all). type: int replay: description: - Enable/disable replay detection. type: str choices: - 'enable' - 'disable' route_overlap: description: - Action for overlapping routes. 
type: str choices: - 'use-old' - 'use-new' - 'allow' single_source: description: - Enable/disable single source IP restriction. type: str choices: - 'enable' - 'disable' src_addr_type: description: - Local proxy ID type. type: str choices: - 'subnet' - 'range' - 'ip' - 'name' - 'subnet6' - 'range6' - 'ip6' - 'name6' src_end_ip: description: - Local proxy ID end. type: str src_end_ip6: description: - Local proxy ID IPv6 end. type: str src_name: description: - Local proxy ID name. Source firewall.address.name firewall.addrgrp.name. type: str src_name6: description: - Local proxy ID name. Source firewall.address6.name firewall.addrgrp6.name. type: str src_port: description: - Quick mode source port (1 - 65535 or 0 for all). type: int src_start_ip: description: - Local proxy ID start. type: str src_start_ip6: description: - Local proxy ID IPv6 start. type: str src_subnet: description: - Local proxy ID subnet. type: str src_subnet6: description: - Local proxy ID IPv6 subnet. type: str """ EXAMPLES = """ - hosts: fortigates collections: - fortinet.fortios connection: httpapi vars: vdom: "root" ansible_httpapi_use_ssl: yes ansible_httpapi_validate_certs: no ansible_httpapi_port: 443 tasks: - name: Configure VPN autokey tunnel. fortios_vpn_ipsec_phase2_interface: vdom: "{{ vdom }}" state: "present" access_token: "<your_own_value>" vpn_ipsec_phase2_interface: add_route: "phase1" auto_discovery_forwarder: "phase1" auto_discovery_sender: "phase1" auto_negotiate: "enable" comments: "<your_own_value>" dhcp_ipsec: "enable" dhgrp: "1" diffserv: "enable" diffservcode: "<your_own_value>" dst_addr_type: "subnet" dst_end_ip: "<your_own_value>" dst_end_ip6: "<your_own_value>" dst_name: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)" dst_name6: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)" dst_port: "0" dst_start_ip: "<your_own_value>" dst_start_ip6: "<your_own_value>" dst_subnet: "<your_own_value>" dst_subnet6: "<your_own_value>" encapsulation: "tunnel-mode" inbound_dscp_copy: "phase1" initiator_ts_narrow: "enable" ipv4_df: "enable" keepalive: "enable" keylife_type: "seconds" keylifekbs: "5120" keylifeseconds: "43200" l2tp: "enable" name: "default_name_31" pfs: "enable" phase1name: "<your_own_value> (source vpn.ipsec.phase1-interface.name)" proposal: "null-md5" protocol: "0" replay: "enable" route_overlap: "use-old" single_source: "enable" src_addr_type: "subnet" src_end_ip: "<your_own_value>" src_end_ip6: "<your_own_value>" src_name: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)" src_name6: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)" src_port: "0" src_start_ip: "<your_own_value>" src_start_ip6: "<your_own_value>" src_subnet: "<your_own_value>" src_subnet6: "<your_own_value>" """ RETURN = """ build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision 
number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import ( FortiOSHandler, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import ( check_legacy_fortiosapi, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import ( schema_to_module_spec, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import ( check_schema_versioning, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import ( FAIL_SOCKET_MSG, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.data_post_processor import ( remove_invalid_fields, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import ( is_same_comparison, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import ( serialize, ) from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import ( find_current_values, ) def filter_vpn_ipsec_phase2_interface_data(json): option_list = [ "add_route", "auto_discovery_forwarder", "auto_discovery_sender", "auto_negotiate", "comments", "dhcp_ipsec", "dhgrp", "diffserv", "diffservcode", "dst_addr_type", "dst_end_ip", "dst_end_ip6", "dst_name", "dst_name6", "dst_port", "dst_start_ip", "dst_start_ip6", "dst_subnet", "dst_subnet6", "encapsulation", "inbound_dscp_copy", "initiator_ts_narrow", "ipv4_df", "keepalive", "keylife_type", "keylifekbs", "keylifeseconds", "l2tp", "name", "pfs", "phase1name", "proposal", "protocol", "replay", "route_overlap", "single_source", "src_addr_type", "src_end_ip", "src_end_ip6", "src_name", "src_name6", "src_port", "src_start_ip", "src_start_ip6", "src_subnet", "src_subnet6", ] json = remove_invalid_fields(json) dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def flatten_single_path(data, path, index): if ( not data or index == len(path) or path[index] not in data or not data[path[index]] ): return if index == len(path) - 1: data[path[index]] = " ".join(str(elem) for elem in data[path[index]]) elif isinstance(data[path[index]], list): for value in data[path[index]]: flatten_single_path(value, path, index + 1) else: flatten_single_path(data[path[index]], path, index + 1) def flatten_multilists_attributes(data): multilist_attrs = [ ["proposal"], ["dhgrp"], ] for attr in multilist_attrs: flatten_single_path(data, attr, 0) return data def underscore_to_hyphen(data): if isinstance(data, list): for i, elem in enumerate(data): data[i] = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace("_", "-")] = underscore_to_hyphen(v) data = new_data return data def vpn_ipsec_phase2_interface(data, fos, check_mode=False): vdom = data["vdom"] state = data["state"] vpn_ipsec_phase2_interface_data = data["vpn_ipsec_phase2_interface"] 
vpn_ipsec_phase2_interface_data = flatten_multilists_attributes( vpn_ipsec_phase2_interface_data ) filtered_data = underscore_to_hyphen( filter_vpn_ipsec_phase2_interface_data(vpn_ipsec_phase2_interface_data) ) # check_mode starts from here if check_mode: diff = { "before": "", "after": filtered_data, } mkey = fos.get_mkey("vpn.ipsec", "phase2-interface", filtered_data, vdom=vdom) current_data = fos.get("vpn.ipsec", "phase2-interface", vdom=vdom, mkey=mkey) is_existed = ( current_data and current_data.get("http_status") == 200 and isinstance(current_data.get("results"), list) and len(current_data["results"]) > 0 ) # 2. if it exists and the state is 'present' then compare current settings with desired if state == "present" or state is True: if mkey is None: return False, True, filtered_data, diff # if mkey exists then compare each other # record exits and they're matched or not if is_existed: is_same = is_same_comparison( serialize(current_data["results"][0]), serialize(filtered_data) ) current_values = find_current_values( current_data["results"][0], filtered_data ) return ( False, not is_same, filtered_data, {"before": current_values, "after": filtered_data}, ) # record does not exist return False, True, filtered_data, diff if state == "absent": if mkey is None: return ( False, False, filtered_data, {"before": current_data["results"][0], "after": ""}, ) if is_existed: return ( False, True, filtered_data, {"before": current_data["results"][0], "after": ""}, ) return False, False, filtered_data, {} return True, False, {"reason: ": "Must provide state parameter"}, {} if state == "present" or state is True: return fos.set("vpn.ipsec", "phase2-interface", data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete( "vpn.ipsec", "phase2-interface", mkey=filtered_data["name"], vdom=vdom ) else: fos._module.fail_json(msg="state must be present or absent!") def is_successful_status(resp): return ( "status" in resp and resp["status"] == "success" or "http_status" in resp and resp["http_status"] == 200 or "http_method" in resp and resp["http_method"] == "DELETE" and resp["http_status"] == 404 ) def fortios_vpn_ipsec(data, fos, check_mode): fos.do_member_operation("vpn.ipsec", "phase2-interface") if data["vpn_ipsec_phase2_interface"]: resp = vpn_ipsec_phase2_interface(data, fos, check_mode) else: fos._module.fail_json( msg="missing task body: %s" % ("vpn_ipsec_phase2_interface") ) if check_mode: return resp return ( not is_successful_status(resp), is_successful_status(resp) and (resp["revision_changed"] if "revision_changed" in resp else True), resp, {}, ) versioned_schema = { "type": "list", "elements": "dict", "children": { "name": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "required": True, }, "phase1name": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, 
"v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dhcp_ipsec": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "proposal": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "list", "options": [ { "value": "null-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "null-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "null-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "null-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, 
"v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "null-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "des-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "des-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "des-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "des-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "des-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "des-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "3des-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": 
True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "3des-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "3des-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "3des-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "3des-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "3des-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes128-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes128-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes128-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": 
True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes128-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes128-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes128-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes128gcm", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes192-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes192-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes192-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes192-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, 
"v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes192-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes192-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes256-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes256-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes256-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes256-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes256-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes256-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, 
"v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aes256gcm", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "chacha20poly1305", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria128-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria128-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria128-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria128-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria128-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria128-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": 
True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria192-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria192-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria192-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria192-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria192-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria192-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria256-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria256-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, 
"v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria256-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria256-sha256", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria256-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "aria256-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "seed-null", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "seed-md5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "seed-sha1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "seed-sha256", 
"revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "seed-sha384", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "seed-sha512", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], "multiple_values": True, "elements": "str", }, "pfs": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "ipv4_df": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": False, "v6.0.11": False, "v6.0.0": False, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, 
"v6.2.3": True, "v6.2.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, }, }, ], }, "dhgrp": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "list", "options": [ { "value": "1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "2", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "5", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "14", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "15", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "16", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "17", 
"revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "18", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "19", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "20", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "21", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "27", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "28", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "29", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "30", "revisions": { "v7.4.0": True, 
"v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "31", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "32", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": False, "v6.0.11": False, "v6.0.0": False, }, }, ], "multiple_values": True, "elements": "str", }, "replay": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "keepalive": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, 
"v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "auto_negotiate": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "add_route": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "phase1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, 
"v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "inbound_dscp_copy": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": False, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": False, "v7.0.4": False, "v7.0.3": False, "v7.0.2": False, "v7.0.12": True, "v7.0.1": False, "v7.0.0": False, "v6.4.4": False, "v6.4.1": False, "v6.4.0": False, "v6.2.7": False, "v6.2.5": False, "v6.2.3": False, "v6.2.0": False, "v6.0.5": False, "v6.0.11": False, "v6.0.0": False, }, "type": "string", "options": [ { "value": "phase1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.12": True, }, }, { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.12": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.12": True, }, }, ], }, "auto_discovery_sender": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "phase1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "auto_discovery_forwarder": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "phase1", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, 
"v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "keylifeseconds": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "integer", }, "keylifekbs": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "integer", }, "keylife_type": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "seconds", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "kbs", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "both", "revisions": { "v7.4.0": True, "v7.2.4": True, 
"v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "single_source": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "route_overlap": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "use-old", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "use-new", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "allow", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], 
}, "encapsulation": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "tunnel-mode", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "transport-mode", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "l2tp": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "comments": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "initiator_ts_narrow": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": False, "v6.4.0": False, "v6.2.7": False, "v6.2.5": 
False, "v6.2.3": False, "v6.2.0": False, "v6.0.5": False, "v6.0.11": False, "v6.0.0": False, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, }, }, ], }, "diffserv": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": False, "v6.4.0": False, "v6.2.7": False, "v6.2.5": False, "v6.2.3": False, "v6.2.0": False, "v6.0.5": False, "v6.0.11": False, "v6.0.0": False, }, "type": "string", "options": [ { "value": "enable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, }, }, { "value": "disable", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, }, }, ], }, "diffservcode": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": False, "v6.4.0": False, "v6.2.7": False, "v6.2.5": False, "v6.2.3": False, "v6.2.0": False, "v6.0.5": False, "v6.0.11": False, "v6.0.0": False, }, "type": "string", }, "protocol": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "integer", }, "src_name": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_name6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": 
True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_addr_type": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "subnet", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "range", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "ip", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "name", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "subnet6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "range6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "ip6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": 
True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "name6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "src_start_ip": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_start_ip6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_end_ip": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_end_ip6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_subnet": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_subnet6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "src_port": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": 
True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "integer", }, "dst_name": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_name6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_addr_type": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", "options": [ { "value": "subnet", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "range", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "ip", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "name", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "subnet6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, 
"v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "range6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "ip6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, { "value": "name6", "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, }, ], }, "dst_start_ip": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_start_ip6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_end_ip": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_end_ip6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_subnet": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": 
True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_subnet6": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "string", }, "dst_port": { "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, "type": "integer", }, }, "revisions": { "v7.4.0": True, "v7.2.4": True, "v7.2.2": True, "v7.2.1": True, "v7.2.0": True, "v7.0.8": True, "v7.0.7": True, "v7.0.6": True, "v7.0.5": True, "v7.0.4": True, "v7.0.3": True, "v7.0.2": True, "v7.0.12": True, "v7.0.1": True, "v7.0.0": True, "v6.4.4": True, "v6.4.1": True, "v6.4.0": True, "v6.2.7": True, "v6.2.5": True, "v6.2.3": True, "v6.2.0": True, "v6.0.5": True, "v6.0.11": True, "v6.0.0": True, }, } def main(): module_spec = schema_to_module_spec(versioned_schema) mkeyname = "name" fields = { "access_token": {"required": False, "type": "str", "no_log": True}, "enable_log": {"required": False, "type": "bool", "default": False}, "vdom": {"required": False, "type": "str", "default": "root"}, "member_path": {"required": False, "type": "str"}, "member_state": { "type": "str", "required": False, "choices": ["present", "absent"], }, "state": {"required": True, "type": "str", "choices": ["present", "absent"]}, "vpn_ipsec_phase2_interface": { "required": False, "type": "dict", "default": None, "options": {}, }, } for attribute_name in module_spec["options"]: fields["vpn_ipsec_phase2_interface"]["options"][attribute_name] = module_spec[ "options" ][attribute_name] if mkeyname and mkeyname == attribute_name: fields["vpn_ipsec_phase2_interface"]["options"][attribute_name][ "required" ] = True module = AnsibleModule(argument_spec=fields, supports_check_mode=True) check_legacy_fortiosapi(module) is_error = False has_changed = False result = None diff = None versions_check_result = None if module._socket_path: connection = Connection(module._socket_path) if "access_token" in module.params: connection.set_option("access_token", module.params["access_token"]) if "enable_log" in module.params: connection.set_option("enable_log", module.params["enable_log"]) else: connection.set_option("enable_log", False) fos = FortiOSHandler(connection, module, mkeyname) versions_check_result = check_schema_versioning( fos, versioned_schema, "vpn_ipsec_phase2_interface" ) is_error, has_changed, result, diff = fortios_vpn_ipsec( module.params, fos, module.check_mode ) else: module.fail_json(**FAIL_SOCKET_MSG) if versions_check_result and versions_check_result["matched"] is False: module.warn( "Ansible has detected version mismatch between FortOS system and your playbook, see more details by 
specifying option -vvv" ) if not is_error: if versions_check_result and versions_check_result["matched"] is False: module.exit_json( changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff, ) else: module.exit_json(changed=has_changed, meta=result, diff=diff) else: if versions_check_result and versions_check_result["matched"] is False: module.fail_json( msg="Error in repo", version_check_warning=versions_check_result, meta=result, ) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == "__main__": main()
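# --- Illustrative sketch (not part of the module) ---------------------------
# The versioned_schema above records, per option, which FortiOS revisions
# support it.  As a rough illustration of how that structure can be queried,
# the helper below recursively searches the schema for an option name and
# returns its "revisions" mapping.  find_option_revisions is a hypothetical
# helper written for this example only; it is not part of the collection.
def find_option_revisions(schema, option_name):
    """Return the 'revisions' dict of the first option matching option_name, or None."""
    if isinstance(schema, dict):
        for key, value in schema.items():
            if key == option_name and isinstance(value, dict) and "revisions" in value:
                return value["revisions"]
            found = find_option_revisions(value, option_name)
            if found is not None:
                return found
    elif isinstance(schema, list):
        for item in schema:
            found = find_option_revisions(item, option_name)
            if found is not None:
                return found
    return None


# Example: is 'dst_subnet6' marked as supported on FortiOS v7.4.0?
# revisions = find_option_revisions(versioned_schema, "dst_subnet6")
# print(bool(revisions and revisions.get("v7.4.0", False)))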
/iOpt-0.2.22-py3-none-any.whl/problems/rastriginInt.py
import numpy as np
from iOpt.trial import Point
from iOpt.trial import FunctionValue
from iOpt.trial import Trial
from iOpt.problem import Problem
import math


class RastriginInt(Problem):
    """
    The Rastrigin function is defined by the formula:
    :math:`f(y)=(\sum_{i=1}^{N}[x_{i}^{2}-10*cos(2\pi x_{i})])`,
    where :math:`x\in [-2.2, 1.8]` and :math:`N` is the problem dimension.
    """

    def __init__(self, dimension: int, number_of_discrete_variables: int):
        """
        Constructor of the RastriginInt problem class.

        :param dimension: Dimension of the problem.
        :param number_of_discrete_variables: Number of discrete variables.
        """
        super(RastriginInt, self).__init__()
        self.name = "RastriginInt"
        self.dimension = dimension
        self.number_of_float_variables = dimension - number_of_discrete_variables
        self.number_of_discrete_variables = number_of_discrete_variables
        self.number_of_objectives = 1
        self.number_of_constraints = 0

        self.float_variable_names = np.ndarray(shape=(self.number_of_float_variables), dtype=object)
        for i in range(self.number_of_float_variables):
            self.float_variable_names[i] = str(i)

        self.discrete_variable_names = np.ndarray(shape=(self.number_of_discrete_variables), dtype=object)
        for i in range(self.number_of_discrete_variables):
            self.discrete_variable_names[i] = str(i)

        self.lower_bound_of_float_variables = np.ndarray(shape=(self.number_of_float_variables), dtype=np.double)
        self.lower_bound_of_float_variables.fill(-2.2)
        self.upper_bound_of_float_variables = np.ndarray(shape=(self.number_of_float_variables), dtype=np.double)
        self.upper_bound_of_float_variables.fill(1.8)

        self.discrete_variable_values = [["A", "B"] for i in range(self.number_of_discrete_variables)]

        self.known_optimum = np.ndarray(shape=(1), dtype=Trial)

        pointfv = np.ndarray(shape=(self.number_of_float_variables), dtype=np.double)
        pointfv.fill(0)
        pointdv = np.ndarray(shape=(self.number_of_discrete_variables), dtype=object)
        pointdv.fill("B")

        KOpoint = Point(pointfv, pointdv)
        KOfunV = np.ndarray(shape=(1), dtype=FunctionValue)
        KOfunV[0] = FunctionValue()
        KOfunV[0].value = 0
        self.known_optimum[0] = Trial(KOpoint, KOfunV)

        self.A = np.ndarray(shape=(self.dimension), dtype=np.double)
        self.A.fill(-2.2)
        self.B = np.ndarray(shape=(self.dimension), dtype=np.double)
        self.B.fill(1.8)

        self.optPoint = np.ndarray(shape=(self.dimension), dtype=np.double)
        self.optPoint = np.append([[0] for i in range(self.number_of_float_variables)],
                                  [[1.8] for i in range(self.number_of_discrete_variables)])

        self.multKoef = 0
        x = np.ndarray(shape=(self.dimension), dtype=np.double)
        count = math.pow(2, self.dimension)
        for i in range(int(count)):
            for j in range(self.dimension):
                x[j] = self.A[j] if (((i >> j) & 1) == 0) else self.B[j]
            v = abs(self.MultFunc(x))
            if v > self.multKoef:
                self.multKoef = v
        self.multKoef += 4
        self.optMultKoef = (self.MultFunc(self.optPoint) + self.multKoef)

    def calculate(self, point: Point, function_value: FunctionValue) -> FunctionValue:
        """
        Compute the value of the selected function at a given point.

        :param point: coordinates of the trial point at which the function value is computed
        :param function_value: object identifying the function number within the problem and storing the function value
        :return: The computed function value at the point ``point``
        """
        sum: np.double = 0
        x = point.float_variables
        for i in range(self.number_of_float_variables):
            sum += x[i] * x[i] - 10 * math.cos(2 * math.pi * x[i]) + 10

        dx = point.discrete_variables
        for i in range(self.number_of_discrete_variables):
            if dx[i] == "A":
                sum += 2.2
            elif dx[i] == "B":
                sum -= 1.8
            else:
                raise ValueError

        x_arr = self.point_to_array(point)
        sum = sum * (self.MultFunc(x_arr) + self.multKoef)

        function_value.value = sum
        return function_value

    def point_to_array(self, point: Point) -> np.ndarray:
        arr = np.ndarray(shape=(self.dimension), dtype=np.double)
        for i in range(0, self.number_of_float_variables):
            arr[i] = point.float_variables[i]
        for i in range(0, self.number_of_discrete_variables):
            if point.discrete_variables[i] == "A":
                arr[self.number_of_float_variables + i] = -2.2
            elif point.discrete_variables[i] == "B":
                arr[self.number_of_float_variables + i] = 1.8
        return arr

    def MultFunc(self, x: np.ndarray) -> np.double:
        result: np.double = 0
        a: np.double
        d: np.double
        for i in range(self.dimension):
            d = (self.B[i] - self.A[i]) / 2
            a = (x[i] - self.optPoint[i]) / d
            a = np.double(a * a)
            result = np.double(result + a)
        result = np.double(-result)
        return result
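# --- Illustrative sketch (not part of the package) --------------------------
# Minimal usage example for the problem class defined above: build a
# 4-dimensional RastriginInt with two discrete variables and evaluate it at a
# single trial point.  The solver wiring (iOpt's Solver/SolverParameters) is
# deliberately omitted; only calculate() is exercised, so treat this as a
# sketch rather than a recipe.
if __name__ == "__main__":
    problem = RastriginInt(dimension=4, number_of_discrete_variables=2)
    trial_point = Point(np.array([0.5, -1.0]), ["A", "B"])  # 2 float + 2 discrete variables
    result = problem.calculate(trial_point, FunctionValue())
    print("f(point) =", result.value)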
/fake_bpy_module_2.78-20230117-py3-none-any.whl/bl_ui/properties_constraint.py
import sys import typing import bpy_types GenericType = typing.TypeVar("GenericType") class ConstraintButtonsPanel: bl_context = None ''' ''' bl_region_type = None ''' ''' bl_space_type = None ''' ''' def ACTION(self, context, layout, con): ''' ''' pass def CAMERA_SOLVER(self, context, layout, con): ''' ''' pass def CHILD_OF(self, context, layout, con): ''' ''' pass def CLAMP_TO(self, context, layout, con): ''' ''' pass def COPY_LOCATION(self, context, layout, con): ''' ''' pass def COPY_ROTATION(self, context, layout, con): ''' ''' pass def COPY_SCALE(self, context, layout, con): ''' ''' pass def COPY_TRANSFORMS(self, context, layout, con): ''' ''' pass def DAMPED_TRACK(self, context, layout, con): ''' ''' pass def FLOOR(self, context, layout, con): ''' ''' pass def FOLLOW_PATH(self, context, layout, con): ''' ''' pass def FOLLOW_TRACK(self, context, layout, con): ''' ''' pass def IK(self, context, layout, con): ''' ''' pass def IK_COPY_POSE(self, context, layout, con): ''' ''' pass def IK_DISTANCE(self, context, layout, con): ''' ''' pass def LIMIT_DISTANCE(self, context, layout, con): ''' ''' pass def LIMIT_LOCATION(self, context, layout, con): ''' ''' pass def LIMIT_ROTATION(self, context, layout, con): ''' ''' pass def LIMIT_SCALE(self, context, layout, con): ''' ''' pass def LOCKED_TRACK(self, context, layout, con): ''' ''' pass def MAINTAIN_VOLUME(self, context, layout, con): ''' ''' pass def OBJECT_SOLVER(self, context, layout, con): ''' ''' pass def PIVOT(self, context, layout, con): ''' ''' pass def RIGID_BODY_JOINT(self, context, layout, con): ''' ''' pass def SCRIPT(self, context, layout, con): ''' ''' pass def SHRINKWRAP(self, context, layout, con): ''' ''' pass def SPLINE_IK(self, context, layout, con): ''' ''' pass def STRETCH_TO(self, context, layout, con): ''' ''' pass def TRACK_TO(self, context, layout, con): ''' ''' pass def TRANSFORM(self, context, layout, con): ''' ''' pass def TRANSFORM_CACHE(self, context, layout, con): ''' ''' pass def draw_constraint(self, context, con): ''' ''' pass def ik_template(self, layout, con): ''' ''' pass def space_template(self, layout, con, target, owner): ''' ''' pass def target_template(self, layout, con, subtargets): ''' ''' pass class BONE_PT_constraints(ConstraintButtonsPanel, bpy_types.Panel, bpy_types._GenericUI): bl_context = None ''' ''' bl_label = None ''' ''' bl_options = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def ACTION(self, context, layout, con): ''' ''' pass def CAMERA_SOLVER(self, context, layout, con): ''' ''' pass def CHILD_OF(self, context, layout, con): ''' ''' pass def CLAMP_TO(self, context, layout, con): ''' ''' pass def COPY_LOCATION(self, context, layout, con): ''' ''' pass def COPY_ROTATION(self, context, layout, con): ''' ''' pass def COPY_SCALE(self, context, layout, con): ''' ''' pass def COPY_TRANSFORMS(self, context, layout, con): ''' ''' pass def DAMPED_TRACK(self, context, layout, con): ''' ''' pass def FLOOR(self, context, layout, con): ''' ''' pass def FOLLOW_PATH(self, context, layout, con): ''' ''' pass def FOLLOW_TRACK(self, context, layout, con): ''' ''' pass def IK(self, context, layout, con): ''' ''' pass def IK_COPY_POSE(self, context, layout, con): ''' ''' pass def IK_DISTANCE(self, context, layout, con): ''' ''' pass def LIMIT_DISTANCE(self, context, layout, con): ''' ''' pass def LIMIT_LOCATION(self, context, layout, con): ''' ''' pass def LIMIT_ROTATION(self, context, layout, con): ''' ''' pass def 
LIMIT_SCALE(self, context, layout, con): ''' ''' pass def LOCKED_TRACK(self, context, layout, con): ''' ''' pass def MAINTAIN_VOLUME(self, context, layout, con): ''' ''' pass def OBJECT_SOLVER(self, context, layout, con): ''' ''' pass def PIVOT(self, context, layout, con): ''' ''' pass def RIGID_BODY_JOINT(self, context, layout, con): ''' ''' pass def SCRIPT(self, context, layout, con): ''' ''' pass def SHRINKWRAP(self, context, layout, con): ''' ''' pass def SPLINE_IK(self, context, layout, con): ''' ''' pass def STRETCH_TO(self, context, layout, con): ''' ''' pass def TRACK_TO(self, context, layout, con): ''' ''' pass def TRANSFORM(self, context, layout, con): ''' ''' pass def TRANSFORM_CACHE(self, context, layout, con): ''' ''' pass def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def draw(self, context): ''' ''' pass def draw_constraint(self, context, con): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def ik_template(self, layout, con): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def poll(self, context): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def space_template(self, layout, con, target, owner): ''' ''' pass def target_template(self, layout, con, subtargets): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class OBJECT_PT_constraints(ConstraintButtonsPanel, bpy_types.Panel, bpy_types._GenericUI): bl_context = None ''' ''' bl_label = None ''' ''' bl_options = None ''' ''' bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def ACTION(self, context, layout, con): ''' ''' pass def CAMERA_SOLVER(self, context, layout, con): ''' ''' pass def CHILD_OF(self, context, layout, con): ''' ''' pass def CLAMP_TO(self, context, layout, con): ''' ''' pass def COPY_LOCATION(self, context, layout, con): ''' ''' pass def COPY_ROTATION(self, context, layout, con): ''' ''' pass def COPY_SCALE(self, context, layout, con): ''' ''' pass def COPY_TRANSFORMS(self, context, layout, con): ''' ''' pass def DAMPED_TRACK(self, context, layout, con): ''' ''' pass def FLOOR(self, context, layout, con): ''' ''' pass def FOLLOW_PATH(self, context, layout, con): ''' ''' pass def FOLLOW_TRACK(self, context, layout, con): ''' ''' pass def IK(self, context, layout, con): ''' ''' pass def IK_COPY_POSE(self, context, layout, con): ''' ''' pass def IK_DISTANCE(self, context, layout, con): ''' ''' pass def LIMIT_DISTANCE(self, context, layout, con): ''' ''' pass def LIMIT_LOCATION(self, context, layout, con): ''' ''' pass def LIMIT_ROTATION(self, context, layout, con): ''' ''' pass def LIMIT_SCALE(self, context, layout, con): ''' ''' pass def LOCKED_TRACK(self, context, layout, con): ''' ''' pass def MAINTAIN_VOLUME(self, context, layout, con): ''' ''' pass def OBJECT_SOLVER(self, context, layout, con): ''' ''' pass def PIVOT(self, context, layout, con): ''' ''' pass def RIGID_BODY_JOINT(self, context, layout, con): ''' ''' pass def SCRIPT(self, context, layout, con): ''' ''' pass def SHRINKWRAP(self, context, layout, con): ''' ''' pass def 
SPLINE_IK(self, context, layout, con): ''' ''' pass def STRETCH_TO(self, context, layout, con): ''' ''' pass def TRACK_TO(self, context, layout, con): ''' ''' pass def TRANSFORM(self, context, layout, con): ''' ''' pass def TRANSFORM_CACHE(self, context, layout, con): ''' ''' pass def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def draw(self, context): ''' ''' pass def draw_constraint(self, context, con): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def ik_template(self, layout, con): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def poll(self, context): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def space_template(self, layout, con, target, owner): ''' ''' pass def target_template(self, layout, con, subtargets): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass
/pymesh2-0.1.14-cp35-cp35m-macosx_10_11_x86_64.whl/pymesh2-0.1.14.data/purelib/pymesh/slice_mesh.py
from .boolean import boolean
from .misc import Quaternion
from .meshutils import generate_box_mesh, separate_mesh, remove_isolated_vertices, merge_meshes
from .meshio import form_mesh, save_mesh

import numpy as np
from numpy.linalg import norm


def slice_mesh(mesh, direction, N):
    """ Slice a given 3D mesh N times along a given direction.

    Args:
        mesh (:class:`Mesh`): The mesh to be sliced.
        direction (:class:`numpy.ndarray`): Direction orthogonal to the slices.
        N (int): Number of slices.

    Returns:
        A list of `N` :class:`Mesh` objects, each representing a single slice.
    """
    if mesh.dim != 3:
        raise NotImplementedError("Only slicing 3D mesh is supported.")

    bbox_min, bbox_max = mesh.bbox
    center = 0.5 * (bbox_min + bbox_max)
    radius = norm(bbox_max - center)

    direction = np.array(direction)
    direction = direction / norm(direction)

    proj_len = np.dot(mesh.vertices, direction)
    min_val = np.amin(proj_len)
    max_val = np.amax(proj_len)
    mid_val = 0.5 * (min_val + max_val)
    intercepts = np.linspace(min_val - mid_val, max_val - mid_val, N + 2)[1:-1]
    assert (len(intercepts) == N)

    if N % 2 == 1:
        intercepts = np.append(intercepts, intercepts[-1] + radius)

    boxes = []
    for low, high in intercepts.reshape((-1, 2), order="C"):
        min_corner = -np.ones(3) * (radius + 1)
        max_corner = np.ones(3) * (radius + 1)
        min_corner[2] = low
        max_corner[2] = high
        box = generate_box_mesh(min_corner, max_corner)
        boxes.append(box)

    num_boxes = len(boxes)
    boxes = merge_meshes(boxes)
    rot = Quaternion.fromData(
        np.array([0.0, 0.0, 1.0]), np.array(direction)).to_matrix()
    boxes = form_mesh(np.dot(rot, boxes.vertices.T).T + center, boxes.faces)

    slabs = boolean(boxes, mesh, "intersection")

    cross_secs = []
    source_faces = slabs.get_attribute("source_face").ravel()
    source = slabs.get_attribute("source").ravel()
    selected = source == 1
    cross_section_faces = slabs.faces[selected]
    cross_section = form_mesh(slabs.vertices, cross_section_faces)

    cross_secs = separate_mesh(cross_section)
    cross_secs = [remove_isolated_vertices(m)[0] for m in cross_secs]
    intersects = [np.dot(m.vertices[0], direction) for m in cross_secs]
    order = np.argsort(intersects)
    cross_secs = [cross_secs[i] for i in order]

    for i in range(0, len(cross_secs), 2):
        # Correct normal direction of every other slice.
        m = cross_secs[i]
        cross_secs[i] = form_mesh(m.vertices, m.faces[:, [0, 2, 1]])

    return cross_secs
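# --- Illustrative sketch (not part of the package) --------------------------
# Rough usage example for slice_mesh() above, assuming PyMesh is installed and
# importable as `pymesh`; "model.obj" and the output names are placeholders.
if __name__ == "__main__":
    import pymesh

    mesh = pymesh.load_mesh("model.obj")
    # Cut the mesh into 10 slices orthogonal to the z-axis.
    slices = pymesh.slice_mesh(mesh, direction=[0.0, 0.0, 1.0], N=10)
    for i, cross_section in enumerate(slices):
        pymesh.save_mesh("slice_{}.obj".format(i), cross_section)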
/bfx_api_ws_fix-2.0.6.tar.gz/bfx_api_ws_fix-2.0.6/README.md
# Bitfinex Trading Library for Python - Bitcoin, Ethereum, Ripple and more

![https://api.travis-ci.org/bitfinexcom/bitfinex-api-py.svg?branch=master](https://api.travis-ci.org/bitfinexcom/bitfinex-api-py.svg?branch=master)

A Python reference implementation of the Bitfinex API for both REST and websocket interaction.

# Features

- Official implementation
- Websocket V2 and Rest V2
- Connection multiplexing
- Order and wallet management
- All market data feeds

## Installation

Clone the package into your PYTHONPATH:

```sh
git clone https://github.com/bitfinexcom/bitfinex-api-py.git
cd bitfinex-api-py
```

Or install via pip:

```sh
python3 -m pip install bitfinex-api-py
```

Run the trades/candles example:

```sh
cd bfxapi/examples/ws
python3 subscribe_trades_candles.py
```

## Quickstart

```python
import os
import sys
from bfxapi import Client, Order

bfx = Client(
  API_KEY='<YOUR_API_KEY>',
  API_SECRET='<YOUR_API_SECRET>'
)

@bfx.ws.on('authenticated')
async def submit_order(auth_message):
  await bfx.ws.submit_order('tBTCUSD', 19000, 0.01, Order.Type.EXCHANGE_MARKET)

bfx.ws.run()
```

## Docs

* <b>[V2 Rest](docs/rest_v2.md)</b> - Documentation
* <b>[V2 Websocket](docs/ws_v2.md)</b> - Documentation

## Examples

#### Authenticate

```python
bfx = Client(
  API_KEY='<YOUR_API_KEY>',
  API_SECRET='<YOUR_API_SECRET>'
)

@bfx.ws.on('authenticated')
async def do_something():
  print ("Success!")

bfx.ws.run()
```

#### Subscribe to trades

```python
from bfxapi import Client

bfx = Client(
  API_KEY=API_KEY,
  API_SECRET=API_SECRET
)

@bfx.ws.on('new_trade')
def log_trade(trade):
  print ("New trade: {}".format(trade))

@bfx.ws.on('connected')
def start():
  bfx.ws.subscribe('trades', 'tBTCUSD')

bfx.ws.run()
```

#### Withdraw from wallet via REST

```python
bfx = Client(
  API_KEY=API_KEY,
  API_SECRET=API_SECRET,
  logLevel='DEBUG'
)

response = await bfx.rest.submit_wallet_withdraw("exchange", "tetheruse", 5, "0xc5bbb852f82c24327693937d4012f496cff7eddf")
print ("Address: ", response.notify_info)
```

See the <b>[examples](https://github.com/bitfinexcom/bitfinex-api-py/tree/master/examples)</b> directory for more, like:

- [Creating/updating an order](https://github.com/bitfinexcom/bitfinex-api-py/blob/master/bfxapi/examples/ws/send_order.py)
- [Subscribing to orderbook updates](https://github.com/bitfinexcom/bitfinex-api-py/blob/master/bfxapi/examples/ws/resubscribe_orderbook.py)
- [Withdrawing crypto](https://github.com/bitfinexcom/bitfinex-api-py/blob/master/bfxapi/examples/rest/transfer_wallet.py)
- [Submitting a funding offer](https://github.com/bitfinexcom/bitfinex-api-py/blob/master/bfxapi/examples/rest/create_funding.py)

For more information on how to use this library, please see the example scripts in the `bfxapi/examples` directory. There you will find usage of all exposed interface functions for both REST and websocket. Also see [this medium article](https://medium.com/@Bitfinex/15f201ad20d4) for a tutorial.

## FAQ

### Is there any rate limiting?

For a websocket connection there is no limit to the number of requests sent down the connection (unlimited order operations); however, an account can only create 15 new connections every 5 minutes, and each connection is only able to subscribe to 30 inbound data channels. Fortunately this library handles all of the load balancing/multiplexing for channels and will automatically create/destroy new connections when needed, although the user may still encounter the max-connections rate-limiting error.

For REST, the base limit per user is 1,000 orders per 5-minute interval, shared between all API connections of an account. It increases proportionally to your trade volume based on the following formula (see the short worked example at the end of this README):

`1000 + (TOTAL_PAIRS_PLATFORM * 60 * 5) / (250000000 / USER_VOL_LAST_30d)`

where TOTAL_PAIRS_PLATFORM is the number of pairs on the Bitfinex platform (currently ~101) and USER_VOL_LAST_30d is in USD.

### Will I always receive an `on` packet?

No; if your order fills immediately, the first packet referencing the order will be an `oc` signaling the order has closed. If the order fills partially immediately after creation, an `on` packet will arrive with a status of `PARTIALLY FILLED...`

For example, if you submit a `LIMIT` buy for 0.2 BTC and it is added to the order book, an `on` packet will arrive via ws2. After a partial fill of 0.1 BTC, an `ou` packet will arrive, followed by a final `oc` after the remaining 0.1 BTC fills. On the other hand, if the order fills immediately for 0.2 BTC, you will only receive an `oc` packet.

### My websocket won't connect!

Did you call `client.Connect()`? :)

### nonce too small

I make multiple parallel requests and receive an error that the nonce is too small. What does it mean?

Nonces are used to guard against replay attacks. When multiple HTTP requests arrive at the API with the wrong nonce, e.g. because of an async timing issue, the API will reject the request. If you need to go parallel, you have to use multiple API keys right now.

### How do `te` and `tu` messages differ?

A `te` packet is sent to the client first, immediately after a trade has been matched and executed, followed by a `tu` message once it has completed processing. During times of high load, the `tu` message may be noticeably delayed, and as such only the `te` message should be used for a realtime feed.

### What are the sequence numbers for?

If you enable sequencing on v2 of the WS API, each incoming packet will have a public sequence number at the end, along with an auth sequence number in the case of channel `0` packets. The public seq numbers increment on each packet, and the auth seq numbers increment on each authenticated action (new orders, etc.). These values allow you to verify that no packets have been missed/dropped, since they always increase monotonically.

### What is the difference between R* and P* order books?

Order books with precision `R0` are considered 'raw' and contain entries for each order submitted to the book, whereas `P*` books contain entries for each price level (which aggregate orders).

## Contributing

1. Fork it ( https://github.com/[my-github-username]/bitfinex/fork )
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create a new Pull Request

### Publish to Pypi

```
python setup.py sdist
twine upload dist/*
```
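As a footnote to the rate-limiting FAQ above, the REST order-limit formula can be sanity-checked with a few lines of Python. This only illustrates the arithmetic; the pair count and 30-day volume used below are assumed example values, not live figures.

```python
def rest_order_limit(total_pairs_platform: int, user_vol_last_30d_usd: float) -> float:
    """Orders allowed per 5-minute interval, per the formula quoted in the FAQ."""
    return 1000 + (total_pairs_platform * 60 * 5) / (250_000_000 / user_vol_last_30d_usd)

# Example: ~101 pairs on the platform and $2M of volume in the last 30 days.
print(rest_order_limit(101, 2_000_000))  # -> 1242.4 orders per 5 minutes
```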
/dnv_bladed_models-0.3.44.tar.gz/dnv_bladed_models-0.3.44/src/dnv_bladed_models/iec4_icing.py
from __future__ import annotations from datetime import date, datetime # noqa: F401 from enum import Enum, IntEnum import re # noqa: F401 from typing import Any, Dict, List, Optional, Type, Union, Callable # noqa: F401 from pathlib import Path from typing import TypeVar Model = TypeVar('Model', bound='BaseModel') StrBytes = Union[str, bytes] from pydantic import AnyUrl, BaseModel, EmailStr, Field, validator, root_validator, Extra # noqa: F401 from dnv_bladed_models.iced_condition import IcedCondition class IEC4Icing(IcedCondition, InitialConditionType='IEC4Icing'): """IEC4Icing - The initial condition of a blade being iced according to the IEC4 standards. This will remain unchanged throughout the simulation. Attributes: ---------- InitialConditionType : str, readonly, default='IEC4Icing' Allows the schema to identify the type of the object. For this type of object, this must always be set to 'IEC4Icing' """ InitialConditionType: Optional[str] = Field(alias="InitialConditionType", default='IEC4Icing', allow_mutation=False) class Config: extra = Extra.forbid validate_assignment = True allow_population_by_field_name = True pass @root_validator(pre=True) def _parsing_ignores_underscore_properties(cls, values: dict[str, any]): allowed_vals = {} for key, val in values.items(): if not key.startswith('_'): if isinstance(val, dict): allowed_child_vals = {} for child_key, child_val in val.items(): if not child_key.startswith('_'): allowed_child_vals[child_key] = child_val allowed_vals[key] = allowed_child_vals else: allowed_vals[key] = val return allowed_vals def to_json( self, *, include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, by_alias: bool = True, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = True, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) -> str: r""" Generates a JSON string representation of the model. Notes ----- `include` and `exclude` arguments as per `dict()`. `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`. Examples -------- >>> model.to_json() Renders the full JSON representation of the model object. """ if dumps_kwargs.get('indent') is None: dumps_kwargs.update(indent=2) return super().json( include=include, exclude=exclude, by_alias=by_alias, skip_defaults=skip_defaults, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, encoder=encoder, models_as_dict=models_as_dict, **dumps_kwargs) @classmethod def from_file( cls: Type['Model'], path: Union[str, Path]) -> 'Model': r""" Loads a model from a given file path. Parameters ---------- path : string The file path to the model. Returns ------- IEC4Icing The model object. Raises ------ ValueError, ValidationError If the JSON document does not correctly describe the model according to the model schema. Examples -------- >>> model = IEC4Icing.from_file('/path/to/file') """ return super().parse_file(path=path) @classmethod def from_json( cls: Type['Model'], b: StrBytes) -> 'Model': r""" Creates a model object from a JSON string. Parameters ---------- b: StrBytes The JSON string describing the model. Returns ------- IEC4Icing The model object. Raises ------ ValueError, ValidationError If the JSON document does not correctly describe the model according to the model schema. 
Examples -------- >>> model = IEC4Icing.from_json('{ ... }') """ return super().parse_raw( b=b, content_type='application/json') @classmethod def from_dict( cls: Type['Model'], obj: Any) -> 'Model': r""" Creates a model object from a dict. Parameters ---------- obj : Any The dictionary object describing the model. Returns ------- IEC4Icing The model object. Raises ------ ValueError, ValidationError If the JSON document does not correctly describe the model according to the model schema. """ return super().parse_obj(obj=obj) def to_file( self, path: Union[str, Path]): r""" Writes the model as a JSON document to a file with UTF8 encoding. Parameters ---------- path : string The file path to which the model will be written. Examples -------- >>> model.to_file('/path/to/file') """ with open(file=path, mode='w', encoding="utf8") as output_file: output_file.write(self.to_json()) IEC4Icing.update_forward_refs()
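# --- Illustrative sketch (not part of the package) --------------------------
# Minimal round-trip example for the model above, assuming that the fields
# inherited from IcedCondition are all optional so that a bare IEC4Icing()
# can be constructed.  Output comments are indicative only.
if __name__ == "__main__":
    condition = IEC4Icing()
    as_json = condition.to_json()            # includes "InitialConditionType": "IEC4Icing"
    restored = IEC4Icing.from_json(as_json)
    print(restored.InitialConditionType)     # -> IEC4Icing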
/lark-oapi-1.0.19.tar.gz/lark-oapi-1.0.19/lark_oapi/api/hire/v1/model/offer_list_info.py
from typing import *

from lark_oapi.core.construct import init

from .base_bilingual_with_id import BaseBilingualWithId
from .offer_job_info import OfferJobInfo


class OfferListInfo(object):
    _types = {
        "id": str,
        "job_info": OfferJobInfo,
        "create_time": str,
        "offer_status": int,
        "offer_type": int,
        "employee_type": BaseBilingualWithId,
        "application_id": str,
    }

    def __init__(self, d=None):
        self.id: Optional[str] = None
        self.job_info: Optional[OfferJobInfo] = None
        self.create_time: Optional[str] = None
        self.offer_status: Optional[int] = None
        self.offer_type: Optional[int] = None
        self.employee_type: Optional[BaseBilingualWithId] = None
        self.application_id: Optional[str] = None
        init(self, d, self._types)

    @staticmethod
    def builder() -> "OfferListInfoBuilder":
        return OfferListInfoBuilder()


class OfferListInfoBuilder(object):
    def __init__(self) -> None:
        self._offer_list_info = OfferListInfo()

    def id(self, id: str) -> "OfferListInfoBuilder":
        self._offer_list_info.id = id
        return self

    def job_info(self, job_info: OfferJobInfo) -> "OfferListInfoBuilder":
        self._offer_list_info.job_info = job_info
        return self

    def create_time(self, create_time: str) -> "OfferListInfoBuilder":
        self._offer_list_info.create_time = create_time
        return self

    def offer_status(self, offer_status: int) -> "OfferListInfoBuilder":
        self._offer_list_info.offer_status = offer_status
        return self

    def offer_type(self, offer_type: int) -> "OfferListInfoBuilder":
        self._offer_list_info.offer_type = offer_type
        return self

    def employee_type(self, employee_type: BaseBilingualWithId) -> "OfferListInfoBuilder":
        self._offer_list_info.employee_type = employee_type
        return self

    def application_id(self, application_id: str) -> "OfferListInfoBuilder":
        self._offer_list_info.application_id = application_id
        return self

    def build(self) -> "OfferListInfo":
        return self._offer_list_info
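# --- Illustrative sketch (not part of the package) --------------------------
# Minimal example of the builder defined above; all field values are
# placeholders, not real Feishu/Lark identifiers.
if __name__ == "__main__":
    offer = OfferListInfo.builder() \
        .id("7123456789") \
        .application_id("6985678901") \
        .offer_status(2) \
        .offer_type(1) \
        .create_time("1684738273000") \
        .build()
    print(offer.id, offer.offer_status)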
/secretflow_ray-2.2.0-cp38-cp38-macosx_10_16_x86_64.whl/secretflow_ray-2.2.0.data/purelib/ray/rllib/algorithms/algorithm.py
from collections import defaultdict import concurrent import copy from datetime import datetime import functools import gym import importlib import json import logging import numpy as np import os from packaging import version import pkg_resources import tempfile import time from typing import ( Callable, Container, DefaultDict, Dict, List, Optional, Set, Tuple, Type, Union, ) from ray.rllib.offline.offline_evaluator import OfflineEvaluator from ray.rllib.offline.offline_evaluation_utils import remove_time_dim import tree import ray from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag from ray.actor import ActorHandle from ray.air.checkpoint import Checkpoint import ray.cloudpickle as pickle from ray.rllib.algorithms.algorithm_config import AlgorithmConfig from ray.rllib.algorithms.registry import ALGORITHMS as ALL_ALGORITHMS from ray.rllib.env.env_context import EnvContext from ray.rllib.env.utils import _gym_env_creator from ray.rllib.evaluation.episode import Episode from ray.rllib.evaluation.metrics import ( collect_episodes, collect_metrics, summarize_episodes, ) from ray.rllib.evaluation.rollout_worker import RolloutWorker from ray.rllib.evaluation.worker_set import WorkerSet from ray.rllib.execution.common import ( STEPS_TRAINED_THIS_ITER_COUNTER, # TODO: Backward compatibility. ) from ray.rllib.execution.rollout_ops import synchronous_parallel_sample from ray.rllib.execution.train_ops import multi_gpu_train_one_step, train_one_step from ray.rllib.offline import get_dataset_and_shards from ray.rllib.offline.estimators import ( OffPolicyEstimator, ImportanceSampling, WeightedImportanceSampling, DirectMethod, DoublyRobust, ) from ray.rllib.policy.policy import Policy from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch, concat_samples from ray.rllib.utils import deep_update, FilterManager from ray.rllib.utils.annotations import ( DeveloperAPI, ExperimentalAPI, OverrideToImplementCustomLogic, OverrideToImplementCustomLogic_CallToSuperRecommended, PublicAPI, override, ) from ray.rllib.utils.checkpoints import CHECKPOINT_VERSION, get_checkpoint_info from ray.rllib.utils.debug import update_global_seed_if_necessary from ray.rllib.utils.deprecation import ( DEPRECATED_VALUE, Deprecated, deprecation_warning, ) from ray.rllib.utils.error import ERR_MSG_INVALID_ENV_DESCRIPTOR, EnvError from ray.rllib.utils.framework import try_import_tf from ray.rllib.utils.from_config import from_config from ray.rllib.utils.metrics import ( NUM_AGENT_STEPS_SAMPLED, NUM_AGENT_STEPS_SAMPLED_THIS_ITER, NUM_AGENT_STEPS_TRAINED, NUM_ENV_STEPS_SAMPLED, NUM_ENV_STEPS_SAMPLED_THIS_ITER, NUM_ENV_STEPS_TRAINED, SYNCH_WORKER_WEIGHTS_TIMER, TRAINING_ITERATION_TIMER, ) from ray.rllib.utils.metrics.learner_info import LEARNER_INFO from ray.rllib.utils.policy import validate_policy_id from ray.rllib.utils.replay_buffers import MultiAgentReplayBuffer, ReplayBuffer from ray.rllib.utils.spaces import space_utils from ray.rllib.utils.typing import ( AgentID, AlgorithmConfigDict, EnvCreator, EnvInfoDict, EnvType, EpisodeID, PartialAlgorithmConfigDict, PolicyID, PolicyState, ResultDict, SampleBatchType, TensorStructType, TensorType, ) from ray.tune.execution.placement_groups import PlacementGroupFactory from ray.tune.experiment.trial import ExportFormat from ray.tune.logger import Logger, UnifiedLogger from ray.tune.registry import ENV_CREATOR, _global_registry from ray.tune.resources import Resources from ray.tune.result import DEFAULT_RESULTS_DIR from ray.tune.trainable import Trainable 
from ray.util import log_once from ray.util.timer import _Timer from ray.tune.registry import get_trainable_cls tf1, tf, tfv = try_import_tf() logger = logging.getLogger(__name__) @DeveloperAPI def with_common_config(extra_config: PartialAlgorithmConfigDict) -> AlgorithmConfigDict: """Returns the given config dict merged with common agent confs. Args: extra_config: A user defined partial config which will get merged with a default AlgorithmConfig() object and returned as plain python dict. Returns: AlgorithmConfigDict: The merged config dict resulting from AlgorithmConfig() plus `extra_config`. """ return Algorithm.merge_trainer_configs( AlgorithmConfig().to_dict(), extra_config, _allow_unknown_configs=True ) @PublicAPI class Algorithm(Trainable): """An RLlib algorithm responsible for optimizing one or more Policies. Algorithms contain a WorkerSet under `self.workers`. A WorkerSet is normally composed of a single local worker (self.workers.local_worker()), used to compute and apply learning updates, and optionally one or more remote workers used to generate environment samples in parallel. WorkerSet is fault tolerant and elastic. It tracks health states for all the managed remote worker actors. As a result, Algorithm should never access the underlying actor handles directly. Instead, always access them via all the foreach APIs with assigned IDs of the underlying workers. Each worker (remotes or local) contains a PolicyMap, which itself may contain either one policy for single-agent training or one or more policies for multi-agent training. Policies are synchronized automatically from time to time using ray.remote calls. The exact synchronization logic depends on the specific algorithm used, but this usually happens from local worker to all remote workers and after each training update. You can write your own Algorithm classes by sub-classing from `Algorithm` or any of its built-in sub-classes. This allows you to override the `training_step` method to implement your own algorithm logic. You can find the different built-in algorithms' `training_step()` methods in their respective main .py files, e.g. rllib.algorithms.dqn.dqn.py or rllib.algorithms.impala.impala.py. The most important API methods a Algorithm exposes are `train()`, `evaluate()`, `save()` and `restore()`. """ # Whether to allow unknown top-level config keys. _allow_unknown_configs = False # List of top-level keys with value=dict, for which new sub-keys are # allowed to be added to the value dict. _allow_unknown_subkeys = [ "tf_session_args", "local_tf_session_args", "env_config", "model", "optimizer", "multiagent", "custom_resources_per_worker", "evaluation_config", "exploration_config", "replay_buffer_config", "extra_python_environs_for_worker", "input_config", "output_config", ] # List of top level keys with value=dict, for which we always override the # entire value (dict), iff the "type" key in that value dict changes. 
_override_all_subkeys_if_type_changes = [ "exploration_config", "replay_buffer_config", ] # List of keys that are always fully overridden if present in any dict or sub-dict _override_all_key_list = ["off_policy_estimation_methods", "policies"] _progress_metrics = [ "episode_reward_mean", "evaluation/episode_reward_mean", "num_env_steps_sampled", "num_env_steps_trained", ] @staticmethod def from_checkpoint( checkpoint: Union[str, Checkpoint], policy_ids: Optional[Container[PolicyID]] = None, policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None, policies_to_train: Optional[ Union[ Container[PolicyID], Callable[[PolicyID, Optional[SampleBatchType]], bool], ] ] = None, ) -> "Algorithm": """Creates a new algorithm instance from a given checkpoint. Note: This method must remain backward compatible from 2.0.0 on. Args: checkpoint: The path (str) to the checkpoint directory to use or an AIR Checkpoint instance to restore from. policy_ids: Optional list of PolicyIDs to recover. This allows users to restore an Algorithm with only a subset of the originally present Policies. policy_mapping_fn: An optional (updated) policy mapping function to use from here on. policies_to_train: An optional list of policy IDs to be trained or a callable taking PolicyID and SampleBatchType and returning a bool (trainable or not?). If None, will keep the existing setup in place. Policies, whose IDs are not in the list (or for which the callable returns False) will not be updated. Returns: The instantiated Algorithm. """ checkpoint_info = get_checkpoint_info(checkpoint) # Not possible for (v0.1) (algo class and config information missing # or very hard to retrieve). if checkpoint_info["checkpoint_version"] == version.Version("0.1"): raise ValueError( "Cannot restore a v0 checkpoint using `Algorithm.from_checkpoint()`!" "In this case, do the following:\n" "1) Create a new Algorithm object using your original config.\n" "2) Call the `restore()` method of this algo object passing it" " your checkpoint dir or AIR Checkpoint object." ) if checkpoint_info["checkpoint_version"] < version.Version("1.0"): raise ValueError( "`checkpoint_info['checkpoint_version']` in `Algorithm.from_checkpoint" "()` must be 1.0 or later! You are using a checkpoint with " f"version v{checkpoint_info['checkpoint_version']}." ) state = Algorithm._checkpoint_info_to_algorithm_state( checkpoint_info=checkpoint_info, policy_ids=policy_ids, policy_mapping_fn=policy_mapping_fn, policies_to_train=policies_to_train, ) return Algorithm.from_state(state) @staticmethod def from_state(state: Dict) -> "Algorithm": """Recovers an Algorithm from a state object. The `state` of an instantiated Algorithm can be retrieved by calling its `get_state` method. It contains all information necessary to create the Algorithm from scratch. No access to the original code (e.g. configs, knowledge of the Algorithm's class, etc..) is needed. Args: state: The state to recover a new Algorithm instance from. Returns: A new Algorithm instance. """ algorithm_class: Type[Algorithm] = state.get("algorithm_class") if algorithm_class is None: raise ValueError( "No `algorithm_class` key was found in given `state`! " "Cannot create new Algorithm." ) # algo_class = get_trainable_cls(algo_class_name) # Create the new algo. config = state.get("config") if not config: raise ValueError("No `config` found in given Algorithm state!") new_algo = algorithm_class(config=config) # Set the new algo's state. new_algo.__setstate__(state) # Return the new algo. 
return new_algo @PublicAPI def __init__( self, config: Optional[AlgorithmConfig] = None, env=None, # deprecated arg logger_creator: Optional[Callable[[], Logger]] = None, **kwargs, ): """Initializes an Algorithm instance. Args: config: Algorithm-specific configuration object. logger_creator: Callable that creates a ray.tune.Logger object. If unspecified, a default logger is created. **kwargs: Arguments passed to the Trainable base class. """ config = config or self.get_default_config() # Translate possible dict into an AlgorithmConfig object, as well as, # resolving generic config objects into specific ones (e.g. passing # an `AlgorithmConfig` super-class instance into a PPO constructor, # which normally would expect a PPOConfig object). if isinstance(config, dict): default_config = self.get_default_config() # `self.get_default_config()` also returned a dict -> # Last resort: Create core AlgorithmConfig from merged dicts. if isinstance(default_config, dict): config = AlgorithmConfig.from_dict( config_dict=self.merge_trainer_configs(default_config, config, True) ) # Default config is an AlgorithmConfig -> update its properties # from the given config dict. else: config = default_config.update_from_dict(config) else: default_config = self.get_default_config() # Given AlgorithmConfig is not of the same type as the default config: # This could be the case e.g. if the user is building an algo from a # generic AlgorithmConfig() object. if not isinstance(config, type(default_config)): config = default_config.update_from_dict(config.to_dict()) # In case this algo is using a generic config (with no algo_class set), set it # here. if config.algo_class is None: config.algo_class = type(self) if env is not None: deprecation_warning( old=f"algo = Algorithm(env='{env}', ...)", new=f"algo = AlgorithmConfig().environment('{env}').build()", error=False, ) config.environment(env) # Validate and freeze our AlgorithmConfig object (no more changes possible). config.validate() config.freeze() # Convert `env` provided in config into a concrete env creator callable, which # takes an EnvContext (config dict) as arg and returning an RLlib supported Env # type (e.g. a gym.Env). self._env_id, self.env_creator = self._get_env_id_and_creator( config.env, config ) env_descr = ( self._env_id.__name__ if isinstance(self._env_id, type) else self._env_id ) # Placeholder for a local replay buffer instance. self.local_replay_buffer = None # Create a default logger creator if no logger_creator is specified if logger_creator is None: # Default logdir prefix containing the agent's name and the # env id. timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S") logdir_prefix = "{}_{}_{}".format(str(self), env_descr, timestr) if not os.path.exists(DEFAULT_RESULTS_DIR): # Possible race condition if dir is created several times on # rollout workers os.makedirs(DEFAULT_RESULTS_DIR, exist_ok=True) logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR) # Allow users to more precisely configure the created logger # via "logger_config.type". if config.logger_config and "type" in config.logger_config: def default_logger_creator(config): """Creates a custom logger with the default prefix.""" cfg = config["logger_config"].copy() cls = cfg.pop("type") # Provide default for logdir, in case the user does # not specify this in the "logger_config" dict. logdir_ = cfg.pop("logdir", logdir) return from_config(cls=cls, _args=[cfg], logdir=logdir_) # If no `type` given, use tune's UnifiedLogger as last resort. 
else: def default_logger_creator(config): """Creates a Unified logger with the default prefix.""" return UnifiedLogger(config, logdir, loggers=None) logger_creator = default_logger_creator # Metrics-related properties. self._timers = defaultdict(_Timer) self._counters = defaultdict(int) self._episode_history = [] self._episodes_to_be_collected = [] # The fully qualified AlgorithmConfig used for evaluation # (or None if evaluation not setup). self.evaluation_config: Optional[AlgorithmConfig] = None # Evaluation WorkerSet and metrics last returned by `self.evaluate()`. self.evaluation_workers: Optional[WorkerSet] = None # Initialize common evaluation_metrics to nan, before they become # available. We want to make sure the metrics are always present # (although their values may be nan), so that Tune does not complain # when we use these as stopping criteria. self.evaluation_metrics = { "evaluation": { "episode_reward_max": np.nan, "episode_reward_min": np.nan, "episode_reward_mean": np.nan, } } super().__init__( config=config, logger_creator=logger_creator, **kwargs, ) # Check, whether `training_iteration` is still a tune.Trainable property # and has not been overridden by the user in the attempt to implement the # algos logic (this should be done now inside `training_step`). try: assert isinstance(self.training_iteration, int) except AssertionError: raise AssertionError( "Your Algorithm's `training_iteration` seems to be overridden by your " "custom training logic! To solve this problem, simply rename your " "`self.training_iteration()` method into `self.training_step`." ) @OverrideToImplementCustomLogic @classmethod def get_default_config(cls) -> AlgorithmConfig: return AlgorithmConfig() @OverrideToImplementCustomLogic def _remote_worker_ids_for_metrics(self) -> List[int]: """Returns a list of remote worker IDs to fetch metrics from. Specific Algorithm implementations can override this method to use a subset of the workers for metrics collection. Returns: List of remote worker IDs to fetch metrics from. """ return self.workers.healthy_worker_ids() @OverrideToImplementCustomLogic_CallToSuperRecommended @override(Trainable) def setup(self, config: AlgorithmConfig) -> None: # Setup our config: Merge the user-supplied config dict (which could # be a partial config dict) with the class' default. if not isinstance(config, AlgorithmConfig): assert isinstance(config, PartialAlgorithmConfigDict) config_obj = self.get_default_config() if not isinstance(config_obj, AlgorithmConfig): assert isinstance(config, PartialAlgorithmConfigDict) config_obj = AlgorithmConfig().from_dict(config_obj) config_obj.update_from_dict(config) config_obj.env = self._env_id self.config = config_obj # Set Algorithm's seed after we have - if necessary - enabled # tf eager-execution. update_global_seed_if_necessary(self.config.framework_str, self.config.seed) self._record_usage(self.config) self.callbacks = self.config["callbacks"]() log_level = self.config.get("log_level") if log_level in ["WARN", "ERROR"]: logger.info( "Current log_level is {}. For more information, " "set 'log_level': 'INFO' / 'DEBUG' or use the -v and " "-vv flags.".format(log_level) ) if self.config.get("log_level"): logging.getLogger("ray.rllib").setLevel(self.config["log_level"]) # Create local replay buffer if necessary. self.local_replay_buffer = self._create_local_replay_buffer_if_necessary( self.config ) # Create a dict, mapping ActorHandles to sets of open remote # requests (object refs). 
This way, we keep track, of which actors # inside this Algorithm (e.g. a remote RolloutWorker) have # already been sent how many (e.g. `sample()`) requests. self.remote_requests_in_flight: DefaultDict[ ActorHandle, Set[ray.ObjectRef] ] = defaultdict(set) self.workers: Optional[WorkerSet] = None self.train_exec_impl = None # Offline RL settings. input_evaluation = self.config.get("input_evaluation") if input_evaluation is not None and input_evaluation is not DEPRECATED_VALUE: ope_dict = {str(ope): {"type": ope} for ope in input_evaluation} deprecation_warning( old="config.input_evaluation={}".format(input_evaluation), new="config.evaluation(evaluation_config={" f"'off_policy_estimation_methods'={ope_dict}" "})", error=True, help="Running OPE during training is not recommended.", ) self.config["off_policy_estimation_methods"] = ope_dict # Deprecated way of implementing Trainer sub-classes (or "templates" # via the `build_trainer` utility function). # Instead, sub-classes should override the Trainable's `setup()` # method and call super().setup() from within that override at some # point. # Old design: Override `Trainer._init`. _init = False try: self._init(self.config, self.env_creator) _init = True # New design: Override `Trainable.setup()` (as indented by tune.Trainable) # and do or don't call `super().setup()` from within your override. # By default, `super().setup()` will create both worker sets: # "rollout workers" for collecting samples for training and - if # applicable - "evaluation workers" for evaluation runs in between or # parallel to training. # TODO: Deprecate `_init()` and remove this try/except block. except NotImplementedError: pass # Only if user did not override `_init()`: if _init is False: # - Create rollout workers here automatically. # - Run the execution plan to create the local iterator to `next()` # in each training iteration. # This matches the behavior of using `build_trainer()`, which # has been deprecated. self.workers = WorkerSet( env_creator=self.env_creator, validate_env=self.validate_env, default_policy_class=self.get_default_policy_class(self.config), config=self.config, num_workers=self.config["num_workers"], local_worker=True, logdir=self.logdir, ) # TODO (avnishn): Remove the execution plan API by q1 2023 # Function defining one single training iteration's behavior. if self.config["_disable_execution_plan_api"]: # Ensure remote workers are initially in sync with the local worker. self.workers.sync_weights() # LocalIterator-creating "execution plan". # Only call this once here to create `self.train_exec_impl`, # which is a ray.util.iter.LocalIterator that will be `next`'d # on each training iteration. else: self.train_exec_impl = self.execution_plan( self.workers, self.config, **self._kwargs_for_execution_plan() ) # Now that workers have been created, update our policies # dict in config[multiagent] (with the correct original/ # unpreprocessed spaces). self.config["multiagent"][ "policies" ] = self.workers.local_worker().policy_dict # Compile, validate, and freeze an evaluation config. self.evaluation_config = self.config.get_evaluation_config_object() self.evaluation_config.validate() self.evaluation_config.freeze() # Evaluation WorkerSet setup. # User would like to setup a separate evaluation worker set. 
# Note: We skip workerset creation if we need to do offline evaluation if self._should_create_evaluation_rollout_workers(self.evaluation_config): _, env_creator = self._get_env_id_and_creator( self.evaluation_config.env, self.evaluation_config ) # Create a separate evaluation worker set for evaluation. # If evaluation_num_workers=0, use the evaluation set's local # worker for evaluation, otherwise, use its remote workers # (parallelized evaluation). self.evaluation_workers: WorkerSet = WorkerSet( env_creator=env_creator, validate_env=None, default_policy_class=self.get_default_policy_class(self.config), config=self.evaluation_config, num_workers=self.config["evaluation_num_workers"], # Don't even create a local worker if num_workers > 0. local_worker=False, logdir=self.logdir, ) if self.config["enable_async_evaluation"]: self._evaluation_weights_seq_number = 0 self.evaluation_dataset = None if ( self.evaluation_config.off_policy_estimation_methods and not self.evaluation_config.ope_split_batch_by_episode ): # the num worker is set to 0 to avoid creating shards. The dataset will not # be repartioned to num_workers blocks. logger.info("Creating evaluation dataset ...") ds, _ = get_dataset_and_shards(self.evaluation_config, num_workers=0) # Dataset should be in form of one episode per row. in case of bandits each # row is just one time step. To make the computation more efficient later # we remove the time dimension here. parallelism = self.evaluation_config.evaluation_num_workers or 1 batch_size = max(ds.count() // parallelism, 1) self.evaluation_dataset = ds.map_batches( remove_time_dim, batch_size=batch_size ) logger.info("Evaluation dataset created") self.reward_estimators: Dict[str, OffPolicyEstimator] = {} ope_types = { "is": ImportanceSampling, "wis": WeightedImportanceSampling, "dm": DirectMethod, "dr": DoublyRobust, } for name, method_config in self.config["off_policy_estimation_methods"].items(): method_type = method_config.pop("type") if method_type in ope_types: deprecation_warning( old=method_type, new=str(ope_types[method_type]), error=True, ) method_type = ope_types[method_type] elif isinstance(method_type, str): logger.log(0, "Trying to import from string: " + method_type) mod, obj = method_type.rsplit(".", 1) mod = importlib.import_module(mod) method_type = getattr(mod, obj) if isinstance(method_type, type) and issubclass( method_type, OfflineEvaluator ): # TODO(kourosh) : Add an integration test for all these # offline evaluators. policy = self.get_policy() if issubclass(method_type, OffPolicyEstimator): method_config["gamma"] = self.config["gamma"] self.reward_estimators[name] = method_type(policy, **method_config) else: raise ValueError( f"Unknown off_policy_estimation type: {method_type}! Must be " "either a class path or a sub-class of ray.rllib." "offline.offline_evaluator::OfflineEvaluator" ) # Run `on_algorithm_init` callback after initialization is done. self.callbacks.on_algorithm_init(algorithm=self) # TODO: Deprecated: In your sub-classes of Trainer, override `setup()` # directly and call super().setup() from within it if you would like the # default setup behavior plus some own setup logic. # If you don't need the env/workers/config/etc.. setup for you by super, # simply do not call super().setup() from your overridden method. 
def _init(self, config: AlgorithmConfigDict, env_creator: EnvCreator) -> None: raise NotImplementedError @OverrideToImplementCustomLogic @classmethod def get_default_policy_class( cls, config: AlgorithmConfig, ) -> Optional[Type[Policy]]: """Returns a default Policy class to use, given a config. This class will be used by an Algorithm in case the policy class is not provided by the user in any single- or multi-agent PolicySpec. """ return None @override(Trainable) def step(self) -> ResultDict: """Implements the main `Trainer.train()` logic. Takes n attempts to perform a single training step. Thereby catches RayErrors resulting from worker failures. After n attempts, fails gracefully. Override this method in your Trainer sub-classes if you would like to handle worker failures yourself. Otherwise, override only `training_step()` to implement the core algorithm logic. Returns: The results dict with stats/infos on sampling, training, and - if required - evaluation. """ # Do we have to run `self.evaluate()` this iteration? # `self.iteration` gets incremented after this function returns, # meaning that e. g. the first time this function is called, # self.iteration will be 0. evaluate_this_iter = ( self.config.evaluation_interval is not None and (self.iteration + 1) % self.config.evaluation_interval == 0 ) # Results dict for training (and if appolicable: evaluation). results: ResultDict = {} # Parallel eval + training: Kick off evaluation-loop and parallel train() call. if evaluate_this_iter and self.config["evaluation_parallel_to_training"]: ( results, train_iter_ctx, ) = self._run_one_training_iteration_and_evaluation_in_parallel() # - No evaluation necessary, just run the next training iteration. # - We have to evaluate in this training iteration, but no parallelism -> # evaluate after the training iteration is entirely done. else: results, train_iter_ctx = self._run_one_training_iteration() # Sequential: Train (already done above), then evaluate. if evaluate_this_iter and not self.config["evaluation_parallel_to_training"]: results.update(self._run_one_evaluation(train_future=None)) # Attach latest available evaluation results to train results, # if necessary. if not evaluate_this_iter and self.config["always_attach_evaluation_results"]: assert isinstance( self.evaluation_metrics, dict ), "Trainer.evaluate() needs to return a dict." results.update(self.evaluation_metrics) if hasattr(self, "workers") and isinstance(self.workers, WorkerSet): # Sync filters on workers. self._sync_filters_if_needed( from_worker=self.workers.local_worker(), workers=self.workers, timeout_seconds=self.config[ "sync_filters_on_rollout_workers_timeout_s" ], ) # TODO (avnishn): Remove the execution plan API by q1 2023 # Collect worker metrics and add combine them with `results`. if self.config["_disable_execution_plan_api"]: episodes_this_iter = collect_episodes( self.workers, self._remote_worker_ids_for_metrics(), timeout_seconds=self.config["metrics_episode_collection_timeout_s"], ) results = self._compile_iteration_results( episodes_this_iter=episodes_this_iter, step_ctx=train_iter_ctx, iteration_results=results, ) # Check `env_task_fn` for possible update of the env's task. if self.config["env_task_fn"] is not None: if not callable(self.config["env_task_fn"]): raise ValueError( "`env_task_fn` must be None or a callable taking " "[train_results, env, env_ctx] as args!" 
) def fn(env, env_context, task_fn): new_task = task_fn(results, env, env_context) cur_task = env.get_task() if cur_task != new_task: env.set_task(new_task) fn = functools.partial(fn, task_fn=self.config["env_task_fn"]) self.workers.foreach_env_with_context(fn) return results @PublicAPI def evaluate( self, duration_fn: Optional[Callable[[int], int]] = None, ) -> dict: """Evaluates current policy under `evaluation_config` settings. Note that this default implementation does not do anything beyond merging evaluation_config with the normal trainer config. Args: duration_fn: An optional callable taking the already run num episodes as only arg and returning the number of episodes left to run. It's used to find out whether evaluation should continue. """ # Call the `_before_evaluate` hook. self._before_evaluate() if self.evaluation_dataset is not None: return {"evaluation": self._run_offline_evaluation()} # Sync weights to the evaluation WorkerSet. if self.evaluation_workers is not None: self.evaluation_workers.sync_weights( from_worker=self.workers.local_worker() ) self._sync_filters_if_needed( from_worker=self.workers.local_worker(), workers=self.evaluation_workers, timeout_seconds=self.config[ "sync_filters_on_rollout_workers_timeout_s" ], ) self.callbacks.on_evaluate_start(algorithm=self) if self.config["custom_eval_function"]: logger.info( "Running custom eval function {}".format( self.config["custom_eval_function"] ) ) metrics = self.config["custom_eval_function"](self, self.evaluation_workers) if not metrics or not isinstance(metrics, dict): raise ValueError( "Custom eval function must return " "dict of metrics, got {}.".format(metrics) ) else: if ( self.evaluation_workers is None and self.workers.local_worker().input_reader is None ): raise ValueError( "Cannot evaluate w/o an evaluation worker set in " "the Trainer or w/o an env on the local worker!\n" "Try one of the following:\n1) Set " "`evaluation_interval` >= 0 to force creating a " "separate evaluation worker set.\n2) Set " "`create_env_on_driver=True` to force the local " "(non-eval) worker to have an environment to " "evaluate on." ) # How many episodes/timesteps do we need to run? # In "auto" mode (only for parallel eval + training): Run as long # as training lasts. unit = self.config["evaluation_duration_unit"] eval_cfg = self.evaluation_config rollout = eval_cfg["rollout_fragment_length"] num_envs = eval_cfg["num_envs_per_worker"] auto = self.config["evaluation_duration"] == "auto" duration = ( self.config["evaluation_duration"] if not auto else (self.config["evaluation_num_workers"] or 1) * (1 if unit == "episodes" else rollout) ) agent_steps_this_iter = 0 env_steps_this_iter = 0 # Default done-function returns True, whenever num episodes # have been completed. if duration_fn is None: def duration_fn(num_units_done): return duration - num_units_done logger.info(f"Evaluating current policy for {duration} {unit}.") metrics = None all_batches = [] # No evaluation worker set -> # Do evaluation using the local worker. Expect error due to the # local worker not having an env. if self.evaluation_workers is None: # If unit=episodes -> Run n times `sample()` (each sample # produces exactly 1 episode). # If unit=ts -> Run 1 `sample()` b/c the # `rollout_fragment_length` is exactly the desired ts. 
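            # Worked example of the duration/iters logic below (values are
            # hypothetical): with unit="episodes" and duration=10, `sample()`
            # is called 10 times (one episode per call); with unit="timesteps",
            # a single `sample()` call already returns roughly
            # `rollout_fragment_length` timesteps, so iters == 1.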
iters = duration if unit == "episodes" else 1 for _ in range(iters): batch = self.workers.local_worker().sample() agent_steps_this_iter += batch.agent_steps() env_steps_this_iter += batch.env_steps() if self.reward_estimators: all_batches.append(batch) metrics = collect_metrics( self.workers, keep_custom_metrics=eval_cfg["keep_per_episode_custom_metrics"], timeout_seconds=eval_cfg["metrics_episode_collection_timeout_s"], ) # Evaluation worker set only has local worker. elif self.evaluation_workers.num_remote_workers() == 0: # If unit=episodes -> Run n times `sample()` (each sample # produces exactly 1 episode). # If unit=ts -> Run 1 `sample()` b/c the # `rollout_fragment_length` is exactly the desired ts. iters = duration if unit == "episodes" else 1 for _ in range(iters): batch = self.evaluation_workers.local_worker().sample() agent_steps_this_iter += batch.agent_steps() env_steps_this_iter += batch.env_steps() if self.reward_estimators: all_batches.append(batch) # Evaluation worker set has n remote workers. elif self.evaluation_workers.num_healthy_remote_workers() > 0: # How many episodes have we run (across all eval workers)? num_units_done = 0 _round = 0 # In case all of the remote evaluation workers die during a round # of evaluation, we need to stop. while True and self.evaluation_workers.num_healthy_remote_workers() > 0: units_left_to_do = duration_fn(num_units_done) if units_left_to_do <= 0: break _round += 1 unit_per_remote_worker = ( 1 if unit == "episodes" else rollout * num_envs ) # Select proper number of evaluation workers for this round. selected_eval_worker_ids = [ worker_id for i, worker_id in enumerate( self.evaluation_workers.healthy_worker_ids() ) if i * unit_per_remote_worker < units_left_to_do ] batches = self.evaluation_workers.foreach_worker( func=lambda w: w.sample(), local_worker=False, remote_worker_ids=selected_eval_worker_ids, timeout_seconds=self.config["evaluation_sample_timeout_s"], ) if len(batches) != len(selected_eval_worker_ids): logger.warning( "Calling `sample()` on your remote evaluation worker(s) " "resulted in a timeout (after the configured " f"{self.config['evaluation_sample_timeout_s']} seconds)! " "Try to set `evaluation_sample_timeout_s` in your config" " to a larger value." + ( " If your episodes don't terminate easily, you may " "also want to set `evaluation_duration_unit` to " "'timesteps' (instead of 'episodes')." if unit == "episodes" else "" ) ) break _agent_steps = sum(b.agent_steps() for b in batches) _env_steps = sum(b.env_steps() for b in batches) # 1 episode per returned batch. if unit == "episodes": num_units_done += len(batches) # Make sure all batches are exactly one episode. for ma_batch in batches: ma_batch = ma_batch.as_multi_agent() for batch in ma_batch.policy_batches.values(): assert np.sum(batch[SampleBatch.DONES]) # n timesteps per returned batch. else: num_units_done += ( _agent_steps if self.config.count_steps_by == "agent_steps" else _env_steps ) if self.reward_estimators: # TODO: (kourosh) This approach will cause an OOM issue when # the dataset gets huge (should be ok for now). all_batches.extend(batches) agent_steps_this_iter += _agent_steps env_steps_this_iter += _env_steps logger.info( f"Ran round {_round} of parallel evaluation " f"({num_units_done}/{duration if not auto else '?'} " f"{unit} done)" ) else: # Can't find a good way to run this evaluation. # Wait for next iteration. 
pass if metrics is None: metrics = collect_metrics( self.evaluation_workers, keep_custom_metrics=self.config["keep_per_episode_custom_metrics"], timeout_seconds=eval_cfg["metrics_episode_collection_timeout_s"], ) metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter # TODO: Remove this key at some point. Here for backward compatibility. metrics["timesteps_this_iter"] = env_steps_this_iter # Compute off-policy estimates estimates = defaultdict(list) # for each batch run the estimator's fwd pass for name, estimator in self.reward_estimators.items(): for batch in all_batches: estimate_result = estimator.estimate( batch, split_batch_by_episode=self.config[ "ope_split_batch_by_episode" ], ) estimates[name].append(estimate_result) # collate estimates from all batches if estimates: metrics["off_policy_estimator"] = {} for name, estimate_list in estimates.items(): avg_estimate = tree.map_structure( lambda *x: np.mean(x, axis=0), *estimate_list ) metrics["off_policy_estimator"][name] = avg_estimate # Evaluation does not run for every step. # Save evaluation metrics on trainer, so it can be attached to # subsequent step results as latest evaluation result. self.evaluation_metrics = {"evaluation": metrics} # Trigger `on_evaluate_end` callback. self.callbacks.on_evaluate_end( algorithm=self, evaluation_metrics=self.evaluation_metrics ) # Also return the results here for convenience. return self.evaluation_metrics @ExperimentalAPI def _evaluate_async( self, duration_fn: Optional[Callable[[int], int]] = None, ) -> dict: """Evaluates current policy under `evaluation_config` settings. Uses the AsyncParallelRequests manager to send frequent `sample.remote()` requests to the evaluation RolloutWorkers and collect the results of these calls. Handles worker failures (or slowdowns) gracefully due to the asynch'ness and the fact that other eval RolloutWorkers can thus cover the workload. Important Note: This will replace the current `self.evaluate()` method as the default in the future. Args: duration_fn: An optional callable taking the already run num episodes as only arg and returning the number of episodes left to run. It's used to find out whether evaluation should continue. """ # How many episodes/timesteps do we need to run? # In "auto" mode (only for parallel eval + training): Run as long # as training lasts. unit = self.config["evaluation_duration_unit"] eval_cfg = self.evaluation_config rollout = eval_cfg["rollout_fragment_length"] num_envs = eval_cfg["num_envs_per_worker"] auto = self.config["evaluation_duration"] == "auto" duration = ( self.config["evaluation_duration"] if not auto else (self.config["evaluation_num_workers"] or 1) * (1 if unit == "episodes" else rollout) ) # Call the `_before_evaluate` hook. self._before_evaluate() # TODO(Jun): Implement solution via connectors. self._sync_filters_if_needed( from_worker=self.workers.local_worker(), workers=self.evaluation_workers, timeout_seconds=eval_cfg.get("sync_filters_on_rollout_workers_timeout_s"), ) if self.config["custom_eval_function"]: raise ValueError( "`custom_eval_function` not supported in combination " "with `enable_async_evaluation=True` config setting!" 
) if self.evaluation_workers is None and ( self.workers.local_worker().input_reader is None or self.config["evaluation_num_workers"] == 0 ): raise ValueError( "Evaluation w/o eval workers (calling Algorithm.evaluate() w/o " "evaluation specifically set up) OR evaluation without input reader " "OR evaluation with only a local evaluation worker " "(`evaluation_num_workers=0`) not supported in combination " "with `enable_async_evaluation=True` config setting!" ) agent_steps_this_iter = 0 env_steps_this_iter = 0 logger.info(f"Evaluating current policy for {duration} {unit}.") all_batches = [] # Default done-function returns True, whenever num episodes # have been completed. if duration_fn is None: def duration_fn(num_units_done): return duration - num_units_done # Put weights only once into object store and use same object # ref to synch to all workers. self._evaluation_weights_seq_number += 1 weights_ref = ray.put(self.workers.local_worker().get_weights()) weights_seq_no = self._evaluation_weights_seq_number def remote_fn(worker): # Pass in seq-no so that eval workers may ignore this call if no update has # happened since the last call to `remote_fn` (sample). worker.set_weights( weights=ray.get(weights_ref), weights_seq_no=weights_seq_no ) batch = worker.sample() metrics = worker.get_metrics() return batch, metrics, weights_seq_no rollout_metrics = [] # How many episodes have we run (across all eval workers)? num_units_done = 0 _round = 0 while self.evaluation_workers.num_healthy_remote_workers() > 0: units_left_to_do = duration_fn(num_units_done) if units_left_to_do <= 0: break _round += 1 # Get ready evaluation results and metrics asynchronously. self.evaluation_workers.foreach_worker_async( func=remote_fn, healthy_only=True, ) eval_results = self.evaluation_workers.fetch_ready_async_reqs() batches = [] i = 0 for _, result in eval_results: batch, metrics, seq_no = result # Ignore results, if the weights seq-number does not match (is # from a previous evaluation step) OR if we have already reached # the configured duration (e.g. number of episodes to evaluate # for). if seq_no == self._evaluation_weights_seq_number and ( i * (1 if unit == "episodes" else rollout * num_envs) < units_left_to_do ): batches.append(batch) rollout_metrics.extend(metrics) i += 1 _agent_steps = sum(b.agent_steps() for b in batches) _env_steps = sum(b.env_steps() for b in batches) # 1 episode per returned batch. if unit == "episodes": num_units_done += len(batches) # Make sure all batches are exactly one episode. for ma_batch in batches: ma_batch = ma_batch.as_multi_agent() for batch in ma_batch.policy_batches.values(): assert np.sum(batch[SampleBatch.DONES]) # n timesteps per returned batch. else: num_units_done += ( _agent_steps if self.config.count_steps_by == "agent_steps" else _env_steps ) if self.reward_estimators: all_batches.extend(batches) agent_steps_this_iter += _agent_steps env_steps_this_iter += _env_steps logger.info( f"Ran round {_round} of parallel evaluation " f"({num_units_done}/{duration if not auto else '?'} " f"{unit} done)" ) metrics = summarize_episodes( rollout_metrics, keep_custom_metrics=eval_cfg["keep_per_episode_custom_metrics"], ) metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter # TODO: Remove this key at some point. Here for backward compatibility. 
metrics["timesteps_this_iter"] = env_steps_this_iter if self.reward_estimators: # Compute off-policy estimates metrics["off_policy_estimator"] = {} total_batch = concat_samples(all_batches) for name, estimator in self.reward_estimators.items(): estimates = estimator.estimate(total_batch) metrics["off_policy_estimator"][name] = estimates # Evaluation does not run for every step. # Save evaluation metrics on trainer, so it can be attached to # subsequent step results as latest evaluation result. self.evaluation_metrics = {"evaluation": metrics} # Trigger `on_evaluate_end` callback. self.callbacks.on_evaluate_end( algorithm=self, evaluation_metrics=self.evaluation_metrics ) # Return evaluation results. return self.evaluation_metrics @OverrideToImplementCustomLogic @DeveloperAPI def restore_workers(self, workers: WorkerSet): """Try to restore failed workers if necessary. Algorithms that use custom RolloutWorkers may override this method to disable default, and create custom restoration logics. Args: workers: The WorkerSet to restore. This may be Rollout or Evaluation workers. """ if not workers or ( not workers.local_worker() and not self.workers.local_worker() ): # If workers does not exist, or # 1. this WorkerSet does not have a local worker, and # 2. self.workers (rollout worker set) does not have a local worker, # we don't have a local worker to get state from. # We can't recover remote worker in this case. return # This is really cheap, since probe_unhealthy_workers() is a no-op # if there are no unhealthy workers. restored = workers.probe_unhealthy_workers() if restored: from_worker = workers.local_worker() or self.workers.local_worker() state = ray.put(from_worker.get_state()) # By default, entire local worker state is synced after restoration # to bring these workers up to date. workers.foreach_worker( func=lambda w: w.set_state(ray.get(state)), local_worker=False, ) @OverrideToImplementCustomLogic @DeveloperAPI def training_step(self) -> ResultDict: """Default single iteration logic of an algorithm. - Collect on-policy samples (SampleBatches) in parallel using the Trainer's RolloutWorkers (@ray.remote). - Concatenate collected SampleBatches into one train batch. - Note that we may have more than one policy in the multi-agent case: Call the different policies' `learn_on_batch` (simple optimizer) OR `load_batch_into_buffer` + `learn_on_loaded_batch` (multi-GPU optimizer) methods to calculate loss and update the model(s). - Return all collected metrics for the iteration. Returns: The results dict from executing the training iteration. """ # Collect SampleBatches from sample workers until we have a full batch. if self.config.count_steps_by == "agent_steps": train_batch = synchronous_parallel_sample( worker_set=self.workers, max_agent_steps=self.config["train_batch_size"] ) else: train_batch = synchronous_parallel_sample( worker_set=self.workers, max_env_steps=self.config["train_batch_size"] ) train_batch = train_batch.as_multi_agent() self._counters[NUM_AGENT_STEPS_SAMPLED] += train_batch.agent_steps() self._counters[NUM_ENV_STEPS_SAMPLED] += train_batch.env_steps() # Only train if train_batch is not empty. # In an extreme situation, all rollout workers die during the # synchronous_parallel_sample() call above. # In which case, we should skip training, wait a little bit, then probe again. train_results = {} if train_batch.agent_steps() > 0: # Use simple optimizer (only for multi-agent or tf-eager; all other # cases should use the multi-GPU optimizer, even if only using 1 GPU). 
# TODO: (sven) rename MultiGPUOptimizer into something more # meaningful. if self.config.get("simple_optimizer") is True: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) else: # Wait 1 sec before probing again via weight syncing. time.sleep(1) # Update weights and global_vars - after learning on the local worker - on all # remote workers (only those policies that were actually trained). global_vars = { "timestep": self._counters[NUM_ENV_STEPS_SAMPLED], } with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.workers.sync_weights( policies=list(train_results.keys()), global_vars=global_vars, ) return train_results @staticmethod def execution_plan(workers, config, **kwargs): raise NotImplementedError( "It is not longer recommended to use Trainer's `execution_plan` method/API." " Set `_disable_execution_plan_api=True` in your config and override the " "`Trainer.training_step()` method with your algo's custom " "execution logic." ) @PublicAPI def compute_single_action( self, observation: Optional[TensorStructType] = None, state: Optional[List[TensorStructType]] = None, *, prev_action: Optional[TensorStructType] = None, prev_reward: Optional[float] = None, info: Optional[EnvInfoDict] = None, input_dict: Optional[SampleBatch] = None, policy_id: PolicyID = DEFAULT_POLICY_ID, full_fetch: bool = False, explore: Optional[bool] = None, timestep: Optional[int] = None, episode: Optional[Episode] = None, unsquash_action: Optional[bool] = None, clip_action: Optional[bool] = None, # Deprecated args. unsquash_actions=DEPRECATED_VALUE, clip_actions=DEPRECATED_VALUE, # Kwargs placeholder for future compatibility. **kwargs, ) -> Union[ TensorStructType, Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]], ]: """Computes an action for the specified policy on the local worker. Note that you can also access the policy object through self.get_policy(policy_id) and call compute_single_action() on it directly. Args: observation: Single (unbatched) observation from the environment. state: List of all RNN hidden (single, unbatched) state tensors. prev_action: Single (unbatched) previous action value. prev_reward: Single (unbatched) previous reward value. info: Env info dict, if any. input_dict: An optional SampleBatch that holds all the values for: obs, state, prev_action, and prev_reward, plus maybe custom defined views of the current env trajectory. Note that only one of `obs` or `input_dict` must be non-None. policy_id: Policy to query (only applies to multi-agent). Default: "default_policy". full_fetch: Whether to return extra action fetch results. This is always set to True if `state` is specified. explore: Whether to apply exploration to the action. Default: None -> use self.config["explore"]. timestep: The current (sampling) time step. episode: This provides access to all of the internal episodes' state, which may be useful for model-based or multi-agent algorithms. unsquash_action: Should actions be unsquashed according to the env's/Policy's action space? If None, use the value of self.config["normalize_actions"]. clip_action: Should actions be clipped according to the env's/Policy's action space? If None, use the value of self.config["clip_actions"]. Keyword Args: kwargs: forward compatibility placeholder Returns: The computed action if full_fetch=False, or a tuple of a) the full output of policy.compute_actions() if full_fetch=True or we have an RNN-based Policy. 
Raises: KeyError: If the `policy_id` cannot be found in this Trainer's local worker. """ if clip_actions != DEPRECATED_VALUE: deprecation_warning( old="Trainer.compute_single_action(`clip_actions`=...)", new="Trainer.compute_single_action(`clip_action`=...)", error=True, ) clip_action = clip_actions if unsquash_actions != DEPRECATED_VALUE: deprecation_warning( old="Trainer.compute_single_action(`unsquash_actions`=...)", new="Trainer.compute_single_action(`unsquash_action`=...)", error=True, ) unsquash_action = unsquash_actions # `unsquash_action` is None: Use value of config['normalize_actions']. if unsquash_action is None: unsquash_action = self.config["normalize_actions"] # `clip_action` is None: Use value of config['clip_actions']. elif clip_action is None: clip_action = self.config["clip_actions"] # User provided an input-dict: Assert that `obs`, `prev_a|r`, `state` # are all None. err_msg = ( "Provide either `input_dict` OR [`observation`, ...] as " "args to Trainer.compute_single_action!" ) if input_dict is not None: assert ( observation is None and prev_action is None and prev_reward is None and state is None ), err_msg observation = input_dict[SampleBatch.OBS] else: assert observation is not None, err_msg # Get the policy to compute the action for (in the multi-agent case, # Trainer may hold >1 policies). policy = self.get_policy(policy_id) if policy is None: raise KeyError( f"PolicyID '{policy_id}' not found in PolicyMap of the " f"Trainer's local worker!" ) local_worker = self.workers.local_worker() # Check the preprocessor and preprocess, if necessary. pp = local_worker.preprocessors[policy_id] if pp and type(pp).__name__ != "NoPreprocessor": observation = pp.transform(observation) observation = local_worker.filters[policy_id](observation, update=False) # Input-dict. if input_dict is not None: input_dict[SampleBatch.OBS] = observation action, state, extra = policy.compute_single_action( input_dict=input_dict, explore=explore, timestep=timestep, episode=episode, ) # Individual args. else: action, state, extra = policy.compute_single_action( obs=observation, state=state, prev_action=prev_action, prev_reward=prev_reward, info=info, explore=explore, timestep=timestep, episode=episode, ) # If we work in normalized action space (normalize_actions=True), # we re-translate here into the env's action space. if unsquash_action: action = space_utils.unsquash_action(action, policy.action_space_struct) # Clip, according to env's action space. elif clip_action: action = space_utils.clip_action(action, policy.action_space_struct) # Return 3-Tuple: Action, states, and extra-action fetches. if state or full_fetch: return action, state, extra # Ensure backward compatibility. else: return action @PublicAPI def compute_actions( self, observations: TensorStructType, state: Optional[List[TensorStructType]] = None, *, prev_action: Optional[TensorStructType] = None, prev_reward: Optional[TensorStructType] = None, info: Optional[EnvInfoDict] = None, policy_id: PolicyID = DEFAULT_POLICY_ID, full_fetch: bool = False, explore: Optional[bool] = None, timestep: Optional[int] = None, episodes: Optional[List[Episode]] = None, unsquash_actions: Optional[bool] = None, clip_actions: Optional[bool] = None, # Deprecated. normalize_actions=None, **kwargs, ): """Computes an action for the specified policy on the local Worker. Note that you can also access the policy object through self.get_policy(policy_id) and call compute_actions() on it directly. Args: observation: Observation from the environment. 
state: RNN hidden state, if any. If state is not None, then all of compute_single_action(...) is returned (computed action, rnn state(s), logits dictionary). Otherwise compute_single_action(...)[0] is returned (computed action). prev_action: Previous action value, if any. prev_reward: Previous reward, if any. info: Env info dict, if any. policy_id: Policy to query (only applies to multi-agent). full_fetch: Whether to return extra action fetch results. This is always set to True if RNN state is specified. explore: Whether to pick an exploitation or exploration action (default: None -> use self.config["explore"]). timestep: The current (sampling) time step. episodes: This provides access to all of the internal episodes' state, which may be useful for model-based or multi-agent algorithms. unsquash_actions: Should actions be unsquashed according to the env's/Policy's action space? If None, use self.config["normalize_actions"]. clip_actions: Should actions be clipped according to the env's/Policy's action space? If None, use self.config["clip_actions"]. Keyword Args: kwargs: forward compatibility placeholder Returns: The computed action if full_fetch=False, or a tuple consisting of the full output of policy.compute_actions_from_input_dict() if full_fetch=True or we have an RNN-based Policy. """ if normalize_actions is not None: deprecation_warning( old="Trainer.compute_actions(`normalize_actions`=...)", new="Trainer.compute_actions(`unsquash_actions`=...)", error=True, ) unsquash_actions = normalize_actions # `unsquash_actions` is None: Use value of config['normalize_actions']. if unsquash_actions is None: unsquash_actions = self.config["normalize_actions"] # `clip_actions` is None: Use value of config['clip_actions']. elif clip_actions is None: clip_actions = self.config["clip_actions"] # Preprocess obs and states. state_defined = state is not None policy = self.get_policy(policy_id) filtered_obs, filtered_state = [], [] for agent_id, ob in observations.items(): worker = self.workers.local_worker() preprocessed = worker.preprocessors[policy_id].transform(ob) filtered = worker.filters[policy_id](preprocessed, update=False) filtered_obs.append(filtered) if state is None: continue elif agent_id in state: filtered_state.append(state[agent_id]) else: filtered_state.append(policy.get_initial_state()) # Batch obs and states obs_batch = np.stack(filtered_obs) if state is None: state = [] else: state = list(zip(*filtered_state)) state = [np.stack(s) for s in state] input_dict = {SampleBatch.OBS: obs_batch} # prev_action and prev_reward can be None, np.ndarray, or tensor-like structure. # Explicitly check for None here to avoid the error message "The truth value of # an array with more than one element is ambiguous.", when np arrays are passed # as arguments. if prev_action is not None: input_dict[SampleBatch.PREV_ACTIONS] = prev_action if prev_reward is not None: input_dict[SampleBatch.PREV_REWARDS] = prev_reward if info: input_dict[SampleBatch.INFOS] = info for i, s in enumerate(state): input_dict[f"state_in_{i}"] = s # Batch compute actions actions, states, infos = policy.compute_actions_from_input_dict( input_dict=input_dict, explore=explore, timestep=timestep, episodes=episodes, ) # Unbatch actions for the environment into a multi-agent dict. single_actions = space_utils.unbatch(actions) actions = {} for key, a in zip(observations, single_actions): # If we work in normalized action space (normalize_actions=True), # we re-translate here into the env's action space. 
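            # (Hedged note: "unsquashing" maps an action from the normalized
            # [-1.0, 1.0] range back into the env's Box bounds, roughly
            # low + (a + 1) * (high - low) / 2, whereas "clipping" only clips
            # the raw action to the space's bounds.)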
if unsquash_actions: a = space_utils.unsquash_action(a, policy.action_space_struct) # Clip, according to env's action space. elif clip_actions: a = space_utils.clip_action(a, policy.action_space_struct) actions[key] = a # Unbatch states into a multi-agent dict. unbatched_states = {} for idx, agent_id in enumerate(observations): unbatched_states[agent_id] = [s[idx] for s in states] # Return only actions or full tuple if state_defined or full_fetch: return actions, unbatched_states, infos else: return actions @PublicAPI def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy: """Return policy for the specified id, or None. Args: policy_id: ID of the policy to return. """ return self.workers.local_worker().get_policy(policy_id) @PublicAPI def get_weights(self, policies: Optional[List[PolicyID]] = None) -> dict: """Return a dictionary of policy ids to weights. Args: policies: Optional list of policies to return weights for, or None for all policies. """ return self.workers.local_worker().get_weights(policies) @PublicAPI def set_weights(self, weights: Dict[PolicyID, dict]): """Set policy weights by policy id. Args: weights: Map of policy ids to weights to set. """ self.workers.local_worker().set_weights(weights) @PublicAPI def add_policy( self, policy_id: PolicyID, policy_cls: Optional[Type[Policy]] = None, policy: Optional[Policy] = None, *, observation_space: Optional[gym.spaces.Space] = None, action_space: Optional[gym.spaces.Space] = None, config: Optional[Union[AlgorithmConfig, PartialAlgorithmConfigDict]] = None, policy_state: Optional[PolicyState] = None, policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None, policies_to_train: Optional[ Union[ Container[PolicyID], Callable[[PolicyID, Optional[SampleBatchType]], bool], ] ] = None, evaluation_workers: bool = True, # Deprecated. workers: Optional[List[Union[RolloutWorker, ActorHandle]]] = DEPRECATED_VALUE, ) -> Optional[Policy]: """Adds a new policy to this Algorithm. Args: policy_id: ID of the policy to add. IMPORTANT: Must not contain characters that are also not allowed in Unix/Win filesystems, such as: `<>:"/\|?*` or a dot `.` or space ` ` at the end of the ID. policy_cls: The Policy class to use for constructing the new Policy. Note: Only one of `policy_cls` or `policy` must be provided. policy: The Policy instance to add to this algorithm. If not None, the given Policy object will be directly inserted into the Algorithm's local worker and clones of that Policy will be created on all remote workers as well as all evaluation workers. Note: Only one of `policy_cls` or `policy` must be provided. observation_space: The observation space of the policy to add. If None, try to infer this space from the environment. action_space: The action space of the policy to add. If None, try to infer this space from the environment. config: The config object or overrides for the policy to add. policy_state: Optional state dict to apply to the new policy instance, right after its construction. policy_mapping_fn: An optional (updated) policy mapping function to use from here on. Note that already ongoing episodes will not change their mapping but will use the old mapping till the end of the episode. policies_to_train: An optional list of policy IDs to be trained or a callable taking PolicyID and SampleBatchType and returning a bool (trainable or not?). If None, will keep the existing setup in place. Policies, whose IDs are not in the list (or for which the callable returns False) will not be updated. 
evaluation_workers: Whether to add the new policy also to the evaluation WorkerSet. workers: A list of RolloutWorker/ActorHandles (remote RolloutWorkers) to add this policy to. If defined, will only add the given policy to these workers. Returns: The newly added policy (the copy that got added to the local worker). If `workers` was provided, None is returned. """ validate_policy_id(policy_id, error=True) if workers is not DEPRECATED_VALUE: deprecation_warning( old="workers", help=( "The `workers` argument to `Algorithm.add_policy()` is deprecated " "and no-op now. Please do not use it anymore." ), error=False, ) self.workers.add_policy( policy_id, policy_cls, policy, observation_space=observation_space, action_space=action_space, config=config, policy_state=policy_state, policy_mapping_fn=policy_mapping_fn, policies_to_train=policies_to_train, ) # Add to evaluation workers, if necessary. if evaluation_workers is True and self.evaluation_workers is not None: self.evaluation_workers.add_policy( policy_id, policy_cls, policy, observation_space=observation_space, action_space=action_space, config=config, policy_state=policy_state, policy_mapping_fn=policy_mapping_fn, policies_to_train=policies_to_train, ) # Return newly added policy (from the local rollout worker). return self.get_policy(policy_id) @PublicAPI def remove_policy( self, policy_id: PolicyID = DEFAULT_POLICY_ID, *, policy_mapping_fn: Optional[Callable[[AgentID], PolicyID]] = None, policies_to_train: Optional[ Union[ Container[PolicyID], Callable[[PolicyID, Optional[SampleBatchType]], bool], ] ] = None, evaluation_workers: bool = True, ) -> None: """Removes a new policy from this Algorithm. Args: policy_id: ID of the policy to be removed. policy_mapping_fn: An optional (updated) policy mapping function to use from here on. Note that already ongoing episodes will not change their mapping but will use the old mapping till the end of the episode. policies_to_train: An optional list of policy IDs to be trained or a callable taking PolicyID and SampleBatchType and returning a bool (trainable or not?). If None, will keep the existing setup in place. Policies, whose IDs are not in the list (or for which the callable returns False) will not be updated. evaluation_workers: Whether to also remove the policy from the evaluation WorkerSet. """ def fn(worker): worker.remove_policy( policy_id=policy_id, policy_mapping_fn=policy_mapping_fn, policies_to_train=policies_to_train, ) self.workers.foreach_worker(fn, local_worker=True, healthy_only=True) if evaluation_workers and self.evaluation_workers is not None: self.evaluation_workers.foreach_worker( fn, local_worker=True, healthy_only=True, ) @DeveloperAPI def export_policy_model( self, export_dir: str, policy_id: PolicyID = DEFAULT_POLICY_ID, onnx: Optional[int] = None, ) -> None: """Exports policy model with given policy_id to a local directory. Args: export_dir: Writable local directory. policy_id: Optional policy id to export. onnx: If given, will export model in ONNX format. The value of this parameter set the ONNX OpSet version to use. If None, the output format will be DL framework specific. Example: >>> from ray.rllib.algorithms.ppo import PPO >>> # Use an Algorithm from RLlib or define your own. >>> algo = PPO(...) 
# doctest: +SKIP >>> for _ in range(10): # doctest: +SKIP >>> algo.train() # doctest: +SKIP >>> algo.export_policy_model("/tmp/dir") # doctest: +SKIP >>> algo.export_policy_model("/tmp/dir/onnx", onnx=1) # doctest: +SKIP """ self.get_policy(policy_id).export_model(export_dir, onnx) @DeveloperAPI def export_policy_checkpoint( self, export_dir: str, filename_prefix=DEPRECATED_VALUE, # deprecated arg, do not use anymore policy_id: PolicyID = DEFAULT_POLICY_ID, ) -> None: """Exports Policy checkpoint to a local directory and returns an AIR Checkpoint. Args: export_dir: Writable local directory to store the AIR Checkpoint information into. policy_id: Optional policy ID to export. If not provided, will export "default_policy". If `policy_id` does not exist in this Algorithm, will raise a KeyError. Raises: KeyError if `policy_id` cannot be found in this Algorithm. Example: >>> from ray.rllib.algorithms.ppo import PPO >>> # Use an Algorithm from RLlib or define your own. >>> algo = PPO(...) # doctest: +SKIP >>> for _ in range(10): # doctest: +SKIP >>> algo.train() # doctest: +SKIP >>> algo.export_policy_checkpoint("/tmp/export_dir") # doctest: +SKIP """ # `filename_prefix` should not longer be used as new Policy checkpoints # contain more than one file with a fixed filename structure. if filename_prefix != DEPRECATED_VALUE: deprecation_warning( old="Algorithm.export_policy_checkpoint(filename_prefix=...)", error=True, ) policy = self.get_policy(policy_id) if policy is None: raise KeyError(f"Policy with ID {policy_id} not found in Algorithm!") policy.export_checkpoint(export_dir) @DeveloperAPI def import_policy_model_from_h5( self, import_file: str, policy_id: PolicyID = DEFAULT_POLICY_ID, ) -> None: """Imports a policy's model with given policy_id from a local h5 file. Args: import_file: The h5 file to import from. policy_id: Optional policy id to import into. Example: >>> from ray.rllib.algorithms.ppo import PPO >>> algo = PPO(...) # doctest: +SKIP >>> algo.import_policy_model_from_h5("/tmp/weights.h5") # doctest: +SKIP >>> for _ in range(10): # doctest: +SKIP >>> algo.train() # doctest: +SKIP """ self.get_policy(policy_id).import_model_from_h5(import_file) # Sync new weights to remote workers. self._sync_weights_to_workers(worker_set=self.workers) @override(Trainable) def save_checkpoint(self, checkpoint_dir: str) -> str: """Exports AIR Checkpoint to a local directory and returns its directory path. The structure of an Algorithm checkpoint dir will be as follows:: policies/ pol_1/ policy_state.pkl pol_2/ policy_state.pkl rllib_checkpoint.json algorithm_state.pkl Note: `rllib_checkpoint.json` contains a "version" key (e.g. with value 0.1) helping RLlib to remain backward compatible wrt. restoring from checkpoints from Ray 2.0 onwards. Args: checkpoint_dir: The directory where the checkpoint files will be stored. Returns: The path to the created AIR Checkpoint directory. """ state = self.__getstate__() # Extract policy states from worker state (Policies get their own # checkpoint sub-dirs). policy_states = {} if "worker" in state and "policy_states" in state["worker"]: policy_states = state["worker"].pop("policy_states", {}) # Add RLlib checkpoint version. state["checkpoint_version"] = CHECKPOINT_VERSION # Write state (w/o policies) to disk. state_file = os.path.join(checkpoint_dir, "algorithm_state.pkl") with open(state_file, "wb") as f: pickle.dump(state, f) # Write rllib_checkpoint.json. 
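        # The file written below looks roughly like this (values illustrative):
        #     {"type": "Algorithm", "checkpoint_version": "1.0",
        #      "ray_version": "2.x.y", "ray_commit": "<sha>"}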
with open(os.path.join(checkpoint_dir, "rllib_checkpoint.json"), "w") as f: json.dump( { "type": "Algorithm", "checkpoint_version": str(state["checkpoint_version"]), "ray_version": ray.__version__, "ray_commit": ray.__commit__, }, f, ) # Write individual policies to disk, each in their own sub-directory. for pid, policy_state in policy_states.items(): # From here on, disallow policyIDs that would not work as directory names. validate_policy_id(pid, error=True) policy_dir = os.path.join(checkpoint_dir, "policies", pid) os.makedirs(policy_dir, exist_ok=True) policy = self.get_policy(pid) policy.export_checkpoint(policy_dir, policy_state=policy_state) return checkpoint_dir @override(Trainable) def load_checkpoint(self, checkpoint: Union[Dict, str]) -> None: # Checkpoint is provided as a directory name. # Restore from the checkpoint file or dir. if isinstance(checkpoint, str): checkpoint_info = get_checkpoint_info(checkpoint) checkpoint_data = Algorithm._checkpoint_info_to_algorithm_state( checkpoint_info ) # Checkpoint is a checkpoint-as-dict -> Restore state from it as-is. else: checkpoint_data = checkpoint self.__setstate__(checkpoint_data) @override(Trainable) def log_result(self, result: ResultDict) -> None: # Log after the callback is invoked, so that the user has a chance # to mutate the result. # TODO: Remove `trainer` arg at some point to fully deprecate the old signature. self.callbacks.on_train_result(algorithm=self, result=result) # Then log according to Trainable's logging logic. Trainable.log_result(self, result) @override(Trainable) def cleanup(self) -> None: # Stop all workers. if hasattr(self, "workers") and self.workers is not None: self.workers.stop() if hasattr(self, "evaluation_workers") and self.evaluation_workers is not None: self.evaluation_workers.stop() @OverrideToImplementCustomLogic @classmethod @override(Trainable) def default_resource_request( cls, config: Union[AlgorithmConfig, PartialAlgorithmConfigDict] ) -> Union[Resources, PlacementGroupFactory]: # Default logic for RLlib Algorithms: # Create one bundle per individual worker (local or remote). # Use `num_cpus_for_driver` and `num_gpus` for the local worker and # `num_cpus_per_worker` and `num_gpus_per_worker` for the remote # workers to determine their CPU/GPU resource needs. # Convenience config handles. cf = cls.get_default_config().update_from_dict(config) cf.validate() cf.freeze() # get evaluation config eval_cf = cf.get_evaluation_config_object() eval_cf.validate() eval_cf.freeze() # resources for local worker local_worker = { "CPU": cf.num_cpus_for_local_worker, "GPU": 0 if cf._fake_gpus else cf.num_gpus, } bundles = [local_worker] # resources for rollout env samplers rollout_workers = [ { "CPU": cf.num_cpus_per_worker, "GPU": cf.num_gpus_per_worker, **cf.custom_resources_per_worker, } for _ in range(cf.num_rollout_workers) ] # resources for evaluation env samplers or datasets (if any) if cls._should_create_evaluation_rollout_workers(eval_cf): # Evaluation workers. # Note: The local eval worker is located on the driver CPU. evaluation_bundle = [ { "CPU": eval_cf.num_cpus_per_worker, "GPU": eval_cf.num_gpus_per_worker, **eval_cf.custom_resources_per_worker, } for _ in range(eval_cf.evaluation_num_workers) ] else: # resources for offline dataset readers during evaluation # Note (Kourosh): we should not claim extra workers for # training on the offline dataset, since rollout workers have already # claimed it. 
# Another Note (Kourosh): dataset reader will not use placement groups so # whatever we specify here won't matter because dataset won't even use it. # Disclaimer: using ray dataset in tune may cause deadlock when multiple # tune trials get scheduled on the same node and do not leave any spare # resources for dataset operations. The workaround is to limit the # max_concurrent trials so that some spare cpus are left for dataset # operations. This behavior should get fixed by the dataset team. more info # found here: # https://docs.ray.io/en/master/data/dataset-internals.html#datasets-tune evaluation_bundle = [] bundles += rollout_workers + evaluation_bundle # Return PlacementGroupFactory containing all needed resources # (already properly defined as device bundles). return PlacementGroupFactory( bundles=bundles, strategy=config.get("placement_strategy", "PACK"), ) @DeveloperAPI def _before_evaluate(self): """Pre-evaluation callback.""" pass @staticmethod def _get_env_id_and_creator( env_specifier: Union[str, EnvType, None], config: AlgorithmConfig ) -> Tuple[Optional[str], EnvCreator]: """Returns env_id and creator callable given original env id from config. Args: env_specifier: An env class, an already tune registered env ID, a known gym env name, or None (if no env is used). config: The AlgorithmConfig object. Returns: Tuple consisting of a) env ID string and b) env creator callable. """ # Environment is specified via a string. if isinstance(env_specifier, str): # An already registered env. if _global_registry.contains(ENV_CREATOR, env_specifier): return env_specifier, _global_registry.get(ENV_CREATOR, env_specifier) # A class path specifier. elif "." in env_specifier: def env_creator_from_classpath(env_context): try: env_obj = from_config(env_specifier, env_context) except ValueError: raise EnvError( ERR_MSG_INVALID_ENV_DESCRIPTOR.format(env_specifier) ) return env_obj return env_specifier, env_creator_from_classpath # Try gym/PyBullet/Vizdoom. else: return env_specifier, functools.partial( _gym_env_creator, env_descriptor=env_specifier ) elif isinstance(env_specifier, type): env_id = env_specifier # .__name__ if config["remote_worker_envs"]: # Check gym version (0.22 or higher?). # If > 0.21, can't perform auto-wrapping of the given class as this # would lead to a pickle error. gym_version = pkg_resources.get_distribution("gym").version if version.parse(gym_version) >= version.parse("0.22"): raise ValueError( "Cannot specify a gym.Env class via `config.env` while setting " "`config.remote_worker_env=True` AND your gym version is >= " "0.22! Try installing an older version of gym or set `config." "remote_worker_env=False`." ) @ray.remote(num_cpus=1) class _wrapper(env_specifier): # Add convenience `_get_spaces` and `_is_multi_agent` # methods: def _get_spaces(self): return self.observation_space, self.action_space def _is_multi_agent(self): from ray.rllib.env.multi_agent_env import MultiAgentEnv return isinstance(self, MultiAgentEnv) return env_id, lambda cfg: _wrapper.remote(cfg) else: return env_id, lambda cfg: env_specifier(cfg) # No env -> Env creator always returns None. elif env_specifier is None: return None, lambda env_config: None else: raise ValueError( "{} is an invalid env specifier. ".format(env_specifier) + "You can specify a custom env as either a class " '(e.g., YourEnvCls) or a registered env id (e.g., "your_env").' 
) def _sync_filters_if_needed( self, from_worker: RolloutWorker, workers: WorkerSet, timeout_seconds: Optional[float] = None, ): if ( from_worker and self.config.get("observation_filter", "NoFilter") != "NoFilter" ): FilterManager.synchronize( from_worker.filters, workers, update_remote=self.config["synchronize_filters"], timeout_seconds=timeout_seconds, ) logger.debug("synchronized filters: {}".format(from_worker.filters)) @DeveloperAPI def _sync_weights_to_workers( self, *, worker_set: WorkerSet, ) -> None: """Sync "main" weights to given WorkerSet or list of workers.""" # Broadcast the new policy weights to all remote workers in worker_set. logger.info("Synchronizing weights to workers.") worker_set.sync_weights() @classmethod @override(Trainable) def resource_help(cls, config: Union[AlgorithmConfig, AlgorithmConfigDict]) -> str: return ( "\n\nYou can adjust the resource requests of RLlib Algorithms by calling " "`AlgorithmConfig.resources(" "num_gpus=.., num_cpus_per_worker=.., num_gpus_per_worker=.., ..)` or " "`AgorithmConfig.rollouts(num_rollout_workers=..)`. See " "the `ray.rllib.algorithms.algorithm_config.AlgorithmConfig` classes " "(each Algorithm has its own subclass of this class) for more info.\n\n" f"The config of this Algorithm is: {config}" ) @override(Trainable) def get_auto_filled_metrics( self, now: Optional[datetime] = None, time_this_iter: Optional[float] = None, debug_metrics_only: bool = False, ) -> dict: # Override this method to make sure, the `config` key of the returned results # contains the proper Tune config dict (instead of an AlgorithmConfig object). auto_filled = super().get_auto_filled_metrics( now, time_this_iter, debug_metrics_only ) if "config" not in auto_filled: raise KeyError("`config` key not found in auto-filled results dict!") # If `config` key is no dict (but AlgorithmConfig object) -> # make sure, it's a dict to not break Tune APIs. if not isinstance(auto_filled["config"], dict): assert isinstance(auto_filled["config"], AlgorithmConfig) auto_filled["config"] = auto_filled["config"].to_dict() return auto_filled @classmethod def merge_trainer_configs( cls, config1: AlgorithmConfigDict, config2: PartialAlgorithmConfigDict, _allow_unknown_configs: Optional[bool] = None, ) -> AlgorithmConfigDict: """Merges a complete Algorithm config dict with a partial override dict. Respects nested structures within the config dicts. The values in the partial override dict take priority. Args: config1: The complete Algorithm's dict to be merged (overridden) with `config2`. config2: The partial override config dict to merge on top of `config1`. _allow_unknown_configs: If True, keys in `config2` that don't exist in `config1` are allowed and will be added to the final config. Returns: The merged full algorithm config dict. """ config1 = copy.deepcopy(config1) if "callbacks" in config2 and type(config2["callbacks"]) is dict: deprecation_warning( "callbacks dict interface", "a class extending rllib.algorithms.callbacks.DefaultCallbacks; " "see `rllib/examples/custom_metrics_and_callbacks.py` for an example.", error=True, ) if _allow_unknown_configs is None: _allow_unknown_configs = cls._allow_unknown_configs return deep_update( config1, config2, _allow_unknown_configs, cls._allow_unknown_subkeys, cls._override_all_subkeys_if_type_changes, cls._override_all_key_list, ) @staticmethod @ExperimentalAPI def validate_env(env: EnvType, env_context: EnvContext) -> None: """Env validator function for this Algorithm class. 
Override this in child classes to define custom validation behavior. Args: env: The (sub-)environment to validate. This is normally a single sub-environment (e.g. a gym.Env) within a vectorized setup. env_context: The EnvContext to configure the environment. Raises: Exception in case something is wrong with the given environment. """ pass @override(Trainable) def _export_model( self, export_formats: List[str], export_dir: str ) -> Dict[str, str]: ExportFormat.validate(export_formats) exported = {} if ExportFormat.CHECKPOINT in export_formats: path = os.path.join(export_dir, ExportFormat.CHECKPOINT) self.export_policy_checkpoint(path) exported[ExportFormat.CHECKPOINT] = path if ExportFormat.MODEL in export_formats: path = os.path.join(export_dir, ExportFormat.MODEL) self.export_policy_model(path) exported[ExportFormat.MODEL] = path if ExportFormat.ONNX in export_formats: path = os.path.join(export_dir, ExportFormat.ONNX) self.export_policy_model(path, onnx=int(os.getenv("ONNX_OPSET", "11"))) exported[ExportFormat.ONNX] = path return exported def import_model(self, import_file: str): """Imports a model from import_file. Note: Currently, only h5 files are supported. Args: import_file: The file to import the model from. Returns: A dict that maps ExportFormats to successfully exported models. """ # Check for existence. if not os.path.exists(import_file): raise FileNotFoundError( "`import_file` '{}' does not exist! Can't import Model.".format( import_file ) ) # Get the format of the given file. import_format = "h5" # TODO(sven): Support checkpoint loading. ExportFormat.validate([import_format]) if import_format != ExportFormat.H5: raise NotImplementedError else: return self.import_policy_model_from_h5(import_file) @PublicAPI def __getstate__(self) -> Dict: """Returns current state of Algorithm, sufficient to restore it from scratch. Returns: The current state dict of this Algorithm, which can be used to sufficiently restore the algorithm from scratch without any other information. """ # Add config to state so complete Algorithm can be reproduced w/o it. state = { "algorithm_class": type(self), "config": self.config, } if hasattr(self, "workers"): state["worker"] = self.workers.local_worker().get_state() # TODO: Experimental functionality: Store contents of replay buffer # to checkpoint, only if user has configured this. if self.local_replay_buffer is not None and self.config.get( "store_buffer_in_checkpoints" ): state["local_replay_buffer"] = self.local_replay_buffer.get_state() if self.train_exec_impl is not None: state["train_exec_impl"] = self.train_exec_impl.shared_metrics.get().save() else: state["counters"] = self._counters return state @PublicAPI def __setstate__(self, state) -> None: """Sets the algorithm to the provided state. Args: state: The state dict to restore this Algorithm instance to. `state` may have been returned by a call to an Algorithm's `__getstate__()` method. """ # TODO (sven): Validate that our config and the config in state are compatible. # For example, the model architectures may differ. # Also, what should the behavior be if e.g. some training parameter # (e.g. lr) changed? 
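        # Illustrative (hedged) round trip of the state dict handled below:
        #     state = algo.__getstate__()   # {"algorithm_class": ..., "config": ...,
        #                                   #  "worker": ..., plus "counters" or
        #                                   #  "local_replay_buffer" if configured}
        #     new_algo.__setstate__(state)  # pushes "worker" state to the local
        #                                   # and all remote (and eval) workers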
if hasattr(self, "workers") and "worker" in state: self.workers.local_worker().set_state(state["worker"]) remote_state = ray.put(state["worker"]) self.workers.foreach_worker( lambda w: w.set_state(ray.get(remote_state)), local_worker=False, healthy_only=False, ) if self.evaluation_workers: # If evaluation workers are used, also restore the policies # there in case they are used for evaluation purpose. self.evaluation_workers.foreach_worker( lambda w: w.set_state(ray.get(remote_state)), local_worker=False, healthy_only=False, ) # If necessary, restore replay data as well. if self.local_replay_buffer is not None: # TODO: Experimental functionality: Restore contents of replay # buffer from checkpoint, only if user has configured this. if self.config.get("store_buffer_in_checkpoints"): if "local_replay_buffer" in state: self.local_replay_buffer.set_state(state["local_replay_buffer"]) else: logger.warning( "`store_buffer_in_checkpoints` is True, but no replay " "data found in state!" ) elif "local_replay_buffer" in state and log_once( "no_store_buffer_in_checkpoints_but_data_found" ): logger.warning( "`store_buffer_in_checkpoints` is False, but some replay " "data found in state!" ) if self.train_exec_impl is not None: self.train_exec_impl.shared_metrics.get().restore(state["train_exec_impl"]) elif "counters" in state: self._counters = state["counters"] @staticmethod def _checkpoint_info_to_algorithm_state( checkpoint_info: dict, policy_ids: Optional[Container[PolicyID]] = None, policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None, policies_to_train: Optional[ Union[ Container[PolicyID], Callable[[PolicyID, Optional[SampleBatchType]], bool], ] ] = None, ) -> Dict: """Converts a checkpoint info or object to a proper Algorithm state dict. The returned state dict can be used inside self.__setstate__(). Args: checkpoint_info: A checkpoint info dict as returned by `ray.rllib.utils.checkpoints.get_checkpoint_info( [checkpoint dir or AIR Checkpoint])`. policy_ids: Optional list/set of PolicyIDs. If not None, only those policies listed here will be included in the returned state. Note that state items such as filters, the `is_policy_to_train` function, as well as the multi-agent `policy_ids` dict will be adjusted as well, based on this arg. policy_mapping_fn: An optional (updated) policy mapping function to include in the returned state. policies_to_train: An optional list of policy IDs to be trained or a callable taking PolicyID and SampleBatchType and returning a bool (trainable or not?) to include in the returned state. Returns: The state dict usable within the `self.__setstate__()` method. """ if checkpoint_info["type"] != "Algorithm": raise ValueError( "`checkpoint` arg passed to " "`Algorithm._checkpoint_info_to_algorithm_state()` must be an " f"Algorithm checkpoint (but is {checkpoint_info['type']})!" ) with open(checkpoint_info["state_file"], "rb") as f: state = pickle.load(f) # New checkpoint format: Policies are in separate sub-dirs. # Note: Algorithms like ES/ARS don't have a WorkerSet, so we just return # the plain state here. if ( checkpoint_info["checkpoint_version"] > version.Version("0.1") and state.get("worker") is not None ): worker_state = state["worker"] # Retrieve the set of all required policy IDs. policy_ids = set( policy_ids if policy_ids is not None else worker_state["policy_ids"] ) # Remove those policies entirely from filters that are not in # `policy_ids`. 
worker_state["filters"] = { pid: filter for pid, filter in worker_state["filters"].items() if pid in policy_ids } # Compile actual config object. algo_cls = state["algorithm_class"] if isinstance(algo_cls, str): algo_cls = get_trainable_cls(algo_cls) default_config = algo_cls.get_default_config() if isinstance(default_config, AlgorithmConfig): new_config = default_config.update_from_dict(state["config"]) else: new_config = Algorithm.merge_trainer_configs( default_config, state["config"] ) # Remove policies from multiagent dict that are not in `policy_ids`. new_policies = new_config.policies if isinstance(new_policies, (set, list, tuple)): new_policies = {pid for pid in new_policies if pid in policy_ids} else: new_policies = { pid: spec for pid, spec in new_policies.items() if pid in policy_ids } new_config.multi_agent( policies=new_policies, policies_to_train=policies_to_train, ) state["config"] = new_config.to_dict() # Prepare local `worker` state to add policies' states into it, # read from separate policy checkpoint files. worker_state["policy_states"] = {} for pid in policy_ids: policy_state_file = os.path.join( checkpoint_info["checkpoint_dir"], "policies", pid, "policy_state.pkl", ) if not os.path.isfile(policy_state_file): raise ValueError( "Given checkpoint does not seem to be valid! No policy " f"state file found for PID={pid}. " f"The file not found is: {policy_state_file}." ) with open(policy_state_file, "rb") as f: worker_state["policy_states"][pid] = pickle.load(f) if policy_mapping_fn is not None: worker_state["policy_mapping_fn"] = policy_mapping_fn if policies_to_train is not None: worker_state["is_policy_to_train"] = policies_to_train return state @DeveloperAPI def _create_local_replay_buffer_if_necessary( self, config: PartialAlgorithmConfigDict ) -> Optional[MultiAgentReplayBuffer]: """Create a MultiAgentReplayBuffer instance if necessary. Args: config: Algorithm-specific configuration data. Returns: MultiAgentReplayBuffer instance based on algorithm config. None, if local replay buffer is not needed. """ if not config.get("replay_buffer_config") or config["replay_buffer_config"].get( "no_local_replay_buffer" or config.get("no_local_replay_buffer") ): return return from_config(ReplayBuffer, config["replay_buffer_config"]) @DeveloperAPI def _kwargs_for_execution_plan(self): kwargs = {} if self.local_replay_buffer is not None: kwargs["local_replay_buffer"] = self.local_replay_buffer return kwargs def _run_one_training_iteration(self) -> Tuple[ResultDict, "TrainIterCtx"]: """Runs one training iteration (self.iteration will be +1 after this). Calls `self.training_step()` repeatedly until the minimum time (sec), sample- or training steps have been reached. Returns: The results dict from the training iteration. """ # In case we are training (in a thread) parallel to evaluation, # we may have to re-enable eager mode here (gets disabled in the # thread). if self.config.get("framework") == "tf2" and not tf.executing_eagerly(): tf1.enable_eager_execution() results = None # Create a step context ... with TrainIterCtx(algo=self) as train_iter_ctx: # .. so we can query it whether we should stop the iteration loop (e.g. # when we have reached `min_time_s_per_iteration`). while not train_iter_ctx.should_stop(results): # Try to train one step. 
# TODO (avnishn): Remove the execution plan API by q1 2023 with self._timers[TRAINING_ITERATION_TIMER]: if self.config._disable_execution_plan_api: results = self.training_step() else: results = next(self.train_exec_impl) # With training step done. Try to bring failed workers back. self.restore_workers(self.workers) return results, train_iter_ctx def _run_one_evaluation( self, train_future: Optional[concurrent.futures.ThreadPoolExecutor] = None, ) -> ResultDict: """Runs evaluation step via `self.evaluate()` and handling worker failures. Args: train_future: In case, we are training and avaluating in parallel, this arg carries the currently running ThreadPoolExecutor object that runs the training iteration Returns: The results dict from the evaluation call. """ eval_results = { "evaluation": { "episode_reward_max": np.nan, "episode_reward_min": np.nan, "episode_reward_mean": np.nan, } } eval_func_to_use = ( self._evaluate_async if self.config.enable_async_evaluation else self.evaluate ) if self.config.evaluation_duration == "auto": assert ( train_future is not None and self.config.evaluation_parallel_to_training ) unit = self.config.evaluation_duration_unit eval_results = eval_func_to_use( duration_fn=functools.partial( self._automatic_evaluation_duration_fn, unit, self.config.evaluation_num_workers, self.evaluation_config, train_future, ) ) # Run `self.evaluate()` only once per training iteration. else: eval_results = eval_func_to_use() if self.evaluation_workers is not None: # After evaluation, do a round of health check to see if any of # the failed workers are back. self.restore_workers(self.evaluation_workers) # Add number of healthy evaluation workers after this iteration. eval_results["evaluation"][ "num_healthy_workers" ] = self.evaluation_workers.num_healthy_remote_workers() eval_results["evaluation"][ "num_in_flight_async_reqs" ] = self.evaluation_workers.num_in_flight_async_reqs() eval_results["evaluation"][ "num_remote_worker_restarts" ] = self.evaluation_workers.num_remote_worker_restarts() return eval_results def _run_one_training_iteration_and_evaluation_in_parallel( self, ) -> Tuple[ResultDict, "TrainIterCtx"]: """Runs one training iteration and one evaluation step in parallel. First starts the training iteration (via `self._run_one_training_iteration()`) within a ThreadPoolExecutor, then runs the evaluation step in parallel. In auto-duration mode (config.evaluation_duration=auto), makes sure the evaluation step takes roughly the same time as the training iteration. Returns: The accumulated training and evaluation results. """ with concurrent.futures.ThreadPoolExecutor() as executor: train_future = executor.submit(lambda: self._run_one_training_iteration()) # Pass the train_future into `self._run_one_evaluation()` to allow it # to run exactly as long as the training iteration takes in case # evaluation_duration=auto. results = self._run_one_evaluation(train_future) # Collect the training results from the future. train_results, train_iter_ctx = train_future.result() results.update(train_results) return results, train_iter_ctx def _run_offline_evaluation(self): """Runs offline evaluation via `OfflineEvaluator.estimate_on_dataset()` API. This method will be used when `evaluation_dataset` is provided. Note: This will only work if the policy is a single agent policy. Returns: The results dict from the offline evaluation call. 
""" assert len(self.workers.local_worker().policy_map) == 1 parallelism = self.evaluation_config.evaluation_num_workers or 1 offline_eval_results = {"off_policy_estimator": {}} for evaluator_name, offline_evaluator in self.reward_estimators.items(): offline_eval_results["off_policy_estimator"][ evaluator_name ] = offline_evaluator.estimate_on_dataset( self.evaluation_dataset, n_parallelism=parallelism, ) return offline_eval_results @classmethod def _should_create_evaluation_rollout_workers(cls, eval_config: "AlgorithmConfig"): """Determines whether we need to create evaluation workers. Returns False if we need to run offline evaluation (with ope.estimate_on_dastaset API) or when local worker is to be used for evaluation. Note: We only use estimate_on_dataset API with bandits for now. That is when ope_split_batch_by_episode is False. TODO: In future we will do the same for episodic RL OPE. """ run_offline_evaluation = ( eval_config.get("off_policy_estimation_methods") and not eval_config.ope_split_batch_by_episode ) return not run_offline_evaluation and ( eval_config.evaluation_num_workers > 0 or eval_config.evaluation_interval ) @staticmethod def _automatic_evaluation_duration_fn( unit, num_eval_workers, eval_cfg, train_future, num_units_done ): # Training is done and we already ran at least one # evaluation -> Nothing left to run. if num_units_done > 0 and train_future.done(): return 0 # Count by episodes. -> Run n more # (n=num eval workers). elif unit == "episodes": return num_eval_workers # Count by timesteps. -> Run n*m*p more # (n=num eval workers; m=rollout fragment length; # p=num-envs-per-worker). else: return ( num_eval_workers * eval_cfg["rollout_fragment_length"] * eval_cfg["num_envs_per_worker"] ) def _compile_iteration_results( self, *, episodes_this_iter, step_ctx, iteration_results=None ): # Return dict. results: ResultDict = {} iteration_results = iteration_results or {} # Evaluation results. if "evaluation" in iteration_results: results["evaluation"] = iteration_results.pop("evaluation") # Custom metrics and episode media. results["custom_metrics"] = iteration_results.pop("custom_metrics", {}) results["episode_media"] = iteration_results.pop("episode_media", {}) # Learner info. results["info"] = {LEARNER_INFO: iteration_results} # Calculate how many (if any) of older, historical episodes we have to add to # `episodes_this_iter` in order to reach the required smoothing window. episodes_for_metrics = episodes_this_iter[:] missing = self.config["metrics_num_episodes_for_smoothing"] - len( episodes_this_iter ) # We have to add some older episodes to reach the smoothing window size. if missing > 0: episodes_for_metrics = self._episode_history[-missing:] + episodes_this_iter assert ( len(episodes_for_metrics) <= self.config["metrics_num_episodes_for_smoothing"] ) # Note that when there are more than `metrics_num_episodes_for_smoothing` # episodes in `episodes_for_metrics`, leave them as-is. In this case, we'll # compute the stats over that larger number. # Add new episodes to our history and make sure it doesn't grow larger than # needed. self._episode_history.extend(episodes_this_iter) self._episode_history = self._episode_history[ -self.config["metrics_num_episodes_for_smoothing"] : ] results["sampler_results"] = summarize_episodes( episodes_for_metrics, episodes_this_iter, self.config["keep_per_episode_custom_metrics"], ) # TODO: Don't dump sampler results into top-level. 
results.update(results["sampler_results"]) results["num_healthy_workers"] = self.workers.num_healthy_remote_workers() results["num_in_flight_async_reqs"] = self.workers.num_in_flight_async_reqs() results[ "num_remote_worker_restarts" ] = self.workers.num_remote_worker_restarts() # Train-steps- and env/agent-steps this iteration. for c in [ NUM_AGENT_STEPS_SAMPLED, NUM_AGENT_STEPS_TRAINED, NUM_ENV_STEPS_SAMPLED, NUM_ENV_STEPS_TRAINED, ]: results[c] = self._counters[c] if self.config.count_steps_by == "agent_steps": results[NUM_AGENT_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled results[NUM_AGENT_STEPS_TRAINED + "_this_iter"] = step_ctx.trained # TODO: For CQL and other algos, count by trained steps. results["timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED] # TODO: Backward compatibility. results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained else: results[NUM_ENV_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled results[NUM_ENV_STEPS_TRAINED + "_this_iter"] = step_ctx.trained # TODO: For CQL and other algos, count by trained steps. results["timesteps_total"] = self._counters[NUM_ENV_STEPS_SAMPLED] # TODO: Backward compatibility. results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained # TODO: Backward compatibility. results["agent_timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED] # Process timer results. timers = {} for k, timer in self._timers.items(): timers["{}_time_ms".format(k)] = round(timer.mean * 1000, 3) if timer.has_units_processed(): timers["{}_throughput".format(k)] = round(timer.mean_throughput, 3) results["timers"] = timers # Process counter results. counters = {} for k, counter in self._counters.items(): counters[k] = counter results["counters"] = counters # TODO: Backward compatibility. results["info"].update(counters) return results def __repr__(self): return type(self).__name__ def _record_usage(self, config): """Record the framework and algorithm used. Args: config: Algorithm config dict. """ record_extra_usage_tag(TagKey.RLLIB_FRAMEWORK, config["framework"]) record_extra_usage_tag(TagKey.RLLIB_NUM_WORKERS, str(config["num_workers"])) alg = self.__class__.__name__ # We do not want to collect user defined algorithm names. if alg not in ALL_ALGORITHMS: alg = "USER_DEFINED" record_extra_usage_tag(TagKey.RLLIB_ALGORITHM, alg) @Deprecated(new="Algorithm.compute_single_action()", error=True) def compute_action(self, *args, **kwargs): return self.compute_single_action(*args, **kwargs) @Deprecated(new="construct WorkerSet(...) instance directly", error=False) def _make_workers( self, *, env_creator: EnvCreator, validate_env: Optional[Callable[[EnvType, EnvContext], None]], policy_class: Type[Policy], config: AlgorithmConfigDict, num_workers: int, local_worker: bool = True, ) -> WorkerSet: return WorkerSet( env_creator=env_creator, validate_env=validate_env, default_policy_class=policy_class, config=config, num_workers=num_workers, local_worker=local_worker, logdir=self.logdir, ) def validate_config(self, config) -> None: # TODO: Deprecate. All logic has been moved into the AlgorithmConfig classes. pass @staticmethod @Deprecated(new="AlgorithmConfig.validate()", error=True) def _validate_config(config, trainer_or_none): pass # TODO: Create a dict that throw a deprecation warning once we have fully moved # to AlgorithmConfig() objects (some algos still missing). 
COMMON_CONFIG: AlgorithmConfigDict = AlgorithmConfig(Algorithm).to_dict() class TrainIterCtx: def __init__(self, algo: Algorithm): self.algo = algo def __enter__(self): # Before first call to `step()`, `results` is expected to be None -> # Start with self.failures=-1 -> set to 0 before the very first call # to `self.step()`. self.failures = -1 self.time_start = time.time() self.sampled = 0 self.trained = 0 self.init_env_steps_sampled = self.algo._counters[NUM_ENV_STEPS_SAMPLED] self.init_env_steps_trained = self.algo._counters[NUM_ENV_STEPS_TRAINED] self.init_agent_steps_sampled = self.algo._counters[NUM_AGENT_STEPS_SAMPLED] self.init_agent_steps_trained = self.algo._counters[NUM_AGENT_STEPS_TRAINED] self.failure_tolerance = self.algo.config[ "num_consecutive_worker_failures_tolerance" ] return self def __exit__(self, *args): pass def should_stop(self, results): # Before first call to `step()`. if results is None: # Fail after n retries. self.failures += 1 if self.failures > self.failure_tolerance: raise RuntimeError( "More than `num_consecutive_worker_failures_tolerance=" f"{self.failure_tolerance}` consecutive worker failures! " "Exiting." ) # Continue to very first `step()` call or retry `step()` after # a (tolerable) failure. return False # Stopping criteria. elif self.algo.config._disable_execution_plan_api: if self.algo.config.count_steps_by == "agent_steps": self.sampled = ( self.algo._counters[NUM_AGENT_STEPS_SAMPLED] - self.init_agent_steps_sampled ) self.trained = ( self.algo._counters[NUM_AGENT_STEPS_TRAINED] - self.init_agent_steps_trained ) else: self.sampled = ( self.algo._counters[NUM_ENV_STEPS_SAMPLED] - self.init_env_steps_sampled ) self.trained = ( self.algo._counters[NUM_ENV_STEPS_TRAINED] - self.init_env_steps_trained ) min_t = self.algo.config["min_time_s_per_iteration"] min_sample_ts = self.algo.config["min_sample_timesteps_per_iteration"] min_train_ts = self.algo.config["min_train_timesteps_per_iteration"] # Repeat if not enough time has passed or if not enough # env|train timesteps have been processed (or these min # values are not provided by the user). if ( (not min_t or time.time() - self.time_start >= min_t) and (not min_sample_ts or self.sampled >= min_sample_ts) and (not min_train_ts or self.trained >= min_train_ts) ): return True else: return False # No errors (we got results != None) -> Return True # (meaning: yes, should stop -> no further step attempts). else: return True
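# A minimal sketch of how the state dict produced by Algorithm.__getstate__()
# above can seed another Algorithm instance via __setstate__(). The helper
# name `_clone_algorithm_state` is illustrative only; both instances are
# assumed to have been built from compatible configs, which __setstate__()
# does not validate (see the TODO in that method).
def _clone_algorithm_state(src: "Algorithm", dst: "Algorithm") -> "Algorithm":
    """Copy policy weights, filters and counters from `src` into `dst`."""
    state = src.__getstate__()   # {"algorithm_class", "config", "worker", ...}
    dst.__setstate__(state)      # pushes the "worker" state to local/remote workers
    return dst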
PypiClean
/tensorflow_fork-1.15.2.0-py3-none-any.whl/tensorflow/contrib/specs/python/specs_ops.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.layers.python.layers import layers from tensorflow.contrib.specs.python import specs_lib from tensorflow.python.ops import array_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.ops import variable_scope # The following assignments don't appear to follow Google naming # conventions, but that's because these are functions defined by # higher-order function application, not "constants" and because they # are the commands of the DSL. # pylint: disable=invalid-name class Idx(specs_lib.Composable): """Implements the identity function in network specifications.""" def funcall(self, x): return x class Conc(specs_lib.Composable): """Implements tensor concatenation in network specifications.""" def __init__(self, dim, *args): """Concatenates tensors along the given dimension. Args: dim: dimension along which concatenation takes place *args: argument tensor functions to be concatenated """ self.dim = dim self.funs = args def funcall(self, x): outputs = [f.funcall(x) for f in self.funs] return array_ops.concat(outputs, self.dim) External = specs_lib.External Import = specs_lib.Import Fun = specs_lib.Function debug = specs_lib.debug Print = Fun(logging_ops.Print) Id = Fun(array_ops.identity) # TODO(tmb) add Assert # Two letter names for the most common layers. # 2D Convolutional layers with nonlinearities (s/t/r/m/l) # TODO(tmb) add Cbs, Fbs etc. for batch norms Cx = Fun(layers.conv2d) Cs = Fun(layers.conv2d, activation_fn=math_ops.sigmoid) Ct = Fun(layers.conv2d, activation_fn=math_ops.tanh) Cr = Fun(layers.conv2d, activation_fn=nn_ops.relu) Cm = Fun(layers.conv2d, activation_fn=nn_ops.softmax) Cl = Fun(layers.conv2d, activation_fn=None) # Fully connected slim with nonlinearities (s/t/r/m/l) Fx = Fun(layers.fully_connected) Fs = Fun(layers.fully_connected, activation_fn=math_ops.sigmoid) Ft = Fun(layers.fully_connected, activation_fn=math_ops.tanh) Fr = Fun(layers.fully_connected, activation_fn=nn_ops.relu) Fm = Fun(layers.fully_connected, activation_fn=nn_ops.softmax) Fl = Fun(layers.fully_connected, activation_fn=None) # Pooling Mp = Fun(layers.max_pool2d) Ap = Fun(layers.avg_pool2d) # Batch manipulations Do = Fun(layers.dropout) Bn = Fun(layers.batch_norm) Lrn = Fun(nn.local_response_normalization) Unit = Fun(layers.unit_norm) # Shape changes Flat = Fun(layers.flatten) Reshape = Fun(array_ops.reshape) Transpose = Fun(array_ops.transpose) Squeeze = Fun(array_ops.squeeze) Expand = Fun(array_ops.expand_dims) # Nonlinearities (rarely needed on their own) Relu = Fun(nn_ops.relu) Sig = Fun(math_ops.sigmoid) Tanh = Fun(math_ops.tanh) Smax = Fun(nn_ops.softmax) def Dws(n): """Depth-wise convolution + sigmoid (used after LSTM).""" return Cs(n, [1, 1]) def Dwm(n): """Depth-wise convolution + softmax (used after LSTM).""" return Cm(n, [1, 1]) # Sharing of Variables def Var(name, *args, **kw): """Implements an operator that generates a variable. This function is still experimental. Use it only for generating a single variable instance for each name. Args: name: Name of the variable. *args: Other arguments to get_variable. **kw: Other keywords for get_variable. Returns: A specs object for generating a variable. 
""" def var(_): return variable_scope.get_variable(name, *args, **kw) return specs_lib.Callable(var) class Shared(specs_lib.Composable): """Wraps a scope with variable reuse around the subnetwork. This function is still experimental. Attributes: f: The shared subnetwork. name: A name for the shared scope. used: A flag indicating whether the scope has already been used. """ shared_number = 1 def __init__(self, subnet, name=None, scope=None): """Create the Shared operator. Use this as: f = Shared(Cr(100, 3)) g = f | f | f Ordinarily, you do not need to provide either a name or a scope. Providing a name is useful if you want a well-defined namespace for the variables (e.g., for saving a subnet). Args: subnet: Definition of the shared network. name: Optional name for the shared context. scope: Optional shared scope (must be a Scope, not a string). Raises: ValueError: Scope is not of type tf.Scope, name is not of type string, or both scope and name are given together. """ if scope is not None and not isinstance(scope, variable_scope.VariableScope): raise ValueError("scope must be None or a VariableScope") if name is not None and not isinstance(scope, str): raise ValueError("name must be None or a string") if scope is not None and name is not None: raise ValueError("cannot provide both a name and a scope") if name is None: name = "Shared_%d" % Shared.shared_number Shared.shared_number += 1 self.subnet = subnet self.name = name self.scope = scope def funcall(self, x): """Apply the shared operator to an input. This wraps a variable scope around the creation of the subnet. Args: x: The input argument on which the subnet is invoked. Returns: The output tensor from invoking the subnet constructor. """ if self.scope is None: with variable_scope.variable_scope(self.name, values=[x]) as scope: self.scope = scope return self.subnet.funcall(x) else: with variable_scope.variable_scope(self.scope, values=[x], reuse=True): return self.subnet.funcall(x)
PypiClean
/blanketdb-0.4.0.tar.gz/blanketdb-0.4.0/HISTORY.rst
======= History ======= 0.4.0 (2020-02-26) ------------------ * Start uwsgi using http protocol by default in DOCKERFILE (s.t. standalone use is possible) * Overwrite CMD in docker-compose file to communicate via uwsgi protocol between nginx and blanketdb container 0.3.4 (2020-02-26) ------------------ * Support Python 3.8 0.3.3 (2019-12-12) ------------------ * Split tests into Python and HTTP API tests * Added tests that can be executed against an actual HTTP API of `BlanketDB` 0.3.2 (2019-12-04) ------------------ * Release to trigger build on dockerhub 0.3.1 (2019-03-06) ------------------ * Improved clarity with default values 0.3 (2019-03-06) ---------------- * Type annotations for `BlanketDB` * Python 3.4 is not supported anymore (as it does not know type annotations) 0.2.2 (2019-01-31) ------------------ * setuptools entrypoint for cli * quickstart documentation * added logo 0.2.1 (2019-01-24) ------------------ * fix tag confusion 0.2.0 (2019-01-24) ------------------ * Added CLI for starting `BlanketDB` with `wsgiref.simple_server` * Tests for `BlanketDB` Web API using `webtest` * Added documentation for usage and Web API 0.1.0 (2019-01-18) ------------------ * First release on PyPI.
PypiClean
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/users/item/information_protection/policy/labels/evaluate_application/evaluate_application_response.py
from __future__ import annotations from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union if TYPE_CHECKING: from .......models import base_collection_pagination_count_response, information_protection_action from .......models import base_collection_pagination_count_response class EvaluateApplicationResponse(base_collection_pagination_count_response.BaseCollectionPaginationCountResponse): def __init__(self,) -> None: """ Instantiates a new evaluateApplicationResponse and sets the default values. """ super().__init__() # The value property self._value: Optional[List[information_protection_action.InformationProtectionAction]] = None @staticmethod def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> EvaluateApplicationResponse: """ Creates a new instance of the appropriate class based on discriminator value Args: parseNode: The parse node to use to read the discriminator value and create the object Returns: EvaluateApplicationResponse """ if parse_node is None: raise Exception("parse_node cannot be undefined") return EvaluateApplicationResponse() def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]: """ The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]] """ from .......models import base_collection_pagination_count_response, information_protection_action fields: Dict[str, Callable[[Any], None]] = { "value": lambda n : setattr(self, 'value', n.get_collection_of_object_values(information_protection_action.InformationProtectionAction)), } super_fields = super().get_field_deserializers() fields.update(super_fields) return fields def serialize(self,writer: SerializationWriter) -> None: """ Serializes information the current object Args: writer: Serialization writer to use to serialize this model """ if writer is None: raise Exception("writer cannot be undefined") super().serialize(writer) writer.write_collection_of_object_values("value", self.value) @property def value(self,) -> Optional[List[information_protection_action.InformationProtectionAction]]: """ Gets the value property value. The value property Returns: Optional[List[information_protection_action.InformationProtectionAction]] """ return self._value @value.setter def value(self,value: Optional[List[information_protection_action.InformationProtectionAction]] = None) -> None: """ Sets the value property value. The value property Args: value: Value to set for the value property. """ self._value = value
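# A minimal sketch exercising only members defined above: the default
# constructor and the `value` property holding the page of
# InformationProtectionAction items. Building the object directly is an
# assumption for illustration; real instances normally come from
# create_from_discriminator_value() during deserialization.
def _example_empty_response() -> EvaluateApplicationResponse:
    response = EvaluateApplicationResponse()
    response.value = []   # would hold InformationProtectionAction objects
    return response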
PypiClean
/aslack-0.11.0-py3-none-any.whl/aslack-0.11.0.dist-info/DESCRIPTION.rst
aSlack ====== */əˈslæk/* .. image:: https://img.shields.io/pypi/v/aslack.svg :target: https://pypi.python.org/pypi/aslack :alt: PyPI Version .. image:: https://travis-ci.org/textbook/aslack.svg :target: https://travis-ci.org/textbook/aslack :alt: Travis Build Status .. image:: https://coveralls.io/repos/textbook/aslack/badge.svg?branch=master&service=github :target: https://coveralls.io/github/textbook/aslack?branch=master :alt: Code Coverage .. image:: https://www.quantifiedcode.com/api/v1/project/482551d8368740c68fb1d3e80c4f6664/badge.svg :target: https://www.quantifiedcode.com/app/project/482551d8368740c68fb1d3e80c4f6664 :alt: Code Issues .. image:: https://img.shields.io/badge/license-ISC-blue.svg :target: https://github.com/textbook/aslack/blob/master/LICENSE :alt: ISC License aSlack is a lightweight, asynchronous wrapper for Slack's Web and Real-Time Messaging (RTM) APIs, designed to allow the easy development of Slack tools and bots in Python. It defines two principal components: - ``SlackApi`` - a wrapper around the Web API; and - ``SlackBot`` - a messaging bot built on top of the RTM API. Compatibility ------------- aSlack uses asyncio_ with the ``async`` and ``await`` syntax, so is only compatible with Python versions 3.5 and above. Dependencies ------------ Asynchronous HTTP and WebSocket functionality is provided by aiohttp_ (version 0.15 and above required for out-of-the-box WebSocket client support). Documentation ------------- aSlack's documentation is available on PythonHosted_. Installation ------------ aSlack is available through the Python Package Index, PyPI_, you can install it with:: pip install aslack Alternatively, clone or fork the repository and use e.g.:: python setup.py develop to install locally for development. In this case, you should also install the development dependencies (ideally in a ``virtualenv``) using:: pip install -r requirements.txt Testing ------- The test suite can be run using ``py.test`` directly or by running:: python setup.py test in which case ``pylint`` will also be run to check the code quality. Additionally, a demo test for the Halliwell example can be run by setting the environment variable ``TMDB_API_TOKEN``. Examples -------- See the ``/examples`` directory for examples of the kinds of bots that you can build with aSlack. Halliwell ......... Based on both aSlack and `aTMDb`_, Halliwell is a bot that can provide information on movies or actors and find overlaps. Two environment variables, ``SLACK_API_TOKEN`` and ``TMDB_API_TOKEN``, are required to run this example, and configuration for easy deployment to Cloud Foundry is provided. .. _aiohttp: http://aiohttp.rtfd.org/ .. _asyncio: https://docs.python.org/3/library/asyncio.html .. _aTMDb: http://pythonhosted.org/atmdb/ .. _PyPI: https://pypi.python.org/pypi .. _PythonHosted: http://pythonhosted.org/aslack/
PypiClean
/spacepy-0.4.1.zip/spacepy-0.4.1/Doc/source/empiricals.rst
####################################################### empiricals - module with heliospheric empirical modules ####################################################### .. automodule:: spacepy.empiricals .. currentmodule:: spacepy.empiricals .. autosummary:: :toctree: autosummary getDststar getExpectedSWTemp getLmax getMagnetopause getMPstandoff getPlasmaPause getSolarProtonSpectra getSolarRotation getVampolaOrder omniFromDirectionalFlux vampolaPA
PypiClean
/wiliot-testers-5.0.10.tar.gz/wiliot-testers-5.0.10/wiliot_testers/sample/com_connect.py
from threading import Thread import logging import threading from tkinter import * from serial import tools import pygubu from os.path import abspath from json import dump from os.path import join, dirname from wiliot_testers.test_equipment import * from wiliot_core import WiliotGateway, ActionType, DataType from configs_gui import CONFIGS_DIR barcodeMutex = threading.Lock() CONNECT_HW = 'Connect HW' GO = 'Go' CONTINUE = 'Continue' READ = 'Read' FINISH = 'Finish' SEND = 'Send' ADD = 'ADD' REMOVE = 'REMOVE' GW_CANCEL = '!reset' # gateway GW_VERSION = 'Gateway version: ' GW_AVAILABLE_VERSION = 'Available Version: ' # attenuator ATTENUATION = 'atten' BLE = 'Ble' LORA = 'LoRa' BARCODES = 'Barcodes' ATTENUATORS = 'Atten' CHAMBERS = 'Chambers' GW_TBP_VERSION = '2.5.1' class ComConnect(object): ''' classdocs ''' isGui = False hwConnected = False cur_gw_tbp_version = False missing_com_port = False wiliotTags = False gateway = None attenuator = None chambers_move_com = '' barcodes_move_com = '' gwVersion = '' reel_id = '' gtin = '' barcodes_state = ADD chambers_state = ADD barcodes_serials = {} chambers_serials = {} atten_serials = {} sensors_serials = {} used_ports = [] gw_com_port = [] barcode_error = [] chambers_to_close = [] gw_latest_version = [''] gw_update_status = 'disabled' start_time = 0 temperature_sensor_readings = [] all_sensors = [] ttk = None def __init__(self, top_builder=None, new_tag_func=None, update_go=None, default_dict=None, logger=None): ''' Constructor ''' self.gateway = WiliotGateway(logger_name='sample') self.top_builder = top_builder self.add_tag_to_test = new_tag_func self.update_go_state = update_go self.default_dict = default_dict if logger is None: self.logger = logging.getLogger('sample') else: self.logger = logger def __del__(self): if self.gateway is not None and self.is_gw_serial_open(): self.gateway.close_port() for com_port, barcode in self.barcodes_serials.items(): if barcode is not None and barcode.is_open(): barcode.close_port() for com_port, chamber in self.chambers_serials.items(): if chamber is not None and chamber.is_connected(): chamber.open_chamber() chamber.close_port() # for com_port, atten in self.atten_serials.items(): # if atten!=None and 'serial' in atten.keys() and atten['serial']!=None and atten: # atten.disconnect() def gui(self, ttk_frame=None): self.builder = builder = pygubu.Builder() ui_file = join(abspath(dirname(__file__)), 'utils', 'com_connect.ui') self.builder.add_from_file(ui_file) img_path = join(abspath(dirname(__file__)), '') builder.add_resource_path(img_path) img_path = join(abspath(dirname(__file__)), 'utils') builder.add_resource_path(img_path) self.ttk = ttk_frame self.ttk.title("ComConnect") self.mainwindow = self.builder.get_object('mainwindow', self.ttk) self.builder.connect_callbacks(self) self.isGui = True self.find_com_ports() self.set_gui_defaults() self.ttk.protocol("WM_DELETE_WINDOW", self.close) self.ttk.lift() self.ttk.attributes("-topmost", True) self.ttk.attributes("-topmost", False) # self.set_gui_defaults() self.ttk.mainloop() def find_com_ports(self, *args): com_ports = serial_ports() available_ports = [com_port for com_port in com_ports if com_port not in self.used_ports] self.update_com_gui(available_ports, com_ports) def set_gui_defaults(self): if self.serials_connected(CHAMBERS): self.builder.get_object('connect_chambers').configure(text='Disconnect') self.builder.get_object('chosenChambers')['state'] = 'disabled' self.builder.get_object('availableChambers')['state'] = 'disabled' 
self.builder.get_object('chambers_up')['state'] = 'disabled' self.builder.get_object('chambers_down')['state'] = 'disabled' self.builder.get_object('addChambers')['state'] = 'disabled' else: self.builder.get_object('connect_chambers').configure(text='Connect') ble_atten = '' lora_atten = '' for com_port, atten in self.atten_serials.items(): ble_atten = com_port if atten['type'] == BLE else ble_atten lora_atten = com_port if atten['type'] == LORA else lora_atten self.builder.get_object('attenComBle').set(ble_atten) self.builder.get_object('attenComLoRa').set(lora_atten) if self.serials_connected(ATTENUATORS): self.builder.get_object('connect_atten').configure(text='Disconnect') self.builder.get_object('attenComBle')['state'] = 'disabled' self.builder.get_object('attenComLoRa')['state'] = 'disabled' else: self.builder.get_object('connect_atten').configure(text='Connect') pass def update_com_gui(self, available_ports, com_ports): self.builder.get_object('gwCom')['values'] = available_ports + [''] self.builder.get_object('attenComBle')['values'] = available_ports + [''] self.builder.get_object('attenComLoRa')['values'] = available_ports + [''] self.update_multi_serials(available_ports, BARCODES) self.update_multi_serials(available_ports, CHAMBERS) self.missing_com_port = False self.check_chosen_ports(com_ports) self.check_opened_ports() def check_chosen_ports(self, com_ports): if len(self.gw_com_port) == 0 or self.gw_com_port[0] not in com_ports: self.gw_com_port = [''] self.builder.get_object('gwCom').set('') self.missing_com_port = True i = 0 while i < len(self.atten_serials.keys()): port = list(self.atten_serials.keys())[i] atten = list(self.atten_serials.values())[i] if port != '' and port not in com_ports: self.atten_serials.pop(port) self.builder.get_object(f"attenCom{atten['type']}").set('') self.missing_com_port = True continue i += 1 self.check_multi_coms(BARCODES, com_ports) self.check_multi_coms(CHAMBERS, com_ports) def check_multi_coms(self, obj, com_ports): self.builder.get_object(f'chosen{obj}').delete(0, END) ports = getattr(self, f'{obj.lower()}_serials') for port in ports.keys(): if port in com_ports: self.builder.get_object(f'chosen{obj}').insert(END, port) else: self.missing_com_port = True def check_opened_ports(self): if self.is_gui_opened(): self.check_gw_open() self.check_multi_open(BARCODES) self.check_multi_open(CHAMBERS) self.check_atten_open() def check_multi_open(self, obj): if self.serials_connected(obj): self.builder.get_object(f'connect_{obj.lower()}').configure(text='Disconnect') self.builder.get_object(f'chosen{obj}')['state'] = 'disabled' self.builder.get_object(f'available{obj}')['state'] = 'disabled' self.builder.get_object(f'{obj.lower()}_up')['state'] = 'disabled' self.builder.get_object(f'{obj.lower()}_down')['state'] = 'disabled' self.builder.get_object(f'add{obj}')['state'] = 'disabled' else: self.builder.get_object(f'connect_{obj.lower()}').configure(text='Connect') self.builder.get_object(f'chosen{obj}')['state'] = 'normal' self.builder.get_object(f'available{obj}')['state'] = 'normal' self.builder.get_object(f'{obj.lower()}_up')['state'] = 'normal' self.builder.get_object(f'{obj.lower()}_down')['state'] = 'normal' self.builder.get_object(f'add{obj}')['state'] = 'normal' def check_gw_open(self): if len(self.gw_com_port) > 0 and self.gw_com_port[0] != '': self.builder.get_object('gwCom').set(self.gw_com_port[0]) if self.is_gw_serial_open(): self.builder.get_object('connect_gw').configure(text='Disconnect') self.builder.get_object('gwCom')['state'] = 
'disabled' self.builder.get_object('version').configure(text=GW_VERSION + self.gwVersion[0]) self.builder.get_object('latestVersion').configure(text=GW_AVAILABLE_VERSION + self.gw_latest_version[0]) self.builder.get_object('update_gw')['state'] = self.gw_update_status else: self.builder.get_object('connect_gw').configure(text='Connect') self.builder.get_object('gwCom')['state'] = 'normal' self.builder.get_object('version').configure(text=GW_VERSION) self.builder.get_object('latestVersion').configure(text=GW_AVAILABLE_VERSION) self.builder.get_object('update_gw')['state'] = 'disabled' def check_atten_open(self): connected = False for com, atten in self.atten_serials.items(): if atten['serial'] is not None and atten['serial'].GetActiveTE().is_open(): connected = True if connected: self.builder.get_object(f'connect_atten').configure(text='Disconnect') self.builder.get_object(f'attenComLoRa')['state'] = 'disabled' self.builder.get_object(f'attenComBle')['state'] = 'disabled' else: self.builder.get_object(f'connect_atten').configure(text='Connect') self.builder.get_object(f'attenComLoRa')['state'] = 'normal' self.builder.get_object(f'attenComBle')['state'] = 'normal' def choose_com_ports(self): default_dict = self.default_dict com_ports = [s.device for s in tools.list_ports.comports()] if len(com_ports) == 0: com_ports = [s.name for s in tools.list_ports.comports()] if 'gw' in default_dict.keys() and default_dict['gw'] in com_ports: self.gw_com_port = [default_dict['gw']] self.used_ports.append(default_dict['gw']) else: self.gw_com_port = [''] self.missing_com_port = True if 'atten' in default_dict.keys() and BLE in default_dict['atten'].keys() and default_dict['atten'][BLE] \ in com_ports: self.atten_serials[default_dict['atten'][BLE]] = {} self.atten_serials[default_dict['atten'][BLE]]['type'] = BLE self.atten_serials[default_dict['atten'][BLE]]['serial'] = None self.used_ports.append(default_dict['atten'][BLE]) elif 'atten' in default_dict.keys() and BLE in default_dict['atten'].keys() \ and default_dict['atten'][BLE].strip() != '': self.missing_com_port = True if 'atten' in default_dict.keys() and LORA in default_dict['atten'].keys() and default_dict['atten'][LORA] \ in com_ports: self.atten_serials[default_dict['atten'][LORA]] = {} self.atten_serials[default_dict['atten'][LORA]]['type'] = LORA self.atten_serials[default_dict['atten'][LORA]]['serial'] = None self.used_ports.append(default_dict['atten'][LORA]) elif 'atten' in default_dict.keys() and LORA in default_dict['atten'].keys() \ and default_dict['atten'][LORA].strip() != '': self.missing_com_port = True if 'barcodes' in default_dict.keys(): self.barcodes_serials = dict.fromkeys([barcode for barcode in default_dict['barcodes'] if barcode in com_ports], None) self.used_ports += list(self.barcodes_serials.keys()) if 'chambers' in default_dict.keys(): self.chambers_serials = dict.fromkeys([chamber for chamber in default_dict['chambers'] if chamber in com_ports], None) self.used_ports += list(self.chambers_serials.keys()) if 'temperature_sensors' in default_dict.keys(): self.sensors_serials = dict.fromkeys([sensor for sensor in default_dict['temperature_sensors']], None) missing_barcodes = [] missing_chambers = [] if 'barcodes' in default_dict.keys(): missing_barcodes = [barcode for barcode in default_dict['barcodes'] if barcode not in com_ports] if 'chambers' in default_dict.keys(): missing_chambers = [chamber for chamber in default_dict['chambers'] if chamber not in com_ports] if any(missing_barcodes + missing_chambers): 
self.missing_com_port = True return self.missing_com_port def choose_gw(self, *args): if len(self.gw_com_port) > 0 and self.gw_com_port[0] != '': self.used_ports.pop(self.used_ports.index(self.gw_com_port[0])) self.gw_com_port = [self.builder.get_object('gwCom').get()] self.used_ports.append(self.gw_com_port[0]) def choose_ble_atten(self, *args): ble_com = self.builder.get_object('attenComBle').get() ble_last_com = [com for com, item in self.atten_serials.items() if item['type'] == BLE] if len(ble_last_com) > 0: self.atten_serials.pop(ble_last_com[0]) self.used_ports.pop(self.used_ports.index(ble_last_com[0])) if ble_com.strip() != '': self.atten_serials[ble_com] = {} self.atten_serials[ble_com]['type'] = BLE self.atten_serials[ble_com]['serial'] = None self.used_ports.append(ble_com) def choose_lora_atten(self, *args): lora_com = self.builder.get_object('attenComLoRa').get() lora_last_com = [com for com, item in self.atten_serials.items() if item['type'] == LORA] if len(lora_last_com) > 0: self.atten_serials.pop(lora_last_com[0]) self.used_ports.pop(self.used_ports.index(lora_last_com[0])) if lora_com.strip() != '': self.atten_serials[lora_com] = {} self.atten_serials[lora_com]['type'] = LORA self.atten_serials[lora_com]['serial'] = None self.used_ports.append(lora_com) def connect_all(self, gui=True): if not self.is_gw_serial_open(): success = self.connect_gw(gui) if not success: return if not self.serials_connected(ATTENUATORS): self.connect_atten(gui) if not self.serials_connected(BARCODES): self.connect_barcodes(gui) if not self.serials_connected(CHAMBERS): self.connect_chambers(gui) self.connect_temperature_sensor() self.hwConnected = True def connect_gw(self, gui=True, disconnect=False): if not self.is_gw_serial_open() and not disconnect: if len(self.gw_com_port) == 0 or self.gw_com_port[0].strip() == '': self.popup_message('No default com port for GW, please choose GW com port.', title='Error', log='error') return False com_port = self.gw_com_port[0] self.gateway.open_port(port=com_port, baud=921600) if self.is_gw_serial_open(): self.start_listener(not_print_str=True) self.logger.info(f'GW is connected on port: {com_port}.') self.gateway.reset_gw() sleep(1) version = self.gateway.get_gw_version() self.gwVersion = version self.gw_latest_version = latest_version = self.gateway.get_latest_version_number() cur_version = int(version[0].replace('.', '')) self.gw_update_status = 'normal' if cur_version < int(latest_version[0].replace('.', '')) \ else 'disabled' if cur_version >= int(GW_TBP_VERSION.replace('.', '')): self.cur_gw_tbp_version = True else: self.logger.error(f'Error connecting to GW on port: {com_port}.') return False else: self.gateway.close_port() self.builder.get_object('connect_gw').configure(text='Connect') self.builder.get_object('version').configure(text=GW_VERSION) self.builder.get_object('latestVersion').configure(text=GW_AVAILABLE_VERSION) self.builder.get_object('gwCom')['state'] = 'normal' if gui: self.check_gw_open() return True def connect_barcodes(self, gui=True): self.connect_multi_serials(BARCODES, gui=gui) def connect_chambers(self, gui=True): self.connect_multi_serials(CHAMBERS, gui=gui) def connect_atten(self, gui=True): is_connected = self.connect_multi_serials(ATTENUATORS, gui=gui) if gui: atten_state = 'disabled' if is_connected else 'normal' self.builder.get_object('attenComLoRa')['state'] = atten_state self.builder.get_object('attenComBle')['state'] = atten_state def connect_temperature_sensor(self): self.all_sensors = [] 
self.temperature_sensor_readings = [] for sensor_name in self.sensors_serials.keys(): self.temperature_sensor_readings.append([]) try: sensor_temp = YoctoTemperatureSensor() is_temp_sensor_connected = sensor_temp.connect(target=sensor_name) if is_temp_sensor_connected: self.logger.info('Temperature Sensor {} is connected'.format(sensor_temp.get_sensor_name())) self.all_sensors.append(sensor_temp) else: self.popup_message('Could not connect to Temperature Sensor according to ' 'the following name: {}'.format(sensor_name), title='Error', log='error') self.all_sensors.append(None) raise ConnectionError("Could not establish Temperature sensor connection") except Exception as e: self.logger.info('while connecting to the Temperature Sensor the following error occurs : {}'.format(e)) self.all_sensors.append(None) raise Exception(f'Could not establish Temperature sensor connection due to {e}') def read_temperature_sensor(self): for i, sensor in enumerate(self.all_sensors): if sensor is not None: self.temperature_sensor_readings[i].append(sensor.get_temperature()) def connect_multi_serials(self, obj, gui=True): serials = getattr(self, f'{obj.lower()}_serials') is_connected = False if self.serials_connected(obj): self.close_serials(obj, serials) self.builder.get_object(f'connect_{obj.lower()}').configure(text='Connect') try: self.builder.get_object(f'chosen{obj}')['state'] = 'normal' self.builder.get_object(f'available{obj}')['state'] = 'normal' self.builder.get_object(f'add{obj}')['state'] = 'normal' except: pass elif len(serials.keys()) > 0: self.open_serials(obj, serials) if self.serials_connected(obj) and gui: # if gui: is_connected = True self.builder.get_object(f'connect_{obj.lower()}').configure(text='Disconnect') try: self.builder.get_object(f'chosen{obj}')['state'] = 'disabled' self.builder.get_object(f'available{obj}')['state'] = 'disabled' self.builder.get_object(f'add{obj}')['state'] = 'disabled' except: pass if gui: self.find_com_ports() # self.update_go_state() return is_connected def open_serials(self, obj, serials): threads = [] for com_port, com_serial in serials.items(): if 'barcode' in obj.lower(): if com_serial is not None and com_serial.is_open(): continue com_serial = BarcodeScanner(com_port=com_port, log_type='LOG_NL') if com_serial.is_open(): # self.used_ports.append(com_port) serials[com_port] = com_serial elif 'chamber' in obj.lower(): if com_serial is not None and com_serial.is_connected(): continue temp_thread = Thread(target=self.connect_chamber, args=([com_port, serials])) temp_thread.start() threads.append(temp_thread) elif 'atten' in obj.lower(): if com_serial['serial'] is not None or com_port.strip() == '': # 8 if serial['serial']!=None and # serial['serial'].GetActiveTE().s.is_open(): continue com_serial = Attenuator('API', comport=com_port) if com_serial.GetActiveTE().is_open(): # self.used_ports.append(com_port) serials[com_port]['serial'] = com_serial for thread in threads: thread.join() def close(self): if self.is_gw_serial_open() and self.serials_connected(ATTENUATORS) and self.serials_connected(BARCODES): self.hwConnected = True self.enable_hw_connected() if self.isGui: self.isGui = False self.ttk.destroy() return self.hwConnected def save(self): # default_dict = {} # if isfile(join(CONFIGS_DIR, '.defaults.json')): # with open(join(CONFIGS_DIR, '.defaults.json'), 'r') as defaultComs: # default_dict = load(defaultComs) self.default_dict['gw'] = self.gw_com_port[0] self.default_dict['atten'] = {} for com_port, atten in self.atten_serials.items(): 
self.default_dict['atten'][atten['type']] = com_port self.default_dict['barcodes'] = list(self.barcodes_serials.keys()) self.default_dict['chambers'] = list(self.chambers_serials.keys()) with open(join(CONFIGS_DIR, '.defaults.json'), 'w+') as defaultComs: dump(dict(self.default_dict), defaultComs, indent=4) self.logger.info(f'Com ports saved successfully.') def focus_barcode_available(self, *args): self.focus_available(BARCODES) def focus_barcode_chosen(self, *args): self.focus_chosen(BARCODES) def focus_chamber_available(self, *args): self.focus_available(CHAMBERS) def focus_chamber_chosen(self, *args): self.focus_chosen(CHAMBERS) def focus_available(self, obj): self.builder.get_object(f'add{obj}').configure(text='>') setattr(self, f'{obj.lower()}_state', ADD) def focus_chosen(self, obj): self.builder.get_object(f'add{obj}').configure(text='<') setattr(self, f'{obj.lower()}_state', REMOVE) setattr(self, f'{obj.lower()}_move_com', '') def add_barcode(self): if getattr(self, f'{BARCODES.lower()}_state') == ADD: com_chosen = self.builder.get_object(f'available{BARCODES}').get(ACTIVE) try: temp_barcode = BarcodeScanner(com_port=com_chosen) except Exception as e: self.popup_message(f'Could NOT connect. {e}', title='Error', log='error') self.add(BARCODES) self.find_com_ports() def add_chamber(self): self.add(CHAMBERS) self.find_com_ports() def add(self, obj): if getattr(self, f'{obj.lower()}_state') == ADD: sending = self.builder.get_object(f'available{obj}') receiving = self.builder.get_object(f'chosen{obj}') else: sending = self.builder.get_object(f'chosen{obj}') receiving = self.builder.get_object(f'available{obj}') com_list = list(sending.get(0, END)) com_chosen = sending.get(ACTIVE) receiving.insert(END, com_chosen) com_index = com_list.index(com_chosen) sending.delete(com_index, com_index) serials = getattr(self, f'{obj.lower()}_serials') com_ports = self.builder.get_object(f'chosen{obj}').get(0, END) old_ports = [port for port in serials.keys() if port not in com_ports] new_serials = dict(zip(com_ports, [None] * len(com_ports))) self.used_ports = [port for port in self.used_ports if port not in old_ports] + list(com_ports) setattr(self, f'{obj.lower()}_serials', new_serials) def chamber_up(self): self.up(CHAMBERS) def chamber_down(self): self.down(CHAMBERS) def barcode_up(self): self.up(BARCODES) def barcode_down(self): self.down(BARCODES) def up(self, obj): com_list = list(self.builder.get_object(f'chosen{obj}').get(0, END)) if getattr(self, f'{obj.lower()}_move_com') == '': chosen_com = self.builder.get_object(f'chosen{obj}').get(ACTIVE) setattr(self, f'{obj.lower()}_move_com', chosen_com) else: chosen_com = getattr(self, f'{obj.lower()}_move_com') if chosen_com != '': com_index = com_list.index(chosen_com) if com_index > 0: com_list.pop(com_list.index(chosen_com)) com_list.insert(com_index - 1, chosen_com) self.builder.get_object(f'chosen{obj}').delete(0, END) for com in com_list: self.builder.get_object(f'chosen{obj}').insert(END, com) self.builder.get_object(f'chosen{obj}').select_set(com_index - 1) def down(self, obj): com_list = list(self.builder.get_object(f'chosen{obj}').get(0, END)) if getattr(self, f'{obj.lower()}_move_com') == '': chosen_com = self.builder.get_object(f'chosen{obj}').get(ACTIVE) setattr(self, f'{obj.lower()}_move_com', chosen_com) else: chosen_com = getattr(self, f'{obj.lower()}_move_com') if chosen_com != '': com_index = com_list.index(chosen_com) if com_index < (len(com_list) - 1): com_list.pop(com_list.index(chosen_com)) com_list.insert(com_index + 1, 
chosen_com) self.builder.get_object(f'chosen{obj}').delete(0, END) for com in com_list: self.builder.get_object(f'chosen{obj}').insert(END, com) self.builder.get_object(f'chosen{obj}').select_set(com_index + 1) def update_multi_serials(self, available_ports, obj): self.builder.get_object(f'available{obj}').delete(0, END) for port in available_ports: if port not in self.used_ports: self.builder.get_object(f'available{obj}').insert(END, port) def update_atten_serials(self, available_ports): for com, item in self.atten_serials.items(): if item['serial'] is None or not item['serial'].is_open(): self.builder.get_object(f'attenCom{item["type"]}')['values'] = available_ports else: self.builder.get_object(f'attenCom{item["type"]}').set(com) def connect_chamber(self, com_port, serials): com_serial = Tescom(com_port) if com_serial.is_connected(): # self.used_ports.append(com_port) serials[com_port] = com_serial if not com_serial.is_door_open(): com_serial.open_chamber() def close_serials(self, obj, serials): if 'atten' in obj.lower(): for serial in serials.values(): # serial['serial'].GetActiveTE.s.close_port() if serial['serial'] is not None and serial['serial'].GetActiveTE().is_open(): serial['serial'].GetActiveTE().close_port() serial['serial'] = None # self.used_ports.remove(com_port) else: for com_serial in serials.values(): if 'chamber' in obj.lower(): com_serial.open_chamber() com_serial.close_port() # self.used_ports.remove(com_port) # serials.pop(com_port) def serials_connected(self, obj): serials = getattr(self, f'{obj.lower()}_serials') if 'atten' in obj.lower(): serials = dict(zip(serials.keys(), [atten['serial'] for atten in serials.values()])) connected_serials = 0 for com_port, com_serial in serials.items(): if com_serial is not None: if 'barcode' in obj.lower() and com_serial.is_open(): connected_serials += 1 if 'chamber' in obj.lower() and com_serial.is_connected(): connected_serials += 1 if 'atten' in obj.lower(): connected_serials += 1 # if com_port.strip()=='': # connected_serials += 1 if connected_serials > 0 and connected_serials == len(serials.keys()): return True else: return False def get_data(self, actionType=ActionType.ALL_SAMPLE, dataType=DataType.PACKET_LIST): return self.gateway.get_packets(action_type=actionType, data_type=dataType) def read_barcode(self, scanner_index=0, close_chamber=False, add_to_test=False, n_try=5): if len(list(self.barcodes_serials.values())) == 0: self.logger.error('Trying to read barcode but no scanner were connected') return None, None scanner = list(self.barcodes_serials.values())[scanner_index] for i in range(n_try): full_data, cur_id, reel_id, gtin = scanner.scan_ext_id() if full_data is None and cur_id is None and reel_id is None and full_data is None: continue break cur_id = cur_id if cur_id is not None else full_data reel_id = reel_id if reel_id is not None else full_data gtin = gtin if gtin is not None else '' if reel_id is not None: self.reel_id = reel_id self.gtin = gtin if cur_id is None: barcodeMutex.acquire() if close_chamber: self.barcode_error.append(scanner_index) barcodeMutex.release() return None, None if not close_chamber: reel_id_obj = self.top_builder.tkvariables.get('reelId') reel_id_obj.set(self.reel_id) else: success = self.add_tag_to_test(cur_id, reel_id, scanner_index=scanner_index, add_to_test=add_to_test) if success: self.chambers_to_close.append(scanner_index) return cur_id, reel_id def read_scanners_barcodes(self, indexes=()): if len(indexes) == 0: indexes = list(range(len(self.barcodes_serials.values()))) 
scanner_threads = [] self.barcode_error = [] self.chambers_to_close = [] for i in indexes: t = threading.Thread(target=self.read_barcode, args=(i, True, True)) scanner_threads.append(t) t.start() for i in range(len(scanner_threads)): t = scanner_threads[i] t.join() read_message = '' title = 'Warning' font = 18 if len(self.barcode_error) > 0: read_message += f'Error reading external ID from chambers {self.barcode_error}, try repositioning the tags.\n' title = 'Error' font = 16 if len(self.chambers_to_close) > 0: read_message += f'Chambers are closing!!\nWatch your hands!!!' popupThread = threading.Thread(target=self.popup_message, args=(read_message, title, ("Helvetica", font), title.lower())) popupThread.start() popupThread.join() if len(self.chambers_to_close) > 0: self.close_chambers(self.chambers_to_close) self.update_go_state() def enable_hw_connected(self): self.top_builder.get_object('read_qr')['state'] = 'normal' self.top_builder.get_object('reelId')['state'] = 'normal' if self.top_builder.tkvariables.get('go').get() == CONNECT_HW: self.top_builder.tkvariables.get('go').set(READ) self.top_builder.get_object('go')['state'] = 'disabled' def send_gw_app(self, params): str_rsp = [] self.gateway.reset_buffer() self.gateway.clear_rsp_str_input_q() self.gateway.reset_start_time() for param, value in params.items(): if param.startswith(ATTENUATION): for com_port, atten in self.atten_serials.items(): if param.lower().endswith(atten['type'].lower()): try: attenuation = atten['serial'].GetActiveTE().Setattn(float(value)) self.logger.info(f"{atten['type']} Attenuation set to: {str(attenuation).strip()}") except Exception as e: self.logger.error(f"{atten['type']} Attenuator error: try reconnect the attenuator [{e}].") return False self.start_time = time() try: str_rsp.append(self.gateway.write('!sub1g_sync 1', with_ack=True)) str_rsp.append(self.gateway.write('!listen_to_tag_only 1', with_ack=True)) str_rsp.append(self.gateway.write('!set_tester_mode 1', with_ack=True)) if 'EmulateSurfaceValue' in params.keys(): str_rsp.append(self.gateway.write('!set_sub_1_ghz_energizing_frequency {}'.format( params['EmulateSurfaceValue']), with_ack=True)) _, gw_ans = self.gateway.config_gw(effective_output_power_val=22, sub1g_output_power_val=29, energy_pattern_val=params['pattern'], received_channel=params['channel'], time_profile_val=[params['tOn'], params['tTotal']], start_gw_app=True, filter_val=False, pacer_val=0, with_ack=True) for rsp in gw_ans: str_rsp.append(rsp) except Exception as e: self.logger.error(f'Gateway error: try reconnect the gateway [{e}].') self.cancel_gw_commands() return False for command in str_rsp: if "Command Complete Event" not in command['raw'] and "Energizing" not in command['raw']: self.logger.error(f'Gateway response: {command["raw"]}') self.cancel_gw_commands() return False return True def connect_and_close(self): self.connect_all() self.close() def get_reel_id(self): return self.reel_id def get_gtin(self): return self.gtin def get_reel_external(self): return self.gtin + self.reel_id def is_gw_serial_open(self): serial_open, _, _ = self.gateway.get_connection_status() return serial_open def is_gw_data_available(self): return self.gateway.is_data_available() def is_gui_opened(self): return self.isGui def get_gw_version(self): return self.gwVersion[0] def update_gw(self): self.gateway.update_version() def get_gw_time(self): return self.start_time def cancel_gw_commands(self): # self.gateway.write(GW_CANCEL) self.gateway.reset_gw() # self.gateway.stop_continuous_listener() # 
self.gateway.reset_buffer() sleep(0.1) def start_listener(self, not_print_str=True): # self.gateway.start_continuous_listener(not_print_str) self.gateway.start_continuous_listener() def is_hw_connected(self): return self.hwConnected def get_chambers(self): return list(self.chambers_serials.values()) def open_chambers(self, indexes=()): chambers_threads = [] chambers = list(self.chambers_serials.values()) if len(indexes) == 0: indexes = list(range(len(chambers))) for index in indexes: if len(chambers) > index and chambers[index] is not None: temp_thread = Thread(target=chambers[index].open_chamber, args=()) temp_thread.start() chambers_threads.append(temp_thread) for thread in chambers_threads: thread.join() def close_chambers(self, indexes=()): chambers = list(self.chambers_serials.values()) chambers_threads = [] if len(indexes) == 0: indexes = list(range(len(chambers))) for index in indexes: if len(chambers) > index and chambers[index] is not None: t = threading.Thread(target=chambers[index].close_chamber, args=()) chambers_threads.append(t) t.start() for thread in chambers_threads: thread.join() def get_num_of_barcode_scanners(self): return len(self.barcodes_serials.keys()) def get_error_barcode(self): return self.barcode_error def gw_tbp_version(self): return self.cur_gw_tbp_version def gw_get_packets(self): self.gateway.get_packets(action_type=ActionType.ALL_SAMPLE, data_type=DataType.PROCESSED) def get_default_dict(self): return self.default_dict def popup_message(self, msg, title='Error', font=("Helvetica", 10), log='info', bg=None, tk_frame=None): if tk_frame: popup = Toplevel(tk_frame) else: popup = Tk() popup.eval('tk::PlaceWindow . center') popup.wm_title(title) if bg is not None: popup.configure(bg=bg) getattr(self.logger, log)(f'{title} - {msg}') def popup_exit(): popup.destroy() label = Label(popup, text=msg, font=font) label.pack(side="top", fill="x", padx=10, pady=10) b1 = Button(popup, text="Okay", command=popup_exit) b1.pack(padx=10, pady=10) popup.mainloop()
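
# Hypothetical usage sketch (not part of the original module): exercising a
# single Tescom RF chamber the same way connect_chamber()/close_serials() do
# above. 'COM3' is a placeholder port name and must be replaced with a real one.
def _chamber_smoke_test(com_port='COM3'):
    chamber = Tescom(com_port)
    if chamber.is_connected():
        if not chamber.is_door_open():
            chamber.open_chamber()
        chamber.close_chamber()
        chamber.close_port()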
PypiClean
/pg_statviz-0.5-py3-none-any.whl/pg_statviz/modules/wal.py
__author__ = "Jimmy Angelakos" __copyright__ = "Copyright (c) 2023 Jimmy Angelakos" __license__ = "PostgreSQL License" import argparse import getpass import logging import numpy from argh.decorators import arg from dateutil.parser import isoparse from matplotlib.ticker import MaxNLocator from pg_statviz.libs import plot from pg_statviz.libs.dbconn import dbconn from pg_statviz.libs.info import getinfo @arg('-d', '--dbname', help="database name to analyze") @arg('-h', '--host', metavar="HOSTNAME", help="database server host or socket directory") @arg('-p', '--port', help="database server port") @arg('-U', '--username', help="database user name") @arg('-W', '--password', action='store_true', help="force password prompt (should happen automatically)") @arg('-D', '--daterange', nargs=2, metavar=('FROM', 'TO'), type=str, help="date range to be analyzed in ISO 8601 format e.g. 2023-01-01T00:00 " + "2023-01-01T23:59") @arg('-O', '--outputdir', help="output directory") @arg('--info', help=argparse.SUPPRESS) @arg('--conn', help=argparse.SUPPRESS) def wal(dbname=getpass.getuser(), host="/var/run/postgresql", port="5432", username=getpass.getuser(), password=False, daterange=[], outputdir=None, info=None, conn=None): "run WAL generation analysis module" MAX_RESULTS = 1000 logging.basicConfig() _logger = logging.getLogger(__name__) _logger.setLevel(logging.INFO) if not conn: conn_details = {'dbname': dbname, 'user': username, 'password': getpass.getpass("Password: ") if password else password, 'host': host, 'port': port} conn = dbconn(**conn_details) if not info: info = getinfo(conn) _logger.info("Running WAL generation analysis") if daterange: daterange = [isoparse(d) for d in daterange] if daterange[0] > daterange[1]: daterange = [daterange[1], daterange[0]] else: daterange = ['-infinity', 'now()'] # Retrieve the snapshots from DB cur = conn.cursor() cur.execute("""SELECT wal_bytes, snapshot_tstamp, stats_reset FROM pgstatviz.wal WHERE snapshot_tstamp BETWEEN %s AND %s ORDER BY snapshot_tstamp""", (daterange[0], daterange[1])) data = cur.fetchmany(MAX_RESULTS) if not data: raise SystemExit("No pg_statviz snapshots found in this database") tstamps = [t['snapshot_tstamp'] for t in data] walgb = [round(w['wal_bytes'] / 1073741824, 1) for w in data] # Plot WAL in GB plt, fig = plot.setup() plt.suptitle(f"pg_statviz · {info['hostname']}:{port}", fontweight='semibold') plt.title("WAL generated") plt.plot_date(tstamps, walgb, label="WAL", aa=True, linestyle='solid') plt.xlabel("Timestamp", fontweight='semibold') plt.ylabel("GB generated (since stats reset)", fontweight='semibold') fig.axes[0].set_ylim(bottom=0) fig.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) fig.legend() fig.tight_layout() outfile = f"""{outputdir.rstrip("/") + "/" if outputdir else ''}pg_statviz_{info['hostname'] .replace("/", "-")}_{port}_wal.png""" _logger.info(f"Saving {outfile}") plt.savefig(outfile) # WAL diff generator - yields list of the rates in MB/s def waldiff(data): yield numpy.nan for i, item in enumerate(data): if i + 1 < len(data): if data[i + 1]['stats_reset'] == data[i]['stats_reset']: s = (data[i + 1]['snapshot_tstamp'] - data[i]['snapshot_tstamp']).total_seconds() yield (int(data[i + 1]['wal_bytes']) - int(data[i]['wal_bytes'])) / 1048576 / s else: yield numpy.nan walrates = list(waldiff(data)) # Plot WAL rates plt, fig = plot.setup() plt.suptitle(f"pg_statviz · {info['hostname']}:{port}", fontweight='semibold') plt.title("WAL generation rate") plt.plot_date(tstamps, walrates, label="WAL", aa=True, 
linestyle='solid') plt.xlabel("Timestamp", fontweight='semibold') plt.ylabel("Avg. WAL generation rate (MB/s)", fontweight='semibold') fig.legend() fig.tight_layout() outfile = f"""{outputdir.rstrip("/") + "/" if outputdir else ''}pg_statviz_{info['hostname'] .replace("/", "-")}_{port}_wal_rate.png""" _logger.info(f"Saving {outfile}") plt.savefig(outfile)
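
# Hypothetical usage sketch (not part of the original module): the module is
# normally dispatched through the pg_statviz CLI, but the argh-decorated
# function can also be called directly. The connection details below are
# placeholders; the target database must already contain pg_statviz snapshots.
if __name__ == '__main__':
    wal(dbname='postgres', host='localhost', port='5432',
        username='postgres', outputdir='/tmp')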
PypiClean
/hydro-tune-0.1.0.tar.gz/hydro-tune-0.1.0/hydro/fuse_ops/batchnorm.py
from typing import Optional, Any import torch from torch import Tensor from torch.nn.modules import Module from torch.nn.parameter import Parameter from torch.nn import functional as F from torch.nn import init class _NormBase(Module): """Pytorch 2.0 Common base of _InstanceNorm and _BatchNorm""" _version = 2 __constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine", "B"] num_features: int eps: float momentum: float affine: bool track_running_stats: bool B: int # WARNING: weight and bias purposely not defined here. # See https://github.com/pytorch/pytorch/issues/39670 def __init__( self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = True, track_running_stats: bool = True, device=None, dtype=None, B: int = 1, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.B = B self.num_features = num_features self.eps = eps self.momentum = momentum self.affine = affine self.track_running_stats = track_running_stats if self.affine: self.weight = Parameter(torch.empty(B, num_features, **factory_kwargs)) self.bias = Parameter(torch.empty(B, num_features, **factory_kwargs)) else: self.register_parameter("weight", None) self.register_parameter("bias", None) if self.track_running_stats: self.register_buffer("running_mean", torch.zeros(B, num_features, **factory_kwargs)) self.register_buffer("running_var", torch.ones(B, num_features, **factory_kwargs)) self.running_mean: Optional[Tensor] self.running_var: Optional[Tensor] self.register_buffer( "num_batches_tracked", torch.tensor(0, dtype=torch.long, **{k: v for k, v in factory_kwargs.items() if k != "dtype"}), ) self.num_batches_tracked: Optional[Tensor] else: self.register_buffer("running_mean", None) self.register_buffer("running_var", None) self.register_buffer("num_batches_tracked", None) self.reset_parameters() def reset_running_stats(self) -> None: if self.track_running_stats: # running_mean/running_var/num_batches... 
are registered at runtime depending # if self.track_running_stats is on self.running_mean.zero_() # type: ignore[union-attr] self.running_var.fill_(1) # type: ignore[union-attr] self.num_batches_tracked.zero_() # type: ignore[union-attr,operator] def reset_parameters(self) -> None: self.reset_running_stats() if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def _check_input_dim(self, input): raise NotImplementedError def extra_repr(self): return ( "{num_features}, eps={eps}, momentum={momentum}, affine={affine}, " "track_running_stats={track_running_stats}, B={B}".format(**self.__dict__) ) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): version = local_metadata.get("version", None) if (version is None or version < 2) and self.track_running_stats: # at version 2: added num_batches_tracked buffer # this should have a default value of 0 num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key not in state_dict: state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long) super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ) def snatch_parameters(self, other, b): assert 0 <= b < self.B if self.affine: self.weight.data[b] = other.weight.data self.bias.data[b] = other.bias.data if self.track_running_stats: self.running_mean[b] = other.running_mean self.running_var[b] = other.running_var if self.num_batches_tracked == 0: self.num_batches_tracked = other.num_batches_tracked elif self.num_batches_tracked != other.num_batches_tracked: raise ValueError( 'Got different "num_batches_tracked", {} != {} for b={}'.format( self.num_batches_tracked, other.num_batches_tracked, b ) ) def keep_partial_parameters(self, keep_list): self.B = len(keep_list) if self.affine: self.weight.data = self.weight.data[keep_list, :] self.bias.data = self.bias.data[keep_list, :] if self.track_running_stats: self.running_mean.data = self.running_mean.data[keep_list, :] self.running_var.data = self.running_var.data[keep_list, :] class _BatchNorm(_NormBase): def __init__( self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = True, track_running_stats: bool = True, device=None, dtype=None, B: int = 1, ): factory_kwargs = {"device": device, "dtype": dtype} super().__init__(num_features, eps, momentum, affine, track_running_stats, **factory_kwargs, B=B) def forward(self, input: Tensor) -> Tensor: input, ori_shape = self._check_input_dim(input) # exponential_average_factor is set to self.momentum # (when it is available) only so that it gets updated # in ONNX graph when this node is exported to ONNX. if self.momentum is None: exponential_average_factor = 0.0 else: exponential_average_factor = self.momentum if self.training and self.track_running_stats: # TODO: if statement only here to tell the jit to skip emitting this when it is None if self.num_batches_tracked is not None: # type: ignore[has-type] self.num_batches_tracked.add_(1) # type: ignore[has-type] if self.momentum is None: # use cumulative moving average exponential_average_factor = 1.0 / float(self.num_batches_tracked) else: # use exponential moving average exponential_average_factor = self.momentum r""" Decide whether the mini-batch stats should be used for normalization rather than the buffers. Mini-batch stats are used in training mode, and in eval mode when buffers are None. 
""" if self.training: bn_training = True else: bn_training = (self.running_mean is None) and (self.running_var is None) r""" Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are used for normalization (i.e. in eval mode when buffers are not None). """ self_weight, self_bias = ( ( self.weight.view(self.B * self.num_features), self.bias.view(self.B * self.num_features), ) if self.affine else (self.weight, self.bias) ) self_running_mean, self_running_var = ( ( self.running_mean.view(self.B * self.num_features), self.running_var.view(self.B * self.num_features), ) if self.track_running_stats else (self.running_mean, self.running_var) ) return F.batch_norm( input, # If buffers are not to be tracked, ensure that they won't be updated self_running_mean if not self.training or self.track_running_stats else None, self_running_var if not self.training or self.track_running_stats else None, self_weight, self_bias, bn_training, exponential_average_factor, self.eps, ).view(ori_shape) class BatchNorm1d(_BatchNorm): r"""Pytorch 2.0 Applies Batch Normalization over a 2D or 3D input as described in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ . .. math:: y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated per-dimension over the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors of size `C` (where `C` is the number of features or channels of the input). By default, the elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0. The standard-deviation is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`. Also by default, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default :attr:`momentum` of 0.1. If :attr:`track_running_stats` is set to ``False``, this layer then does not keep running estimates, and batch statistics are instead used during evaluation time as well. .. note:: This :attr:`momentum` argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the new observed value. Because the Batch Normalization is done over the `C` dimension, computing statistics on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization. Args: num_features: number of features or channels :math:`C` of the input eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Can be set to ``None`` for cumulative moving average (i.e. simple average). Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters. 
Default: ``True`` track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics, and initializes statistics buffers :attr:`running_mean` and :attr:`running_var` as ``None``. When these buffers are ``None``, this module always uses batch statistics. in both training and eval modes. Default: ``True`` Shape: - Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the batch size, :math:`C` is the number of features or channels, and :math:`L` is the sequence length - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) Examples:: >>> # With Learnable Parameters >>> m = nn.BatchNorm1d(100) >>> # Without Learnable Parameters >>> m = nn.BatchNorm1d(100, affine=False) >>> input = torch.randn(20, 100) >>> output = m(input) """ def _check_input_dim(self, input: Tensor) -> Tensor: if input.dim() != 3 and input.dim() != 4: raise ValueError("expected 3D or 4D input (got {}D input)".format(input.dim())) shape = input.shape len_input = input.dim() if len_input == 3: input = input.transpose(0, 1) return input, shape def forward(self, input: Tensor) -> Tensor: result = super().forward(input) return result.transpose(0, 1) class BatchNorm2d(_BatchNorm): r"""Pytorch 2.0 Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ . .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated per-dimension over the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0. The standard-deviation is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`. Also by default, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default :attr:`momentum` of 0.1. If :attr:`track_running_stats` is set to ``False``, this layer then does not keep running estimates, and batch statistics are instead used during evaluation time as well. .. note:: This :attr:`momentum` argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the new observed value. Because the Batch Normalization is done over the `C` dimension, computing statistics on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Can be set to ``None`` for cumulative moving average (i.e. simple average). Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters. 
Default: ``True`` track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics, and initializes statistics buffers :attr:`running_mean` and :attr:`running_var` as ``None``. When these buffers are ``None``, this module always uses batch statistics. in both training and eval modes. Default: ``True`` Shape: - Input: :math:`(N, C, H, W)` - Output: :math:`(N, C, H, W)` (same shape as input) Examples:: >>> # With Learnable Parameters >>> m = nn.BatchNorm2d(100) >>> # Without Learnable Parameters >>> m = nn.BatchNorm2d(100, affine=False) >>> input = torch.randn(20, 100, 35, 45) >>> output = m(input) """ def _check_input_dim(self, input: Tensor) -> Tensor: if input.dim() != 5: raise ValueError("expected 5D input (got {}D input)".format(input.dim())) shape = input.shape input = input.reshape(shape[0], -1, shape[3], shape[4]) return input, shape
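
# Hypothetical usage sketch (not part of the original module): the fused
# BatchNorm2d keeps B independent parameter sets, and its 5D input is assumed
# here to be laid out as (N, B, C, H, W), based on the reshape performed in
# _check_input_dim above.
if __name__ == "__main__":
    fused_bn = BatchNorm2d(num_features=16, B=4)
    x = torch.randn(8, 4, 16, 32, 32)
    y = fused_bn(x)
    print(y.shape)  # torch.Size([8, 4, 16, 32, 32])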
PypiClean
/mera_tvm_runtime-1.4.0-cp36-cp36m-manylinux_2_27_x86_64.whl/tvm/relay/testing/synthetic.py
from __future__ import absolute_import from tvm import relay from .init import create_workload, Constant from . import layers def get_net(input_shape=(1, 3, 24, 12), dtype="float32", wtype=None): """Get synthetic testing network. Parameters ---------- image_shape : tuple, optional The input shape as (batch_size, channels, height, width). dtype : str, optional The data type for the input. wtype : str, optional The data type for weights. Defaults to `dtype`. Returns ------- net : relay.Function The dataflow. """ if wtype is None: wtype = dtype data = relay.var("data", shape=input_shape, dtype=dtype) dense_shape = [-1, input_shape[3]] dense = relay.nn.relu( relay.nn.dense( relay.reshape(data, dense_shape), relay.var("dense_weight", shape=[input_shape[3], dense_shape[1]], dtype=wtype), ) ) dense = relay.reshape_like(dense, data) conv_shape = [input_shape[1], input_shape[1], 3, 3] conv = relay.nn.softmax( relay.nn.conv2d( data, relay.var("conv_weight", shape=conv_shape, dtype=wtype), padding=1, kernel_size=3, ) ) added = relay.add(dense, conv) biased = layers.batch_norm_infer( relay.nn.bias_add(added, relay.var("bias", dtype=wtype)), name="batch_norm" ) dense = relay.nn.relu( relay.nn.dense( relay.reshape(biased, dense_shape), relay.var("dense2_weight", shape=[input_shape[3], dense_shape[1]], dtype=wtype), ) ) dense = relay.reshape_like(dense, data) conv = relay.nn.softmax( relay.nn.conv2d( biased, relay.var("conv2_weight", shape=conv_shape, dtype=wtype), padding=1, kernel_size=3, ) ) added = relay.add(dense, conv) args = relay.analysis.free_vars(added) return relay.Function(args, added) def get_workload(input_shape=(1, 3, 24, 12), dtype="float32", wtype=None): """Get benchmark workload for the synthetic net. Parameters ---------- image_shape : tuple, optional The input shape as (batch_size, channels, height, width). dtype : str, optional The data type for the input. wtype : str, optional The data type for weights. Defaults to `dtype`. Returns ------- mod : tvm.IRModule The relay module that contains a synthetic network. params : dict of str to NDArray The parameters. """ return create_workload( get_net(input_shape=input_shape, dtype=dtype, wtype=wtype), initializer=Constant(), )
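
# Hypothetical usage sketch (not part of the original module): build the
# synthetic workload with its default input shape and inspect the result.
if __name__ == "__main__":
    mod, params = get_workload()
    print(mod)
    print({name: p.shape for name, p in params.items()})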
PypiClean
/pollination-utci-comfort-map-0.9.10.tar.gz/pollination-utci-comfort-map-0.9.10/pollination/utci_comfort_map/_comfort.py
from pollination_dsl.dag import Inputs, DAG, task from dataclasses import dataclass from typing import Dict, List from pollination.ladybug_comfort.epw import AirSpeedJson from pollination.ladybug_comfort.map import ShortwaveMrtMap, LongwaveMrtMap, AirMap, Tcp from pollination.ladybug_comfort.mtx import UtciMtx @dataclass class ComfortMappingEntryPoint(DAG): """Entry point for Comfort calculations.""" # inputs epw = Inputs.file( description='Weather file used for the comfort map.', extensions=['epw'] ) result_sql = Inputs.file( description='A SQLite file that was generated by EnergyPlus and contains ' 'hourly or sub-hourly thermal comfort results.', extensions=['sql', 'db', 'sqlite'], optional=True ) grid_name = Inputs.str( description='Sensor grid file name (used to name the final result files).' ) enclosure_info = Inputs.file( description='A JSON file containing information about the radiant ' 'enclosure that sensor points belong to.', extensions=['json'] ) view_factors = Inputs.file( description='A CSV of spherical view factors to the surfaces in the result-sql.', extensions=['csv'] ) modifiers = Inputs.file( description='Path to a modifiers file that aligns with the view-factors.', extensions=['mod', 'txt'] ) indirect_irradiance = Inputs.file( description='An .ill containing the indirect irradiance for each sensor.', extensions=['ill', 'irr'] ) direct_irradiance = Inputs.file( description='An .ill containing direct irradiance for each sensor.', extensions=['ill', 'irr'] ) ref_irradiance = Inputs.file( description='An .ill containing ground-reflected irradiance for each ' 'sensor.', extensions=['ill', 'irr'] ) sun_up_hours = Inputs.file( description='A sun-up-hours.txt file output by Radiance and aligns with the ' 'input irradiance files.' ) occ_schedules = Inputs.file( description='A JSON file containing occupancy schedules derived from ' 'the input model.' ) schedule = Inputs.file( description='A CSV file containing a single number for meteorological wind ' 'speed in m/s or several rows of wind speeds that align with the length of the ' 'run period. This will be used for all outdoor comfort evaluation.', optional=True ) transmittance_contribs = Inputs.folder( description='An optional folder containing a transmittance schedule JSON ' 'and sub-folders of irradiance results that exclude the shade from the ' 'calculation. There should be one sub-folder per window groups and each ' 'one should contain three .ill files named direct.ill, indirect.ill and ' 'reflected.ill. If specified, these will be added to the irradiance inputs ' 'before computing shortwave MRT deltas.', optional=True ) trans_schedules = Inputs.file( description='A schedule JSON that contains fractional schedule values ' 'for each shade transmittance schedule in the model.' ) run_period = Inputs.str( description='An AnalysisPeriod string to set the start and end dates of ' 'the simulation (eg. "6/21 to 9/21 between 0 and 23 @1"). If None, ' 'the simulation will be annual.', default='' ) wind_speed = Inputs.file( description='A CSV with numbers that align with the input run period. ' 'This will be used for all outdoor comfort evaluation. If None, ' 'the EPW wind speed will be used for all outdoor sensors.', optional=True ) air_speed_mtx = Inputs.file( description='A CSV file with with a matrix of air speed values in m/s. ' 'Note that these values are not meteorological and should be AT HUMAN ' 'SUBJECT LEVEL. 
If specified, this overrides the wind-speed input.', optional=True ) solarcal_parameters = Inputs.str( description='A SolarCalParameter string to customize the assumptions of ' 'the SolarCal model.', default='--posture standing --sharp 135 ' '--absorptivity 0.7 --emissivity 0.95' ) comfort_parameters = Inputs.str( description='An UTCIParameter string to customize the assumptions of ' 'the UTCI comfort model.', default='--cold 9 --heat 26' ) @task(template=LongwaveMrtMap) def create_longwave_mrt_map( self, result_sql=result_sql, view_factors=view_factors, modifiers=modifiers, enclosure_info=enclosure_info, epw=epw, run_period=run_period, name=grid_name ) -> List[Dict]: return [ { 'from': LongwaveMrtMap()._outputs.longwave_mrt_map, 'to': 'conditions/longwave_mrt/{{self.name}}.csv' } ] @task(template=ShortwaveMrtMap) def create_shortwave_mrt_map( self, epw=epw, indirect_irradiance=indirect_irradiance, direct_irradiance=direct_irradiance, ref_irradiance=ref_irradiance, sun_up_hours=sun_up_hours, transmittance_contribs=transmittance_contribs, trans_schedules=trans_schedules, solarcal_par=solarcal_parameters, run_period=run_period, name=grid_name ) -> List[Dict]: return [ { 'from': ShortwaveMrtMap()._outputs.shortwave_mrt_map, 'to': 'conditions/shortwave_mrt/{{self.name}}.csv' } ] @task(template=AirMap) def create_air_temperature_map( self, result_sql=result_sql, enclosure_info=enclosure_info, epw=epw, run_period=run_period, metric='air-temperature', name=grid_name ) -> List[Dict]: return [ { 'from': AirMap()._outputs.air_map, 'to': 'conditions/air_temperature/{{self.name}}.csv' } ] @task(template=AirMap) def create_rel_humidity_map( self, result_sql=result_sql, enclosure_info=enclosure_info, epw=epw, run_period=run_period, metric='relative-humidity', name=grid_name ) -> List[Dict]: return [ { 'from': AirMap()._outputs.air_map, 'to': 'conditions/rel_humidity/{{self.name}}.csv' } ] @task(template=AirSpeedJson) def create_air_speed_json( self, epw=epw, enclosure_info=enclosure_info, multiply_by=1.0, outdoor_air_speed=wind_speed, run_period=run_period, name=grid_name ) -> List[Dict]: return [ { 'from': AirSpeedJson()._outputs.air_speeds, 'to': 'conditions/air_speed/{{self.name}}.json' } ] @task( template=UtciMtx, needs=[ create_longwave_mrt_map, create_shortwave_mrt_map, create_air_temperature_map, create_rel_humidity_map, create_air_speed_json ] ) def process_utci_matrix( self, air_temperature_mtx=create_air_temperature_map._outputs.air_map, rel_humidity_mtx=create_rel_humidity_map._outputs.air_map, rad_temperature_mtx=create_longwave_mrt_map._outputs.longwave_mrt_map, rad_delta_mtx=create_shortwave_mrt_map._outputs.shortwave_mrt_map, wind_speed_json=create_air_speed_json._outputs.air_speeds, air_speed_mtx=air_speed_mtx, comfort_par=comfort_parameters, name=grid_name ) -> List[Dict]: return [ { 'from': UtciMtx()._outputs.temperature_map, 'to': 'results/temperature/{{self.name}}.csv' }, { 'from': UtciMtx()._outputs.condition_map, 'to': 'results/condition/{{self.name}}.csv' }, { 'from': UtciMtx()._outputs.category_map, 'to': 'results/condition_intensity/{{self.name}}.csv' } ] @task( template=Tcp, needs=[process_utci_matrix] ) def compute_tcp( self, condition_csv=process_utci_matrix._outputs.condition_map, enclosure_info=enclosure_info, occ_schedule_json=occ_schedules, schedule=schedule, name=grid_name ) -> List[Dict]: return [ {'from': Tcp()._outputs.tcp, 'to': 'metrics/TCP/{{self.name}}.csv'}, {'from': Tcp()._outputs.hsp, 'to': 'metrics/HSP/{{self.name}}.csv'}, {'from': Tcp()._outputs.csp, 'to': 
'metrics/CSP/{{self.name}}.csv'} ]
PypiClean
/aiidalab_launch-2023.1019.tar.gz/aiidalab_launch-2023.1019/aiidalab_launch/config.py
from __future__ import annotations from copy import deepcopy from dataclasses import asdict, dataclass, field from pathlib import Path from uuid import uuid4 import toml from .profile import Profile MAIN_PROFILE_NAME = "default" @dataclass class Config: profiles: list[Profile] = field(default_factory=lambda: [Profile()]) default_profile: str = MAIN_PROFILE_NAME # The configuration is always stored to disk beginning with version # 2022.1012, which means we assume that if no configuration is stored # we cannot make any assumptions about the latest applicable version. version: str | None = None @classmethod def loads(cls, blob: str) -> Config: loaded_config = toml.loads(blob) config = deepcopy(loaded_config) config["profiles"] = [] for name, profile in loaded_config.pop("profiles", dict()).items(): extra_mounts = ( set(profile.pop("extra_mounts")) if "extra_mounts" in profile else set() ) config["profiles"].append( Profile(name=name, extra_mounts=extra_mounts, **profile) ) return cls(**config) def dumps(self) -> str: config = asdict(self) config["profiles"] = { profile.pop("name"): profile for profile in config.pop("profiles", []) } return toml.dumps(config) @classmethod def load(cls, path: Path) -> Config: return cls.loads(path.read_text()) def save(self, path: Path, safe: bool = True) -> None: path.parent.mkdir(exist_ok=True, parents=True) if safe: path_tmp = path.with_suffix(f".{uuid4()!s}") path_tmp.write_text(self.dumps()) path_tmp.replace(path) else: path.write_text(self.dumps()) def get_profile(self, name: str) -> Profile: for profile in self.profiles: if profile.name == name: return profile raise ValueError(f"Did not find profile with name '{name}'.")
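
# Hypothetical usage sketch (not part of the original module): parse a minimal
# TOML blob and look up the profile it declares. Assumes Profile's remaining
# fields all have defaults, as implied by the Profile() default factory above.
if __name__ == "__main__":
    _blob = 'default_profile = "demo"\n\n[profiles.demo]\n'
    _config = Config.loads(_blob)
    print(_config.get_profile("demo"))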
PypiClean
/Gnosis_Utils-1.2.2.tar.gz/Gnosis_Utils-1.2.2/gnosis/util/sql2dtd.py
__shell_usage__ = """ Shell Usage: [python] sql2dtd.py [SQL_query] [< SQL_from_STDIN] """ __version__ = "$Revision: 0.10 $" __author__=["David Mertz ([email protected])",] __thanks_to__=["Scott Hathaway ([email protected])",] __copyright__=""" This file is released to the public domain. I (dqm) would appreciate it if you choose to keep derived works under terms that promote freedom, but obviously am giving up any rights to compel such. """ __history__=""" 0.10 Initial version. """ # Constants MYSQL = 1 DB2 = 2 ORACLE = 3 MSSQL = 4 POSTGRES = 5 import string,sys def parseSQL(query): "Return a structure of parsed components of an SQL query" # Known issues: # * Does not handle UNION'd clauses. # * Computed columns use generic position name since neither # the SQL function or its arguments is necessarily unique # between columns. # Normalize the query slightly query = string.join(string.split(query)) # One space between words query = string.replace(query,", ",",") # Comma'd items together query = string.replace(query," ,",",") # Comma'd items together query = string.replace(query,"( ","(") # No space after left paren query = string.replace(query," )",")") # No space before right paren query = string.replace(query," as ","!") # Special handling of AS query = string.replace(query," AS ","!") # Special handling of AS query = string.replace(query," As ","!") # Special handling of AS sql_terms = string.split(query) # Initial validity test if string.upper(sql_terms[0]) <> 'SELECT': raise ValueError, "SELECT query must be specified" # Go to it... ignore_adjectives = ('STRAIGHT_JOIN','SQL_SMALL_RESULT','DISTINCT','ALL') column_names = [] group_bys = "" got_columns = 0 for ndx in range(1,len(sql_terms)): term = sql_terms[ndx] if term in ignore_adjectives: pass elif not got_columns: column_names = string.split(term,",") got_columns = 1 elif (string.upper(term)=='GROUP' and string.upper(sql_terms[ndx+1])=='BY'): group_bys = string.join(string.split(sql_terms[ndx+2],",")) # Use the AS version of row name, if given for i in range(len(column_names)): column_names[i] = string.split(column_names[i],'!')[-1] # Massage computed row names ...use generic name 'columnXX', # but store actual computation to column_attr dict column_attr = {} for i in range(len(column_names)): column_name = column_names[i] if "(" in column_name: seq_name = 'column'+`i+1` column_attr[seq_name] = column_name column_names[i] = seq_name return column_names, group_bys, column_attr def parsedQuery2DTD(parseStruct,query=""): "Convert a parsed SQL structure (and a raw query) into a matching DTD" column_names = parseStruct[0] group_bys = parseStruct[1] column_attr = parseStruct[2] dtd_lst = [] #-- Root (SQL) element and attributes (if any) dtd_lst.append("<!ELEMENT SQL (row)*>") dtd_lst.append("<!ATTLIST SQL") if group_bys: dtd_lst.append(' GROUP_BY NMTOKEN #FIXED "%s"' % group_bys) if query: dtd_lst.append(' query CDATA #FIXED "%s"' % query) dtd_lst.append(">") # close the root ATTLIST #-- <row> element and attribute 'num' columns_str = string.replace(`tuple(column_names)`,"'","") dtd_lst.append("<!ELEMENT row %s>" % columns_str) dtd_lst.append("<!ATTLIST row num ID #IMPLIED>") #-- Individual columns (with attribute if calculated) for column in column_names: dtd_lst.append("<!ELEMENT %s (#PCDATA)>" % column) if column_attr.has_key(column): dtd_lst.append('<!ATTLIST %s CALC CDATA #FIXED "%s">' % (column, column_attr[column])) return string.join(dtd_lst,'\n') def parsedQuery2Fields(parseStruct): "Convert a parsed SQL structure to sql2xml's 
pipe-delimited column list" column_names = parseStruct[0] return string.join(column_names,'|') def typifyDTD(dtd, column_type_dict): "Add column type information to a DTD" # not yet implemented return dtd def textResults2XML(table, column_names, RDBMS=MYSQL): "Transform text-form query results to XML markup" # not yet implemented (meaningfully) xml = ['<SQL>'] row = 0 for line in string.split(table,'\n'): row = row+1 xml.append(' <row num=%s>' % row) xml.append(' %s' % line) xml.append(' </row>') xml.append('</SQL>') return string.join(xml,'\n') #-- Command line version of tool if __name__ == '__main__': if len(sys.argv) > 1: if sys.argv[1] in ('-h','/h','-?','/?','?','--help'): print __shell_usage__ else: query = sys.argv[1] print parsedQuery2DTD(parseSQL(query),query) # print parsedQuery2Fields(parseSQL(query)) else: query = sys.stdin.read() print parsedQuery2DTD(parseSQL(query),query)
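
# Illustrative example (not part of the original module): computed columns are
# renamed to a generic 'columnXX' and their expression is preserved separately,
# e.g.
#   parseSQL("SELECT dept,COUNT(id) FROM emp GROUP BY dept")
#   -> (['dept', 'column2'], 'dept', {'column2': 'COUNT(id)'})
# and parsedQuery2DTD() turns that structure into the matching DTD, emitting
# the stored expression as a #FIXED 'CALC' attribute on the <column2> element.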
PypiClean
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/wu-ya-jiu-shu/连招1.0学习训练系统完整版:乌鸦连招1.0学习训练系统完整版:女性性格分类:女性性格分类3.md
# 连招1.0 学习训练系统完整版:乌鸦 连招1.0学习训练系统完整版:女性性格分类:女性性格分类3 我刚才,那你就说让他对你表白啊,我现在讲,关键点啊,使刚才我介绍的性格分开,又是永远是,就是在我们生活当中属于,比较简单的这些女生,最简单的一类,算是儿子高稍有难度,但儿子几个儿子正常是非常容易的。 那阳光男生基本上没有问题的,所以,我们在,生活共产党中,最多的碰见就是说,经历比较正常,也不是多也不是特别少,正常谈过一两段恋爱,不男朋友,说,爱情有一定的理解,还挺长心的,然后我们先从,第一转。 不开放子,不行开始,当你用虽然谈过恋爱,但是,他们因为可能,因为家庭原因比较传统,快快,家庭原因比较传统,所以这类女生,忘了放不开自己,居然有过节目,就是能用,不开放这类女生,就是我所,斗习。 儿子低的情况,儿子低喜欢为什么土斗习,就是我们可能看到一些女孩,儿到土,说打扮自己,也许有过男朋友,但她谈了谈,你只是谈了谈,但不是真的有太多的,懂吧,人如果干这些,她们,感众往往是朋友更重要。 是她的女性朋友更重要,是幼稚的,还好,幼稚的没有正常性的更明,正常性的往往是,觉得朋友比你更重要,很多都是这样的,所以,你们在旧中女生的时候,一定要让她同,就是身边的朋友,对的就是搞定闺蜜。 能搞定这个女的,实际上是只有在这种女生类型,你才会用的,儿不是说,哪个女生我都搞定她,闺蜜就可以,但是这类女生依然,还有个问题,就是她太多,但因为她比较成熟,所以说,有些时候你说一些,稍微商人的话。 她不会接受这类女孩,你稍微自己到她,她就太不理,也相对,其实比幼稚的女生,还好搞定,这个反而,因为她成熟了,好,你很多事情不是有意的,只无形当中伤害到的,这类女生的,还有一个特点就是,他们就是。 因为价值观比较低,所以你在吃饭的时候,然后去特别高当,你说,差不多就行了,让她感觉你一类人,因为她已经长大,她照,让你无朝见的付出是不好的,然后紧接着下一个就是,儿子正常,不可能,冲等了。 正常你CN女生说出来,跟正常女生的相比,就一点距离,就是你80点够长,通常就可以,为什么,因为她仅仅就是属于,跟一般女生比,更关一些的女生,就是安女,关一关女,我们再把她的,稍给高s足够的安全感。 外加一点点感觉,然后我再说下一种,儿子比较高又不开了,她正常,这种女生就,已经算是稍又难度了,这种女生甚至可以跟,其他成熟女生相比,因为她儿子价,就是叫高吧,她教育比较好,要保守,所以说是比较高冷一下。 那女生其实就是我们,经常所说的,高冷女生,既然高冷就特别难度,就像我前前介绍的,我们再说话说,一定要带点话题,然后手还有就是,所有儿子高的时候,我们都要具备,一定的社会价值,我说你在跟他们接受的时候。 我已经,肉质你稍微低点,还有可能,正常情况,你一定是有一定的,至少有一定的上进行,各方面,感觉,你这个人社会价值,已经要求社会价值了,未来是ok的,尤其儿子高跟女生,就是,有这个要正常,知道。 那女生我们在判断过程当中,一定要有具备一定的社会价值,能是甜点,就想靠谐技巧来靠近之类,但是,然后紧接着我再讲,下一种,我们经常碰到的,NMP型,就是,就跟正常女生相比价值稍微,把女生把的方法。 查NMP型跟儿型,但正常女差距不大,就是说,NMP型跟,跟三型跟儿型有什么区别,就是你其实,属于很简单的女生就是怎样,就是你可以比你正常情况跑简单,就是要求你社会价值更比。 你正常判断还要求你具备一定社会价值,这种女生都不要求你具备社会,我觉得甚至是你稍微低点,跟她合适一些,所以一个是比较容易的,但是你如果社会价值,表现太高有所谓,你只要稍加一些成一样的相信,你是喜欢她。 是属于简单类型跟女生啊,是NMP型,然后三N型我就算是可以跳过了吧,说我正常女生我有什么好说呢,然后我再说一下,因为没有什么特别注意点,就是正常的可能,然后NN儿型是怎样的,就是比这场女生你稍微。 多具备一些社会价值就可以,因为就是给她一个平衡感的显,但也不是特别需要为什么,就是如果你用其他方面价值,你比如说我的情绪价值,个人魅力较高,那你稍微个人价值低点,而也可以去吃饭饭。 这也未必是不可以尝试的,但通常情况是需要你的魅力,或者你的家庭,要不是你自己家庭,跟她吃饭,对吧,标目就是,你其他地方,名价于,就是,龙等男生,让我才能取得一个平衡,这个就比较简单了。 所以我现在在讲下容了,下容就是,她一方形的,而只有低的正常,这种女生其实,已经就是,有点,你就是相当好高的类型,我们家相当好高的类型,就是说属于小姐型,属于睡得很容易,她已经把性能当中一步了。 觉得这些东西无所谓,然后因为她家只有低就好高,像女生说是花你花钱就可以高了吧,然后儿子高又开放,又是正常型,所谓的,偶恩,这种女生就是,跟小姐型去学,就是她不缺社会价值,不缺社会价值。 所以我们在搞定这种女生的时候,你不能像小姐一样的应对,只是就是,你正常女生稍微开放一起,我们只需要适当你引寶,特别正常搞上床,早上床,这就是我们可以就是,如果我们的社交职,决一些特殊一点,一段叛递出来。 这类女生比较适合素费的,然后再说一种,这种就是稍有难度,稍有难度就是价值有点高,所以我儿子显示在我们会,虽然说她,跟这场女生一样,所以开放的,但是因为她自然价值很高,所以她在选男的,这类型其实更类似的。 黑默儿型,知道吗,默儿就要女生,因为儿子高说的不是她的家庭高,说她喜欢高呼帅,一个男人的价值要求高,所以这类女生,如果你是高呼帅的话,就比较,但是如果你不适的话,因为她很拜金,所以你非常容易。 被她当成开的,这类型,平均力高手不远了,价值高的女生,说稍有难度,所以我就现在跟大家说,难度低的是怎样的呢,难度低的就是,又是形的,开放,我等会儿这个,图一个图表,就把那个三档给大家分了。 大家看一下图表,大家更办的是,等我讲这些课,我给大家证试的,难度比一图,然后我现在在简单介绍下,成熟型的女生,成熟型的女生,一般都是非常难搞定的,包括我都有一定的失败机,是我的成功机率件很高。 我第一个高成熟型的女生,我会装的必须特别的好,我上来爆料出来一点,我也在地方就会让她们看穿,穿穿之后那我还有心,不知道我跟她是完了,那我最多睡一觉,但是我第一种,一个女生如果不开放。 还是有一个成熟表现成,现在女生不要觉得简单,因为她会畅了解男人,而且她知道,不能让一个男人得到她,才是最关键的,你在搞定这些女生说实话吧,这最忙的为什么,你又要有魅力,你要有成熟,又要花时间,是我说的。 你价值算比较低,但是非常丑数,这个女生数据,几大男工形,为什么说儿子低还反呢,因为儿子低才知道自己需要的,她更追求的是,那种王子,也不一定是王子,我不是说的,神吧,今天我们梦欢醒了,男人。 她要男人的时候,因为她,你必须情绪价值高,因为她儿子低说明她看中的不是,你的生活价值根本就不重要,所以你搞定,现在女生的时候,你必须,要在你的人格,没上非常高,并且你还得有成熟,并且你还要花高时间。 所以为什么说,我都不喜欢浪费时间,然后再说,正常性的烧好一系,那女生还放一系,会那么的纠结,所以说时间上可能短了,但是要求你在,情绪价值,个人魅力上,非常高,女生其实非常,这是属于正常。 然后特别像韩剧的那种,是想看韩,韩定是幼稚,就是需要你真正一个男人有诚意,然后必须来,但她要求你价值低,但你必须是一个居家好男人,这种女人其实,往往好男人搞定,她如果花的时间够长,只要她让这个男人。 对她产生一点儿感觉太业统,帮我再说一种,又成熟又开放价值要低,这种女生是怎样的,这种特别会利用自己的身体,然后,这跟不同男人去,大发发的为什么,她是低呀,她虽然很开放很有手段,她,但是虽然很开放。 但她因为她的成熟度,她在男人,男女还通常是有口碑的,当她受过很多男人,但她依然可以,骗一些幼稚的小孩,而且她懂得用自己的身体,可以去交换一些东西,当然她交换的东西,不一定是钱,然后再说,下面这种。 就是动场情况,大致是动场的,就是台方有成熟的,这种其实就是,我比较喜欢的,小叫比较好,这种女生王,就是经过挺长时间,有可能连恋爱都没有谈过,为什么是,我说那种,他的恋爱次数,要么就是他经过,这个不是。 不开放的话,一般都是恋爱次数远,大于上床的,这种情况的女生是怎样的,就是他在跟男生接受,当中非常的小,因为他的不开放,是他前期找我的大量的经验,然后发现到这一步的女生,说实话都非常有人格卖你了。 
因为我聊过几个这样的,你看我,我玩一次,通过特别高身的浪漫手,吸引到他,但是如果我这段时间,稍有冷淡,他就会感觉到,一些就知道我是在玩了,然后又不我退步到朋友,就本来一天晚上聊得很好。 已经聊到人生聊到爱了,感觉我们两多行行相应了,但是后来他又离智,就是反过来特离智了,有些回想提前是,你又觉得我是不靠谱的,人生会通过多方面来进行判斗,这种女生我们用李星肆为分,才有一点点忠仇。 结果有到退步,所以特别的麻烦,我一般就不喜欢搞成熟,这三种成熟不开放都属于最难行的,然后就是正常行的少一点,好少一点,知道正常行的价值也正常,就是这一类女生你要给她一个,搞不得生活的课。 其实结果活的女人停工都属于这样,要比一级老公,对吧,要通常那些照复性,然后对,就是下手成熟就是,又成熟有开放,然后二只这样,这种女生在,不是熟悉,要那种的女人,一般,因为她开放,所以她就是在男生非常。 她是睡过很多男生的女人,知道后她需求一用价值,但又不是非差,跟男人接受当中,她并不一定仅仅,是男人今天上的交流,但她当中,不一定会对你,男人是有一个,大值的标准的,一个成熟女人的,大值的标准。 你这个人的身份如何,身份我必定说是有没有钱,这些女人往往都是有情人的,而且特老公通常说,什么时候特老公,也许也没有老公,但是这类女生有老公的非常多的,她那么性格爱分开,这样的感觉,你妈妈体会。 然后又该说,就是现在我就开始讲,你放在基本上,加密会出现两种的事,我们生活当中,碰到最难搞的两类女生,更难搞两类女的是哪两种,就是一种是,又不开放又成熟价值高,这种女生几乎,你们生活当中是不会碰到的。 为什么就是,为什么,你们如果是对吧,价值高又不开放,又是跟正常情还常见,因为现在女生很难达到成熟的程度,因为当时达到成熟的程度,应该是下摘的,这种女生我称之为,丁丁写,我非常懂男,这些就是她们儿很高。 也就是属于,儿子正常女儿的进化吧,进化到要求儿子高之后,你相信这种女生,就不能为一般的手段了,跟哪怕你开的蓝波,进你长得又帅,他们的位置会调你的,不要说,就是这样,这来女生几乎都没有,拉眼睛的存在。 除非就是一个女生,一个男生就有点那种娘的男生,同样会这一个特性,因为我见过这类女生只有两个儿子,不具比较少,因为我就是觉得这种,精神实在太有点哥们,因为她太成熟也有点喜欢,为什么。 这个女生当时因为很成熟,很多东西,有的知道在外面有你,但是她老运种不较隐晦的方式,暗示我,非常的不熟,你要想不她搞上床的时候,是说我是说我,首先,先吃了两个月不熟,平常性的连爱性。 然后同时为装自己的性格,魅力非常的强,这两件事情非常累,而且在这时候,技巧跟情绪价值作用又不大,因为她见多了会了,骚本来,你去各种营兵头很容易犯错,我只能努力跟平时的一些,浪漫的东西才可以形容,浪漫的。 就浪漫八十年,比如说我用的很多巧冬真的,比如说我,因为她我发明一种遗憾的浪漫的方式,我在最后离开她的时候,跟着她说,以后就想跟你说,然后她非常感动,因为这种事情,她勉强骗高,就是对这种女生,我说说搞的。 自己不想再搞第二,好好解释,然后下面就是说另一把爱种,就是跟这种女生相女,如果她不是那么的饱,所以一个男生要求那么高的话,她还想得简单,这种女人就是跟正常女人相比怎样呢,就是使用你的前天小姐。 大家国家校,又不叫中国,或者说那种小富婆,或者说那种女强人,走吧,都是属于子来,女生搞的女孩,她搞的,你也同样女强人,你同样比她更强来的好,所以搞这种女生,其实就是那个时候好像不在。 我在我其实给她发过的浪漫,就是我高阳她,她就是说她也不是说为啥,我给我展示,她发现我跟她聊工作的时候,也唱得OK,然后在聊感情也是非常OK,然后就是,我觉得这种女生,还相对比较简单,为什么。 我以卡你卡那么死,我觉得你OK,即使你不是特别那什么,她也心里懂,理解到了,然后即使,因为她,因为不是那么保守,她觉得,就是你那什么一下,这辈子也没有你遗憾了,但是像,如果是不开放心,她就会觉得,如果。 就是要给你留伤你,还如果你不喜欢我,不喜欢我没有,给我到时候那么大的管轴,我觉得就跟你,什么之外,你记我一辈子,当然正常性的女生就会想,她不多了,每遇到一这样男人,虽然说不可能跟你一辈子在一起。 也是也很难遇到下来的了,为什么,这就是她跟,那种不开放心的区别,然后还有就是,跟我刚才说那种病列最难的,就是说,开放又成熟,好二只有搞,这人的女生,我是有点讨厌,这种女生非常懂全数。 她懂得利用自己的身体,身体就换了,换去割类好处,身体的眼影只是,身体一种价值,到这个东西很重要,如果换去锁取,而且最关键是她在别人眼睛,你口碑也很好,口碑好导致,你在搞定这种女生的时候,你的身体,你都。 不一定能看出来她是这样的,你或许见证,女生还觉得她是很偏见的浪吗,其实不是,你就是特别容易胖干,15周女生,虽然是一个极端的,但有可能未装上正常女生,可能未装上怎样,因为她们。 很成为她们会演示自己的开放,因为她知道开放在现在社会的身体,要不演唱唱,普通女孩,要不就,演,就是偽装上的那种子,还比较,如果比较弱小的女生的身份,来拨去你同情,她懂得男人用的一切巧。 其实这种就跟女玩家女皮肖的差很多了,其实有些时候,你还觉得这种女生是白负买,即使吧,非常的噁心,但是她们去的时,人格没得上,还是有很大的,跟腰筋形容,就是我说,精灵性女生,勾人的地方比较。 这种女人勾人是那种,恶不弱声,我营营喊喊的勾得,让你遇到吧哆,那这种女生,非常的懂,你非常知道男人需求,什么各方面都安全的,然后我今天的后半段基本上叫,女茶表,女茶表,其实不是这种,女茶表没有这么牛逼。 你都能看出来她是女茶表的,最后一种女生,我称这位舞责天行女生,知道我们懂得全数,你已经茶表那个等级,你茶表往往是,比这个的成熟度不如,我跟她说这种,女茶表是成熟的时候,有可能发展成熟。 要表通常是幼稚的或者是生长的,生长都有点高过他们,我跟你说,最后一种根本不是女茶表,就是你能判断,朋友们现在发,来 我现在就继续说,有一种类型是不适合你,这男人那一种吧,生长你们如果。 继续选用的技术科成,也必定能告定,需要高节科成来调,必须用DAC,在方法才能告,所以我们,生长情况我们面对,就是刚才我从幼稚,正常型还有就是成熟性,你的谋谊种,我一会儿给大家,不表格出了。 大家就是恨之关的理解,我通过不同的颜色,把预色最简单,黄色是种的,红色,你最好不要去触摸,这种女生也没有什么,好的地方,你像一个精力,那么豐富的女生,要么就特别地摸击麻烦,要么就是,你叫垃圾。 所以我觉得你们都可以不去触摸,因为真是生活中,能有那么多精力豐富的女人,是很少的,你想一个人的精力,代表一个人的脑力,比如说我如果没有,你如果,这个意思举得点不恰吧,如果你不我,这个,到格里放的吧。 那我肯定是那种,而是比较高,又不开放,又成熟的,还不玩笑,你看我多见,不自己放在不开放这个,其实我具体是哪种,大家都应该理解,如果不我们放在这个里头,你可以看看你是哪种,所以说如果是,像有些种在。 成熟里的都是,像所有POH的,你定的这个,你非长逢,我车长高,要不然他就根本俩女生都搞不定,明白吧,怕他周红色的女生,我一会儿给你们投表格,然后具体的S&M值的那个,对你们后家的人是没有那个。 我S&M值而这三家属性,怕定的方法的,所以你要听我以前的录音,然后M,S&M像属性怕他表准,因为这个我内部才有的,本没上床,就比如说什么对待,陌生男人不理睬,但是为什么我不是,这个具体数字发给你们呢。 如果放上去,你们就完全用这个数字去架,但是女人不能是用数字来计算了,我只是说给你们一个基础的印象,大概这个范围,而不是,我是有基础技算公式对吧,那只大概的范围,具体全是可以进行微调的,你整理了。 我也有整理版,但我没有极其发生,因为我这个觉得你们看了法,会让你们变得更弱,所以你们零十个看下,我再来这再法一边,你们最好不要就是按照这个表格,完全去计算,好吧,我给发展那个外外公平上了,不得好像。 没法,反正是最后再補一个,别了,意思,你说,这个为什么我不发泉,发泉,你没有特别当回事,这东西如果你当回事了,反而会影响你们判断你,因为你会更加的死吧,我希望我教的人都会活学活用,对吗。 我不希望你们就是看我的这些东西,我使你们以后,动家的死吧,是我本了,不是,你们最关键听是听得我上节课,对吧,为什么我说是这样的,第二十七种基础还是不现实,我刚才只是给你们简单介绍,最关键的点。 
是你们经常听我录音,看看自己组织的符合是哪种标准,还有就是,最关键的,其实是我第一届性格分类讲的,S,DS只是代表了什么,DH只是代表了什么,你这样以后自己都会去分类,处理没种女人,而不是单纯的。 一招我刚才给你这样的分类,非常没有效用的,然后听完这节课,我下来课就可以给,终于给大家讲一下,你们最关心的吸引力问题,你只要解决吸引,我的事情都不在书问题,那好ok,我这些课想结束了。 然后我给大家整理完,发来说有没有弄讲了,对,我还给你们讲,这几来女生的应对法则了吧,正常性女生,应该用怎样的应对法则的,我们往往要去用一些,成熟的吸引,领袖的吸引,反动的吸引,而开心的吸引。 随着成熟的体上分,越来越小,烟池的吸引也是一样的,但后期煎池的吸引,只能给女生创造一些,即使感觉,什么叫即使感觉,感觉是什么,觉得你的不感觉,也就这么多了,而不是说,你能通过煎池,给他捞下印记。 而且这种女生往往是,因为很成熟,他们不需要暖男的关系,懂吧,暖男,对正常女生还有些用途,但是都成熟用了,张化的吸引作为,也相比越来越弱,因为你张化成熟女人的时候,他们已经身经摆这样,不怕你去上。 很幼稚性女生了,但这时候精力的吸引非常重要,所有正常就跟成熟,吸引生必吃,如果你能跟着去到聊天,看到一些他的内心想法,如果你再通过你说的一些精力,跟他产生共鸣,非常非常有用,然后等到我具体讲。 十大新法则的时候,告诉你们一种吸引力,针对的内心就可以,然后现在这些合档,你就问题他问,然后我下去是
PypiClean
/misp_lib_stix2-3.0.1.1-py3-none-any.whl/stix2/test/v20/constants.py
import datetime as dt import pytz FAKE_TIME = dt.datetime(2017, 1, 1, 12, 34, 56, tzinfo=pytz.utc) ATTACK_PATTERN_ID = "attack-pattern--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061" CAMPAIGN_ID = "campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f" COURSE_OF_ACTION_ID = "course-of-action--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f" IDENTITY_ID = "identity--311b2d2d-f010-4473-83ec-1edf84858f4c" INDICATOR_ID = "indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7" INTRUSION_SET_ID = "intrusion-set--4e78f46f-a023-4e5f-bc24-71b3ca22ec29" MALWARE_ID = "malware--9c4638ec-f1de-4ddb-abf4-1b760417654e" MARKING_DEFINITION_ID = "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9" NOTE_ID = "note--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061" OBSERVED_DATA_ID = "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf" RELATIONSHIP_ID = "relationship--df7c87eb-75d2-4948-af81-9d49d246f301" REPORT_ID = "report--84e4d88f-44ea-4bcd-bbf3-b2c1c320bcb3" SIGHTING_ID = "sighting--bfbc19db-ec35-4e45-beed-f8bde2a772fb" THREAT_ACTOR_ID = "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f" TOOL_ID = "tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f" VULNERABILITY_ID = "vulnerability--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061" MARKING_IDS = [ "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9", "marking-definition--443eb5c3-a76c-4a0a-8caa-e93998e7bc09", "marking-definition--57fcd772-9c1d-41b0-8d1f-3d47713415d9", "marking-definition--462bf1a6-03d2-419c-b74e-eee2238b2de4", "marking-definition--68520ae2-fefe-43a9-84ee-2c2a934d2c7d", "marking-definition--2802dfb1-1019-40a8-8848-68d0ec0e417f", ] RELATIONSHIP_IDS = [ 'relationship--06520621-5352-4e6a-b976-e8fa3d437ffd', 'relationship--181c9c09-43e6-45dd-9374-3bec192f05ef', 'relationship--a0cbb21c-8daf-4a7f-96aa-7155a4ef8f70', ] # *_KWARGS contains all required arguments to create an instance of that STIX object # *_MORE_KWARGS contains all the required arguments, plus some optional ones ATTACK_PATTERN_KWARGS = dict( name="Phishing", ) CAMPAIGN_KWARGS = dict( name="Green Group Attacks Against Finance", description="Campaign by Green Group against a series of targets in the financial services sector.", ) CAMPAIGN_MORE_KWARGS = dict( type='campaign', id=CAMPAIGN_ID, created_by_ref=IDENTITY_ID, created="2016-04-06T20:03:00.000Z", modified="2016-04-06T20:03:00.000Z", name="Green Group Attacks Against Finance", description="Campaign by Green Group against a series of targets in the financial services sector.", ) COURSE_OF_ACTION_KWARGS = dict( name="Block", ) IDENTITY_KWARGS = dict( name="John Smith", identity_class="individual", ) INDICATOR_KWARGS = dict( labels=['malicious-activity'], pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']", ) INTRUSION_SET_KWARGS = dict( name="Bobcat Breakin", ) MALWARE_KWARGS = dict( labels=['ransomware'], name="Cryptolocker", ) MALWARE_MORE_KWARGS = dict( type='malware', id=MALWARE_ID, created="2016-04-06T20:03:00.000Z", modified="2016-04-06T20:03:00.000Z", labels=['ransomware'], name="Cryptolocker", description="A ransomware related to ...", ) OBSERVED_DATA_KWARGS = dict( first_observed=FAKE_TIME, last_observed=FAKE_TIME, number_observed=1, objects={ "0": { "type": "windows-registry-key", "key": "HKEY_LOCAL_MACHINE\\System\\Foo\\Bar", }, }, ) REPORT_KWARGS = dict( labels=["campaign"], name="Bad Cybercrime", published=FAKE_TIME, object_refs=[INDICATOR_ID], ) RELATIONSHIP_KWARGS = dict( relationship_type="indicates", source_ref=INDICATOR_ID, target_ref=MALWARE_ID, ) SIGHTING_KWARGS = dict( sighting_of_ref=INDICATOR_ID, ) THREAT_ACTOR_KWARGS = dict( 
labels=["crime-syndicate"], name="Evil Org", ) TOOL_KWARGS = dict( labels=["remote-access"], name="VNC", ) VULNERABILITY_KWARGS = dict( name="Heartbleed", )
PypiClean
/wps-light-0.16.1.tar.gz/wps-light-0.16.1/wemake_python_styleguide/visitors/ast/complexity/overuses.py
import ast from collections import defaultdict from typing import ( Callable, ClassVar, DefaultDict, FrozenSet, List, Tuple, Union, ) from typing_extensions import final from wemake_python_styleguide.compat.aliases import FunctionNodes from wemake_python_styleguide.logic import source, walk from wemake_python_styleguide.logic.complexity import overuses from wemake_python_styleguide.logic.tree import annotations from wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive from wemake_python_styleguide.violations import complexity from wemake_python_styleguide.visitors import base, decorators #: We use these types to store the number of nodes usage in different contexts. _Expressions = DefaultDict[str, List[ast.AST]] _FunctionExpressions = DefaultDict[ast.AST, _Expressions] _StringConstants = FrozenSet[Union[str, bytes]] @final @decorators.alias('visit_any_string', ( 'visit_Str', 'visit_Bytes', )) class StringOveruseVisitor(base.BaseNodeVisitor): """ Restricts repeated usage of the same string constant. NB: Some short strings are ignored, as their use is very common and forcing assignment would not make much sense (i.e. newlines, "", comma, dot). """ _ignored_string_constants: ClassVar[_StringConstants] = frozenset(( ' ', '.', ',', '', '\n', '\r\n', '\t', b' ', b'.', b',', b'', b'\n', b'\r\n', b'\t', )) def __init__(self, *args, **kwargs) -> None: """Inits the counter for constants.""" super().__init__(*args, **kwargs) self._string_constants: DefaultDict[ AnyTextPrimitive, int, ] = defaultdict(int) def visit_any_string(self, node: AnyText) -> None: """Restricts to over-use string constants.""" self._check_string_constant(node) self.generic_visit(node) def _check_string_constant(self, node: AnyText) -> None: if annotations.is_annotation(node): return # Some strings are so common, that it makes no sense to check if # they are overused. if node.s in self._ignored_string_constants: return self._string_constants[node.s] += 1 def _post_visit(self) -> None: for string, usage_count in self._string_constants.items(): if usage_count > self.options.max_string_usages: self.add_violation( complexity.OverusedStringViolation( text=source.render_string(string) or "''", baseline=self.options.max_string_usages, ), ) @final class ExpressionOveruseVisitor(base.BaseNodeVisitor): """Finds overused expressions.""" _expressions: ClassVar[AnyNodes] = ( # We do not treat `ast.Attribute`s as expressions # because they are too widely used. That's a compromise. ast.Assert, ast.BoolOp, ast.BinOp, ast.UnaryOp, ast.Call, ast.Compare, ast.Subscript, ast.Lambda, ast.DictComp, ast.Dict, ast.List, ast.ListComp, ast.Tuple, ast.GeneratorExp, ast.Set, ast.SetComp, ) _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] 
= ( overuses.is_decorator, overuses.is_self, annotations.is_annotation, overuses.is_class_context, overuses.is_super_call, overuses.is_primitive, overuses.is_unary_minus, ) _msg: ClassVar[str] = '{0}; used {1}' def __init__(self, *args, **kwargs) -> None: """We need to track expression usage in functions and modules.""" super().__init__(*args, **kwargs) self._module_expressions: _Expressions = defaultdict(list) self._function_expressions: _FunctionExpressions = defaultdict( lambda: defaultdict(list), ) def visit(self, node: ast.AST) -> None: """Visits all nodes in a module to find overused values.""" if isinstance(node, self._expressions): self._add_expression(node) self.generic_visit(node) def _add_expression(self, node: ast.AST) -> None: if any(ignore(node) for ignore in self._ignore_predicates): return source_code = source.node_to_string(node) self._module_expressions[source_code].append(node) maybe_function = walk.get_closest_parent(node, FunctionNodes) if maybe_function is not None: self._function_expressions[maybe_function][source_code].append( node, ) def _post_visit(self) -> None: for mod_source, module_nodes in self._module_expressions.items(): if len(module_nodes) > self.options.max_module_expressions: self.add_violation( complexity.OverusedExpressionViolation( module_nodes[0], text=self._msg.format(mod_source, len(module_nodes)), baseline=self.options.max_module_expressions, ), ) for function_contexts in self._function_expressions.values(): for src, function_nodes in function_contexts.items(): if len(function_nodes) > self.options.max_function_expressions: self.add_violation( complexity.OverusedExpressionViolation( function_nodes[0], text=self._msg.format(src, len(function_nodes)), baseline=self.options.max_function_expressions, ), )
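
# Illustrative note (not part of the original module): StringOveruseVisitor
# counts every non-annotation string constant per module, so repeating the
# same literal more times than ``--max-string-usages`` allows, e.g.
#
#     first = 'overused'
#     second = 'overused'
#     third = 'overused'
#     fourth = 'overused'
#
# is reported as OverusedStringViolation, while the separators listed in
# _ignored_string_constants ('', ',', '.', newlines, ...) are never counted.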
PypiClean
/strawberry_resources-0.8.1.tar.gz/strawberry_resources-0.8.1/strawberry_resources/utils/inspect.py
import itertools from typing import Generator, Optional, Union from strawberry.lazy_type import LazyType from strawberry.type import ( StrawberryContainer, StrawberryType, StrawberryTypeVar, get_object_definition, ) from strawberry.types.types import StrawberryObjectDefinition from strawberry.union import StrawberryUnion from typing_extensions import assert_never def get_possible_types( gql_type: Union[StrawberryObjectDefinition, StrawberryType, type], *, object_definition: Optional[StrawberryObjectDefinition] = None, ) -> Generator[type, None, None]: if isinstance(gql_type, StrawberryObjectDefinition): yield from get_possible_types(gql_type.origin, object_definition=gql_type) elif isinstance(gql_type, LazyType): yield from get_possible_types(gql_type.resolve_type()) elif isinstance(gql_type, StrawberryTypeVar) and object_definition is not None: resolved = object_definition.type_var_map.get(gql_type.type_var, None) if resolved is not None: yield from get_possible_types(resolved) elif isinstance(gql_type, StrawberryContainer): yield from get_possible_types(gql_type.of_type) elif isinstance(gql_type, StrawberryUnion): yield from itertools.chain.from_iterable( (get_possible_types(t) for t in gql_type.types), ) elif isinstance(gql_type, StrawberryType): # Nothing to return here pass elif isinstance(gql_type, type): yield gql_type else: assert_never(gql_type) def get_possible_type_definitions( gql_type: Union[StrawberryObjectDefinition, StrawberryType, type], ) -> Generator[StrawberryObjectDefinition, None, None]: if isinstance(gql_type, StrawberryObjectDefinition): yield gql_type return for t in get_possible_types(gql_type): if isinstance(t, StrawberryObjectDefinition): yield t elif (type_def := get_object_definition(t)) is not None: yield type_def
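
# Hypothetical usage sketch (not part of the original module): for a plain
# @strawberry.type class, get_possible_type_definitions() yields the single
# StrawberryObjectDefinition attached to it.
if __name__ == "__main__":
    import strawberry

    @strawberry.type
    class Fruit:
        name: str

    print(list(get_possible_type_definitions(Fruit)))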
PypiClean
/danmu_utils-2.12.0-py3-none-any.whl/danmu_utils/plugin/diyidan/DiyidanDownloader.py
import urllib.request from danmu_utils.common.IDownloader import IDownloader class DiyidanDownloader(IDownloader): @property def DANMU_TYPE(self): return 'diyidan' @property def DANMU_EXTNAME(self): return 'dydjson' @property def DANMU_LIST_EXTNAME(self): return 'dydlist' def _download(self, videoId=None, postId=None): res = {} if videoId != None: url = 'https://api.diyidan.net/v0.2/posts/danmaku?videoId=%s' % (videoId) try: with urllib.request.urlopen(url) as f: danmu = f.read() res['videoId'] = danmu except Exception as e: print(e) if postId != None: url = 'https://api.diyidan.net/v0.2/posts/danmaku?postId=%s' % (postId) try: with urllib.request.urlopen(url) as f: danmu = f.read() res['postId'] = danmu except Exception as e: print(e) return res def download(self, line): line_res = [] line_params = line.strip('\n').split('\t') videoId = line_params[0] postId = line_params[1] res = self._download(videoId=videoId, postId=postId) if 'videoId' in res: out_filename = '' if postId != None: out_filename = out_filename + postId out_filename = out_filename + '-' + videoId out_filename = out_filename + '.' + 'dydjson' item_res = {} item_res['filename'] = out_filename item_res['data'] = res['videoId'] line_res.append(item_res) if 'postId' in res: out_filename = postId + '-' out_filename = out_filename + '.' + 'dydjson' item_res = {} item_res['filename'] = out_filename item_res['data'] = res['postId'] line_res.append(item_res) return line_res if __name__ == '__main__': pass else: from danmu_utils.common.plugin_collection import add_download_tool add_download_tool(DiyidanDownloader().DANMU_TYPE, DiyidanDownloader)
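
# Hypothetical usage sketch (not part of the original module): each input line
# is expected to be tab-separated as '<videoId>\t<postId>'; download() returns
# a list of dicts holding a 'filename' and the raw danmaku 'data'. The IDs
# below are placeholders, and the call needs network access to api.diyidan.net.
if __name__ == '__main__':
    downloader = DiyidanDownloader()
    for item in downloader.download('123456\t654321'):
        print(item['filename'], len(item['data']))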
PypiClean
/syntaxnet-0.2-cp27-cp27mu-manylinux1_x86_64.whl/syntaxnet-0.2.data/purelib/dragnn/python/evaluation.py
"""Parser evaluation utils.""" from __future__ import division import tensorflow as tf from syntaxnet import sentence_pb2 from syntaxnet.util import check def calculate_parse_metrics(gold_corpus, annotated_corpus): """Calculate POS/UAS/LAS accuracy based on gold and annotated sentences.""" check.Eq(len(gold_corpus), len(annotated_corpus), 'Corpora are not aligned') num_tokens = 0 num_correct_pos = 0 num_correct_uas = 0 num_correct_las = 0 for gold_str, annotated_str in zip(gold_corpus, annotated_corpus): gold = sentence_pb2.Sentence() annotated = sentence_pb2.Sentence() gold.ParseFromString(gold_str) annotated.ParseFromString(annotated_str) check.Eq(gold.text, annotated.text, 'Text is not aligned') check.Eq(len(gold.token), len(annotated.token), 'Tokens are not aligned') tokens = zip(gold.token, annotated.token) num_tokens += len(tokens) num_correct_pos += sum(1 for x, y in tokens if x.tag == y.tag) num_correct_uas += sum(1 for x, y in tokens if x.head == y.head) num_correct_las += sum(1 for x, y in tokens if x.head == y.head and x.label == y.label) tf.logging.info('Total num documents: %d', len(annotated_corpus)) tf.logging.info('Total num tokens: %d', num_tokens) pos = num_correct_pos * 100.0 / num_tokens uas = num_correct_uas * 100.0 / num_tokens las = num_correct_las * 100.0 / num_tokens tf.logging.info('POS: %.2f%%', pos) tf.logging.info('UAS: %.2f%%', uas) tf.logging.info('LAS: %.2f%%', las) return pos, uas, las def parser_summaries(gold_corpus, annotated_corpus): """Computes parser evaluation summaries for gold and annotated sentences.""" pos, uas, las = calculate_parse_metrics(gold_corpus, annotated_corpus) return {'POS': pos, 'LAS': las, 'UAS': uas, 'eval_metric': las} def calculate_segmentation_metrics(gold_corpus, annotated_corpus): """Calculate precision/recall/f1 based on gold and annotated sentences.""" check.Eq(len(gold_corpus), len(annotated_corpus), 'Corpora are not aligned') num_gold_tokens = 0 num_test_tokens = 0 num_correct_tokens = 0 def token_span(token): check.Ge(token.end, token.start) return (token.start, token.end) def ratio(numerator, denominator): check.Ge(numerator, 0) check.Ge(denominator, 0) if denominator > 0: return numerator / denominator elif numerator == 0: return 0.0 # map 0/0 to 0 else: return float('inf') # map x/0 to inf for gold_str, annotated_str in zip(gold_corpus, annotated_corpus): gold = sentence_pb2.Sentence() annotated = sentence_pb2.Sentence() gold.ParseFromString(gold_str) annotated.ParseFromString(annotated_str) check.Eq(gold.text, annotated.text, 'Text is not aligned') gold_spans = set() test_spans = set() for token in gold.token: check.NotIn(token_span(token), gold_spans, 'Duplicate token') gold_spans.add(token_span(token)) for token in annotated.token: check.NotIn(token_span(token), test_spans, 'Duplicate token') test_spans.add(token_span(token)) num_gold_tokens += len(gold_spans) num_test_tokens += len(test_spans) num_correct_tokens += len(gold_spans.intersection(test_spans)) tf.logging.info('Total num documents: %d', len(annotated_corpus)) tf.logging.info('Total gold tokens: %d', num_gold_tokens) tf.logging.info('Total test tokens: %d', num_test_tokens) precision = 100 * ratio(num_correct_tokens, num_test_tokens) recall = 100 * ratio(num_correct_tokens, num_gold_tokens) f1 = ratio(2 * precision * recall, precision + recall) tf.logging.info('Precision: %.2f%%', precision) tf.logging.info('Recall: %.2f%%', recall) tf.logging.info('F1: %.2f%%', f1) return round(precision, 2), round(recall, 2), round(f1, 2) def 
segmentation_summaries(gold_corpus, annotated_corpus): """Computes segmentation eval summaries for gold and annotated sentences.""" prec, rec, f1 = calculate_segmentation_metrics(gold_corpus, annotated_corpus) return {'precision': prec, 'recall': rec, 'f1': f1, 'eval_metric': f1}
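
# A minimal usage sketch: computing summaries from two aligned lists of
# serialized Sentence protos. The corpora below are hypothetical placeholders,
# not data shipped with the package; they stay empty so importing the module
# has no side effects.
if __name__ == '__main__':
  gold_corpus = []       # e.g. [sentence_pb2.Sentence(...).SerializeToString()]
  annotated_corpus = []  # must be aligned one-to-one with gold_corpus
  if gold_corpus:
    print(parser_summaries(gold_corpus, annotated_corpus))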
/pyiron_atomistics-0.3.2-py3-none-any.whl/pyiron_atomistics/table/funct.py
import ast import json import numpy as np import warnings from pyiron_atomistics.atomistics.structure.atoms import Atoms, pyiron_to_ase def _get_value_from_incar(job, key): value = job["input/incar/data_dict"]["Value"][ job["input/incar/data_dict"]["Parameter"].index(key) ] if isinstance(value, str): return ast.literal_eval(value) else: return value def get_majority(lst, minority=False): elements_dict = {name: lst.count(name) for name in set(lst)} max_value = np.max(list(elements_dict.values())) majority_element = [ key for key, value in elements_dict.items() if value == max_value ][0] if minority: minority_lst = list(elements_dict.keys()) del minority_lst[minority_lst.index(majority_element)] return majority_element, minority_lst else: return majority_element def get_incar(job): data_dict = job["input/incar/data_dict"] return { key: value for key, value in zip(data_dict["Parameter"], data_dict["Value"]) } def get_sigma(job): return {"sigma": _get_value_from_incar(job=job, key="SIGMA")} def get_ismear(job): return {"ismear": _get_value_from_incar(job=job, key="ISMEAR")} def get_encut(job): return {"encut": _get_value_from_incar(job=job, key="ENCUT")} def get_n_kpts(job): return {"n_kpts": eval(job["input/kpoints/data_dict"]["Value"][3].split()[0])} def get_n_equ_kpts(job): return {"n_equ_kpts": len(job["output/generic/dft/bands/k_points"])} def get_total_number_of_atoms(job): return {"Number_of_atoms": len(job["input/structure/indices"])} def get_average_waves(job): weights = job["output/outcar/irreducible_kpoint_weights"] planewaves = job["output/outcar/number_plane_waves"] return {"avg. plane waves": sum(weights * planewaves) / sum(weights)} def get_plane_waves(job): _, weights, planewaves = job["output/outcar/irreducible_kpoints"] return {"plane waves": sum(weights * planewaves)} def get_ekin_error(job): return { "energy_tot_wo_kin_corr": job["output/outcar/kin_energy_error"] + job["output/generic/energy_tot"][-1] } def get_volume(job): return {"volume": job["output/generic/volume"][-1]} def get_volume_per_atom(job): return { "volume": job["output/generic/volume"][-1] / get_total_number_of_atoms(job=job)["Number_of_atoms"] } def get_elements(job): species = job["input/structure/species"] indices = job["input/structure/indices"] return {s: sum(indices == i) for i, s in enumerate(species)} def get_convergence_check(job): try: conv = job.project.load(job.job_id).convergence_check() except: conv = None return {"Convergence": conv} def get_number_of_species(job): return {"Number_of_species": len(job["output/structure/species"])} def get_number_of_ionic_steps(job): return {"Number_of_ionic_steps": len(job["output/generic/energy_tot"])} def get_number_of_final_electronic_steps(job): el_steps = job["output/generic/scf_energies"] if len(el_steps) != 0: return {"Number_of_final_electronic_steps": len(el_steps[-1])} else: return {"Number_of_final_electronic_steps": None} def get_majority_species(job): indices_lst = job["input/structure/indices"].tolist() element_lst = job["input/structure/species"] majority_element, minority_lst = get_majority( [element_lst[ind] for ind in indices_lst], minority=True ) return {"majority_element": majority_element, "minority_element_list": minority_lst} def get_majority_crystal_structure(job): basis = Atoms().from_hdf(job["input"]) majority_element = basis.get_majority_species()["symbol"] majority_index = [ ind for ind, el in enumerate(basis) if el.symbol == majority_element ] type_list = list( basis[majority_index].analyse.pyscal_cna_adaptive( mode="str", 
ovito_compatibility=True ) ) return {"crystal_structure": get_majority(type_list, minority=False)} def get_job_name(job): return {"job_name": job.job_name} def get_energy_tot_per_atom(job): return { "energy_tot": job["output/generic/energy_tot"][-1] / get_total_number_of_atoms(job=job)["Number_of_atoms"] } def get_energy_tot(job): return {"energy_tot": job["output/generic/energy_tot"][-1]} def get_energy_pot_per_atom(job): return { "energy_pot": job["output/generic/energy_pot"][-1] / get_total_number_of_atoms(job=job)["Number_of_atoms"] } def get_energy_pot(job): return {"energy_pot": job["output/generic/energy_pot"][-1]} def get_energy_free_per_atom(job): return { "energy_free": job["output/generic/dft/energy_free"][-1] / get_total_number_of_atoms(job=job)["Number_of_atoms"] } def get_energy_free(job): return {"energy_free": job["output/generic/dft/energy_free"][-1]} def get_energy_int_per_atom(job): return { "energy_int": job["output/generic/dft/energy_int"][-1] / get_total_number_of_atoms(job=job)["Number_of_atoms"] } def get_energy_int(job): return {"energy_int": job["output/generic/dft/energy_int"][-1]} def get_f_states(job): if "occ_matrix" in job["output/electronic_structure"].list_nodes(): return { "f_states": job["output/electronic_structure/occ_matrix"].flatten().tolist() } elif "occupancy_matrix" in job["output/electronic_structure"].list_nodes(): return { "f_states": job["output/electronic_structure/occupancy_matrix"] .flatten() .tolist() } else: print("get_f_states(): ", job.job_name, job.status) return {"f_states": [0.0]} def get_e_band(job): if "occ_matrix" in job["output/electronic_structure"].list_nodes(): f_occ = job["output/electronic_structure/occ_matrix"].flatten() ev_mat = job["output/electronic_structure/eig_matrix"].flatten() elif "occupancy_matrix" in job["output/electronic_structure"].list_nodes(): f_occ = job["output/electronic_structure/occupancy_matrix"].flatten() ev_mat = job["output/electronic_structure/eigenvalue_matrix"].flatten() else: print("get_e_band(): ", job.job_name, job.status) f_occ = np.array([0.0]) ev_mat = np.array([0.0]) return {"e_band": np.sum(ev_mat * f_occ)} def get_equilibrium_parameters(job): return { key: job["output/" + key] for key in [ "equilibrium_energy", "equilibrium_b_prime", "equilibrium_bulk_modulus", "equilibrium_volume", ] } def get_structure(job): atoms = pyiron_to_ase(job.to_object().get_structure()) atoms_dict = { "symbols": atoms.get_chemical_symbols(), "positions": atoms.get_positions().tolist(), "cell": atoms.get_cell().tolist(), "pbc": atoms.get_pbc().tolist(), "celldisp": atoms.get_celldisp().tolist(), } if atoms.has("tags"): atoms_dict["tags"] = atoms.get_tags().tolist() if atoms.has("masses"): atoms_dict["masses"] = atoms.get_masses().tolist() if atoms.has("momenta"): atoms_dict["momenta"] = atoms.get_momenta().tolist() if atoms.has("initial_magmoms"): atoms_dict["magmoms"] = atoms.get_initial_magnetic_moments().tolist() if atoms.has("initial_charges"): atoms_dict["charges"] = atoms.get_initial_charges().tolist() if not atoms.__dict__["_calc"] == None: warnings.warn("Found calculator: " + str(atoms.__dict__["_calc"])) if not atoms.__dict__["_constraints"] == []: warnings.warn("Found constraint: " + str(atoms.__dict__["_constraints"])) return {"structure": json.dumps(atoms_dict)} def get_forces(job): return {"forces": json.dumps(job["output/generic/forces"][-1].tolist())} def get_magnetic_structure(job): basis = Atoms().from_hdf(job["input"]) magmons = basis.get_initial_magnetic_moments() if all(magmons == None): 
return {"magnetic_structure": "non magnetic"} else: abs_sum_mag = sum(np.abs(magmons)) sum_mag = sum(magmons) if abs_sum_mag == 0 and sum_mag == 0: return {"magnetic_structure": "non magnetic"} elif abs_sum_mag == np.abs(sum_mag): return {"magnetic_structure": "ferro-magnetic"} elif abs_sum_mag > 0 and sum_mag == 0: return {"magnetic_structure": "para-magnetic"} else: return {"magnetic_structure": "unknown"} def get_e_conv_level(job): return { "el_conv": np.max( np.abs( job["output/generic/dft/scf_energy_free"][0] - job["output/generic/dft/scf_energy_free"][0][-1] )[-10:] ) }
/django-clickhouse-1.2.1.tar.gz/django-clickhouse-1.2.1/src/django_clickhouse/management/commands/clickhouse_migrate.py
import json

from django.apps import apps as django_apps
from django.core.management import BaseCommand, CommandParser

from ...configuration import config
from ...migrations import migrate_app


class Command(BaseCommand):
    help = 'Migrates ClickHouse databases'
    requires_migrations_checks = False

    def add_arguments(self, parser: CommandParser) -> None:
        parser.add_argument('app_label', nargs='?', type=str,
                            help='Django App name to migrate. By default all found apps are migrated.')
        parser.add_argument('migration_number', nargs='?', type=int,
                            help='Migration number in selected django app to migrate to.'
                                 ' By default all available migrations are applied.'
                                 ' Note that the library currently has no ability to roll back migrations.')
        parser.add_argument('--database', '-d', nargs='?', type=str, required=False,
                            choices=list(config.DATABASES.keys()),
                            help='ClickHouse database alias key from CLICKHOUSE_DATABASES django setting.'
                                 ' By default migrations are applied to all databases.')

    def handle(self, *args, **options) -> None:
        apps = [options['app_label']] if options['app_label'] \
            else [app.name for app in django_apps.get_app_configs()]
        databases = [options['database']] if options['database'] else list(config.DATABASES.keys())
        kwargs = {'up_to': options['migration_number']} if options['migration_number'] else {}

        self.stdout.write(self.style.MIGRATE_HEADING(
            "Applying ClickHouse migrations for apps %s in databases %s"
            % (json.dumps(apps), json.dumps(databases))))

        any_migrations_applied = False
        for app_label in apps:
            for db_alias in databases:
                res = migrate_app(app_label, db_alias, verbosity=options['verbosity'], **kwargs)
                any_migrations_applied = any_migrations_applied or res

        if not any_migrations_applied:
            self.stdout.write("No migrations to apply")
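
# A minimal usage sketch: the command above can also be invoked programmatically
# through Django's management API. The app label 'my_app' and database alias
# 'default' are hypothetical examples, not names defined by this package.
#
#     from django.core.management import call_command
#     call_command('clickhouse_migrate', 'my_app', database='default', verbosity=1)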
/cellxgene_census-1.5.1-py3-none-any.whl/cellxgene_census/_get_anndata.py
from typing import Optional

import anndata
import tiledbsoma as soma
from somacore.options import SparseDFCoord

from ._experiment import _get_experiment


def get_anndata(
    census: soma.Collection,
    organism: str,
    measurement_name: str = "RNA",
    X_name: str = "raw",
    obs_value_filter: Optional[str] = None,
    obs_coords: Optional[SparseDFCoord] = None,
    var_value_filter: Optional[str] = None,
    var_coords: Optional[SparseDFCoord] = None,
    column_names: Optional[soma.AxisColumnNames] = None,
) -> anndata.AnnData:
    """
    Convenience wrapper around ``soma.Experiment`` query, to build and execute a query,
    and return it as an :class:`anndata.AnnData` object.

    Args:
        census: The census object, usually returned by :func:`cellxgene_census.open_soma()`.
        organism: The organism to query, usually one of `Homo sapiens` or `Mus musculus`.
        measurement_name: The measurement object to query. Defaults to `RNA`.
        X_name: The ``X`` layer to query. Defaults to `raw`.
        obs_value_filter: Value filter for the ``obs`` metadata. Value is a filter query
            written in the SOMA ``value_filter`` syntax.
        obs_coords: Coordinates for the ``obs`` axis, which is indexed by the ``soma_joinid``
            value. May be an ``int``, a list of ``int``, or a slice. The default, ``None``,
            selects all.
        var_value_filter: Value filter for the ``var`` metadata. Value is a filter query
            written in the SOMA ``value_filter`` syntax.
        var_coords: Coordinates for the ``var`` axis, which is indexed by the ``soma_joinid``
            value. May be an ``int``, a list of ``int``, or a slice. The default, ``None``,
            selects all.
        column_names: Columns to fetch for ``obs`` and ``var`` dataframes.

    Returns:
        An :class:`anndata.AnnData` object containing the census slice.

    Lifecycle:
        maturing

    Examples:
        >>> get_anndata(census, "Mus musculus", obs_value_filter="tissue_general in ['brain', 'lung']")

        >>> get_anndata(census, "Homo sapiens", column_names={"obs": ["tissue"]})

        >>> get_anndata(census, "Homo sapiens", obs_coords=slice(0, 1000))
    """
    exp = _get_experiment(census, organism)
    obs_coords = (slice(None),) if obs_coords is None else (obs_coords,)
    var_coords = (slice(None),) if var_coords is None else (var_coords,)
    with exp.axis_query(
        measurement_name,
        obs_query=soma.AxisQuery(value_filter=obs_value_filter, coords=obs_coords),
        var_query=soma.AxisQuery(value_filter=var_value_filter, coords=var_coords),
    ) as query:
        return query.to_anndata(X_name=X_name, column_names=column_names)
/lco-banzai-0.19.3.tar.gz/lco-banzai-0.19.3/astropy_helpers/licenses/LICENSE_NUMPYDOC.rst
------------------------------------------------------------------------------- The files - numpydoc.py - docscrape.py - docscrape_sphinx.py - phantom_import.py have the following license: Copyright (C) 2008 Stefan van der Walt <[email protected]>, Pauli Virtanen <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------------------------------------------------------------- The files - compiler_unparse.py - comment_eater.py - traitsdoc.py have the following license: This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source Initiative. Copyright (c) 2006, Enthought, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Enthought, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------------------------------------------------------------- The file - plot_directive.py originates from Matplotlib (http://matplotlib.sf.net/) which has the following license: Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. 1. This LICENSE AGREEMENT is between John D. 
Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3. 4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement.
/config-client-1.3.0.tar.gz/config-client-1.3.0/config/cli.py
import random import re from json import dump, dumps from pathlib import Path from typing import List import click from requests.auth import HTTPBasicAuth, HTTPDigestAuth from rich.console import Console from rich.json import JSON from rich.panel import Panel from rich.status import Status from rich.table import Table from config import __version__ from config.exceptions import RequestFailedException from config.spring import ConfigClient CONTEXT_SETTINGS = dict( help_option_names=["-h", "--help"], ) EMOJI_ERRORS: List[str] = ["🤯", "😵", "🤮", "🤢", "😨", "😭", "💩", "💔", "💥", "🔥"] EMOJI_SUCCESS: List[str] = ["🥳", "🤩", "😻", "💖", "🎉", "🎊"] EMOJI_NOT_FOUND = ["🙂", "😌", "🤨", "🙃", "😅"] console = Console() @click.group(context_settings=CONTEXT_SETTINGS) @click.version_option(version=__version__) def cli(): pass @cli.command() @click.argument("app_name", envvar="APP_NAME") @click.option( "-a", "--address", envvar="CONFIGSERVER_ADDRESS", required=True, default="http://localhost:8888", show_default=True, help="ConfigServer address.", ) @click.option( "-l", "--label", envvar="LABEL", required=True, default="master", show_default=True, help="Branch config.", ) @click.option( "-p", "--profile", envvar="PROFILE", required=True, default="development", show_default=True, help="Profile config.", ) @click.option("-f", "--filter", required=False, help="Filter output by.") @click.option("--auth", required=False, help="Basic authentication credentials.") @click.option("--digest", required=False, help="Digest authentication credentials.") @click.option( "--file", required=False, help="Gets remote file from server and saves locally." ) @click.option("--json", is_flag=True, required=False, help="Save output as json.") @click.option("-v", "--verbose", is_flag=True, help="Extend output info.") def client( app_name, address, label, profile, filter, auth, digest, file, json, verbose ): """Interact with Spring Cloud Server via cli.""" client = ConfigClient( address=address, label=label, app_name=app_name, profile=profile, fail_fast=False, ) if file: # get file from server and exit with Status("Contacting server...", spinner="dots4") as status: try: resp = client.get_file(file) except RequestFailedException: raise click.ClickException("💥 Failed to contact server!") Path(file).write_text(resp) status.update("OK!") console.print(f"File saved: [cyan]{file}[/cyan]", style="bold") raise SystemExit if verbose: table = Table.grid(padding=(0, 1)) table.add_column(style="cyan", justify="right") table.add_column(style="magenta") table.add_row("address[yellow]:[/yellow] ", client.address) table.add_row("label[yellow]:[/yellow] ", client.label) table.add_row("profile[yellow]:[/yellow] ", client.profile) table.add_row("URL[yellow]:[/yellow] ", client.url) console.print( Panel( table, title="[bold yellow]client info[/bold yellow]", border_style="yellow", expand=True, ) ) with Status("Contacting server...", spinner="dots4") as status: emoji = random.choice(EMOJI_ERRORS) try: if auth: username, password = auth.split(":") auth = HTTPBasicAuth(username, password) elif digest: username, password = digest.split(":") auth = HTTPDigestAuth(username, password) else: auth = None client.get_config(auth=auth) except ValueError: raise click.ClickException( f"{emoji} Bad credentials format for auth method. 
Format expected: <user>:<password>" ) except ConnectionError: raise click.ClickException("💥 Failed to contact server!") status.update("OK!") content = client.config if filter: content = client.get(filter) if len(str(content)) == 0: emoji = random.choice(EMOJI_NOT_FOUND) console.print( f"{emoji} No result found for filter: [yellow]'[white bold]{filter}[/white bold]'[/yellow]", ) raise SystemExit if json: with open("response.json", "w", encoding="utf-8") as f: dump(content, f, indent=4, sort_keys=True) console.print("File saved: [cyan]response.json[/cyan]", style="bold") raise SystemExit filter = filter or "all" console.print( Panel( JSON(dumps(content), indent=4, highlight=True, sort_keys=True), title=f"[bold][green]report for filter[/green][yellow]: [/yellow]'[magenta italic]{filter}[/magenta italic]'[/bold]", highlight=True, border_style="white", expand=True, ) ) @cli.command() @click.argument("text") @click.option( "-a", "--address", envvar="CONFIGSERVER_ADDRESS", required=True, default="http://localhost:8888", help="ConfigServer address.", ) @click.option( "-p", "--path", required=True, default="/decrypt", help="Decrypt path endpoint." ) def decrypt(text, address, path): """Decrypt a input via Spring Cloud Config.""" client = ConfigClient(address=address, fail_fast=False) cipher = re.match(r"^.?{cipher}?(?P<name>\w.*)", text) if cipher: text = cipher.group("name") try: resp = client.decrypt(text, path=path) except Exception: raise click.ClickException("💥 Failed to contact server!") table = Table.grid(padding=(0, 1)) table.add_column(style="cyan", justify="right") table.add_column(style="magenta") table.add_row("decrypted data[yellow]:[/yellow] ", f"'{resp}'") console.print(Panel(table, border_style="yellow", expand=True)) @cli.command() @click.argument("data") @click.option( "-a", "--address", envvar="CONFIGSERVER_ADDRESS", default="http://localhost:8888", required=True, help="ConfigServer address.", ) @click.option( "-p", "--path", default="/encrypt", required=True, help="Encrypt path endpoint." ) @click.option("--raw", is_flag=True, help=r"Format output including {cipher}") def encrypt(data, address, path, raw): """Encrypt a input via Spring Cloud Config.""" client = ConfigClient(address=address, fail_fast=False) try: resp = client.encrypt(data, path=path) except Exception: raise click.ClickException("💥 Failed to contact server!") if raw: resp = f"{{cipher}}{resp}" table = Table.grid(padding=(0, 1)) table.add_column(style="cyan", justify="right") table.add_column(style="magenta") table.add_row("encrypted data[yellow]:[/yellow] ", f"'{resp}'") console.print(Panel(table, border_style="yellow", expand=True))
/myams_js-1.16.0.tar.gz/myams_js-1.16.0/pkg/js/ext/validate/i18n/messages_pt_BR.min.js
!function(e){"function"==typeof define&&define.amd?define(["jquery","../jquery.validate"],e):"object"==typeof module&&module.exports?module.exports=e(require("jquery")):e(jQuery)}((function(e){return e.extend(e.validator.messages,{required:"Este campo &eacute; requerido.",remote:"Por favor, corrija este campo.",email:"Por favor, forne&ccedil;a um endere&ccedil;o de email v&aacute;lido.",url:"Por favor, forne&ccedil;a uma URL v&aacute;lida.",date:"Por favor, forne&ccedil;a uma data v&aacute;lida.",dateISO:"Por favor, forne&ccedil;a uma data v&aacute;lida (ISO).",number:"Por favor, forne&ccedil;a um n&uacute;mero v&aacute;lido.",digits:"Por favor, forne&ccedil;a somente d&iacute;gitos.",creditcard:"Por favor, forne&ccedil;a um cart&atilde;o de cr&eacute;dito v&aacute;lido.",equalTo:"Por favor, forne&ccedil;a o mesmo valor novamente.",maxlength:e.validator.format("Por favor, forne&ccedil;a n&atilde;o mais que {0} caracteres."),minlength:e.validator.format("Por favor, forne&ccedil;a ao menos {0} caracteres."),rangelength:e.validator.format("Por favor, forne&ccedil;a um valor entre {0} e {1} caracteres de comprimento."),range:e.validator.format("Por favor, forne&ccedil;a um valor entre {0} e {1}."),max:e.validator.format("Por favor, forne&ccedil;a um valor menor ou igual a {0}."),min:e.validator.format("Por favor, forne&ccedil;a um valor maior ou igual a {0}."),step:e.validator.format("Por favor, forne&ccedil;a um valor m&uacute;ltiplo de {0}."),maxWords:e.validator.format("Por favor, forne&ccedil;a com {0} palavras ou menos."),minWords:e.validator.format("Por favor, forne&ccedil;a pelo menos {0} palavras."),rangeWords:e.validator.format("Por favor, forne&ccedil;a entre {0} e {1} palavras."),accept:"Por favor, forne&ccedil;a um tipo v&aacute;lido.",alphanumeric:"Por favor, forne&ccedil;a somente com letras, n&uacute;meros e sublinhados.",bankaccountNL:"Por favor, forne&ccedil;a com um n&uacute;mero de conta banc&aacute;ria v&aacute;lida.",bankorgiroaccountNL:"Por favor, forne&ccedil;a um banco v&aacute;lido ou n&uacute;mero de conta.",bic:"Por favor, forne&ccedil;a um c&oacute;digo BIC v&aacute;lido.",cifES:"Por favor, forne&ccedil;a um c&oacute;digo CIF v&aacute;lido.",creditcardtypes:"Por favor, forne&ccedil;a um n&uacute;mero de cart&atilde;o de cr&eacute;dito v&aacute;lido.",currency:"Por favor, forne&ccedil;a uma moeda v&aacute;lida.",dateFA:"Por favor, forne&ccedil;a uma data correta.",dateITA:"Por favor, forne&ccedil;a uma data correta.",dateNL:"Por favor, forne&ccedil;a uma data correta.",extension:"Por favor, forne&ccedil;a um valor com uma extens&atilde;o v&aacute;lida.",giroaccountNL:"Por favor, forne&ccedil;a um n&uacute;mero de conta corrente v&aacute;lido.",iban:"Por favor, forne&ccedil;a um c&oacute;digo IBAN v&aacute;lido.",integer:"Por favor, forne&ccedil;a um n&uacute;mero n&atilde;o decimal.",ipv4:"Por favor, forne&ccedil;a um IPv4 v&aacute;lido.",ipv6:"Por favor, forne&ccedil;a um IPv6 v&aacute;lido.",lettersonly:"Por favor, forne&ccedil;a apenas com letras.",letterswithbasicpunc:"Por favor, forne&ccedil;a apenas letras ou pontua&ccedil;ões.",mobileNL:"Por favor, fornece&ccedil;a um n&uacute;mero v&aacute;lido de telefone.",mobileUK:"Por favor, fornece&ccedil;a um n&uacute;mero v&aacute;lido de telefone.",nieES:"Por favor, forne&ccedil;a um NIE v&aacute;lido.",nifES:"Por favor, forne&ccedil;a um NIF v&aacute;lido.",nowhitespace:"Por favor, n&atilde;o utilize espa&ccedil;os em branco.",pattern:"O formato fornecido &eacute; inv&aacute;lido.",phoneNL:"Por favor, forne&ccedil;a 
um n&uacute;mero de telefone v&aacute;lido.",phoneUK:"Por favor, forne&ccedil;a um n&uacute;mero de telefone v&aacute;lido.",phoneUS:"Por favor, forne&ccedil;a um n&uacute;mero de telefone v&aacute;lido.",phonesUK:"Por favor, forne&ccedil;a um n&uacute;mero de telefone v&aacute;lido.",postalCodeCA:"Por favor, forne&ccedil;a um n&uacute;mero de c&oacute;digo postal v&aacute;lido.",postalcodeIT:"Por favor, forne&ccedil;a um n&uacute;mero de c&oacute;digo postal v&aacute;lido.",postalcodeNL:"Por favor, forne&ccedil;a um n&uacute;mero de c&oacute;digo postal v&aacute;lido.",postcodeUK:"Por favor, forne&ccedil;a um n&uacute;mero de c&oacute;digo postal v&aacute;lido.",postalcodeBR:"Por favor, forne&ccedil;a um CEP v&aacute;lido.",require_from_group:e.validator.format("Por favor, forne&ccedil;a pelo menos {0} destes campos."),skip_or_fill_minimum:e.validator.format("Por favor, optar entre ignorar esses campos ou preencher pelo menos {0} deles."),stateUS:"Por favor, forne&ccedil;a um estado v&aacute;lido.",strippedminlength:e.validator.format("Por favor, forne&ccedil;a pelo menos {0} caracteres."),time:"Por favor, forne&ccedil;a um hor&aacute;rio v&aacute;lido, no intervado de 00:00 a 23:59.",time12h:"Por favor, forne&ccedil;a um hor&aacute;rio v&aacute;lido, no intervado de 01:00 a 12:59 am/pm.",url2:"Por favor, forne&ccedil;a uma URL v&aacute;lida.",vinUS:"O n&uacute;mero de identifica&ccedil;&atilde;o de ve&iacute;culo informado (VIN) &eacute; inv&aacute;lido.",zipcodeUS:"Por favor, forne&ccedil;a um c&oacute;digo postal americano v&aacute;lido.",ziprange:"O c&oacute;digo postal deve estar entre 902xx-xxxx e 905xx-xxxx",cpfBR:"Por favor, forne&ccedil;a um CPF v&aacute;lido.",nisBR:"Por favor, forne&ccedil;a um NIS/PIS v&aacute;lido",cnhBR:"Por favor, forne&ccedil;a um CNH v&aacute;lido.",cnpjBR:"Por favor, forne&ccedil;a um CNPJ v&aacute;lido."}),e}));
/hi-ml-azure-0.3.4.tar.gz/hi-ml-azure-0.3.4/src/health_azure/paths.py
import logging
from pathlib import Path

ENVIRONMENT_YAML_FILE_NAME = "environment.yml"

REPO_HIML_FOLDER = "hi-ml"
REPO_HIML_AZURE_FOLDER = "hi-ml-azure"


def is_himl_used_from_git_repo() -> bool:
    """Returns False if HI-ML was installed as a package into site-packages. Returns True if the
    HI-ML codebase is used from a clone of the full git repository.

    :return: False if HI-ML is installed as a package, True if used via source from git.
    """
    health_ml_root = Path(__file__).resolve().parent.parent
    logging.debug(f"health_ml root: {health_ml_root}")
    if health_ml_root.parent.stem == "site-packages":
        return False
    himl_root = health_ml_root.parent.parent
    # These two folders are present in the top-level folder of the git repo
    expected_folders = [REPO_HIML_FOLDER, REPO_HIML_AZURE_FOLDER]
    all_folders_exist = all((himl_root / folder).is_dir() for folder in expected_folders)
    if all_folders_exist:
        return True
    raise ValueError(
        "Unable to determine the installation status: Code is not used from site-packages, but the "
        "expected top-level folders are not present?"
    )


def git_repo_root_folder() -> Path:
    """
    Attempts to return the path to the top-level hi-ml repo that contains the hi-ml and hi-ml-azure packages.
    This top level repo will only be present if hi-ml has been installed as a git submodule, or the repo has
    been directly downloaded. Otherwise (e.g. if hi-ml has been installed as a pip package) raises an exception.

    :return: Path to the himl root dir if it exists
    """
    if not is_himl_used_from_git_repo():
        raise ValueError("This function should not be used if hi-ml is used as an installed package.")
    current_file = Path(__file__).resolve()
    return current_file.parent.parent.parent.parent


def shared_himl_conda_env_file() -> Path:
    """
    Attempts to return the path to the Conda environment file in the hi-ml project folder. If hi-ml has been
    installed as a package (instead of as a git submodule or directly downloading the repo) it's not possible
    to find this shared conda file, and so a ValueError will be raised.

    :return: Path to the Conda environment file in the hi-ml project folder
    :raises: ValueError if hi-ml has not been installed/downloaded from the git repo (i.e. we can't get the
        path to the shared environment definition file)
    """
    repo_root_folder = git_repo_root_folder()
    return repo_root_folder / "hi-ml" / ENVIRONMENT_YAML_FILE_NAME
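
# A minimal usage sketch, assuming hi-ml is used from a clone of the git
# repository (the helpers above raise ValueError for ambiguous layouts).
if __name__ == "__main__":
    if is_himl_used_from_git_repo():
        print(shared_himl_conda_env_file())
    else:
        print("hi-ml is installed as a package; no shared environment.yml available.")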
/ondewo_bpi-4.1.1-py3-none-any.whl/ondewo_bpi/bpi_services.py
import functools from abc import ABCMeta, abstractmethod from dataclasses import dataclass, field from typing import Dict, Callable, List, Optional import grpc import regex as re from ondewo.logging.decorators import Timer from ondewo.logging.logger import logger_console from ondewo.nlu import session_pb2, intent_pb2, user_pb2, context_pb2 from ondewo.nlu.client import Client as NLUClient from ondewo.nlu.session_pb2 import TextInput from ondewo_bpi.autocoded.agent_grpc_autocode import AutoAgentsServicer from ondewo_bpi.autocoded.aiservices_grpc_autocode import AutoAiServicesServicer from ondewo_bpi.autocoded.context_grpc_autocode import AutoContextsServicer from ondewo_bpi.autocoded.entity_type_grpc_autocode import AutoEntityTypesServicer from ondewo_bpi.autocoded.intent_grpc_autocode import AutoIntentsServicer from ondewo_bpi.autocoded.project_role_grpc_autocode import AutoProjectRolesServicer from ondewo_bpi.autocoded.session_grpc_autocode import AutoSessionsServicer from ondewo_bpi.autocoded.user_grpc_autocode import AutoUsersServicer from ondewo_bpi.config import SENTENCE_TRUNCATION from ondewo_bpi.constants import SipTriggers, QueryTriggers from ondewo_bpi.helpers import get_session_from_response from ondewo_bpi.message_handler import MessageHandler, SingleMessageHandler @dataclass() class IntentCallbackAssignor: """Class for keeping track of the intents and their handlers""" sort_index: int = field(init=False, repr=False) intent_pattern: str handlers: List[Callable] def __gt__(self, other: 'IntentCallbackAssignor') -> bool: return self.sort_index > other.sort_index def __lt__(self, other: 'IntentCallbackAssignor') -> bool: return self.sort_index < other.sort_index def __post_init__(self): object.__setattr__(self, 'sort_index', len(self.intent_pattern)) class BpiSessionsServices(AutoSessionsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass def __init__(self) -> None: self.intent_handlers: List[IntentCallbackAssignor] = list() self.trigger_handlers: Dict[str, Callable] = { i.value: self.trigger_function_not_implemented for i in [*SipTriggers, *QueryTriggers] } def register_intent_handler(self, intent_pattern: str, handlers: List[Callable]) -> None: intent_handler: IntentCallbackAssignor = IntentCallbackAssignor(intent_pattern=intent_pattern, handlers=handlers) self.intent_handlers.append(intent_handler) self.intent_handlers = sorted(self.intent_handlers, reverse=True) def register_trigger_handler(self, trigger: str, handler: Callable) -> None: self.trigger_handlers[trigger] = handler def trigger_function_not_implemented( self, response: session_pb2.DetectIntentResponse, message: intent_pb2.Intent.Message, trigger: str, found_triggers: Dict[str, List[str]], ) -> None: logger_console.warning( { "message": f"no function for the trigger {trigger}, please subclass and implement", "trigger": trigger, "content": found_triggers[trigger], } ) def DetectIntent( self, request: session_pb2.DetectIntentRequest, context: grpc.ServicerContext ) -> session_pb2.DetectIntentResponse: try: if len(request.query_input.text.text) > SENTENCE_TRUNCATION: logger_console.warning(f'The received text is too long, it will be truncated ' f'to {SENTENCE_TRUNCATION} characters!') truncated_text: TextInput = TextInput(text=request.query_input.text.text[:SENTENCE_TRUNCATION]) request.query_input.text.CopyFrom(truncated_text) text = request.query_input.text.text except Exception as e: logger_console.exception(f"An issue was encountered in BPI:\n" f"\tSeems like the request 
query_input data was not properly formatted\n" f"\tDetails: {e}") text = "error" logger_console.debug( { "message": f"CAI-DetectIntentRequest to CAI, text input: {text}", "content": text, "text": text, "tags": ["text"], } ) cai_response = self.perform_detect_intent(request) intent_name = cai_response.query_result.intent.display_name logger_console.debug( { "message": f"CAI-DetectIntentResponse from CAI, intent_name: {intent_name}", "content": intent_name, "intent_name": intent_name, "session_id": get_session_from_response(cai_response), "tags": ["text"], } ) cai_response = self.process_messages(cai_response) return self.process_intent_handler(cai_response) @Timer(log_arguments=False, recursive=True) def perform_detect_intent(self, request: session_pb2.DetectIntentRequest, ) -> session_pb2.DetectIntentResponse: return self.client.services.sessions.detect_intent(request) @Timer(log_arguments=False, recursive=True) def process_messages(self, response: session_pb2.DetectIntentResponse, ) -> session_pb2.DetectIntentResponse: for j, message in enumerate(response.query_result.fulfillment_messages): found_triggers = MessageHandler.get_triggers(message, get_session_from_response(response)) for found_trigger in found_triggers: new_response: Optional[session_pb2.DetectIntentResponse] = \ self.trigger_handlers[found_trigger](response, message, found_trigger, found_triggers) if new_response: if not new_response.response_id == response.response_id: return new_response for found_trigger in found_triggers: SingleMessageHandler.substitute_pattern_in_message(message, found_trigger, "") self.quicksend_to_api(response, message, j) if not len(response.query_result.fulfillment_messages): self.quicksend_to_api(response, None, 0) return response def quicksend_to_api( self, response: session_pb2.DetectIntentResponse, message: Optional[intent_pb2.Intent.Message], count: int ) -> None: logger_console.warning({"message": "quicksend_to_api not written, please subclass and implement"}) @Timer(log_arguments=False, recursive=True) def process_intent_handler( self, cai_response: session_pb2.DetectIntentResponse ) -> session_pb2.DetectIntentResponse: # Create an ordered dictionary by key value length intent_name = cai_response.query_result.intent.display_name handlers: List[Callable] = self._get_handlers_for_intent(intent_name, self.intent_handlers) for handler in handlers: cai_response = handler(cai_response, self.client) text = [i.text.text for i in cai_response.query_result.fulfillment_messages] logger_console.info( { "message": f"BPI-DetectIntentResponse from BPI with text: {text}", "content": text, "text": text, "tags": ["text", "clean"], } ) return cai_response def _get_handlers_for_intent(self, intent_name: str, assignors: List[IntentCallbackAssignor]) -> List[Callable]: for assignor in assignors: if re.match(assignor.intent_pattern, intent_name): return assignor.handlers return [] class BpiUsersServices(AutoUsersServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass def Login(self, request: user_pb2.LoginRequest, context: grpc.ServicerContext) -> user_pb2.LoginResponse: logger_console.info(f'Login request handled by bpi\n' f'Login user: {request.user_email}') return super().Login(request, context) class BpiContextServices(AutoContextsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass def CreateContext( self, request: context_pb2.CreateContextRequest, context: grpc.ServicerContext ) -> context_pb2.Context: 
logger_console.info("passing create context request on to CAI") return self.client.services.contexts.create_context(request=request) class BpiAgentsServices(AutoAgentsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiEntityTypeServices(AutoEntityTypesServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiAiServicesServices(AutoAiServicesServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiIntentsServices(AutoIntentsServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass class BpiProjectRolesServices(AutoProjectRolesServicer): __metaclass__ = ABCMeta @property @abstractmethod def client(self) -> NLUClient: pass
/lollygag-cli-0.0.1.tar.gz/lollygag-cli-0.0.1/lollygag/vendor/npyscreen/wgmonthbox.py
from . import wgwidget as widget import calendar import datetime import curses class DateEntryBase(widget.Widget): def __init__(self, screen, allowPastDate=True, allowTodaysDate=True, firstWeekDay=6, use_datetime = False, allowClear=False, **keywords): super(DateEntryBase, self).__init__(screen, **keywords) self.allow_date_in_past = allowPastDate self.allow_todays_date = allowTodaysDate self.allow_clear = allowClear self.use_datetime = use_datetime self._max = datetime.date.max self._min = datetime.date.min self.firstWeekDay = firstWeekDay def date_or_datetime(self): if self.use_datetime: return datetime.datetime else: return datetime.date def _check_date(self): if not self.value: return None if not self.allow_date_in_past: if self.value < self.date_or_datetime().today(): if self.allow_todays_date: self.value = self.date_or_datetime().today() else: self.value = self.date_or_datetime().today() + datetime.timedelta(1) def _check_today_validity(self, onErrorHigher=True): """If not allowed to select today's date, and today is selected, move either higher or lower depending on the value of onErrorHigher""" if not self.allow_date_in_past: onErrorHigher = True if self.allow_todays_date: return True else: if self.value == self.date_or_datetime().today(): if onErrorHigher: self.value += datetime.timedelta(1) else: self.value -= datetime.timedelta(1) def set_up_handlers(self): super(DateEntryBase, self).set_up_handlers() self.handlers.update({ "D": self.h_day_less, "d": self.h_day_more, "W": self.h_week_less, "w": self.h_week_more, "M": self.h_month_less, "m": self.h_month_more, "Y": self.h_year_less, "y": self.h_year_more, "t": self.h_find_today, "q": self.h_clear, "c": self.h_clear, }) def _reduce_value_by_delta(self, delta): old_value = self.value try: self.value -= delta except: self.value = old_value def _increase_value_by_delta(self, delta): old_value = self.value try: self.value += delta except: self.value = old_value def h_day_less(self, *args): self._reduce_value_by_delta(datetime.timedelta(1)) self._check_date() self._check_today_validity(onErrorHigher=False) def h_day_more(self, *args): self._increase_value_by_delta(datetime.timedelta(1)) self._check_date() self._check_today_validity(onErrorHigher=True) def h_week_less(self, *args): self._reduce_value_by_delta(datetime.timedelta(7)) self._check_date() self._check_today_validity(onErrorHigher=False) def h_week_more(self, *args): self._increase_value_by_delta(datetime.timedelta(7)) self._check_date() self._check_today_validity(onErrorHigher=True) def h_month_less(self, *args): self._reduce_value_by_delta(datetime.timedelta(28)) self._check_date() self._check_today_validity(onErrorHigher=False) def h_month_more(self, *args): self._increase_value_by_delta(datetime.timedelta(28)) self._check_date() self._check_today_validity(onErrorHigher=True) def h_year_less(self, *args): old_value = self.value try: if self.value.month == 2 and self.value.day == 29: self.value = self.value.replace(year=self.value.year-1, day=self.value.day-1) else: self.value = self.value.replace(year=self.value.year-1) self._check_date() self._check_today_validity(onErrorHigher=False) except: self.value=old_value def h_year_more(self, *args): old_value = self.value try: if self.value.month == 2 and self.value.day == 29: self.value = self.value.replace(year=self.value.year+1, day=self.value.day-1) else: self.value = self.value.replace(year=self.value.year+1) self._check_date() self._check_today_validity(onErrorHigher=True) except: self.value = old_value def 
h_find_today(self, *args): self.value = self.date_or_datetime().today() self._check_date() self._check_today_validity(onErrorHigher=True) def h_clear(self, *args): if self.allow_clear: self.value = None self.editing = None class MonthBox(DateEntryBase): DAY_FIELD_WIDTH = 4 def __init__(self, screen, **keywords): super(MonthBox, self).__init__(screen, **keywords) def calculate_area_needed(self): # Rember that although months only have 4-5 weeks, they can span 6 weeks. # Currently allowing 2 lines for headers, so 8 lines total return 10, self.__class__.DAY_FIELD_WIDTH * 7 def update(self, clear=True): calendar.setfirstweekday(self.firstWeekDay) if clear: self.clear() if self.hidden: self.clear() return False # Title line if not self.value: _title_line = "No Value Set" else: year = self.value.year month = self.value.month try: monthname = self.value.strftime('%B') except ValueError: monthname = "Month: %s" % self.value.month day = self.value.day _title_line = "%s, %s" % (monthname, year) if isinstance(_title_line, bytes): _title_line = _title_line.decode(self.encoding, 'replace') if self.do_colors(): title_attribute = self.parent.theme_manager.findPair(self) else: title_attribute = curses.A_NORMAL self.add_line(self.rely, self.relx, _title_line, self.make_attributes_list(_title_line, title_attribute), self.width-1 ) if self.value: # Print the days themselves try: cal_data = calendar.monthcalendar(year, month) do_cal_print = True except OverflowError: do_cal_print = False self.parent.curses_pad.addstr(self.rely+1, self.relx, "Unable to display") self.parent.curses_pad.addstr(self.rely+2, self.relx, "calendar for date.") if do_cal_print: # Print the day names # weekheader puts an extra space at the end of each name cal_header = calendar.weekheader(self.__class__.DAY_FIELD_WIDTH - 1) if isinstance(cal_header, bytes): cal_header = cal_header.decode(self.encoding, 'replace') if self.do_colors(): cal_title_attribute = self.parent.theme_manager.findPair(self, 'LABEL') else: cal_title_attribute = curses.A_NORMAL self.add_line(self.rely+1, self.relx, cal_header, self.make_attributes_list(cal_header, cal_title_attribute), self.width, ) print_line = self.rely+2 for calrow in cal_data: print_column = self.relx for thisday in calrow: if thisday is 0: pass elif day == thisday: if self.do_colors(): self.parent.curses_pad.addstr(print_line, print_column, str(thisday), curses.A_STANDOUT | self.parent.theme_manager.findPair(self, self.color)) else: self.parent.curses_pad.addstr(print_line, print_column, str(thisday), curses.A_STANDOUT) else: if self.do_colors(): self.parent.curses_pad.addstr(print_line, print_column, str(thisday), self.parent.theme_manager.findPair(self, self.color)) else: self.parent.curses_pad.addstr(print_line, print_column, str(thisday)) print_column += self.__class__.DAY_FIELD_WIDTH print_line += 1 # Print some help if self.allow_clear: key_help = "keys: dwmyDWMY t cq" else: key_help = "keys: dwmyDWMY t" if self.do_colors(): self.parent.curses_pad.addstr(self.rely+9, self.relx, key_help, self.parent.theme_manager.findPair(self, 'LABEL')) else: self.parent.curses_pad.addstr(self.rely+9, self.relx, key_help) def set_up_handlers(self): super(MonthBox, self).set_up_handlers() self.handlers.update({curses.KEY_LEFT: self.h_day_less, curses.KEY_RIGHT: self.h_day_more, curses.KEY_UP: self.h_week_less, curses.KEY_DOWN: self.h_week_more, curses.ascii.SP: self.h_exit_down, "^T": self.h_find_today, })
/pyqodeng.core-0.0.4.tar.gz/pyqodeng.core-0.0.4/pyqodeng/core/styles/qt.py
from pygments.style import Style
from pygments.token import Comment, Error, Generic, Keyword, Literal, Name, \
    Operator, Text, Punctuation


class QtStyle(Style):
    """ Port of the qt style """
    default_style = ''
    background_color = '#ffffff'
    highlight_color = '#c7e7f9'

    styles = {
        Comment.Multiline: ' #008000',
        Comment.Preproc: '#000080',
        Comment.Single: ' #808080',
        Comment.Special: 'bold #000080',
        Comment: ' #808080',
        Error: '#CC0000',
        Generic.Deleted: 'bg:#ffdddd #000000',
        Generic.Emph: ' #000000',
        Generic.Error: '#aa0000',
        Generic.Heading: '#999999',
        Generic.Inserted: 'bg:#ddffdd #000000',
        Generic.Output: '#888888',
        Generic.Prompt: '#555555',
        Generic.Strong: 'bold',
        Generic.Subheading: '#aaaaaa',
        Generic.Traceback: '#aa0000',
        Keyword.Constant: '#808000 ',
        Keyword.Declaration: '#808000',
        Keyword.Namespace: '#000080',
        Keyword.Pseudo: '#808000',
        Keyword.Reserved: '#808000 bold',
        Keyword.Type: '#800080',
        Keyword: '#808000 bold',
        Literal.Number: '#000080',
        Literal.String: '#008000',
        Literal.String.Doc: '#000080',
        Name.Attribute: '#800080',
        Name.Builtin.Pseudo: '#94558D',
        Name.Builtin: '#AA00AA',
        Name.Class: '#800080 bold',
        Name.Constant: '#800080',
        Name.Decorator: '#808000',
        Name.Entity: '#000000',
        Name.Exception: '#800080',
        Name.Function: '#800000 bold',
        Name.Label: '#800000',
        Name.Namespace: '#000000',
        Name.Tag: '#2984C6 bold',
        Name.Variable.Class: '#800080',
        Name.Variable.Global: '#000000',
        Name.Variable.Instance: '#800000',
        Name.Variable: '#000000',
        Operator.Word: '#808000 bold',
        Operator: '#808000 bold',
        Text: '#000000',
        Text.Whitespace: '#BFBFBF',
        Punctuation: '#202020'
    }
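
# A minimal usage sketch: a pygments Style subclass such as QtStyle can be
# passed straight to a formatter. The sample source string is arbitrary.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    print(highlight("print('hello')", PythonLexer(), HtmlFormatter(style=QtStyle)))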
/safegate_pro-2021.7.6-py3-none-any.whl/homeassistant/components/coronavirus/__init__.py
from datetime import timedelta
import logging

import async_timeout
import coronavirus

from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client, entity_registry, update_coordinator

from .const import DOMAIN

PLATFORMS = ["sensor"]


async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    """Set up the Coronavirus component."""
    # Make sure coordinator is initialized.
    await get_coordinator(hass)
    return True


async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Coronavirus from a config entry."""
    if isinstance(entry.data["country"], int):
        hass.config_entries.async_update_entry(
            entry, data={**entry.data, "country": entry.title}
        )

        @callback
        def _async_migrator(entity_entry: entity_registry.RegistryEntry):
            """Migrate away from unstable ID."""
            country, info_type = entity_entry.unique_id.rsplit("-", 1)
            if not country.isnumeric():
                return None
            return {"new_unique_id": f"{entry.title}-{info_type}"}

        await entity_registry.async_migrate_entries(
            hass, entry.entry_id, _async_migrator
        )

    if not entry.unique_id:
        hass.config_entries.async_update_entry(entry, unique_id=entry.data["country"])

    coordinator = await get_coordinator(hass)
    if not coordinator.last_update_success:
        await coordinator.async_config_entry_first_refresh()

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True


async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)


async def get_coordinator(
    hass: HomeAssistant,
) -> update_coordinator.DataUpdateCoordinator:
    """Get the data update coordinator."""
    if DOMAIN in hass.data:
        return hass.data[DOMAIN]

    async def async_get_cases():
        with async_timeout.timeout(10):
            return {
                case.country: case
                for case in await coronavirus.get_cases(
                    aiohttp_client.async_get_clientsession(hass)
                )
            }

    hass.data[DOMAIN] = update_coordinator.DataUpdateCoordinator(
        hass,
        logging.getLogger(__name__),
        name=DOMAIN,
        update_method=async_get_cases,
        update_interval=timedelta(hours=1),
    )
    await hass.data[DOMAIN].async_refresh()
    return hass.data[DOMAIN]
/fiftyone-0.20.1rc1.tar.gz/fiftyone-0.20.1rc1/docs/source/custom_directives.py
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import StringList
from docutils import nodes


class CustomCardItemDirective(Directive):
    """A custom card item for use on pages that contain a list of links with
    short descriptions and optional images.

    The card item provides a heading, a description, and an optional image to
    the right. The entire card is clickable and links to the provided link.

    Example usage::

        .. customcarditem::
            :header: Custom card
            :description: This is a custom card. Click to learn more.
            :link: other/page.html
            :image: ../_static/images/custom-image.jpg
            :tags: Custom-Tag
    """

    option_spec = {
        "header": directives.unchanged,
        "description": directives.unchanged,
        "link": directives.unchanged,
        "image": directives.unchanged,
        "tags": directives.unchanged,
    }

    def run(self):
        header = self.options.get("header", "")
        description = self.options.get("description", "")
        link = self.options.get("link", "")
        image = self.options.get("image", "")
        tags = self.options.get("tags", "")

        if image:
            card_rst = _CUSTOM_CARD_TEMPLATE.format(
                header=header,
                description=description,
                link=link,
                image='<img src="%s">' % image,
                tags=tags,
            )
        else:
            # No image
            template = _CUSTOM_CARD_TEMPLATE.replace(
                '<div class="tutorials-image">{image}</div>', ""
            )
            card_rst = template.format(
                header=header,
                description=description,
                link=link,
                tags=tags,
            )

        card_list = StringList(card_rst.split("\n"))
        card = nodes.paragraph()
        self.state.nested_parse(card_list, self.content_offset, card)
        return [card]


_CUSTOM_CARD_TEMPLATE = """
.. raw:: html

    <div class="col-md-12 tutorials-card-container" data-tags={tags}>
        <div class="card tutorials-card" link={link}>
            <div class="card-body">
                <div class="card-title-container">
                    <h4>{header}</h4>
                </div>
                <p class="card-summary">{description}</p>
                <p class="tags">{tags}</p>
                <div class="tutorials-image">{image}</div>
            </div>
        </div>
    </div>
"""


class CustomCalloutItemDirective(Directive):
    """A custom callout for use on table of contents-style pages that link
    into other pages.

    The callout contains a header, a body, a clickable button that links to
    the provided link, and an optional image.

    Example usage::

        .. customcalloutitem::
            :header: Custom header
            :description: Custom body
            :button_text: Custom button
            :button_link: other/page.html
            :image: ../_static/images/custom-image.jpg
    """

    option_spec = {
        "header": directives.unchanged,
        "description": directives.unchanged,
        "button_text": directives.unchanged,
        "button_link": directives.unchanged,
        "image": directives.unchanged,
    }

    def run(self):
        header = self.options.get("header", "")
        description = self.options.get("description", "")
        button_text = self.options.get("button_text", "")
        button_link = self.options.get("button_link", "")
        image = self.options.get("image", "")

        classes = "with-right-arrow" if button_link else ""
        attributes = (
            ""
            if button_link
            else 'onclick="return false;" style="pointer-events:none;cursor:default;"'
        )

        if image:
            callout_rst = _CUSTOM_CALLOUT_TEMPLATE.format(
                header=header,
                description=description,
                button_text=button_text,
                button_link=button_link,
                classes=classes,
                attributes=attributes,
                image='<img src="%s">' % image,
            )
        else:
            # No image
            template = _CUSTOM_CALLOUT_TEMPLATE.replace(
                "<div>{image}</div>", ""
            )
            callout_rst = template.format(
                header=header,
                description=description,
                button_text=button_text,
                button_link=button_link,
                classes=classes,
                attributes=attributes,
            )

        button_list = StringList(callout_rst.split("\n"))
        button = nodes.paragraph()
        self.state.nested_parse(button_list, self.content_offset, button)
        return [button]


_CUSTOM_CALLOUT_TEMPLATE = """
.. raw:: html

    <div class="col-md-6">
        <div class="text-container">
            <h3>{header}</h3>
            <p class="body-paragraph">{description}</p>
            <p><a class="btn {classes} callout-button" href="{button_link}"{attributes}>{button_text}</a></p>
            <div>{image}</div>
        </div>
    </div>
"""


class CustomButtonDirective(Directive):
    """A custom button for use on table of contents-style pages that link into
    other pages.

    The button is clickable and links to the provided link.

    Example usage::

        .. custombutton::
            :button_text: Custom button
            :button_link: other/page.html
    """

    option_spec = {
        "button_text": directives.unchanged,
        "button_link": directives.unchanged,
    }

    def run(self):
        button_text = self.options.get("button_text", "")
        button_link = self.options.get("button_link", "")

        classes = "with-right-arrow" if button_link else ""
        attributes = (
            ""
            if button_link
            else 'onclick="return false;" style="pointer-events:none;cursor:default;"'
        )

        callout_rst = _CUSTOM_BUTTON_TEMPLATE.format(
            button_text=button_text,
            button_link=button_link,
            classes=classes,
            attributes=attributes,
        )

        button_list = StringList(callout_rst.split("\n"))
        button = nodes.paragraph()
        self.state.nested_parse(button_list, self.content_offset, button)
        return [button]


_CUSTOM_BUTTON_TEMPLATE = """
.. raw:: html

    <div class="tutorials-callout-container">
        <a class="btn {classes} callout-button" href="{button_link}"{attributes}>{button_text}</a>
    </div>
"""


class CustomImageLinkDirective(Directive):
    """A custom image within a link nested in a div. Styling can be done via a
    parent container.

    Example usage::

        .. customimagelink::
            :image_link: other/page.html
            :image_src: images/image.png
            :image_title: My image
    """

    option_spec = {
        "image_link": directives.unchanged,
        "image_src": directives.unchanged,
        "image_title": directives.unchanged,
    }

    def run(self):
        image_link = self.options.get("image_link", "")
        image_src = self.options.get("image_src", "")
        image_title = self.options.get("image_title", "")

        callout_rst = _CUSTOM_IMAGE_LINK_TEMPLATE.format(
            image_link=image_link,
            image_src=image_src,
            image_title=image_title,
        )

        image_list = StringList(callout_rst.split("\n"))
        image = nodes.paragraph()
        self.state.nested_parse(image_list, self.content_offset, image)
        return [image]


_CUSTOM_IMAGE_LINK_TEMPLATE = """
.. raw:: html

    <div>
        <a href="{image_link}" title="{image_title}">
            <img src="{image_src}" alt="{image_title}"/>
        </a>
    </div>
"""
/djangocms-twitter2-0.1.1.tar.gz/djangocms-twitter2-0.1.1/djangocms_twitter/models.py
from __future__ import unicode_literals import logging import tweepy from django.core.cache import cache from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from django.utils.encoding import python_2_unicode_compatible, force_text from django.utils.translation import ugettext_lazy as _ from cms.models import CMSPlugin from connected_accounts.fields import AccountField from tweepy.error import TweepError from tweepy.parsers import JSONParser from .conf import settings logger = logging.getLogger('djangocms_twitter') @python_2_unicode_compatible class Twitter(CMSPlugin): USER_TIMELINE = 'user' USER_FAVORITES = 'favorites' SEARCH_QUERY = 'search' TIMELINE_CHOICES = ( (USER_TIMELINE, _('User Timeline')), (USER_FAVORITES, _('Favorites')), (SEARCH_QUERY, _('Search Query')), ) account = AccountField( 'twitter', verbose_name=_('Connected Account'), help_text=_('Select a connected Twitter account or connect to a new account.')) screen_name = models.CharField( _('Twitter Username'), max_length=100, blank=True, null=True, help_text=_('You may create an embedded timeline for any public ' 'Twitter user. By default, the "Connected Account" tweets are fetched.')) search_query = models.CharField( _('Search Query'), max_length=255, blank=True, help_text=_('You may create a search timeline for any query or #hashtag..')) no_of_items = models.IntegerField( _('Items to Display'), default=20, validators=[MaxValueValidator(20), MinValueValidator(1)], help_text=_('Select the number of items this block ' 'should display (max 20)')) timeline_source = models.CharField( _('Available Timelines'), max_length=50, choices=TIMELINE_CHOICES, default=USER_TIMELINE, help_text=_('You can embed a timeline for Tweets from an individual user, ' 'a user\'s favorites or any search query or hashtag.')) show_avatar = models.BooleanField( _('Show Avatar?'), default=True, help_text=_('Shows or hides the avatar image.')) show_username = models.BooleanField( _('Show Username?'), default=True, help_text=_('Shows or hides the username text.')) follow_button = models.BooleanField( _('Show Follow Button?'), default=True, help_text=_('Append a follow button to the listing.')) plugin_template = models.CharField( _('Design'), max_length=150, choices=settings.DJANGOCMS_TWITTER_TEMPLATES, default=settings.DJANGOCMS_TWITTER_DEFAULT_TEMPLATE, ) def __str__(self): profile_data = self.get_profile() name = profile_data.get('name', 'No name') screen_name = profile_data.get('screen_name', 'yourhandle') if self.timeline_source == self.USER_FAVORITES: return _('Favorite Tweets by {0} @{1}').format(name, screen_name) elif self.timeline_source == self.SEARCH_QUERY: return _('Tweets about "{0}"').format(self.search_query) return _('Tweets by {0} @{1}').format(name, screen_name) def get_api(self): if not hasattr(self, '_api'): oauth = tweepy.OAuthHandler( settings.CONNECTED_ACCOUNTS_TWITTER_CONSUMER_KEY, settings.CONNECTED_ACCOUNTS_TWITTER_CONSUMER_SECRET) oauth.set_access_token( self.account.get_token(), self.account.get_token_secret()) self._api = tweepy.API(oauth, parser=JSONParser()) return self._api def save(self, *args, **kwargs): super(Twitter, self).save(*args, **kwargs) cache_keys = ( self.get_cache_key(prefix='profile'), self.get_cache_key(prefix='tweets'), ) cache.delete_many(cache_keys) def get_cache_key(self, prefix=''): return 'djangocms-twitter-{0}-{1}'.format(prefix, str(self.id)) def get_tweets(self): cache_key = self.get_cache_key(prefix='tweets') tweets = cache.get(cache_key) if not tweets: 
api = self.get_api() screen_name = self.screen_name.lstrip('@').strip() or \ self.account.get_common_data().get('username') tweets = [] try: if self.timeline_source == self.USER_FAVORITES: tweets = api.favorites(screen_name, count=self.no_of_items) elif self.timeline_source == self.SEARCH_QUERY: tweets = api.search(self.search_query, count=self.no_of_items).get('statuses') else: tweets = api.user_timeline(screen_name, count=self.no_of_items) except TweepError as e: msg = _('Failed to retrieve {0} - Reason: {1}').format(force_text(self), e) logger.error(msg) else: cache.set(cache_key, tweets, settings.DJANGOCMS_TWITTER_CACHE_DURATION) return tweets def get_profile(self): cache_key = self.get_cache_key(prefix='profile') profile_data = cache.get(cache_key) if not profile_data: api = self.get_api() screen_name = self.screen_name.lstrip('@').strip() or \ self.account.get_common_data().get('username') profile_data = { 'screen_name': screen_name } try: profile_data = api.get_user(screen_name) except TweepError as e: msg = _('Failed to retrieve information ' 'about @{0} - Reason: {1}').format(screen_name, e) logger.error(msg) else: profile_data = { 'name': profile_data.get('name'), 'screen_name': profile_data.get('screen_name'), 'is_verified': profile_data.get('verified'), 'profile_image_url': profile_data.get('profile_image_url', '').replace('_normal', ''), 'description': profile_data.get('description'), 'location': profile_data.get('location'), 'followers_count': profile_data.get('followers_count'), 'following_count': profile_data.get('friends_count'), 'statuses_count': profile_data.get('statuses_count'), 'favourites_count': profile_data.get('favourites_count'), } cache.set(cache_key, profile_data, settings.DJANGOCMS_TWITTER_CACHE_DURATION) return profile_data
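For context, a plugin class along the following lines would consume this model when the placeholder is rendered. This is an illustrative sketch only, not the actual `cms_plugins.py` shipped by djangocms-twitter; only `plugin_template`, `get_tweets()` and `get_profile()` come from the model above.

```python
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool

from .models import Twitter


@plugin_pool.register_plugin
class TwitterPlugin(CMSPluginBase):
    model = Twitter
    name = "Twitter"

    def get_render_template(self, context, instance, placeholder):
        # The editor-selected "Design" decides which template renders the feed.
        return instance.plugin_template

    def render(self, context, instance, placeholder):
        context = super().render(context, instance, placeholder)
        # Both helpers are cache-backed, so the Twitter API is only hit when
        # the cached entries expire (DJANGOCMS_TWITTER_CACHE_DURATION).
        context["tweets"] = instance.get_tweets()
        context["profile"] = instance.get_profile()
        return context
```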
/pydomo-0.3.0.9.tar.gz/pydomo-0.3.0.9/README.md
# Python3 - Domo API SDK (pydomo) [![License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://www.opensource.org/licenses/MIT) Current Release: 0.3.0 ### Notice - Python 3 Compatibility * PyDomo is written for Python3, and is not compatible with Python2 * Execute scripts via 'python3', and updates via 'pip3' ### About * The Domo API SDK is the simplest way to automate your Domo instance * The SDK streamlines the API programming experience, allowing you to significantly reduce your written code * This SDK was written for Python3, and is not compatible with Python2 * PyDomo has been published to [PyPI](https://pypi.org/project/pydomo/). The SDK can be easily installed via `pip3 install pydomo`, and can be updated via `pip3 install pydomo --upgrade` ### Features: - DataSet and Personalized Data Policy (PDP) Management - Use DataSets for fairly static data sources that only require occasional updates via data replacement - This SDK automates the use of Domo Streams so that uploads are always as fast as possible - Add Personalized Data Policies (PDPs) to DataSets (hide sensitive data from groups of users) - Docs: https://developer.domo.com/docs/domo-apis/data - User Management - Create, update, and remove users - Major use case: LDAP/Active Directory synchronization - Docs: https://developer.domo.com/docs/domo-apis/users - Group Management - Create, update, and remove groups of users - Docs: https://developer.domo.com/docs/domo-apis/group-apis - Page Management - Create, update, and delete pages - Docs: https://developer.domo.com/docs/page-api-reference/page ### Setup * Install Python3: https://www.python.org/downloads/ * Linux: 'apt-get install python3' * MacOS: 'brew install python3' * Windows: direct download, or use Bash on Windows 10 * Install PyDomo and its dependencies via `pip3 install pydomo` ### Updates * Update your PyDomo package via `pip3 install pydomo --upgrade` * View the [changelog](CHANGELOG.md) ### Usage Below are examples of how to use the SDK to perform a few common tasks. To run similar code on your system, do the following. * Create an API Client on the [Domo Developer Portal](https://developer.domo.com/) * Use your API Client id/secret to instantiate pydomo 'Domo()' * Multiple API Clients can be used by instantiating multiple 'Domo()' clients * Authentication with the Domo API is handled automatically by the SDK * If you encounter a 'Not Allowed' error, this is a permissions issue. Please speak with your Domo Administrator. ```python from pydomo import Domo domo = Domo('client-id','secret',api_host='api.domo.com') # Download a data set from Domo car_data = domo.ds_get('2f09a073-54a4-4269-8c62-b776e67d59f0') # Create a summary data set, taking the mean of dollars by make and model. car_summary = car_data.groupby(['make','model']).agg({'dollars':'mean'}).reset_index() # Create a new data set in Domo with the result, the return value is the data set id of the new data set. car_ds = domo.ds_create(car_summary,'Python | Car Summary Data Set','Python | Generated during demo') # Modify summary and then upload to the data set we already created. The SDK will update the data set schema automatically. 
car_summary2 = car_data.groupby(['make','model'],as_index=False).agg({'dollars':'mean','email':'count'}).reset_index() car_update = domo.ds_update(car_ds,car_summary2) # Create PDP Policy from pydomo.datasets import Policy, PolicyFilter, FilterOperator, PolicyType, Sorting # Create policy filters pdp_filter = PolicyFilter() pdp_filter.column = 'make' # The DataSet column to filter on pdp_filter.operator = FilterOperator.EQUALS pdp_filter.values = ['Honda'] # The DataSet row value to filter on pdp_request = Policy() pdp_request.name = 'Python | US East' pdp_request.filters = [pdp_filter] pdp_request.type = PolicyType.USER pdp_request.users = [] pdp_request.groups = [1631291223] domo.pdp_create(car_ds,pdp_request) # Interact with groups all_groups = domo.groups_list() # List all groups all_users = domo.users_list() # List all users # List all users in US South Division domo.groups_list_users(328554991) added_users = domo.groups_add_users(328554991,2063934980) domo.groups_list_users(328554991) ``` ### Available Functions The functions in this package match most parts of the API documented at [developer.domo.com](https://developer.domo.com/) and follow a specific convention. Each set of functions is preceeded by the portion of the API it operates on. The following lists all the sets of functions available in this package. For further help, refer to the help function in Python. * **Data sets** - This set of functions is designed to transfer data in and out of Domo. * **ds_get** - downloads data from Domo * **ds_create** - creates a new data set * **ds_update** - updates an existing data set, only data sets created by the API can be updated * **ds_meta** - downloads meta data regarding a single data set * **ds_list** - downloads a list of data sets in your Domo instance * **ds_delete** - deletes a data set (be careful) * **ds_query** - allows you to send a query to a data set, Domo will evaluate that query and sends the results back as a list or a tibble * **Groups** - This set of functions modifies and creates groups. * **groups_add_users** - adds users to an existing group * **groups_create** - create a group * **groups_delete** - delete an existing group * **groups_list** - list all groups * **groups_remove_users** - remove users from a group * **groups_list_users** - list users in a group * **Pages** - functions related to managing Domo pages * **page_update** - update a page * **page_list** - list all pages * **page_get_collections** - list all collections on a page * **page_get** - get information regarding a page * **page_create** - create a page * **PDP** - functions to manage PDP * **pdp_update** - update an existing PDP policy * **pdp_list** - list all PDP policies * **pdp_enable** - toggle PDP on and off * **pdp_delete** - delete a PDP policy * **pdp_create** - create a PDP policy * **Users** - functions to manage users * **users_delete** - delete a user * **users_update** - update a user * **users_list** - list all users * **users_get** - get a single user record * **users_add** - create a user (or users)
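`ds_query` is listed above but not shown in the usage example. A minimal sketch of how it might be called against the data set created earlier; the exact signature and return type can vary between pydomo versions, so treat this as illustrative rather than authoritative. Domo's DataSet query syntax addresses the queried data set as `table`.

```python
# Query the car data set created above without downloading it in full.
honda_avg = domo.ds_query(
    car_ds,
    "SELECT make, model, AVG(dollars) AS avg_dollars FROM table "
    "WHERE make = 'Honda' GROUP BY make, model"
)
print(honda_avg)
```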
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/AlipayCommerceEducateNceeApplySyncModel.py
import json from alipay.aop.api.constant.ParamConstants import * class AlipayCommerceEducateNceeApplySyncModel(object): def __init__(self): self._batch = None self._course = None self._interested_major_num = None self._interested_school_num = None self._one_key_support = None self._province_code = None self._purpose_form_num = None self._rank = None self._report_num = None self._score = None self._selected_num = None self._subject = None self._total_num = None self._type = None self._user_id = None self._year = None @property def batch(self): return self._batch @batch.setter def batch(self, value): self._batch = value @property def course(self): return self._course @course.setter def course(self, value): self._course = value @property def interested_major_num(self): return self._interested_major_num @interested_major_num.setter def interested_major_num(self, value): self._interested_major_num = value @property def interested_school_num(self): return self._interested_school_num @interested_school_num.setter def interested_school_num(self, value): self._interested_school_num = value @property def one_key_support(self): return self._one_key_support @one_key_support.setter def one_key_support(self, value): self._one_key_support = value @property def province_code(self): return self._province_code @province_code.setter def province_code(self, value): self._province_code = value @property def purpose_form_num(self): return self._purpose_form_num @purpose_form_num.setter def purpose_form_num(self, value): self._purpose_form_num = value @property def rank(self): return self._rank @rank.setter def rank(self, value): self._rank = value @property def report_num(self): return self._report_num @report_num.setter def report_num(self, value): self._report_num = value @property def score(self): return self._score @score.setter def score(self, value): self._score = value @property def selected_num(self): return self._selected_num @selected_num.setter def selected_num(self, value): self._selected_num = value @property def subject(self): return self._subject @subject.setter def subject(self, value): self._subject = value @property def total_num(self): return self._total_num @total_num.setter def total_num(self, value): self._total_num = value @property def type(self): return self._type @type.setter def type(self, value): self._type = value @property def user_id(self): return self._user_id @user_id.setter def user_id(self, value): self._user_id = value @property def year(self): return self._year @year.setter def year(self, value): self._year = value def to_alipay_dict(self): params = dict() if self.batch: if hasattr(self.batch, 'to_alipay_dict'): params['batch'] = self.batch.to_alipay_dict() else: params['batch'] = self.batch if self.course: if hasattr(self.course, 'to_alipay_dict'): params['course'] = self.course.to_alipay_dict() else: params['course'] = self.course if self.interested_major_num: if hasattr(self.interested_major_num, 'to_alipay_dict'): params['interested_major_num'] = self.interested_major_num.to_alipay_dict() else: params['interested_major_num'] = self.interested_major_num if self.interested_school_num: if hasattr(self.interested_school_num, 'to_alipay_dict'): params['interested_school_num'] = self.interested_school_num.to_alipay_dict() else: params['interested_school_num'] = self.interested_school_num if self.one_key_support: if hasattr(self.one_key_support, 'to_alipay_dict'): params['one_key_support'] = self.one_key_support.to_alipay_dict() else: params['one_key_support'] = 
self.one_key_support if self.province_code: if hasattr(self.province_code, 'to_alipay_dict'): params['province_code'] = self.province_code.to_alipay_dict() else: params['province_code'] = self.province_code if self.purpose_form_num: if hasattr(self.purpose_form_num, 'to_alipay_dict'): params['purpose_form_num'] = self.purpose_form_num.to_alipay_dict() else: params['purpose_form_num'] = self.purpose_form_num if self.rank: if hasattr(self.rank, 'to_alipay_dict'): params['rank'] = self.rank.to_alipay_dict() else: params['rank'] = self.rank if self.report_num: if hasattr(self.report_num, 'to_alipay_dict'): params['report_num'] = self.report_num.to_alipay_dict() else: params['report_num'] = self.report_num if self.score: if hasattr(self.score, 'to_alipay_dict'): params['score'] = self.score.to_alipay_dict() else: params['score'] = self.score if self.selected_num: if hasattr(self.selected_num, 'to_alipay_dict'): params['selected_num'] = self.selected_num.to_alipay_dict() else: params['selected_num'] = self.selected_num if self.subject: if hasattr(self.subject, 'to_alipay_dict'): params['subject'] = self.subject.to_alipay_dict() else: params['subject'] = self.subject if self.total_num: if hasattr(self.total_num, 'to_alipay_dict'): params['total_num'] = self.total_num.to_alipay_dict() else: params['total_num'] = self.total_num if self.type: if hasattr(self.type, 'to_alipay_dict'): params['type'] = self.type.to_alipay_dict() else: params['type'] = self.type if self.user_id: if hasattr(self.user_id, 'to_alipay_dict'): params['user_id'] = self.user_id.to_alipay_dict() else: params['user_id'] = self.user_id if self.year: if hasattr(self.year, 'to_alipay_dict'): params['year'] = self.year.to_alipay_dict() else: params['year'] = self.year return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayCommerceEducateNceeApplySyncModel() if 'batch' in d: o.batch = d['batch'] if 'course' in d: o.course = d['course'] if 'interested_major_num' in d: o.interested_major_num = d['interested_major_num'] if 'interested_school_num' in d: o.interested_school_num = d['interested_school_num'] if 'one_key_support' in d: o.one_key_support = d['one_key_support'] if 'province_code' in d: o.province_code = d['province_code'] if 'purpose_form_num' in d: o.purpose_form_num = d['purpose_form_num'] if 'rank' in d: o.rank = d['rank'] if 'report_num' in d: o.report_num = d['report_num'] if 'score' in d: o.score = d['score'] if 'selected_num' in d: o.selected_num = d['selected_num'] if 'subject' in d: o.subject = d['subject'] if 'total_num' in d: o.total_num = d['total_num'] if 'type' in d: o.type = d['type'] if 'user_id' in d: o.user_id = d['user_id'] if 'year' in d: o.year = d['year'] return o
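A minimal round-trip sketch based only on the accessors defined above; the field values are placeholders for illustration, not real data.

```python
import json

model = AlipayCommerceEducateNceeApplySyncModel()
model.user_id = "2088000000000000"   # placeholder values
model.province_code = "110000"
model.year = 2023
model.score = 612
model.rank = 15000

params = model.to_alipay_dict()               # plain dict, ready for an SDK request
print(json.dumps(params, ensure_ascii=False))

# Round-trip back into a model instance
restored = AlipayCommerceEducateNceeApplySyncModel.from_alipay_dict(params)
assert restored.score == 612
```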
/parade-0.2.5.1.tar.gz/parade-0.2.5.1/docs/source/modules/parade.api.rst
parade.api package
==================

Submodules
----------

parade.api.data module
----------------------

.. automodule:: parade.api.data
    :members:
    :undoc-members:
    :show-inheritance:

parade.api.exec module
----------------------

.. automodule:: parade.api.exec
    :members:
    :undoc-members:
    :show-inheritance:

parade.api.flow module
----------------------

.. automodule:: parade.api.flow
    :members:
    :undoc-members:
    :show-inheritance:

parade.api.task module
----------------------

.. automodule:: parade.api.task
    :members:
    :undoc-members:
    :show-inheritance:

Module contents
---------------

.. automodule:: parade.api
    :members:
    :undoc-members:
    :show-inheritance:
/infoblox-netmri-3.8.0.0.tar.gz/infoblox-netmri-3.8.0.0/infoblox_netmri/api/broker/v2_4_0/adv_setting_def_broker.py
from ..broker import Broker class AdvSettingDefBroker(Broker): controller = "adv_setting_defs" def show(self, **kwargs): """Shows the details for the specified adv setting def. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param id: The internal NetMRI identifier for this setting definition. :type id: Integer **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return adv_setting_def: The adv setting def identified by the specified id. :rtype adv_setting_def: AdvSettingDef """ return self.api_request(self._get_method_fullname("show"), kwargs) def index(self, **kwargs): """Lists the available adv setting defs. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient. **Inputs** | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param id: The internal NetMRI identifier for this setting definition. :type id: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param id: The internal NetMRI identifier for this setting definition. :type id: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param name: The name of this setting. :type name: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param name: The name of this setting. :type name: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` id :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each AdvSettingDef. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. 
:type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return adv_setting_defs: An array of the AdvSettingDef objects that match the specified input criteria. :rtype adv_setting_defs: Array of AdvSettingDef """ return self.api_list_request(self._get_method_fullname("index"), kwargs) def search(self, **kwargs): """Lists the available adv setting defs matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below. **Inputs** | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param allow_empty: A flag indicating if this setting can be empty. :type allow_empty: Boolean | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param allow_empty: A flag indicating if this setting can be empty. :type allow_empty: Array of Boolean | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param category: The category of this setting. :type category: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param category: The category of this setting. :type category: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param default_value: Default value for this setting. :type default_value: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param default_value: Default value for this setting. :type default_value: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param description: Description for this setting. :type description: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param description: Description for this setting. :type description: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param display_hints: Hints to display for this setting. :type display_hints: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param display_hints: Hints to display for this setting. :type display_hints: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param feature: The feature this setting definition is related to. :type feature: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param feature: The feature this setting definition is related to. :type feature: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param id: The internal NetMRI identifier for this setting definition. 
:type id: Integer | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param id: The internal NetMRI identifier for this setting definition. :type id: Array of Integer | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param name: The name of this setting. :type name: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param name: The name of this setting. :type name: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param setting_type: The type of this setting. :type setting_type: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param setting_type: The type of this setting. :type setting_type: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param ui_name: The UI name of this setting. :type ui_name: String | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param ui_name: The UI name of this setting. :type ui_name: Array of String | ``api version min:`` 2.4 | ``api version max:`` 2.4 | ``required:`` False | ``default:`` None :param visible: A flag indicating if this setting visible. :type visible: Boolean | ``api version min:`` 2.5 | ``api version max:`` None | ``required:`` False | ``default:`` None :param visible: A flag indicating if this setting visible. :type visible: Array of Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` id :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each AdvSettingDef. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. 
:type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param query: This value will be matched against adv setting defs, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: allow_empty, category, default_value, description, display_hints, feature, id, name, setting_type, ui_name, visible. :type query: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return adv_setting_defs: An array of the AdvSettingDef objects that match the specified input criteria. :rtype adv_setting_defs: Array of AdvSettingDef """ return self.api_list_request(self._get_method_fullname("search"), kwargs) def find(self, **kwargs): """Lists the available adv setting defs matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: allow_empty, category, default_value, description, display_hints, feature, id, name, setting_type, ui_name, visible. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_allow_empty: The operator to apply to the field allow_empty. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. allow_empty: A flag indicating if this setting can be empty. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_allow_empty: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_allow_empty: If op_allow_empty is specified, the field named in this input will be compared to the value in allow_empty using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_allow_empty must be specified if op_allow_empty is specified. :type val_f_allow_empty: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_allow_empty: If op_allow_empty is specified, this value will be compared to the value in allow_empty using the specified operator. The value in this input will be treated as an explicit constant value. 
Either this field or val_f_allow_empty must be specified if op_allow_empty is specified. :type val_c_allow_empty: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_category: The operator to apply to the field category. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. category: The category of this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_category: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_category: If op_category is specified, the field named in this input will be compared to the value in category using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_category must be specified if op_category is specified. :type val_f_category: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_category: If op_category is specified, this value will be compared to the value in category using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_category must be specified if op_category is specified. :type val_c_category: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_default_value: The operator to apply to the field default_value. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. default_value: Default value for this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_default_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_default_value: If op_default_value is specified, the field named in this input will be compared to the value in default_value using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_default_value must be specified if op_default_value is specified. :type val_f_default_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_default_value: If op_default_value is specified, this value will be compared to the value in default_value using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_default_value must be specified if op_default_value is specified. :type val_c_default_value: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: Description for this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_description: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified. :type val_f_description: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified. :type val_c_description: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_display_hints: The operator to apply to the field display_hints. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. display_hints: Hints to display for this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_display_hints: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_display_hints: If op_display_hints is specified, the field named in this input will be compared to the value in display_hints using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_display_hints must be specified if op_display_hints is specified. :type val_f_display_hints: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_display_hints: If op_display_hints is specified, this value will be compared to the value in display_hints using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_display_hints must be specified if op_display_hints is specified. :type val_c_display_hints: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_feature: The operator to apply to the field feature. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. feature: The feature this setting definition is related to. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_feature: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_feature: If op_feature is specified, the field named in this input will be compared to the value in feature using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_feature must be specified if op_feature is specified. 
:type val_f_feature: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_feature: If op_feature is specified, this value will be compared to the value in feature using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_feature must be specified if op_feature is specified. :type val_c_feature: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this setting definition. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified. :type val_f_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified. :type val_c_id: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_name: The operator to apply to the field name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name: The name of this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_name: If op_name is specified, the field named in this input will be compared to the value in name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name must be specified if op_name is specified. :type val_f_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_name: If op_name is specified, this value will be compared to the value in name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name must be specified if op_name is specified. :type val_c_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_setting_type: The operator to apply to the field setting_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. setting_type: The type of this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. 
:type op_setting_type: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_setting_type: If op_setting_type is specified, the field named in this input will be compared to the value in setting_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_setting_type must be specified if op_setting_type is specified. :type val_f_setting_type: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_setting_type: If op_setting_type is specified, this value will be compared to the value in setting_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_setting_type must be specified if op_setting_type is specified. :type val_c_setting_type: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_ui_name: The operator to apply to the field ui_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ui_name: The UI name of this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_ui_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_ui_name: If op_ui_name is specified, the field named in this input will be compared to the value in ui_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ui_name must be specified if op_ui_name is specified. :type val_f_ui_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_ui_name: If op_ui_name is specified, this value will be compared to the value in ui_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ui_name must be specified if op_ui_name is specified. :type val_c_ui_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param op_visible: The operator to apply to the field visible. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. visible: A flag indicating if this setting visible. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values. :type op_visible: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_f_visible: If op_visible is specified, the field named in this input will be compared to the value in visible using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_visible must be specified if op_visible is specified. :type val_f_visible: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param val_c_visible: If op_visible is specified, this value will be compared to the value in visible using the specified operator. The value in this input will be treated as an explicit constant value. 
Either this field or val_f_visible must be specified if op_visible is specified. :type val_c_visible: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 0 :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information. :type start: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` 1000 :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000. :type limit: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` id :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name. :type sort: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` asc :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'. :type dir: Array of String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param select: The list of attributes to return for each AdvSettingDef. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name. If empty or omitted, all attributes will be returned. :type select: Array | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records. :type goto_field: String | ``api version min:`` 2.8 | ``api version max:`` None | ``required:`` False | ``default:`` None :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records. :type goto_value: String | ``api version min:`` 2.3 | ``api version max:`` None | ``required:`` False | ``default:`` None :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering. :type xml_filter: String **Outputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :return adv_setting_defs: An array of the AdvSettingDef objects that match the specified input criteria. :rtype adv_setting_defs: Array of AdvSettingDef """ return self.api_list_request(self._get_method_fullname("find"), kwargs)
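A usage sketch for the three query styles documented above. How the broker instance is obtained (the `InfobloxNetMRI` client and its `get_broker` helper) is an assumption about the surrounding SDK rather than something defined in this module.

```python
from infoblox_netmri.client import InfobloxNetMRI

client = InfobloxNetMRI(host="netmri.example.com",
                        username="admin", password="secret")
broker = client.get_broker("AdvSettingDef")        # assumed helper

# index: equality filtering on the indexed fields (id, name) only.
by_name = broker.index(name=["ScanInterval"], limit=10)

# search: adds the free-text `query` input matched across all listed attributes.
hits = broker.search(query="timeout", sort=["name"], dir=["asc"])

# find: per-field operators via op_*/val_c_* for comparisons other than equality.
hidden = broker.find(op_visible="=", val_c_visible="0", limit=100)

for setting in hits:
    print(setting)
```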
/bikesanity-1.1.6.tar.gz/bikesanity-1.1.6/README.md
# BikeSanity Cycle Journal Exporter for CrazyGuyOnABike ### What is this? In this repository is the complete code needed to download, extract, interpret, and export - in an attractive, completely new and _mobile friendly_ HTML format - bicycle touring journals available in the popular [crazyguyonabike.com](https://www.crazyguyonabike.com) (CGOAB) website. You can also convert your journals to PDF format (**new!**) or to a JSON data structure for transfer to other cycle touring platforms. You can use it download and create backups of your journals, to interpret them into a form where the content is easier to extract, and to re-format them in various formats readable offline. All the authored content in journals will be retrieved, including all pages, text, images and maps. "Social" content such as guestbooks will not be retrieved. For technical users, it also populates an object model of all cycle journals. You can write your own Python programs, and linking to this library get hold of `Journal` objects containing sensibly structured content for you to work on as you wish. ### Can I see a sample of an extracted journal? Sure! The `samples/html` folder contains a sample of the converted journal HTML format, generated using placeholder data. This shows off the responsive layout, cleaner design, and fully working map embedding. You can browse that sample live at [https://www.bikejournalbackup.com/journals/sample/index.html](https://www.bikejournalbackup.com/journals/sample/index.html). All images used in the sample are in the public domain and freely licensed from [pexels](https://www.pexels.com/). ### How can I use this tool? This is a command line tool, and source code written in Python 3.6, to perform journal extraction. If you are comfortable using command line tools and have Python on your system, you can install this module and use it directly. Otherwise you may be better off using the user-friendly graphical tool [**available here**](https://github.com/JohnHenrySplitMyHeart/bikesanity-ui). That is built on top of this library and so can perform the same actions, but will be easier for non-technical users. ### Who can use it? Anyone. All code is free and open source, and provided as a service to the cycle touring community. The code is licensed under the permissive Apache 2.0 license. Please feel free to fork this code, or use it for your own development. ### Installing this module This is standard Python package, and you can install it from PyPi or directly using `pip` pip install bikesanity or, after cloning/downloading the repository, in the base directory pip install . The module is implemented in pure Python, with a few dependencies which will be automatically installed. After installation, the `bikesanity-run` script will be made available on your path. Run bikesanity-run --help to see options. ### Quick Start The three simple commands will download, interpret, and re-output a journal in new HTML bikesanity-run download <http://www.crazyguyonabike.com/doc/JournalNameHere> bikesanity-run process <12345> bikesanity-run publish <12345> --html Replace `<>` values with a link to the journal you want to download, and its ID (which will be shown to you upon download). The your re-formatted journal will be available in `CycleSanityJournals/processed/12345/html/index.html`! ### Retrieving and interpreting journal content using the script The `bikesanity-run` script can perform four different operations: 1. Download of a complete journal, as is, from crazyguyonabike.com 2. 
Interpretation ("processing") of a _downloaded_ journal to extract all the content into an understandable internal (object) model. 3. Interpretation of an _exported_ journal (i.e. one formally exported using the tool provided by CGOAB) to extract all the content 4. Publishing of a processed journal to create new, clean HTML content including all images, maps, and structure locally browseable. You can also publish to create a unified PDF of your journal, or a simplified JSON data structure for transfer to other platforms. By default all journals will be downloaded and processed into a folder created in your home directory (`/home/<usr>/CycleSanityJournals` on mac/linux, `c:\Users\<usr\CycleSanityJournals` on Windows). You can change this path using the options below. #### Downloading journals Download a journal by using the `download` argument and providing a URL to the journal front page. This can be a permalink like `http://www.crazyguyonabike.com/doc/JournalNameHere`. bikesanity-run download http://www.crazyguyonabike.com/doc/JournalNameHere Because of the slow rate of retrieval from CGOAB, this can take a few minutes or longer for very large journals. The default download path will be `CycleSanityJournals/downloads/<journal_id>`. You can change it with the options below: - `--location` changes the location to download to. - `--from-page` picks up the download from a provided page number, e.g. `--from-page 8`. This is useful for picking up downloads that failed, e.g. in the middle. - `--do-process` also performs an interpreting processing run once the download has completed (see below) - `--no-local-readability-postprocessing` turns off post-processing to make journals locally readable and navigable (advanced) #### Processing journals Downloaded or exported journals can be interpreted using the `process` or `process-exported` arguments respectively. You should provide the _journal id_, which will be the number attributed to the downloaded folder, e.g. 12345. bikesanity-run process 12345 By default, the processor expects downloaded journals to be in the `CycleSanityJournals/downloads/<journal_id>` directory, and exported journals to be in `CycleSanityJournals/exported/<journal_id>`. It will output processed journals on the path `CycleSanityJournals/processed/<journal_id>`. Options: - `--input_location` changes the location to take downloaded/exported input from - `--out_location` changes the location to send processed output to Following processing, a complete object model of the journal will be created and saved in as a serialized Python pickle as `journal.pickle` - for technical users, you may wish to load and inspect this. All resources (images and maps) will be copied to the new `processed/resources` location. #### Locally publishing to HTML, PDF and other formats Currently it possible to publish processed journals to HTML, PDF documents, or a JSON data structure. Publish any processed journal using the `publish` argument and providing the same _journal id_: bikesanity-run publish <12345> --html --pdf --json Use any combination of the optional flags to generate HTML, PDF or JSON output: - `--html` will produce attractive and clean HTML that can be fully-functionally browsed on the local machine, including dynamic maps. - `--pdf` will generate a collected PDF document of the journal, including all images. 
#### Locally publishing to HTML, PDF and other formats

Currently it is possible to publish processed journals to HTML, PDF documents, or a JSON data structure. Publish any processed journal using the `publish` argument and providing the same _journal id_:

    bikesanity-run publish <12345> --html --pdf --json

Use any combination of the optional flags to generate HTML, PDF or JSON output:

- `--html` will produce attractive and clean HTML that can be browsed with full functionality on the local machine, including dynamic maps.
- `--pdf` will generate a collected PDF document of the journal, including all images. Large journals may be split into several PDF parts
- `--json` will generate a simple JSON data structure that may be used to migrate the journal to other platforms

By default, HTML is output inside the processed directory, at `CycleSanityJournals/processed/<journal_id>/html` - open `index.html` to browse the journal index.

Options:

- `--input_location` changes the location to take processed input from
- `--out_location` changes the location to send HTML output to. Note that links to resources may break if you change this.

HTML is generated by populating templates. These are available in `resources/templates` if you would like to adjust the exact appearance of the output.

Publication to EPUB is work in progress - watch this space!

### Why is this necessary?

CGOAB has for twenty years been a fantastic resource. What has made it so good has been the contributions of the wonderful bike touring community. It contains a wealth of material, primarily in the form of thousands of journals describing bicycle tours.

However, in recent years the site has seen a significant fall in user activity. Since 2016, the rate of journal submission has roughly halved. A sometimes toxic environment on the site forum has driven away many users.

The site is run and maintained by one person. It has become clear that the work involved in running the site has, understandably, become a significant burden, financial and otherwise, on the administrator. Nobody except the owner can administer the site, or knows how its bespoke and now very old internals work. The site is now extremely dated in appearance and operation.

Because of the factors above, there is a significant risk that CGOAB is vulnerable to eventual outage. This means there is a serious possibility that years of work and thousands of touring journals may be lost forever. BikeSanity allows these journals to be retrieved for posterity and never lost.

### How do we know it works?

The interpreter has been successfully tested against more than 10,000 journals.

### What difficulties are there in retrieving journal content?

To the great credit of CGOAB's administrator, two factors make retrieving journal content easier than it could be:

1. All content is owned purely by its authors. The administrator does not own any of the journal content (that he has not authored).
2. CGOAB provides an "export" function that allows journals to be downloaded as-is from the site.

**However**, even after export, the content remains in CGOAB's peculiar HTML format. This is highly non-standard, often faulty (see technical details below), and very laborious to extract content from manually. The administrator of CGOAB is extremely resistant to any attempts to liberate journal content from this old and unworkable format. To make it reliably readable into the future, a robust parser is needed. This library attempts to provide that facility.

### What technical deficiencies are there in CGOAB journal format?

Unfortunately, it's clear that there are serious technical deficiencies in the CGOAB site and platform which are actively causing problems with maintaining and modernising the site, and are a risk factor for its long-term stability.

The front-end code has serious problems:

- No CSS is used across the site *whatsoever*. All style is baked directly into the HTML, an extremely bad practice. This is one reason why the antiquated visual style of the site has never changed, and would be very difficult to change.
- JavaScript is baked into the HTML across the site, and is repeated in every single page.
- All the HTML uses nonstandard markup and is a terrible mess. Tag case is mixed indiscriminately, e.g. `<B>` vs `<b>`. HTML tags are not closed properly, and are sometimes nested recursively, like the `DD` tags.

These are very basic errors that directly increase the difficulty for the administrator of maintaining and updating the site. A case in point is responsive layout for mobile devices, which has been requested for many years but is still not supported. Google terminating their free mapping API caused a major panic and took months to resolve.

These problems raise serious concerns about the robustness of the server code. At the very least, it is likely to be impossible to work with for anyone other than its creator, who has developed the entire site in isolation from modern coding practices.

### Do you have any relationship with any other bicycle touring resources?

No, none whatsoever. This tool has been provided completely independently and _pro bono_ for the benefit of the cycle touring community.

If you plan to make copies or republish material retrieved by this tool, please ensure that you are the author, or have the author's permission. Absolutely no property of CGOAB is included in this library whatsoever. All code is original, other than third-party libraries which have appropriate open-source licenses attached.
PypiClean
/certego_saas-0.7.0.tar.gz/certego_saas-0.7.0/certego_saas/ext/mixins.py
import csv
from typing import Dict

from django.http import HttpResponse
from rest_framework.generics import GenericAPIView

from .serializers import RecaptchaV2Serializer

__all__ = [
    "ExportCsvAdminMixin",
    "SerializerActionMixin",
    "RecaptchaV2Mixin",
]


class ExportCsvAdminMixin:
    """
    Mixin class that can be used with django's ``ModelAdmin``.
    """

    def export_as_csv(self, request, queryset):
        meta = self.model._meta
        field_names = [field.name for field in meta.fields]

        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = f"attachment; filename={meta}.csv"
        writer = csv.writer(response)

        writer.writerow(field_names)
        for obj in queryset:
            writer.writerow([getattr(obj, field) for field in field_names])

        return response

    export_as_csv.short_description = "Export Selected"  # type: ignore


class SerializerActionMixin(GenericAPIView):
    """
    Mixin that allows defining different serializer class
    for different view actions.

    Define mapping inside the class attribute
    ``serializer_action_classes`` (type: ``dict``).
    """

    serializer_action_classes: Dict = {}

    def get_serializer_class(self, *args, **kwargs):
        """
        Instantiate the list of serializers per action from class attribute
        (must be defined).
        """
        kwargs["partial"] = True
        try:
            return self.serializer_action_classes[self.action]
        except (KeyError, AttributeError):
            return super(SerializerActionMixin, self).get_serializer_class()


class RecaptchaV2Mixin(GenericAPIView):
    """
    Mixin that hooks the ``RecaptchaV2Serializer`` into ``get_serializer()``.

    The hook is applied only if request method is ``POST``.
    """

    def get_serializer(self, *args, **kwargs):
        if self.request.method == "POST":
            # first validate against the recaptcha serializer
            recaptcha_serializer = RecaptchaV2Serializer(
                data=self.request.data, context=self.get_serializer_context()
            )
            recaptcha_serializer.is_valid(raise_exception=True)
        return super().get_serializer(*args, **kwargs)
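A minimal usage sketch for these mixins follows. The viewset, serializer names and action mapping are illustrative assumptions, not part of certego_saas itself; only the mixin behaviour (per-action serializer selection, reCAPTCHA validation on POST) comes from the module above.

# Hypothetical DRF viewset combining both mixins; queryset wiring and the
# actual action implementations are omitted for brevity.
from rest_framework import serializers, viewsets

from certego_saas.ext.mixins import RecaptchaV2Mixin, SerializerActionMixin


class ItemListSerializer(serializers.Serializer):      # assumed example serializer
    name = serializers.CharField()


class ItemDetailSerializer(ItemListSerializer):        # assumed example serializer
    description = serializers.CharField()


class ItemViewSet(SerializerActionMixin, RecaptchaV2Mixin, viewsets.GenericViewSet):
    # Fallback serializer when the current action is not in the mapping below.
    serializer_class = ItemListSerializer

    # Consumed by SerializerActionMixin.get_serializer_class(): the serializer
    # is chosen based on the current view action ("list", "retrieve", ...).
    serializer_action_classes = {
        "list": ItemListSerializer,
        "retrieve": ItemDetailSerializer,
    }

    # On POST requests, RecaptchaV2Mixin.get_serializer() first validates the
    # request payload against RecaptchaV2Serializer (so the payload must carry
    # a reCAPTCHA token) before returning the normal serializer.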
PypiClean
/geodatahub-2020.3.post1.tar.gz/geodatahub-2020.3.post1/cli.py
import sys import argparse import logging as lg import json from geodatahub.auth import GeodatahubAuth from geodatahub.connection import Connection as GeodatahubConn # Setup logging module lg.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=lg.DEBUG) LOG = lg.getLogger("geodatahub") LOG.setLevel(lg.INFO) url = "https://api-dev.geodatahub.dk" def login(args): try: auth = GeodatahubAuth() if args.use_refresh_token: args.print_all_tokens = True access, refresh, identity = auth.refresh_auth() else: print("Please login at www.geodatahub.dk/login and paste the one-time code below,") code = input("One-time code: ") access, refresh, identity = auth.login(code) auth.store_token(refresh) except Exception: LOG.info("The login was not successful.") else: if args.print_all_tokens: print("Access Token") print("-------------") print(access) print("\n\n") print("Refresh Token") print("-------------") print(refresh) print("\n\n") print("Identity Token") print("---------------") print(identity) print("\n\n") LOG.info("Login completed!") def test_connection(args): conn = GeodatahubConn(backend_url=url) if conn.ping(): LOG.info("Connection to Geodatahub works") else: LOG.info("An error occured while contacting Geodatahub") def search(args): """Perform search of datasets in Geodatahub """ conn = GeodatahubConn(backend_url=url) # Process the users input. # The user might type: key1=val1 key2=val2 # This must be translated into an acceptable query string # { "key1":val1, "key2":val2 } query = "" try: if args.query != "": query = [] for q in args.query: q2 = q.split('=') query.append(f'"{q2[0]}":"{q2[1]}"') query = "{%s}" % ",".join(query) except IndexError: # User supplied the request in JSON format query = json.loads(args.query[0]) for k in query: if "http" not in k: # Ensure all keys have the full schema URI query[f"https://schema.geodatahub.dk/{k}.json"] = query.pop(k) datasets = conn.searchDataset(query) if datasets is None: LOG.info("No datasets exist") else: for dset in datasets: LOG.info(dset) def get_data(args): """Perform a GET request for a specific dataset in Geodatahub """ conn = GeodatahubConn(backend_url=url) conn._auth_headers["Authorization"] = "eyJraWQiOiJsQWdWTGM3SWZseDRwYkVHVUI0c3c4Ykg0R3VQTDRJb2NcL25JVFU1RWJVRT0iLCJhbGciOiJSUzI1NiJ9.eyJhdF9oYXNoIjoibVo4YTdPRzdOYjlqOXhPZXVZT0hXZyIsInN1YiI6ImFhNTk0NjdlLTlhZWYtNDg1NC1hNzYyLTkyMGZlMjBhOTc0NyIsImF1ZCI6IjFtZGQzMGI1a2Jza3E1cms5YjNyZDk2Y3YyIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsInRva2VuX3VzZSI6ImlkIiwiYXV0aF90aW1lIjoxNTk0NzU5NjA2LCJpc3MiOiJodHRwczpcL1wvY29nbml0by1pZHAuZXUtd2VzdC0xLmFtYXpvbmF3cy5jb21cL2V1LXdlc3QtMV8wVURHeFdqNmQiLCJjb2duaXRvOnVzZXJuYW1lIjoiYWE1OTQ2N2UtOWFlZi00ODU0LWE3NjItOTIwZmUyMGE5NzQ3IiwiZXhwIjoxNTk0NzYzMjA2LCJpYXQiOjE1OTQ3NTk2MDYsImVtYWlsIjoiY2hyaXN0aWFuQGdlb2RhdGFodWIuZGsifQ.NzoYMWnEXHeuv_nZAVC6THiUDSoNeMuYDMvivBV4crutEpDrwvNypU_8iGPN29veBaEC02UPTajyW44nFE2j62kF8Cc6kyolUxBiPbbtedAYzwEENcqcccZJeTKja-09Fxqs8hCzH668CLPNcPD7Z0-jjFHJRysYnz7u3DPRjQb4K0HPCkhtv3u0e-UH6o3qZeh1HqpT4RAA6FQ8KxlbLFGejpB1dL19n6zBwcS2aAcLn4tFdVbOIpbfohxaJmQWCiwnaTeIdhyVO28e9eoiCtiPm8R1zw-5X5D-NMrjLZjBdAwCDw8K8ydGadcxH_ms2pARfeGCSnOsfaTAaBARKQ" dataset = conn.getDataset(args.id) LOG.info(dataset) def schema(args): """Perform a GET request for a specific dataset in Geodatahub """ conn = GeodatahubConn(backend_url=url) dataset = conn.get_schema_options(args.schema, args.key) def add_data(args): conn = GeodatahubConn(backend_url=url) print(args.files) try: for f in args.files: with open(f, "r") as json_file: dataset = json.load(json_file) dset_id = 
conn.uploadDataset(dataset) for ids in dset_id: LOG.info(f"New dataset added with ID {ids}") except FileNotFoundError: LOG.error(f"Unable to open one or more of the files {args.files}") # Setup commandline arguments cli_parser = argparse.ArgumentParser(description="Commandline interface to GeoDataHub") sub_parser = cli_parser.add_subparsers(help= "Type of operation") cli_parser.add_argument("--config-path", help="Location of the user config file") # Commandline arguments to login to backend auth_parser = sub_parser.add_parser("login") auth_parser.add_argument("--print-all-tokens", default=False, const=True, nargs='?', help="Print auth tokens to screen") auth_parser.add_argument("--use-refresh-token", default=False, const=True, nargs='?', help="Use existing refresh token to login. Token will refresh any existing tokens but not require a full login.") auth_parser.set_defaults(func=login) # Commandline arguments to test connection to backend test_parser = sub_parser.add_parser("test") test_parser.set_defaults(func=test_connection) # Commandline arguments to search/list datasets search_parser = sub_parser.add_parser("list") search_parser.add_argument("query", nargs='+', help="Values to search for", default="") search_parser.set_defaults(func=search) # Commandline arguments to add new datasets add_parser = sub_parser.add_parser("add") add_parser.add_argument("files", nargs='+', help="List of JSON files containing metadata to upload") add_parser.set_defaults(func=add_data) # Commandline arguments to get a specific datasets get_parser = sub_parser.add_parser("get") get_parser.add_argument("id", help="Unique dataset identifier") get_parser.set_defaults(func=get_data) # Commandline arguments to get a specific datasets schema_options_parser = sub_parser.add_parser("schema") schema_options_parser.add_argument("schema", help="Unique dataset identifier") schema_options_parser.add_argument("key", help="Unique dataset identifier") schema_options_parser.set_defaults(func=schema) if __name__ == "__main__": args = cli_parser.parse_args() try: args.func(args) except AttributeError: cli_parser.print_help() sys.exit(1)
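The CLI above is a thin wrapper around `geodatahub.connection.Connection` (imported above as `GeodatahubConn`). A minimal sketch of calling the same methods directly follows; the query content and dataset identifier are illustrative assumptions, and, as in `get_data` above, some endpoints may require a prior login/authentication token.

# Programmatic sketch mirroring the CLI commands above; only methods the CLI
# itself uses (ping, searchDataset, getDataset) are called here.
from geodatahub.connection import Connection

conn = Connection(backend_url="https://api-dev.geodatahub.dk")

if conn.ping():
    # Query string built the same way the `list` command builds it.
    results = conn.searchDataset('{"https://schema.geodatahub.dk/example.json":"some value"}')
    print(results)

    # Fetch a single dataset by its identifier (hypothetical id).
    dataset = conn.getDataset("00000000-0000-0000-0000-000000000000")
    print(dataset)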
PypiClean
/ixnetwork_restpy-1.1.10.tar.gz/ixnetwork_restpy-1.1.10/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/l1config/pos/dcc/dcc.py
import sys from ixnetwork_restpy.base import Base from ixnetwork_restpy.files import Files if sys.version_info >= (3, 5): from typing import List, Any, Union class Dcc(Base): """The Layer 1 Configuration is being configured for a POS port and DCC is selected as the Payload Type. The Dcc class encapsulates a required dcc resource which will be retrieved from the server every time the property is accessed. """ __slots__ = () _SDM_NAME = "dcc" _SDM_ATT_MAP = { "AvailableSpeeds": "availableSpeeds", "CanModifySpeed": "canModifySpeed", "CanSetMultipleSpeeds": "canSetMultipleSpeeds", "Crc": "crc", "OverheadByte": "overheadByte", "SelectedSpeeds": "selectedSpeeds", "TimeFill": "timeFill", } _SDM_ENUM_MAP = { "crc": ["crc16", "crc32"], "overheadByte": ["loh", "soh"], "timeFill": ["flag7E", "markIdle"], } def __init__(self, parent, list_op=False): super(Dcc, self).__init__(parent, list_op) @property def AvailableSpeeds(self): # type: () -> List[str] """ Returns ------- - list(str[]): Which speeds are available for the current media and AN settings. """ return self._get_attribute(self._SDM_ATT_MAP["AvailableSpeeds"]) @property def CanModifySpeed(self): # type: () -> bool """ Returns ------- - bool: Returns true/false depending upon if the port can change speed for the current media and AN settings. """ return self._get_attribute(self._SDM_ATT_MAP["CanModifySpeed"]) @property def CanSetMultipleSpeeds(self): # type: () -> bool """ Returns ------- - bool: Can this port selectmultiple speeds for the current media and AN settings. """ return self._get_attribute(self._SDM_ATT_MAP["CanSetMultipleSpeeds"]) @property def Crc(self): # type: () -> str """ Returns ------- - str(crc16 | crc32): Choose the type of Cyclic Redundancy Check to be used. """ return self._get_attribute(self._SDM_ATT_MAP["Crc"]) @Crc.setter def Crc(self, value): # type: (str) -> None self._set_attribute(self._SDM_ATT_MAP["Crc"], value) @property def OverheadByte(self): # type: () -> str """ Returns ------- - str(loh | soh): Choose the type of Overhead bytes to be used for transmitting the DCC packet streams. """ return self._get_attribute(self._SDM_ATT_MAP["OverheadByte"]) @OverheadByte.setter def OverheadByte(self, value): # type: (str) -> None self._set_attribute(self._SDM_ATT_MAP["OverheadByte"], value) @property def SelectedSpeeds(self): # type: () -> List[str] """ Returns ------- - list(str[]): Which speeds are selected for the current media and AN settings. """ return self._get_attribute(self._SDM_ATT_MAP["SelectedSpeeds"]) @SelectedSpeeds.setter def SelectedSpeeds(self, value): # type: (List[str]) -> None self._set_attribute(self._SDM_ATT_MAP["SelectedSpeeds"], value) @property def TimeFill(self): # type: () -> str """ Returns ------- - str(flag7E | markIdle): Choose the type of bytes used to fill the gaps between DCC frames. """ return self._get_attribute(self._SDM_ATT_MAP["TimeFill"]) @TimeFill.setter def TimeFill(self, value): # type: (str) -> None self._set_attribute(self._SDM_ATT_MAP["TimeFill"], value) def update(self, Crc=None, OverheadByte=None, SelectedSpeeds=None, TimeFill=None): # type: (str, str, List[str], str) -> Dcc """Updates dcc resource on the server. Args ---- - Crc (str(crc16 | crc32)): Choose the type of Cyclic Redundancy Check to be used. - OverheadByte (str(loh | soh)): Choose the type of Overhead bytes to be used for transmitting the DCC packet streams. - SelectedSpeeds (list(str[])): Which speeds are selected for the current media and AN settings. 
- TimeFill (str(flag7E | markIdle)): Choose the type of bytes used to fill the gaps between DCC frames. Raises ------ - ServerError: The server has encountered an uncategorized error condition """ return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) def find( self, AvailableSpeeds=None, CanModifySpeed=None, CanSetMultipleSpeeds=None, Crc=None, OverheadByte=None, SelectedSpeeds=None, TimeFill=None, ): # type: (List[str], bool, bool, str, str, List[str], str) -> Dcc """Finds and retrieves dcc resources from the server. All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dcc resources from the server. To retrieve an exact match ensure the parameter value starts with ^ and ends with $ By default the find method takes no parameters and will retrieve all dcc resources from the server. Args ---- - AvailableSpeeds (list(str[])): Which speeds are available for the current media and AN settings. - CanModifySpeed (bool): Returns true/false depending upon if the port can change speed for the current media and AN settings. - CanSetMultipleSpeeds (bool): Can this port selectmultiple speeds for the current media and AN settings. - Crc (str(crc16 | crc32)): Choose the type of Cyclic Redundancy Check to be used. - OverheadByte (str(loh | soh)): Choose the type of Overhead bytes to be used for transmitting the DCC packet streams. - SelectedSpeeds (list(str[])): Which speeds are selected for the current media and AN settings. - TimeFill (str(flag7E | markIdle)): Choose the type of bytes used to fill the gaps between DCC frames. Returns ------- - self: This instance with matching dcc resources retrieved from the server available through an iterator or index Raises ------ - ServerError: The server has encountered an uncategorized error condition """ return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): """Retrieves a single instance of dcc data from the server. Args ---- - href (str): An href to the instance to be retrieved Returns ------- - self: This instance with the dcc resources from the server available through an iterator or index Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ return self._read(href)
PypiClean
/scikit_allel-1.3.6-cp311-cp311-macosx_10_9_x86_64.whl/allel/io/vcf_write.py
import csv from datetime import date import itertools from operator import itemgetter import logging import numpy as np import allel logger = logging.getLogger(__name__) debug = logger.debug VCF_FIXED_FIELDS = 'CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO' def normalize_callset(callset): if hasattr(callset, 'keys'): names = list() new_callset = dict() for k in list(callset.keys()): a = callset[k] if k.startswith('calldata/'): continue if k == 'samples': continue if k.startswith('variants/'): k = k[9:] names.append(k) new_callset[k] = a callset = new_callset elif hasattr(callset, 'dtype') and callset.dtype.names: names = list(callset.dtype.names) else: raise ValueError('callset should be dict or recarray, found %r' % callset) return names, callset def write_vcf(path, callset, rename=None, number=None, description=None, fill=None, write_header=True): """Preliminary support for writing a VCF file. Currently does not support sample data. Needs further work.""" names, callset = normalize_callset(callset) with open(path, 'w') as vcf_file: if write_header: write_vcf_header(vcf_file, names, callset=callset, rename=rename, number=number, description=description) write_vcf_data(vcf_file, names, callset=callset, rename=rename, fill=fill) def write_vcf_header(vcf_file, names, callset, rename, number, description): if rename is None: rename = dict() if number is None: number = dict() if description is None: description = dict() # write file format version print('##fileformat=VCFv4.1', file=vcf_file) # write today's date today = date.today().strftime('%Y%m%d') print('##fileDate=%s' % today, file=vcf_file) # write source print('##source=scikit-allel-%s' % allel.__version__, file=vcf_file) info_names = [n for n in names if not n.upper().startswith('FILTER_') and not n.upper() in VCF_FIXED_FIELDS] info_ids = [rename[n] if n in rename else n for n in info_names] # write INFO headers, sorted by ID for name, vcf_id in sorted(zip(info_names, info_ids), key=itemgetter(1)): col = callset[name] # determine VCF Number if name in number: vcf_number = number[name] else: if col.ndim == 1 and col.dtype.kind == 'b': # Flag vcf_number = 0 elif col.ndim == 1: vcf_number = 1 elif col.ndim == 2: vcf_number = col.shape[1] else: raise NotImplementedError('only columns with 1 or two ' 'dimensions are supported') # determine VCF Type kind = col.dtype.kind if kind == 'b': vcf_type = 'Flag' elif kind in 'ui': vcf_type = 'Integer' elif kind == 'f': vcf_type = 'Float' else: vcf_type = 'String' # determine VCF Description if name in description: vcf_description = description[name] else: vcf_description = '' # construct INFO header line header_line = '##INFO=<ID=%s,Number=%s,Type=%s,Description="%s">'\ % (vcf_id, vcf_number, vcf_type, vcf_description) print(header_line, file=vcf_file) filter_names = [n for n in names if n.upper().startswith('FILTER_')] filter_ids = [rename[n] if n in rename else n[7:] for n in filter_names] # write FILTER headers, sorted by ID for name, vcf_id in sorted(zip(filter_names, filter_ids), key=itemgetter(1)): # determine VCF Description if name in description: vcf_description = description[name] else: vcf_description = '' # construct FILTER header line header_line = '##FILTER=<ID=%s,Description="%s">'\ % (vcf_id, vcf_description) print(header_line, file=vcf_file) # write column names line = '#' + '\t'.join(VCF_FIXED_FIELDS) print(line, file=vcf_file) # noinspection PyShadowingBuiltins def write_vcf_data(vcf_file, names, callset, rename, fill): if rename is None: rename = dict() if fill is 
None: fill = dict() # find the fixed columns, allowing for case insensitive naming in the # input array col_chrom = None col_pos = None col_id = None col_ref = None col_alt = None col_qual = None for n in names: if n.upper() == 'CHROM': col_chrom = callset[n] elif n.upper() == 'POS': col_pos = callset[n] elif n.upper() == 'ID': col_id = callset[n] elif n.upper() == 'REF': col_ref = callset[n] elif n.upper() == 'ALT': col_alt = callset[n] elif n.upper() == 'QUAL': col_qual = callset[n] # check for required columns if col_chrom is None: raise ValueError('CHROM column not found') if col_pos is None: raise ValueError('POS column not found') # pad optional columns dot = itertools.repeat('.') if col_id is None: col_id = dot if col_ref is None: col_ref = dot if col_alt is None: col_alt = dot if col_qual is None: col_qual = dot # find FILTER columns filter_names = [n for n in names if n.upper().startswith('FILTER_')] filter_ids = [rename[n] if n in rename else n[7:] for n in filter_names] filter_cols = [callset[n] for n in filter_names] # sort by ID if filter_names: filters = sorted(zip(filter_names, filter_ids, filter_cols), key=itemgetter(1)) filter_names, filter_ids, filter_cols = zip(*filters) # find INFO columns info_names = [n for n in names if not n.upper().startswith('FILTER_') and not n.upper() in VCF_FIXED_FIELDS] info_ids = [rename[n] if n in rename else n for n in info_names] info_cols = [callset[n] for n in info_names] # sort by ID if info_names: infos = sorted(zip(info_names, info_ids, info_cols), key=itemgetter(1)) info_names, info_ids, info_cols = zip(*infos) # setup writer writer = csv.writer(vcf_file, delimiter='\t', lineterminator='\n') # zip up data as rows rows = zip(col_chrom, col_pos, col_id, col_ref, col_alt, col_qual) filter_rows = zip(*filter_cols) info_rows = zip(*info_cols) for row, filter_row, info_row in itertools.zip_longest(rows, filter_rows, info_rows): # unpack main row chrom, pos, id, ref, alt, qual = row chrom = _vcf_value_str(chrom) pos = _vcf_value_str(pos) id = _vcf_value_str(id) ref = _vcf_value_str(ref) alt = _vcf_value_str(alt, fill=fill.get('ALT', None)) qual = _vcf_value_str(qual) # construct FILTER value if filter_row is not None: flt = [i for i, v in zip(filter_ids, filter_row) if v] if flt: flt = ';'.join(flt) else: flt = 'PASS' else: flt = '.' # construct INFO value if info_row is not None: info_vals = [_vcf_info_str(n, i, v, fill) for n, i, v in zip(info_names, info_ids, info_row)] info_vals = [x for x in info_vals if x is not None] info = ';'.join(info_vals) else: info = '.' # repack row = chrom, pos, id, ref, alt, qual, flt, info writer.writerow(row) def _vcf_value_str(o, fill=None): if isinstance(o, bytes): return str(o, encoding='ascii') elif isinstance(o, (tuple, list, np.ndarray)): if fill is None: t = [_vcf_value_str(x) for x in o] else: t = [_vcf_value_str(x) for x in o if x != fill] return ','.join(t) else: return str(o) # noinspection PyShadowingBuiltins def _vcf_info_str(name, id, value, fill): if isinstance(value, (bool, np.bool_)): if bool(value): return id else: return None else: return '%s=%s' % (id, _vcf_value_str(value, fill=fill.get(name, None)))
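A minimal sketch of calling `write_vcf` with a dict-style callset follows. The variant values are made-up illustrative data; as the docstring above notes, this writer is preliminary and does not emit sample data.

# Write a tiny dict-style callset to VCF using write_vcf() from the module above.
import numpy as np
from allel.io.vcf_write import write_vcf

callset = {
    'variants/CHROM': np.array(['chr1', 'chr1', 'chr2']),
    'variants/POS': np.array([100, 250, 17]),
    'variants/REF': np.array(['A', 'T', 'G']),
    'variants/ALT': np.array(['C', 'G', 'A']),
    'variants/DP': np.array([10, 7, 23]),  # any extra field becomes an INFO column
}

# 'variants/' prefixes are stripped by normalize_callset(); CHROM and POS are
# required, everything else is optional.
write_vcf('example.vcf', callset, description={'DP': 'Total read depth'})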
PypiClean
/mindspore_ascend-1.10.0-cp39-none-any.whl/mindspore/ops/composite/clip_ops.py
"""Operations for clipping tensors to min/max values.""" from __future__ import absolute_import import numpy as np from mindspore.nn.cell import Cell from mindspore.ops import composite as C from mindspore.ops import functional as F from mindspore.ops import operations as P from mindspore.common.tensor import Tensor from mindspore.common import dtype as mstype from mindspore._checkparam import Rel from mindspore._checkparam import Validator as validator from mindspore.ops.primitive import constexpr @constexpr def _check_output_shape(input_shape, out_shape, prim_name=None): msg_prefix = f"For '{prim_name}', the" if prim_name else "The" if input_shape != out_shape: raise ValueError(f"{msg_prefix} input 'x' shape must be equal to the output shape, but got " f"input 'x' shape {input_shape}, output shape {out_shape}.") def check_np_type(np_dtype, is_max_val): if not (np.issubsctype(np_dtype, np.floating) or np.issubsctype(np_dtype, np.integer) or np.issubsctype(np_dtype, np.complex64) or np.issubsctype(np_dtype, np.complex128) or np.issubsctype(np_dtype, np.bool_)): value_info = ("clip_value_max", "clip_value_min") if is_max_val else ("clip_value_min", "clip_value_max") raise ValueError(f"When {value_info[0]} is none, The date type of {value_info[1]} only support integer," f"floating, bool, complex64 or complex128. But got {np_dtype}") @constexpr def create_max_min_value(ms_type, is_max_val): """create max or min value""" np_dtype = mstype.dtype_to_nptype(ms_type) check_np_type(np_dtype, is_max_val) if np.issubsctype(np_dtype, np.floating): val = np.finfo(np_dtype).max if is_max_val else np.finfo(np_dtype).min elif np.issubsctype(np_dtype, np.integer): val = np.iinfo(np_dtype).max if is_max_val else np.iinfo(np_dtype).min elif np.issubsctype(np_dtype, np.complex64): val = np.finfo(np.float32).max if is_max_val else np.finfo(np.float32).min val = np.complex64(np.complex(val, val)) elif np.issubsctype(np_dtype, np.complex128): val = np.finfo(np.float64).max if is_max_val else np.finfo(np.float64).min val = np.complex128(np.complex(val, val)) else: val = np.bool_(True) if is_max_val else np.bool_(False) return Tensor(val, ms_type) @constexpr def raise_value_error(): raise ValueError("At least one of 'clip_value_min' or 'clip_value_max' must not be None") def clip_by_value(x, clip_value_min=None, clip_value_max=None): r""" Clips tensor values to a specified min and max. Limits the value of :math:`x` to a range, whose lower limit is `clip_value_min` and upper limit is `clip_value_max` . .. math:: out_i= \left\{ \begin{array}{align} clip\_value\_max & \text{ if } x_i\ge clip\_value\_max \\ x_i & \text{ if } clip\_value\_min \lt x_i \lt clip\_value\_max \\ clip\_value\_min & \text{ if } x_i \le clip\_value\_min \\ \end{array}\right. Note: `clip_value_min` needs to be less than or equal to `clip_value_max` . The data type of x, `clip_value_min` and `clip_value_max` should support implicit type conversion and cannot all be bool type. Args: x (Tensor): Input data. The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions. clip_value_min (Tensor): The minimum value. `clip_value_min` and `clip_value_max` cannot be all None. Default: None. clip_value_max (Tensor): The maximum value. `clip_value_min` and `clip_value_max` cannot be all None. Default: None. Returns: Tensor, a clipped Tensor. The data type is the one with higher precision or higher digits among the x, `clip_value_min` and `clip_value_max` . 
Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> from mindspore import Tensor, ops >>> import numpy as np >>> min_value = Tensor(5, mindspore.float32) >>> max_value = Tensor(20, mindspore.float32) >>> x = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32) >>> output = ops.clip_by_value(x, min_value, max_value) >>> print(output) [[ 5. 20. 5. 7.] [ 5. 11. 6. 20.]] """ min_op = P.Minimum() max_op = P.Maximum() if clip_value_min is None and clip_value_max is None: raise_value_error() if clip_value_min is None: clip_value_min = create_max_min_value(F.dtype(clip_value_max), False) if clip_value_max is None: clip_value_max = create_max_min_value(F.dtype(clip_value_min), True) x_min = min_op(x, clip_value_max) x_max = max_op(x_min, clip_value_min) _check_output_shape(F.shape(x), F.shape(x_max), 'clip_by_value') return x_max # The attribute grad_scale is needed for enabling the parallel mode # If this is removed, c.clip_by_global_norm will have precision error in semi/auto parallel mode. expand_dims = P.ExpandDims().add_prim_attr("grad_scale", True) get_square_sum = C.MultitypeFuncGraph("get_square_sum") @get_square_sum.register("Tensor") def _get_square_sum(x): norm = P.ReduceSum(False)(F.square(x), ()) norm = expand_dims(F.cast(norm, mstype.float32), 0) return norm apply_global_norm = C.MultitypeFuncGraph("apply_global_norm") @apply_global_norm.register("Tensor", "Tensor", "Tensor") def _apply_global_norm(clip_norm, global_norm, x): x_dtype = F.dtype(x) x = x * clip_norm / global_norm x = F.cast(x, x_dtype) return x class _ClipByGlobalNorm(Cell): r""" Clips tensor values by the ratio of the sum of their norms. Args: clip_norm (Union(float, int)): The clipping ratio. Default: 1.0 use_norm (Union(float, None)): The global norm. Default: None Inputs: - **x** (Union(tuple[Tensor], list[Tensor])) - Input data to clip. Outputs: Tensor, a clipped Tensor. """ def __init__(self, clip_norm=1.0, use_norm=None): """Initialize _ClipByGlobalNorm.""" super(_ClipByGlobalNorm, self).__init__() # Add interface. This parameter is not used at present if use_norm is not None: raise ValueError(f"For '{self.cls_name}', input 'use_norm' only supports None currently, " f"but got 'use_norm': {use_norm}") validator.check_number("clip_norm", clip_norm, 0.0, Rel.GT, self.cls_name) self.clip_norm = Tensor([clip_norm], mstype.float32) self.hyper_map = C.HyperMap() self.greater_equal = P.GreaterEqual() def construct(self, x): square_sum = self.hyper_map(get_square_sum, x) global_norm = F.sqrt(F.addn(square_sum)) cond = self.greater_equal(global_norm, self.clip_norm) global_norm = F.select(cond, global_norm, self.clip_norm) clip_x = self.hyper_map(F.partial(apply_global_norm, self.clip_norm, global_norm), x) return clip_x @constexpr def _check_value(clip_norm): validator.check_number("clip_norm", clip_norm, 0.0, Rel.GT, "clip_by_global_norm") return clip_norm def clip_by_global_norm(x, clip_norm=1.0, use_norm=None): r""" Clips tensor values by the ratio of the sum of their norms. Note: - Input `x` should be a tuple or list of tensors. Otherwise, it will raise an error. - On the SEMI_AUTO_PARALLEL mode or AUTO_PARALLEL mode, if the input `x` is the gradient, the gradient norm values on all devices will be automatically aggregated by allreduce inserted after the local square sum of the gradients. Args: x (Union(tuple[Tensor], list[Tensor])): Input data to clip. The shape of each Tensor in tuple is :math:`(N,*)` where :math:`*` means, any number of additional dimensions. 
clip_norm (Union(float, int)): The clipping ratio, it should be greater than 0. Default: 1.0 use_norm (None): The global norm. Default: None. Currently only none is supported. Returns: tuple[Tensor], a clipped Tensor. It has the same data type as `x` and each Tensor in the output tuple is the same as the original input shape. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> from mindspore import Tensor, ops >>> import numpy as np >>> x1 = np.array([[2., 3.], [1., 2.]]).astype(np.float32) >>> x2 = np.array([[1., 4.], [3., 1.]]).astype(np.float32) >>> input_x = (Tensor(x1), Tensor(x2)) >>> out = ops.clip_by_global_norm(input_x, 1.0) >>> print(out) (Tensor(shape=[2, 2], dtype=Float32, value= [[ 2.98142403e-01, 4.47213590e-01], [ 1.49071202e-01, 2.98142403e-01]]), Tensor(shape=[2, 2], dtype=Float32, value= [[ 1.49071202e-01, 5.96284807e-01], [ 4.47213590e-01, 1.49071202e-01]])) """ clip_norm = _check_value(clip_norm) clip_val = _ClipByGlobalNorm(clip_norm, use_norm)(x) return clip_val
PypiClean
/whywork-0.0.0.tar.gz/whywork-0.0.0/.eggs/mysql_connector-2.2.9-py3.8-linux-x86_64.egg/mysqlx/charsets.py
# MySQL Connector/Python - MySQL driver written in Python. # Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. # MySQL Connector/Python is licensed under the terms of the GPLv2 # <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most # MySQL Connectors. There are special exceptions to the terms and # conditions of the GPLv2 as it is applied to this software, see the # FOSS License Exception # <http://www.mysql.com/about/legal/licensing/foss-exception.html>. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # This file was auto-generated. _GENERATED_ON = '2016-06-06' _MYSQL_VERSION = (5, 7, 12) """This module contains the MySQL Server Character Sets""" MYSQL_CHARACTER_SETS = [ # (character set name, collation, default) None, ("big5", "big5_chinese_ci", True), # 1 ("latin2", "latin2_czech_cs", False), # 2 ("dec8", "dec8_swedish_ci", True), # 3 ("cp850", "cp850_general_ci", True), # 4 ("latin1", "latin1_german1_ci", False), # 5 ("hp8", "hp8_english_ci", True), # 6 ("koi8r", "koi8r_general_ci", True), # 7 ("latin1", "latin1_swedish_ci", True), # 8 ("latin2", "latin2_general_ci", True), # 9 ("swe7", "swe7_swedish_ci", True), # 10 ("ascii", "ascii_general_ci", True), # 11 ("ujis", "ujis_japanese_ci", True), # 12 ("sjis", "sjis_japanese_ci", True), # 13 ("cp1251", "cp1251_bulgarian_ci", False), # 14 ("latin1", "latin1_danish_ci", False), # 15 ("hebrew", "hebrew_general_ci", True), # 16 None, ("tis620", "tis620_thai_ci", True), # 18 ("euckr", "euckr_korean_ci", True), # 19 ("latin7", "latin7_estonian_cs", False), # 20 ("latin2", "latin2_hungarian_ci", False), # 21 ("koi8u", "koi8u_general_ci", True), # 22 ("cp1251", "cp1251_ukrainian_ci", False), # 23 ("gb2312", "gb2312_chinese_ci", True), # 24 ("greek", "greek_general_ci", True), # 25 ("cp1250", "cp1250_general_ci", True), # 26 ("latin2", "latin2_croatian_ci", False), # 27 ("gbk", "gbk_chinese_ci", True), # 28 ("cp1257", "cp1257_lithuanian_ci", False), # 29 ("latin5", "latin5_turkish_ci", True), # 30 ("latin1", "latin1_german2_ci", False), # 31 ("armscii8", "armscii8_general_ci", True), # 32 ("utf8", "utf8_general_ci", True), # 33 ("cp1250", "cp1250_czech_cs", False), # 34 ("ucs2", "ucs2_general_ci", True), # 35 ("cp866", "cp866_general_ci", True), # 36 ("keybcs2", "keybcs2_general_ci", True), # 37 ("macce", "macce_general_ci", True), # 38 ("macroman", "macroman_general_ci", True), # 39 ("cp852", "cp852_general_ci", True), # 40 ("latin7", "latin7_general_ci", True), # 41 ("latin7", "latin7_general_cs", False), # 42 ("macce", "macce_bin", False), # 43 ("cp1250", "cp1250_croatian_ci", False), # 44 ("utf8mb4", "utf8mb4_general_ci", True), # 45 ("utf8mb4", "utf8mb4_bin", False), # 46 ("latin1", "latin1_bin", False), # 47 ("latin1", "latin1_general_ci", False), # 48 ("latin1", "latin1_general_cs", False), # 49 ("cp1251", "cp1251_bin", False), # 50 ("cp1251", "cp1251_general_ci", True), # 51 ("cp1251", "cp1251_general_cs", False), # 52 ("macroman", 
"macroman_bin", False), # 53 ("utf16", "utf16_general_ci", True), # 54 ("utf16", "utf16_bin", False), # 55 ("utf16le", "utf16le_general_ci", True), # 56 ("cp1256", "cp1256_general_ci", True), # 57 ("cp1257", "cp1257_bin", False), # 58 ("cp1257", "cp1257_general_ci", True), # 59 ("utf32", "utf32_general_ci", True), # 60 ("utf32", "utf32_bin", False), # 61 ("utf16le", "utf16le_bin", False), # 62 ("binary", "binary", True), # 63 ("armscii8", "armscii8_bin", False), # 64 ("ascii", "ascii_bin", False), # 65 ("cp1250", "cp1250_bin", False), # 66 ("cp1256", "cp1256_bin", False), # 67 ("cp866", "cp866_bin", False), # 68 ("dec8", "dec8_bin", False), # 69 ("greek", "greek_bin", False), # 70 ("hebrew", "hebrew_bin", False), # 71 ("hp8", "hp8_bin", False), # 72 ("keybcs2", "keybcs2_bin", False), # 73 ("koi8r", "koi8r_bin", False), # 74 ("koi8u", "koi8u_bin", False), # 75 None, ("latin2", "latin2_bin", False), # 77 ("latin5", "latin5_bin", False), # 78 ("latin7", "latin7_bin", False), # 79 ("cp850", "cp850_bin", False), # 80 ("cp852", "cp852_bin", False), # 81 ("swe7", "swe7_bin", False), # 82 ("utf8", "utf8_bin", False), # 83 ("big5", "big5_bin", False), # 84 ("euckr", "euckr_bin", False), # 85 ("gb2312", "gb2312_bin", False), # 86 ("gbk", "gbk_bin", False), # 87 ("sjis", "sjis_bin", False), # 88 ("tis620", "tis620_bin", False), # 89 ("ucs2", "ucs2_bin", False), # 90 ("ujis", "ujis_bin", False), # 91 ("geostd8", "geostd8_general_ci", True), # 92 ("geostd8", "geostd8_bin", False), # 93 ("latin1", "latin1_spanish_ci", False), # 94 ("cp932", "cp932_japanese_ci", True), # 95 ("cp932", "cp932_bin", False), # 96 ("eucjpms", "eucjpms_japanese_ci", True), # 97 ("eucjpms", "eucjpms_bin", False), # 98 ("cp1250", "cp1250_polish_ci", False), # 99 None, ("utf16", "utf16_unicode_ci", False), # 101 ("utf16", "utf16_icelandic_ci", False), # 102 ("utf16", "utf16_latvian_ci", False), # 103 ("utf16", "utf16_romanian_ci", False), # 104 ("utf16", "utf16_slovenian_ci", False), # 105 ("utf16", "utf16_polish_ci", False), # 106 ("utf16", "utf16_estonian_ci", False), # 107 ("utf16", "utf16_spanish_ci", False), # 108 ("utf16", "utf16_swedish_ci", False), # 109 ("utf16", "utf16_turkish_ci", False), # 110 ("utf16", "utf16_czech_ci", False), # 111 ("utf16", "utf16_danish_ci", False), # 112 ("utf16", "utf16_lithuanian_ci", False), # 113 ("utf16", "utf16_slovak_ci", False), # 114 ("utf16", "utf16_spanish2_ci", False), # 115 ("utf16", "utf16_roman_ci", False), # 116 ("utf16", "utf16_persian_ci", False), # 117 ("utf16", "utf16_esperanto_ci", False), # 118 ("utf16", "utf16_hungarian_ci", False), # 119 ("utf16", "utf16_sinhala_ci", False), # 120 ("utf16", "utf16_german2_ci", False), # 121 ("utf16", "utf16_croatian_ci", False), # 122 ("utf16", "utf16_unicode_520_ci", False), # 123 ("utf16", "utf16_vietnamese_ci", False), # 124 None, None, None, ("ucs2", "ucs2_unicode_ci", False), # 128 ("ucs2", "ucs2_icelandic_ci", False), # 129 ("ucs2", "ucs2_latvian_ci", False), # 130 ("ucs2", "ucs2_romanian_ci", False), # 131 ("ucs2", "ucs2_slovenian_ci", False), # 132 ("ucs2", "ucs2_polish_ci", False), # 133 ("ucs2", "ucs2_estonian_ci", False), # 134 ("ucs2", "ucs2_spanish_ci", False), # 135 ("ucs2", "ucs2_swedish_ci", False), # 136 ("ucs2", "ucs2_turkish_ci", False), # 137 ("ucs2", "ucs2_czech_ci", False), # 138 ("ucs2", "ucs2_danish_ci", False), # 139 ("ucs2", "ucs2_lithuanian_ci", False), # 140 ("ucs2", "ucs2_slovak_ci", False), # 141 ("ucs2", "ucs2_spanish2_ci", False), # 142 ("ucs2", "ucs2_roman_ci", False), # 143 ("ucs2", "ucs2_persian_ci", 
False), # 144 ("ucs2", "ucs2_esperanto_ci", False), # 145 ("ucs2", "ucs2_hungarian_ci", False), # 146 ("ucs2", "ucs2_sinhala_ci", False), # 147 ("ucs2", "ucs2_german2_ci", False), # 148 ("ucs2", "ucs2_croatian_ci", False), # 149 ("ucs2", "ucs2_unicode_520_ci", False), # 150 ("ucs2", "ucs2_vietnamese_ci", False), # 151 None, None, None, None, None, None, None, ("ucs2", "ucs2_general_mysql500_ci", False), # 159 ("utf32", "utf32_unicode_ci", False), # 160 ("utf32", "utf32_icelandic_ci", False), # 161 ("utf32", "utf32_latvian_ci", False), # 162 ("utf32", "utf32_romanian_ci", False), # 163 ("utf32", "utf32_slovenian_ci", False), # 164 ("utf32", "utf32_polish_ci", False), # 165 ("utf32", "utf32_estonian_ci", False), # 166 ("utf32", "utf32_spanish_ci", False), # 167 ("utf32", "utf32_swedish_ci", False), # 168 ("utf32", "utf32_turkish_ci", False), # 169 ("utf32", "utf32_czech_ci", False), # 170 ("utf32", "utf32_danish_ci", False), # 171 ("utf32", "utf32_lithuanian_ci", False), # 172 ("utf32", "utf32_slovak_ci", False), # 173 ("utf32", "utf32_spanish2_ci", False), # 174 ("utf32", "utf32_roman_ci", False), # 175 ("utf32", "utf32_persian_ci", False), # 176 ("utf32", "utf32_esperanto_ci", False), # 177 ("utf32", "utf32_hungarian_ci", False), # 178 ("utf32", "utf32_sinhala_ci", False), # 179 ("utf32", "utf32_german2_ci", False), # 180 ("utf32", "utf32_croatian_ci", False), # 181 ("utf32", "utf32_unicode_520_ci", False), # 182 ("utf32", "utf32_vietnamese_ci", False), # 183 None, None, None, None, None, None, None, None, ("utf8", "utf8_unicode_ci", False), # 192 ("utf8", "utf8_icelandic_ci", False), # 193 ("utf8", "utf8_latvian_ci", False), # 194 ("utf8", "utf8_romanian_ci", False), # 195 ("utf8", "utf8_slovenian_ci", False), # 196 ("utf8", "utf8_polish_ci", False), # 197 ("utf8", "utf8_estonian_ci", False), # 198 ("utf8", "utf8_spanish_ci", False), # 199 ("utf8", "utf8_swedish_ci", False), # 200 ("utf8", "utf8_turkish_ci", False), # 201 ("utf8", "utf8_czech_ci", False), # 202 ("utf8", "utf8_danish_ci", False), # 203 ("utf8", "utf8_lithuanian_ci", False), # 204 ("utf8", "utf8_slovak_ci", False), # 205 ("utf8", "utf8_spanish2_ci", False), # 206 ("utf8", "utf8_roman_ci", False), # 207 ("utf8", "utf8_persian_ci", False), # 208 ("utf8", "utf8_esperanto_ci", False), # 209 ("utf8", "utf8_hungarian_ci", False), # 210 ("utf8", "utf8_sinhala_ci", False), # 211 ("utf8", "utf8_german2_ci", False), # 212 ("utf8", "utf8_croatian_ci", False), # 213 ("utf8", "utf8_unicode_520_ci", False), # 214 ("utf8", "utf8_vietnamese_ci", False), # 215 None, None, None, None, None, None, None, ("utf8", "utf8_general_mysql500_ci", False), # 223 ("utf8mb4", "utf8mb4_unicode_ci", False), # 224 ("utf8mb4", "utf8mb4_icelandic_ci", False), # 225 ("utf8mb4", "utf8mb4_latvian_ci", False), # 226 ("utf8mb4", "utf8mb4_romanian_ci", False), # 227 ("utf8mb4", "utf8mb4_slovenian_ci", False), # 228 ("utf8mb4", "utf8mb4_polish_ci", False), # 229 ("utf8mb4", "utf8mb4_estonian_ci", False), # 230 ("utf8mb4", "utf8mb4_spanish_ci", False), # 231 ("utf8mb4", "utf8mb4_swedish_ci", False), # 232 ("utf8mb4", "utf8mb4_turkish_ci", False), # 233 ("utf8mb4", "utf8mb4_czech_ci", False), # 234 ("utf8mb4", "utf8mb4_danish_ci", False), # 235 ("utf8mb4", "utf8mb4_lithuanian_ci", False), # 236 ("utf8mb4", "utf8mb4_slovak_ci", False), # 237 ("utf8mb4", "utf8mb4_spanish2_ci", False), # 238 ("utf8mb4", "utf8mb4_roman_ci", False), # 239 ("utf8mb4", "utf8mb4_persian_ci", False), # 240 ("utf8mb4", "utf8mb4_esperanto_ci", False), # 241 ("utf8mb4", "utf8mb4_hungarian_ci", 
False), # 242 ("utf8mb4", "utf8mb4_sinhala_ci", False), # 243 ("utf8mb4", "utf8mb4_german2_ci", False), # 244 ("utf8mb4", "utf8mb4_croatian_ci", False), # 245 ("utf8mb4", "utf8mb4_unicode_520_ci", False), # 246 ("utf8mb4", "utf8mb4_vietnamese_ci", False), # 247 ("gb18030", "gb18030_chinese_ci", True), # 248 ("gb18030", "gb18030_bin", False), # 249 ("gb18030", "gb18030_unicode_520_ci", False), # 250 ]
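A small sketch of consulting this table follows; the collation id is just an example taken from the entries above (some ids map to `None` placeholders).

# Look up the character set tuple for a MySQL collation id.
from mysqlx.charsets import MYSQL_CHARACTER_SETS

def charset_for_collation_id(collation_id):
    """Return (charset, collation, is_default), or None for unknown/empty slots."""
    try:
        return MYSQL_CHARACTER_SETS[collation_id]
    except IndexError:
        return None

print(charset_for_collation_id(33))  # ('utf8', 'utf8_general_ci', True) per the table above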
PypiClean
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/engel30.py
from genice2.cell import cellvectors import genice2.lattices import numpy as np desc = { "ref": { "PCOD8324623": "Engel 2018", "engel30": "Engel 2018" }, "usage": "No options available.", "brief": "Hypothetical zeolitic ice" } class Lattice(genice2.lattices.Lattice): def __init__(self): self.cell = cellvectors( a=21.6701, b=17.0625, c=21.76874, A=90.0, B=119.249, C=90.0 ) self.waters = np.array([ [-0.299160, 0.041171, 0.096847], [-0.715723, 0.325411, -0.486134], [-0.486262, 0.408274, -0.264149], [-0.278247, 0.075185, 0.485906], [-0.104076, 0.358755, 0.096843], [-0.513203, 0.158211, 0.264597], [-0.263753, 0.491943, -0.277934], [-0.736084, 0.241856, 0.277697], [0.097104, 0.449357, -0.305402], [-0.096700, 0.192054, 0.299581], [-0.131777, 0.241399, -0.138583], [0.131791, 0.491284, 0.138555], [0.139075, 0.074852, -0.499870], [-0.506419, 0.158144, -0.138288], [-0.493788, 0.408015, 0.138043], [-0.138395, 0.324856, 0.493289], [-0.701151, 0.291807, -0.097224], [0.103850, 0.108747, -0.097163], [-0.299160, 0.541172, 0.096847], [-0.715723, -0.174589, -0.486134], [-0.486262, -0.091726, -0.264149], [-0.278247, 0.575185, 0.485906], [-0.104076, -0.141245, 0.096843], [-0.513203, 0.658210, 0.264597], [-0.263753, -0.008057, -0.277934], [-0.736085, 0.741855, 0.277697], [0.097104, -0.050643, -0.305402], [-0.096700, 0.692056, 0.299581], [-0.131777, 0.741398, -0.138583], [0.131790, -0.008716, 0.138554], [0.139075, 0.574852, -0.499870], [-0.506419, 0.658145, -0.138288], [-0.493788, -0.091985, 0.138043], [-0.138395, -0.175144, 0.493289], [-0.701151, -0.208192, -0.097224], [0.103850, 0.608744, -0.097164], ]) self.coord = 'relative'
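A small sketch of inspecting this lattice plugin directly follows; it only uses the module path and the attributes defined above (`desc`, `Lattice`, `cell`, `waters`, `coord`) and assumes GenIce2 is installed.

# Inspect the hypothetical zeolitic ice lattice defined above.
from genice2.lattices.engel30 import Lattice, desc

lat = Lattice()
print(desc["brief"])      # "Hypothetical zeolitic ice"
print(lat.coord)          # "relative" (fractional coordinates)
print(lat.cell)           # cell vectors built by cellvectors()
print(lat.waters.shape)   # positions of the 36 water molecules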
PypiClean
/grr-response-server-3.4.6.post0.zip/grr-response-server-3.4.6.post0/grr_response_server/gui/static/angular-components/hunt/rapid-hunt-status-directive.js
goog.module('grrUi.hunt.rapidHuntStatusDirective'); goog.module.declareLegacyNamespace(); const apiService = goog.requireType('grrUi.core.apiService'); /** * Helper function to validate FileFinderArgs.path. * * @param {!Object} path A GlobExpression object. * @return {boolean} True if the path has more than one "*" symbols in it. False * otherwise. */ const isPathInvalid_ = (path) => { return (path['value'].match(/\*/g) || []).length > 1; }; /** * Checks given flowName/flowArgs for rapid-hunting eligibility. * * @param {string} flowName Name of the flow. * @param {!Object} flowArgs Flow arguments object. * @return {boolean} True if flowName/flowArgs are rapid-hunting eligible, * false otherwise. * @export */ exports.isEligible = (flowName, flowArgs) => { if (flowName !== 'FileFinder' && flowName !== 'ClientFileFinder') { return false; } const hasInvalidPath = (flowArgs['value']['paths'] || []).find(isPathInvalid_); if (hasInvalidPath) { return false; } if (flowArgs['value']['action'] !== undefined && flowArgs['value']['action']['value']['action_type'] && flowArgs['value']['action']['value']['action_type']['value'] === 'DOWNLOAD') { return false; } return true; }; const isEligible = exports.isEligible; /** * Controller for RapidHuntStatusDirective. * @unrestricted */ const RapidHuntStatusController = class { /** * @param {!angular.Scope} $scope * @param {!apiService.ApiService} grrApiService * @ngInject */ constructor($scope, grrApiService) { /** @private {!angular.Scope} */ this.scope_ = $scope; /** @private {!apiService.ApiService} */ this.grrApiService_ = grrApiService; this.enabled = false; this.isEligible = false; this.grrApiService_.getCached('/config/AdminUI.rapid_hunts_enabled') .then((response) => { this.enabled = response['data']['value']['value']; if (this.enabled) { this.scope_.$watch('flowName', this.onFlowChange_.bind(this)); this.scope_.$watch('flowArgs', this.onFlowChange_.bind(this)); } }); } /** * Callback called every time either flowName or flowArgs binding changes. * * @private */ onFlowChange_() { if (angular.isUndefined(this.scope_['flowName']) || angular.isUndefined(this.scope_['flowArgs'])) { return; } this.scope_['isEligible'] = this.isEligible = isEligible(this.scope_['flowName'], this.scope_['flowArgs']); } }; /** * Directive for displaying rapid hunting eligibility status note. * * @return {!angular.Directive} Directive definition object. * @ngInject * @export */ exports.RapidHuntStatusDirective = function() { return { scope: { /** * In-binding with the name of the hunt flow. */ flowName: '<', /** * In-binding with hunt flow arguments object. */ flowArgs: '<', /** * In-binding with the current hunt client rate. * Current hunt client rate setting influences presentation (see the * template for d */ clientRate: '<', /** * Out binding that is set to true if flowName/flowArgs make the hunt * eligible for rapid hunting. */ isEligible: '=?' }, restrict: 'E', templateUrl: '/static/angular-components/hunt/rapid-hunt-status.html', controller: RapidHuntStatusController, controllerAs: 'controller' }; }; /** * Directive's name in Angular. * * @const * @export */ exports.RapidHuntStatusDirective.directive_name = 'grrRapidHuntStatus';
PypiClean
/backports.os-0.1.1.tar.gz/backports.os-0.1.1/README.rst
============
backports.os
============

This package provides backports of new features in Python's os_ module
under the backports_ namespace.

.. _os: https://docs.python.org/3.5/library/os.html
.. _backports: https://pypi.python.org/pypi/backports

.. image:: https://img.shields.io/pypi/v/backports.os.svg
    :target: https://pypi.python.org/pypi/backports.os

.. image:: https://img.shields.io/badge/source-GitHub-lightgrey.svg
    :target: https://github.com/pjdelport/backports.os

.. image:: https://img.shields.io/github/issues/pjdelport/backports.os.svg
    :target: https://github.com/pjdelport/backports.os/issues?q=is:open

.. image:: https://travis-ci.org/pjdelport/backports.os.svg?branch=master
    :target: https://travis-ci.org/pjdelport/backports.os

.. image:: https://codecov.io/github/pjdelport/backports.os/coverage.svg?branch=master
    :target: https://codecov.io/github/pjdelport/backports.os?branch=master


Supported Python versions
=========================

* CPython: 2.7, 3.4, 3.5, 3.6
* PyPy


Backported functionality
========================

* `os.fsencode`_ (new in Python 3.2)
* `os.fsdecode`_ (new in Python 3.2)

.. _`os.fsencode`: https://docs.python.org/3.5/library/os.html#os.fsencode
.. _`os.fsdecode`: https://docs.python.org/3.5/library/os.html#os.fsdecode


Contributing
============

See `<HACKING.rst>`__.
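A minimal usage sketch of the two backported functions (added here as an illustration, not part of the upstream README):

    # On Python 2.7, where os.fsencode/os.fsdecode do not exist natively.
    from backports.os import fsencode, fsdecode

    raw = fsencode(u"data.txt")   # bytes, encoded with the filesystem encoding
    name = fsdecode(raw)          # back to text
    print(raw, name)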
PypiClean
/parts/Top/Hats/Turban.py
def Turban(color): return ( '<mask id="mask0_0_775" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="74" y="98" width="118" height="99">' ' <path fill-rule="evenodd" clip-rule="evenodd"' ' d="M133.498 165.841C122.124 166.219 117.05 171.721 113.229 166.13C110.361 161.932 111.561 154.874 114.241 150.903C118.054 145.251 123.227 147.985 129.01 147.34C130.583 147.165 132.163 146.723 133.498 146C134.834 146.723 136.414 147.165 137.986 147.34C143.77 147.985 148.943 145.251 152.756 150.903C155.435 154.874 156.635 161.932 153.767 166.13C149.946 171.721 144.873 165.463 133.498 165.841ZM188.72 98C185.336 112.075 183.781 126.434 181.328 140.671C180.816 143.639 180.257 146.596 179.662 149.55C179.538 150.17 179.415 152.473 178.811 152.764C176.982 153.648 173.254 148.947 172.257 147.885C169.754 145.219 167.272 142.529 164.223 140.437C158.063 136.21 150.85 133.711 143.345 133.118C140.205 132.869 135.959 133.303 133 135.11C130.041 133.303 125.795 132.869 122.654 133.118C115.149 133.711 107.937 136.21 101.777 140.437C98.7278 142.529 96.2462 145.219 93.7425 147.885C92.7457 148.947 89.0182 153.648 87.1891 152.764C86.5853 152.473 86.4623 150.17 86.3375 149.55C85.7432 146.596 85.1835 143.639 84.6722 140.671C82.219 126.434 80.6643 112.075 77.2805 98C76.2959 98 75.4321 116.748 75.3223 118.495C74.8751 125.589 74.353 132.525 75.0202 139.626C76.1705 151.875 77.3696 167.234 86.5918 176.588C94.9247 185.039 107.023 186.806 117.459 192.141C118.802 192.828 120.584 193.676 122.506 194.371C124.531 195.934 128.546 197 133.172 197C138.024 197 142.205 195.827 144.12 194.138C145.801 193.493 147.345 192.753 148.541 192.141C158.976 186.805 171.075 185.039 179.408 176.588C188.63 167.234 189.829 151.875 190.98 139.626C191.647 132.525 191.125 125.589 190.678 118.495C190.568 116.748 189.704 98 188.72 98Z"' ' fill="white" />' '</mask>' '<path fill-rule="evenodd" clip-rule="evenodd"' ' d="M190.47 97.5C191.471 95.0906 192 92.5798 192 90C192 71.7746 165.585 57 133 57C100.415 57 74 71.7746 74 90C74 92.5798 74.5293 95.0906 75.5304 97.5C81.6019 82.8879 105.028 72 133 72C160.972 72 184.398 82.8879 190.47 97.5Z"' ' fill="#EDECE3" />' '<path fill-rule="evenodd" clip-rule="evenodd"' ' d="M49.0002 94.3235C48.9335 133.499 78.0002 141 78.0002 141C72.5578 91.4478 101.536 75.8486 124.529 63.4715C127.469 61.8887 130.312 60.3587 132.971 58.8171C135.641 60.3664 138.497 61.904 141.452 63.4952C164.429 75.8686 193.418 91.4794 188 141C188 141 217.066 132.54 217 94.3235C216.918 47.1483 164.851 3 135 3C134.326 3 133.656 3.02963 132.992 3.08807C132.333 3.02963 131.668 3 131 3C101.074 3 49.0804 47.1483 49.0002 94.3235Z"' ' fill="#124C74" />' '<mask id="mask1_0_775" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="49" y="3" width="168" height="138">' ' <path fill-rule="evenodd" clip-rule="evenodd"' ' d="M49.0002 94.3235C48.9335 133.499 78.0002 141 78.0002 141C72.5578 91.4478 101.536 75.8486 124.529 63.4715C127.469 61.8887 130.312 60.3587 132.971 58.8171C135.641 60.3664 138.497 61.904 141.452 63.4952C164.429 75.8686 193.418 91.4794 188 141C188 141 217.066 132.54 217 94.3235C216.918 47.1483 164.851 3 135 3C134.326 3 133.656 3.02963 132.992 3.08807C132.333 3.02963 131.668 3 131 3C101.074 3 49.0804 47.1483 49.0002 94.3235Z"' ' fill="white" />' '</mask>' '<g mask="url(#mask1_0_775)">' ' <rect x="1" width="264" height="280" fill="{color}" />' '</g>' '<path fill-rule="evenodd" clip-rule="evenodd"' ' d="M49.0134 95.8992C49.7161 133.701 78.0002 141 78.0002 141C78.0002 141 48.9335 133.934 49.0002 97.0294C49.0008 96.6525 49.0052 96.2757 49.0134 95.8992ZM77.3339 
129.68C77.4832 91.8227 103.508 78.6258 124.529 67.9659C135.534 62.3853 145.168 57.5 149 50.1358C153.126 42.8892 154.39 36.1953 153.646 30.4681C153.141 34.8352 151.67 39.5668 149 44.5441C145.168 52.3615 135.534 57.5475 124.529 63.4715C103.387 74.8525 77.1834 88.9578 77.3339 129.68Z"' ' fill="black" fill-opacity="0.16" />' ).format(color=color)
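A small usage sketch follows. The import path mirrors the file location above but the top-level package name is an assumption, and the color and viewBox are arbitrary examples (the viewBox is chosen to match the 264x280 rect used in the fragment).

# Wrap the SVG fragment returned by Turban() in a minimal standalone SVG document.
from parts.Top.Hats.Turban import Turban  # assumed import path

fragment = Turban("#5199E4")  # arbitrary example color
svg = '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 264 280">{}</svg>'.format(fragment)

with open("turban.svg", "w") as fh:
    fh.write(svg)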
PypiClean
/power-ml-1.0.0a0.tar.gz/power-ml-1.0.0a0/src/power_ml/data/s3_pickle_store.py
import logging import os from typing import Any import boto3 from power_ml.data.local_pickle_store import LocalPickleStore from power_ml.data.store import BaseStore s3 = boto3.resource('s3') def exist_key(bucket: str, key: str) -> bool: """Exist key or not. Args: bucket (str): S3 bucket name. key (str): Object key. Returns: bool: Exist or not. """ try: s3.Object(bucket, key).get() except s3.meta.client.exceptions.NoSuchKey: return False return True class S3PickleStore(BaseStore): """S3 store.""" def __init__( self, bucket_name: str, tmp_path: str, cache: bool = True, region: str = None, exist: str = 'skip', logger: logging.Logger = None, ) -> None: """Initialize object.""" if exist in ['skip', 'error']: self.exist = exist else: raise ValueError('Invalid exist parameter.') self.region = region self.bucket_name = bucket_name self.cache = cache kwargs = {} if self.region: kwargs['region_name'] = self.region super().__init__(logger=logger) self.pickle_store = LocalPickleStore(tmp_path, logger=self.logger) def init(self, **kwargs) -> None: """Init store.""" super().init(**kwargs) def _save_data(self, data: Any, name: str, exist_ok: bool = True) -> None: """Save data. Args: data (Any): Data. name (str): Data name. exist_ok (bool, optional): Exist ok or not. """ if self.exist == 'skip' and exist_key(self.bucket_name, name): return if self.exist == 'error' or exist_ok: if exist_key(self.bucket_name, name): raise FileExistsError('Key already exist. {}'.format(name)) s3.Object(self.bucket_name, name).upload_file(data) def _load_data(self, name: str) -> Any: """Load data. Args: name (str): Name. Returns: Any: Data. """ path = self.pickle_store._get_pickle_path(name) if self.cache and os.path.exists(path): data = self.pickle_store._load_data(name) else: s3.Object(self.bucket_name, name).download_file(path) data = self.pickle_store._load_data(name) if not self.cache: self.pickle_store._remove_data(name) return data def _remove_data(self, name: str) -> None: """Remove data. Args: name (str): Name. """ s3.Object(self.bucket_name, name).delete() def save(self, obj: Any, prefix: str = None, meta: dict = None, exist_ok: bool = True) -> str: """Save object. Args: obj (Any): Object. prefix (str, optional): Prefix of name. meta (dict, optional): Additional meta. exist_ok (bool, optional): Exist ok or not. Returns: str: Name. """ name = self.pickle_store.save(obj, prefix=prefix, meta=meta, exist_ok=True) path = self.pickle_store._get_pickle_path(name) meta_name = self.pickle_store._get_meta_name(name) meta_path = self.pickle_store._get_pickle_path(meta_name) self._save_data(path, name, exist_ok=exist_ok) self._save_data(meta_path, meta_name, exist_ok=exist_ok) self.pickle_store._remove_data(name) self.pickle_store._remove_data(meta_name) return name def load_with_meta( # type: ignore self, name: str, expected_type: type = None, validate: bool = True) -> tuple[Any, dict]: """Load object with meta. Args: name (str): Name. expected_type (type, optional): Expected type. validate (bool, optional): Validate with meta flag. Returns: Any: Object. dict: Meta. """ obj = self._load_data(name) if expected_type: assert isinstance(obj, expected_type) meta = self.load_meta(name) if validate: assert isinstance(obj, meta['type']) assert meta['str'] == str(obj)[:100] self.logger.debug('Loaded: "{}"'.format(name)) return obj, meta
PypiClean
/byc-pyflow-1.0.0b0.tar.gz/byc-pyflow-1.0.0b0/pyflow/blocks/widgets/blocktitle.py
import time from typing import OrderedDict from PyQt5.QtCore import Qt from PyQt5.QtGui import QFocusEvent, QFont, QMouseEvent from PyQt5.QtWidgets import QLineEdit, QWidget from pyflow.core.serializable import Serializable class Title(QLineEdit, Serializable): """The title of an Block. Needs to be double clicked to interact.""" def __init__( self, text: str, color: str = "white", font: str = "Ubuntu", size: int = 12, parent: QWidget = None, ): """Create a new title for an Block.""" Serializable.__init__(self) QLineEdit.__init__(self, text, parent) self.clickTime = None self.init_ui(color, font, size) self.setReadOnly(True) self.setCursorPosition(0) def init_ui(self, color: str, font: str, size: int): """Apply the style given to the title.""" self.color = color self.setStyleSheet( f""" QLineEdit {{ color : {self.color}; background-color: transparent; border:none; }}""" ) self.setFont(QFont(font, size)) def mousePressEvent(self, event: QMouseEvent): """ Detect double clicks and single clicks are react accordingly by dispatching the event to the parent or the current widget """ if self.clickTime is None or ( self.isReadOnly() and time.time() - self.clickTime > 0.3 ): self.parent().mousePressEvent(event) elif self.isReadOnly(): self.mouseDoubleClickEvent(event) super().mousePressEvent(event) else: super().mousePressEvent(event) self.clickTime = time.time() def focusOutEvent(self, event: QFocusEvent): """The title is read-only when focused is lost.""" self.setReadOnly(True) self.setCursorPosition(0) self.deselect() def mouseDoubleClickEvent(self, event: QMouseEvent): """Toggle readonly mode when double clicking.""" self.setReadOnly(not self.isReadOnly()) if not self.isReadOnly(): self.setFocus(Qt.MouseFocusReason) def serialize(self) -> OrderedDict: """Return a serialized version of this widget.""" return OrderedDict( [ ("color", self.color), ("font", self.font().family()), ("size", self.font().pointSize()), ] ) def deserialize(self, data: OrderedDict, hashmap: dict = None, restore_id=True): """Restore a title from serialized data.""" if restore_id: self.id = data.get("id", id(self)) self.init_ui(data["color"], data["font"], data["size"])
PypiClean
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/DataTables/extensions/ColVis/js/dataTables.colVis.min.js
(function(j,i,k){j=function(d){var e=function(a,b){(!this.CLASS||"ColVis"!=this.CLASS)&&alert("Warning: ColVis must be initialised with the keyword 'new'");"undefined"==typeof b&&(b={});d.fn.dataTable.camelToHungarian&&d.fn.dataTable.camelToHungarian(e.defaults,b);this.s={dt:null,oInit:b,hidden:!0,abOriginal:[]};this.dom={wrapper:null,button:null,collection:null,background:null,catcher:null,buttons:[],groupButtons:[],restore:null};e.aInstances.push(this);this.s.dt=d.fn.dataTable.Api?(new d.fn.dataTable.Api(a)).settings()[0]: a;this._fnConstruct(b);return this};e.prototype={button:function(){return this.dom.wrapper},fnRebuild:function(){this.rebuild()},rebuild:function(){for(var a=this.dom.buttons.length-1;0<=a;a--)this.dom.collection.removeChild(this.dom.buttons[a]);this.dom.buttons.splice(0,this.dom.buttons.length);this.dom.restore&&this.dom.restore.parentNode(this.dom.restore);this._fnAddGroups();this._fnAddButtons();this._fnDrawCallback()},_fnConstruct:function(a){this._fnApplyCustomisation(a);var b=this,c,f;this.dom.wrapper= i.createElement("div");this.dom.wrapper.className="ColVis";this.dom.button=d("<button />",{"class":!this.s.dt.bJUI?"ColVis_Button ColVis_MasterButton":"ColVis_Button ColVis_MasterButton ui-button ui-state-default"}).append("<span>"+this.s.buttonText+"</span>").bind("mouseover"==this.s.activate?"mouseover":"click",function(a){a.preventDefault();b._fnCollectionShow()}).appendTo(this.dom.wrapper)[0];this.dom.catcher=this._fnDomCatcher();this.dom.collection=this._fnDomCollection();this.dom.background= this._fnDomBackground();this._fnAddGroups();this._fnAddButtons();c=0;for(f=this.s.dt.aoColumns.length;c<f;c++)this.s.abOriginal.push(this.s.dt.aoColumns[c].bVisible);this.s.dt.aoDrawCallback.push({fn:function(){b._fnDrawCallback.call(b)},sName:"ColVis"});d(this.s.dt.oInstance).bind("column-reorder",function(a,d,e){c=0;for(f=b.s.aiExclude.length;c<f;c++)b.s.aiExclude[c]=e.aiInvertMapping[b.s.aiExclude[c]];a=b.s.abOriginal.splice(e.iFrom,1)[0];b.s.abOriginal.splice(e.iTo,0,a);b.fnRebuild()});this._fnDrawCallback()}, _fnApplyCustomisation:function(a){d.extend(!0,this.s,e.defaults,a);!this.s.showAll&&this.s.bShowAll&&(this.s.showAll=this.s.sShowAll);!this.s.restore&&this.s.bRestore&&(this.s.restore=this.s.sRestore);var a=this.s.groups,b=this.s.aoGroups;if(a)for(var c=0,f=a.length;c<f;c++)if(a[c].title&&(b[c].sTitle=a[c].title),a[c].columns)b[c].aiColumns=a[c].columns},_fnDrawCallback:function(){for(var a=this.s.dt.aoColumns,b=this.dom.buttons,c=this.s.aoGroups,f,g=0,h=b.length;g<h;g++)f=b[g],f.__columnIdx!==k&& d("input",f).prop("checked",a[f.__columnIdx].bVisible);b=0;for(f=c.length;b<f;b++){a:{for(var g=c[b].aiColumns,h=0,e=g.length;h<e;h++)if(!1===a[g[h]].bVisible){g=!1;break a}g=!0}if(g)d("input",this.dom.groupButtons[b]).prop("checked",!0),d("input",this.dom.groupButtons[b]).prop("indeterminate",!1);else{a:{g=c[b].aiColumns;h=0;for(e=g.length;h<e;h++)if(!0===a[g[h]].bVisible){g=!1;break a}g=!0}g?(d("input",this.dom.groupButtons[b]).prop("checked",!1),d("input",this.dom.groupButtons[b]).prop("indeterminate", !1)):d("input",this.dom.groupButtons[b]).prop("indeterminate",!0)}}},_fnAddGroups:function(){var a;if("undefined"!=typeof this.s.aoGroups)for(var b=0,c=this.s.aoGroups.length;b<c;b++)a=this._fnDomGroupButton(b),this.dom.groupButtons.push(a),this.dom.buttons.push(a),this.dom.collection.appendChild(a)},_fnAddButtons:function(){var a,b=this.s.dt.aoColumns;if(-1===d.inArray("all",this.s.aiExclude))for(var 
c=0,f=b.length;c<f;c++)-1===d.inArray(c,this.s.aiExclude)&&(a=this._fnDomColumnButton(c),a.__columnIdx= c,this.dom.buttons.push(a));"alpha"===this.s.order&&this.dom.buttons.sort(function(a,c){var d=b[a.__columnIdx].sTitle,f=b[c.__columnIdx].sTitle;return d===f?0:d<f?-1:1});this.s.restore&&(a=this._fnDomRestoreButton(),a.className+=" ColVis_Restore",this.dom.buttons.push(a));this.s.showAll&&(a=this._fnDomShowXButton(this.s.showAll,!0),a.className+=" ColVis_ShowAll",this.dom.buttons.push(a));this.s.showNone&&(a=this._fnDomShowXButton(this.s.showNone,!1),a.className+=" ColVis_ShowNone",this.dom.buttons.push(a)); d(this.dom.collection).append(this.dom.buttons)},_fnDomRestoreButton:function(){var a=this;return d('<li class="ColVis_Special '+(this.s.dt.bJUI?"ui-button ui-state-default":"")+'">'+this.s.restore+"</li>").click(function(){for(var b=0,c=a.s.abOriginal.length;b<c;b++)a.s.dt.oInstance.fnSetColumnVis(b,a.s.abOriginal[b],!1);a._fnAdjustOpenRows();a.s.dt.oInstance.fnAdjustColumnSizing(!1);a.s.dt.oInstance.fnDraw(!1)})[0]},_fnDomShowXButton:function(a,b){var c=this;return d('<li class="ColVis_Special '+ (this.s.dt.bJUI?"ui-button ui-state-default":"")+'">'+a+"</li>").click(function(){for(var a=0,d=c.s.abOriginal.length;a<d;a++)-1===c.s.aiExclude.indexOf(a)&&c.s.dt.oInstance.fnSetColumnVis(a,b,!1);c._fnAdjustOpenRows();c.s.dt.oInstance.fnAdjustColumnSizing(!1);c.s.dt.oInstance.fnDraw(!1)})[0]},_fnDomGroupButton:function(a){var b=this,c=this.s.aoGroups[a];return d('<li class="ColVis_Special '+(this.s.dt.bJUI?"ui-button ui-state-default":"")+'"><label><input type="checkbox" /><span>'+c.sTitle+"</span></label></li>").click(function(a){var g= !d("input",this).is(":checked");"li"!==a.target.nodeName.toLowerCase()&&(g=!g);for(a=0;a<c.aiColumns.length;a++)b.s.dt.oInstance.fnSetColumnVis(c.aiColumns[a],g)})[0]},_fnDomColumnButton:function(a){var b=this,c=this.s.dt.aoColumns[a],f=this.s.dt,c=null===this.s.fnLabel?c.sTitle:this.s.fnLabel(a,c.sTitle,c.nTh);return d("<li "+(f.bJUI?'class="ui-button ui-state-default"':"")+'><label><input type="checkbox" /><span>'+c+"</span></label></li>").click(function(c){var e=!d("input",this).is(":checked"); "li"!==c.target.nodeName.toLowerCase()&&(e=!e);var i=d.fn.dataTableExt.iApiIndex;d.fn.dataTableExt.iApiIndex=b._fnDataTablesApiIndex.call(b);f.oFeatures.bServerSide?(b.s.dt.oInstance.fnSetColumnVis(a,e,!1),b.s.dt.oInstance.fnAdjustColumnSizing(!1),(""!==f.oScroll.sX||""!==f.oScroll.sY)&&b.s.dt.oInstance.oApi._fnScrollDraw(b.s.dt),b._fnDrawCallback()):b.s.dt.oInstance.fnSetColumnVis(a,e);d.fn.dataTableExt.iApiIndex=i;"input"===c.target.nodeName.toLowerCase()&&null!==b.s.fnStateChange&&b.s.fnStateChange.call(b, a,e)})[0]},_fnDataTablesApiIndex:function(){for(var a=0,b=this.s.dt.oInstance.length;a<b;a++)if(this.s.dt.oInstance[a]==this.s.dt.nTable)return a;return 0},_fnDomCollection:function(){return d("<ul />",{"class":!this.s.dt.bJUI?"ColVis_collection":"ColVis_collection ui-buttonset ui-buttonset-multi"}).css({display:"none",opacity:0,position:!this.s.bCssPosition?"absolute":""})[0]},_fnDomCatcher:function(){var a=this,b=i.createElement("div");b.className="ColVis_catcher";d(b).click(function(){a._fnCollectionHide.call(a, null,null)});return b},_fnDomBackground:function(){var a=this,b=d("<div></div>").addClass("ColVis_collectionBackground").css("opacity",0).click(function(){a._fnCollectionHide.call(a,null,null)});"mouseover"==this.s.activate&&b.mouseover(function(){a.s.overcollection=!1;a._fnCollectionHide.call(a,null,null)});return 
b[0]},_fnCollectionShow:function(){var a=this,b;b=d(this.dom.button).offset();var c=this.dom.collection,f=this.dom.background,e=parseInt(b.left,10),h=parseInt(b.top+d(this.dom.button).outerHeight(), 10);this.s.bCssPosition||(c.style.top=h+"px",c.style.left=e+"px");d(c).css({display:"block",opacity:0});f.style.bottom="0px";f.style.right="0px";h=this.dom.catcher.style;h.height=d(this.dom.button).outerHeight()+"px";h.width=d(this.dom.button).outerWidth()+"px";h.top=b.top+"px";h.left=e+"px";i.body.appendChild(f);i.body.appendChild(c);i.body.appendChild(this.dom.catcher);d(c).animate({opacity:1},a.s.iOverlayFade);d(f).animate({opacity:0.1},a.s.iOverlayFade,"linear",function(){d.browser&&(d.browser.msie&& d.browser.version=="6.0")&&a._fnDrawCallback()});this.s.bCssPosition||(b="left"==this.s.sAlign?e:e-d(c).outerWidth()+d(this.dom.button).outerWidth(),c.style.left=b+"px",f=d(c).outerWidth(),d(c).outerHeight(),e=d(i).width(),b+f>e&&(c.style.left=e-f+"px"));this.s.hidden=!1},_fnCollectionHide:function(){var a=this;!this.s.hidden&&null!==this.dom.collection&&(this.s.hidden=!0,d(this.dom.collection).animate({opacity:0},a.s.iOverlayFade,function(){this.style.display="none"}),d(this.dom.background).animate({opacity:0}, a.s.iOverlayFade,function(){i.body.removeChild(a.dom.background);i.body.removeChild(a.dom.catcher)}))},_fnAdjustOpenRows:function(){for(var a=this.s.dt.aoOpenRows,b=this.s.dt.oApi._fnVisbleColumns(this.s.dt),c=0,d=a.length;c<d;c++)a[c].nTr.getElementsByTagName("td")[0].colSpan=b}};e.fnRebuild=function(a){var b=null;"undefined"!=typeof a&&(b=a.fnSettings().nTable);for(var c=0,d=e.aInstances.length;c<d;c++)("undefined"==typeof a||b==e.aInstances[c].s.dt.nTable)&&e.aInstances[c].fnRebuild()};e.defaults= {active:"click",buttonText:"Show / hide columns",aiExclude:[],bRestore:!1,sRestore:"Restore original",bShowAll:!1,sShowAll:"Show All",sAlign:"left",fnStateChange:null,iOverlayFade:500,fnLabel:null,bCssPosition:!1,aoGroups:[],order:"column"};e.aInstances=[];e.prototype.CLASS="ColVis";e.VERSION="1.1.1";e.prototype.VERSION=e.VERSION;"function"==typeof d.fn.dataTable&&"function"==typeof d.fn.dataTableExt.fnVersionCheck&&d.fn.dataTableExt.fnVersionCheck("1.7.0")?d.fn.dataTableExt.aoFeatures.push({fnInit:function(a){var b= a.oInit;return(new e(a,b.colVis||b.oColVis||{})).button()},cFeature:"C",sFeature:"ColVis"}):alert("Warning: ColVis requires DataTables 1.7 or greater - www.datatables.net/download");d.fn.dataTable.ColVis=e;return d.fn.DataTable.ColVis=e};"function"===typeof define&&define.amd?define(["jquery","datatables"],j):"object"===typeof exports?j(require("jquery"),require("datatables")):jQuery&&!jQuery.fn.dataTable.ColVis&&j(jQuery,jQuery.fn.dataTable)})(window,document);
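/*
 * Usage sketch (illustrative, not part of the minified source): ColVis is the
 * legacy DataTables column-visibility extension and is typically enabled by
 * adding "C" to the DOM positioning string. The table selector and option
 * values below are placeholder assumptions; the option names come from the
 * defaults defined above (aiExclude, buttonText, ...).
 *
 *   $(document).ready(function () {
 *     $('#example').dataTable({
 *       "dom": 'C<"clear">lfrtip',
 *       "colVis": { "buttonText": "Columns", "aiExclude": [0] }
 *     });
 *   });
 */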
PypiClean
/intel_tensorflow-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/tensorflow/python/ops/ref_variable.py
"""RefVariable class.""" from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import variable_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_conversion_registry from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_v1 from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.trackable import base as trackable from tensorflow.python.types import core from tensorflow.python.util import compat from tensorflow.python.util import lazy_loader from tensorflow.python.util.deprecation import deprecated variable_scope = lazy_loader.LazyLoader( "variable_scope", globals(), "tensorflow.python.ops.variable_scope") def default_variable_creator(next_creator=None, **kwargs): """Default variable creator.""" assert next_creator is None initial_value = kwargs.get("initial_value", None) trainable = kwargs.get("trainable", None) collections = kwargs.get("collections", None) validate_shape = kwargs.get("validate_shape", True) caching_device = kwargs.get("caching_device", None) name = kwargs.get("name", None) variable_def = kwargs.get("variable_def", None) dtype = kwargs.get("dtype", None) expected_shape = kwargs.get("expected_shape", None) import_scope = kwargs.get("import_scope", None) constraint = kwargs.get("constraint", None) use_resource = kwargs.get("use_resource", None) synchronization = kwargs.get("synchronization", None) aggregation = kwargs.get("aggregation", None) shape = kwargs.get("shape", None) if use_resource is None: use_resource = variable_scope.get_variable_scope().use_resource if use_resource is None: use_resource = variable_scope._DEFAULT_USE_RESOURCE # pylint: disable=protected-access use_resource = use_resource or context.executing_eagerly() if use_resource: distribute_strategy = kwargs.get("distribute_strategy", None) return resource_variable_ops.ResourceVariable( initial_value=initial_value, trainable=trainable, collections=collections, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint, variable_def=variable_def, import_scope=import_scope, distribute_strategy=distribute_strategy, synchronization=synchronization, aggregation=aggregation, shape=shape) else: return RefVariable( initial_value=initial_value, trainable=trainable, collections=collections, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint, variable_def=variable_def, expected_shape=expected_shape, import_scope=import_scope, synchronization=synchronization, aggregation=aggregation, shape=shape) variable_v1.default_variable_creator = default_variable_creator def _to_proto_fn(v, export_scope=None): """Converts Variable and ResourceVariable to VariableDef for collections.""" return v.to_proto(export_scope=export_scope) def _from_proto_fn(v, import_scope=None): """Creates Variable or ResourceVariable from VariableDef as needed.""" if v.is_resource: return resource_variable_ops.ResourceVariable.from_proto( v, import_scope=import_scope) return variable_v1.VariableV1.from_proto(v, 
import_scope=import_scope) ops.register_proto_function( ops.GraphKeys.GLOBAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.TRAINABLE_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.MOVING_AVERAGE_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.LOCAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.MODEL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.GLOBAL_STEP, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) ops.register_proto_function( ops.GraphKeys.METRIC_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=_to_proto_fn, from_proto=_from_proto_fn) # TODO(apassos): do not repeat all comments here class RefVariable(variable_v1.VariableV1, core.Tensor): """Ref-based implementation of variables.""" def __init__( self, # pylint: disable=super-init-not-called initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None, constraint=None, synchronization=None, aggregation=None, shape=None): """Creates a new variable with value `initial_value`. The new variable is added to the graph collections listed in `collections`, which defaults to `[GraphKeys.GLOBAL_VARIABLES]`. If `trainable` is `True` the variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This constructor creates both a `variable` Op and an `assign` Op to set the variable to its initial value. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. variable_def: `VariableDef` protocol buffer. 
If not `None`, recreates the Variable object with its contents, referencing the variable's nodes in the graph, which must already exist. The graph is not changed. `variable_def` and the other arguments are mutually exclusive. dtype: If set, initial_value will be converted to the given type. If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. expected_shape: A TensorShape. If set, initial_value is expected to have this shape. import_scope: Optional `string`. Name scope to add to the `Variable.` Only used when initializing from protocol buffer. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. shape: (optional) The shape of this variable. If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If both `variable_def` and initial_value are specified. ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. RuntimeError: If eager execution is enabled. """ self._in_graph_mode = True if variable_def: # If variable_def is provided, recreates the variable from its fields. if initial_value: raise ValueError("variable_def and initial_value are mutually " "exclusive.") self._init_from_proto(variable_def, import_scope=import_scope) else: # Create from initial_value. self._init_from_args( initial_value=initial_value, trainable=trainable, collections=collections, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, expected_shape=expected_shape, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape) def __repr__(self): if context.executing_eagerly() and not self._in_graph_mode: return "<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>" % ( self.name, self.get_shape(), self.dtype.name, ops.numpy_text(self.read_value(), is_repr=True)) else: return "<tf.Variable '%s' shape=%s dtype=%s>" % ( self.name, self.get_shape(), self.dtype.name) def _init_from_args(self, initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, dtype=None, expected_shape=None, constraint=None, synchronization=None, aggregation=None, shape=None): """Creates a new variable from arguments. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. 
(Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. If None, either the datatype will be kept (if initial_value is a Tensor) or float32 will be used (if it is a Python object convertible to a Tensor). expected_shape: Deprecated. Ignored. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. shape: (optional) The shape of this variable. If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. RuntimeError: If lifted into the eager context. """ _ = expected_shape if initial_value is None: raise ValueError("initial_value must be specified.") init_from_fn = callable(initial_value) if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] if not isinstance(collections, (list, tuple, set)): raise ValueError( "collections argument to Variable constructor must be a list, tuple, " "or set. Got %s of type %s" % (collections, type(collections))) if constraint is not None and not callable(constraint): raise ValueError("The `constraint` argument must be a callable.") # Store the graph key so optimizers know how to only retrieve variables from # this graph. 
self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access if isinstance(initial_value, trackable.CheckpointInitialValue): self._maybe_initialize_trackable() self._update_uid = initial_value.checkpoint_position.restore_uid initial_value = initial_value.wrapped_value synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( synchronization, aggregation, trainable, name)) self._synchronization = synchronization self._aggregation = aggregation self._trainable = trainable if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections: collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES] with ops.init_scope(): # Ensure that we weren't lifted into the eager context. if context.executing_eagerly(): raise RuntimeError( "Reference variables are not supported when eager execution is " "enabled. Please run `tf.compat.v1.enable_resource_variables()` to " "switch to resource variables.") with ops.name_scope(name, "Variable", [] if init_from_fn else [initial_value]) as name: if init_from_fn: # Use attr_scope and device(None) to simulate the behavior of # colocate_with when the variable we want to colocate with doesn't # yet exist. true_name = ops.name_from_scope_name(name) # pylint: disable=protected-access attr = attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue( s=[compat.as_bytes("loc:@%s" % true_name)])) # pylint: disable=protected-access with ops.get_default_graph()._attr_scope({"_class": attr}): with ops.name_scope("Initializer"), ops.device(None): initial_value = initial_value() if isinstance(initial_value, trackable.CheckpointInitialValue): self._maybe_initialize_trackable() self._update_uid = initial_value.checkpoint_position.restore_uid initial_value = initial_value.wrapped_value self._initial_value = ops.convert_to_tensor( initial_value, name="initial_value", dtype=dtype) if shape is None: shape = ( self._initial_value.get_shape() if validate_shape else tensor_shape.unknown_shape()) self._variable = state_ops.variable_op_v2( shape, self._initial_value.dtype.base_dtype, name=name) # pylint: enable=protected-access # Or get the initial value from a Tensor or Python object. else: self._initial_value = ops.convert_to_tensor( initial_value, name="initial_value", dtype=dtype) # pylint: disable=protected-access if self._initial_value.op._get_control_flow_context() is not None: raise ValueError( "Initializer for variable %s is from inside a control-flow " "construct, such as a loop or conditional. When creating a " "variable inside a loop or conditional, use a lambda as the " "initializer." % name) if shape is None: # pylint: enable=protected-access shape = ( self._initial_value.get_shape() if validate_shape else tensor_shape.unknown_shape()) # In this case, the variable op can't be created until after the # initial_value has been converted to a Tensor with a known type. self._variable = state_ops.variable_op_v2( shape, self._initial_value.dtype.base_dtype, name=name) # Cache the name in `self`, because some APIs call `Variable.name` in a # tight loop, and this halves the cost. self._name = self._variable.name # Manually overrides the variable's shape with the initial value's. 
if validate_shape: initial_value_shape = self._initial_value.get_shape() if not initial_value_shape.is_fully_defined(): raise ValueError("initial_value must have a shape specified: %s" % self._initial_value) # If 'initial_value' makes use of other variables, make sure we don't # have an issue if these other variables aren't initialized first by # using their initialized_value() method. self._initializer_op = state_ops.assign( self._variable, variables._try_guard_against_uninitialized_dependencies( # pylint: disable=protected-access name, self._initial_value), validate_shape=validate_shape).op # TODO(vrv): Change this class to not take caching_device, but # to take the op to colocate the snapshot with, so we can use # colocation rather than devices. if caching_device is not None: with ops.device(caching_device): self._snapshot = array_ops.identity(self._variable, name="read") else: with ops.colocate_with(self._variable.op): self._snapshot = array_ops.identity(self._variable, name="read") ops.add_to_collections(collections, self) self._caching_device = caching_device self._save_slice_info = None self._constraint = constraint def _init_from_proto(self, variable_def, import_scope=None): """Recreates the Variable object from a `VariableDef` protocol buffer. Args: variable_def: `VariableDef` protocol buffer, describing a variable whose nodes already exists in the graph. import_scope: Optional `string`. Name scope to add. """ assert isinstance(variable_def, variable_pb2.VariableDef) # Create from variable_def. g = ops.get_default_graph() self._variable = g.as_graph_element( ops.prepend_name_scope( variable_def.variable_name, import_scope=import_scope)) self._name = self._variable.name self._initializer_op = g.as_graph_element( ops.prepend_name_scope( variable_def.initializer_name, import_scope=import_scope)) # Tests whether initial_value_name exists first for backwards compatibility. if (hasattr(variable_def, "initial_value_name") and variable_def.initial_value_name): self._initial_value = g.as_graph_element( ops.prepend_name_scope( variable_def.initial_value_name, import_scope=import_scope)) else: self._initial_value = None synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( variable_def.synchronization, variable_def.aggregation, variable_def.trainable, variable_def.variable_name)) self._synchronization = synchronization self._aggregation = aggregation self._trainable = trainable self._snapshot = g.as_graph_element( ops.prepend_name_scope( variable_def.snapshot_name, import_scope=import_scope)) if variable_def.HasField("save_slice_info_def"): self._save_slice_info = variables.Variable.SaveSliceInfo( save_slice_info_def=variable_def.save_slice_info_def, import_scope=import_scope) else: self._save_slice_info = None self._caching_device = None self._constraint = None def _as_graph_element(self): """Conversion function for Graph.as_graph_element().""" return self._variable def value(self): """Returns the last snapshot of this variable. You usually do not need to call this method as all ops that need the value of the variable call it automatically through a `convert_to_tensor()` call. Returns a `Tensor` which holds the value of the variable. You can not assign a new value to this tensor as it is not a reference to the variable. To avoid copies, if the consumer of the returned value is on the same device as the variable, this actually returns the live value of the variable, not a copy. Updates to the variable are seen by the consumer. 
If the consumer is on a different device it will get a copy of the variable. Returns: A `Tensor` containing the value of the variable. """ return self._snapshot def read_value(self): """Returns the value of this variable, read in the current context. Can be different from value() if it's on another device, with control dependencies, etc. Returns: A `Tensor` containing the value of the variable. """ return array_ops.identity(self._variable, name="read") def _ref(self): """Returns a reference to this variable. You usually do not need to call this method as all ops that need a reference to the variable call it automatically. Returns is a `Tensor` which holds a reference to the variable. You can assign a new value to the variable by passing the tensor to an assign op. See `tf.Variable.value` if you want to get the value of the variable. Returns: A `Tensor` that is a reference to the variable. """ return self._variable def set_shape(self, shape): """Overrides the shape for this variable. Args: shape: the `TensorShape` representing the overridden shape. """ self._ref().set_shape(shape) self.value().set_shape(shape) @property def trainable(self): return self._trainable @property def synchronization(self): return self._synchronization @property def aggregation(self): return self._aggregation def eval(self, session=None): """In a session, computes and returns the value of this variable. This is not a graph construction method, it does not add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. See `tf.compat.v1.Session` for more information on launching a graph and on sessions. ```python v = tf.Variable([1, 2]) init = tf.compat.v1.global_variables_initializer() with tf.compat.v1.Session() as sess: sess.run(init) # Usage passing the session explicitly. print(v.eval(sess)) # Usage with the default session. The 'with' block # above makes 'sess' the default session. print(v.eval()) ``` Args: session: The session to use to evaluate this variable. If none, the default session is used. Returns: A numpy `ndarray` with a copy of the value of this variable. """ return self._variable.eval(session=session) @property def initial_value(self): """Returns the Tensor used as the initial value for the variable. Note that this is different from `initialized_value()` which runs the op that initializes the variable before returning its value. This method returns the tensor that is used by the op that initializes the variable. Returns: A `Tensor`. """ return self._initial_value @property def constraint(self): """Returns the constraint function associated with this variable. Returns: The constraint function that was passed to the variable constructor. Can be `None` if no constraint was passed. """ return self._constraint def assign(self, value, use_locking=False, name=None, read_value=True): """Assigns a new value to the variable. This is essentially a shortcut for `assign(self, value)`. Args: value: A `Tensor`. The new value for this variable. use_locking: If `True`, use locking during the assignment. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the assignment has completed. 
""" assign = state_ops.assign( self._variable, value, use_locking=use_locking, name=name) if read_value: return assign return assign.op def assign_add(self, delta, use_locking=False, name=None, read_value=True): """Adds a value to this variable. This is essentially a shortcut for `assign_add(self, delta)`. Args: delta: A `Tensor`. The value to add to this variable. use_locking: If `True`, use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the addition has completed. """ assign = state_ops.assign_add( self._variable, delta, use_locking=use_locking, name=name) if read_value: return assign return assign.op def assign_sub(self, delta, use_locking=False, name=None, read_value=True): """Subtracts a value from this variable. This is essentially a shortcut for `assign_sub(self, delta)`. Args: delta: A `Tensor`. The value to subtract from this variable. use_locking: If `True`, use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the subtraction has completed. """ assign = state_ops.assign_sub( self._variable, delta, use_locking=use_locking, name=name) if read_value: return assign return assign.op def scatter_sub(self, sparse_delta, use_locking=False, name=None): """Subtracts `tf.IndexedSlices` from this variable. Args: sparse_delta: `tf.IndexedSlices` to be subtracted from this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_sub( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_add(self, sparse_delta, use_locking=False, name=None): """Adds `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be added to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered addition has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_add( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_max(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the max of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of max with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered maximization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. 
""" if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_max( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_min(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the min of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of min with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered minimization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_min( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_mul(self, sparse_delta, use_locking=False, name=None): """Multiply this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to multiply this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered multiplication has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_mul( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_div(self, sparse_delta, use_locking=False, name=None): """Divide this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to divide this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered division has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_div( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_update( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable batch-wise. Analogous to `batch_gather`. 
This assumes that this variable and the sparse_delta IndexedSlices have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: `num_prefix_dims = sparse_delta.indices.ndims - 1` `batch_dim = num_prefix_dims + 1` `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[ batch_dim:]` where `sparse_delta.updates.shape[:num_prefix_dims]` `== sparse_delta.indices.shape[:num_prefix_dims]` `== var.shape[:num_prefix_dims]` And the operation performed can be expressed as: `var[i_1, ..., i_n, sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[ i_1, ..., i_n, j]` When sparse_delta.indices is a 1D tensor, this operation is equivalent to `scatter_update`. To avoid this operation one can looping over the first `ndims` of the variable and using `scatter_update` on the subtensors that result of slicing the first dimension. This is a valid option for `ndims = 1`, but less efficient than this implementation. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ return state_ops.batch_scatter_update( self, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_nd_sub(self, indices, updates, name=None): """Applies sparse subtraction to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) op = ref.scatter_nd_sub(indices, updates) with tf.compat.v1.Session() as sess: print sess.run(op) ``` The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. """ return gen_state_ops.scatter_nd_sub( self._variable, indices, updates, use_locking=True, name=name) def scatter_nd_add(self, indices, updates, name=None): """Applies sparse addition to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. 
`updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) add = ref.scatter_nd_add(indices, updates) with tf.compat.v1.Session() as sess: print sess.run(add) ``` The resulting update to ref would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered addition has completed. """ return gen_state_ops.scatter_nd_add( self._variable, indices, updates, use_locking=True, name=name) def scatter_nd_update(self, indices, updates, name=None): """Applies sparse assignment to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) op = ref.scatter_nd_update(indices, updates) with tf.compat.v1.Session() as sess: print sess.run(op) ``` The resulting update to ref would look like this: [1, 11, 3, 10, 9, 6, 7, 12] See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered assignment has completed. """ return gen_state_ops.scatter_nd_update( self._variable, indices, updates, use_locking=True, name=name) def scatter_nd_max(self, indices, updates, name=None): """Updates this variable with the max of `tf.IndexedSlices` and itself. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered addition has completed. 
""" return gen_state_ops.scatter_nd_max( self._variable, indices, updates, use_locking=True, name=name) def scatter_nd_min(self, indices, updates, name=None): """Updates this variable with the min of `tf.IndexedSlices` and itself. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered addition has completed. """ return gen_state_ops.scatter_nd_min( self._variable, indices, updates, use_locking=True, name=name) def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask): return gen_array_ops.strided_slice_assign( ref=self._ref(), begin=begin, end=end, strides=strides, value=value, name=name, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) @deprecated(None, "Prefer Dataset.range instead.") def count_up_to(self, limit): """Increments this variable until it reaches `limit`. When that Op is run it tries to increment the variable by `1`. If incrementing the variable would bring it above `limit` then the Op raises the exception `OutOfRangeError`. If no error is raised, the Op outputs the value of the variable before the increment. This is essentially a shortcut for `count_up_to(self, limit)`. Args: limit: value at which incrementing the variable raises an error. Returns: A `Tensor` that will hold the variable value before the increment. If no other Op modifies this variable, the values produced will all be distinct. """ return state_ops.count_up_to(self._variable, limit=limit) # Conversion to tensor. @staticmethod def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name """Utility function for converting a Variable to a Tensor.""" _ = name if dtype and not dtype.is_compatible_with(v.dtype): raise ValueError( "Incompatible type conversion requested to type '%s' for variable " "of type '%s'" % (dtype.name, v.dtype.name)) if as_ref: return v._ref() # pylint: disable=protected-access else: return v.value() # NOTE(mrry): This enables the Variable's overloaded "right" binary # operators to run when the left operand is an ndarray, because it # accords the Variable class higher priority than an ndarray, or a # numpy matrix. # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ # mechanism, which allows more control over how Variables interact # with ndarrays. 
__array_priority__ = 100 @property def name(self): """The name of this variable.""" return self._name @property def initializer(self): """The initializer operation for this variable.""" return self._initializer_op @property def device(self): """The device of this variable.""" return self._variable.device @property def dtype(self): """The `DType` of this variable.""" return self._variable.dtype @property def op(self): """The `Operation` of this variable.""" return self._variable.op @property def graph(self): """The `Graph` of this variable.""" return self._variable.graph @property def _distribute_strategy(self): """The `tf.distribute.Strategy` that this variable was created under.""" return None # Ref variables are never created inside a strategy. @property def shape(self): """The `TensorShape` of this variable. Returns: A `TensorShape`. """ return self._variable.get_shape() def to_proto(self, export_scope=None): """Converts a `Variable` to a `VariableDef` protocol buffer. Args: export_scope: Optional `string`. Name scope to remove. Returns: A `VariableDef` protocol buffer, or `None` if the `Variable` is not in the specified name scope. """ if (export_scope is None or self._variable.name.startswith(export_scope)): var_def = variable_pb2.VariableDef() var_def.variable_name = ops.strip_name_scope(self._variable.name, export_scope) if self._initial_value is not None: # For backwards compatibility. var_def.initial_value_name = ops.strip_name_scope( self._initial_value.name, export_scope) var_def.trainable = self.trainable var_def.synchronization = self.synchronization.value var_def.aggregation = self.aggregation.value var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope) var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name, export_scope) if self._save_slice_info: var_def.save_slice_info_def.MergeFrom( self._save_slice_info.to_proto(export_scope=export_scope)) return var_def else: return None def __iadd__(self, other): logging.log_first_n( logging.WARN, "Variable += will be deprecated. Use variable.assign_add" " if you want assignment to the variable value or 'x = x + y'" " if you want a new python Tensor object.", 1) return self + other def __isub__(self, other): logging.log_first_n( logging.WARN, "Variable -= will be deprecated. Use variable.assign_sub" " if you want assignment to the variable value or 'x = x - y'" " if you want a new python Tensor object.", 1) return self - other def __imul__(self, other): logging.log_first_n( logging.WARN, "Variable *= will be deprecated. Use `var.assign(var * other)`" " if you want assignment to the variable value or `x = x * y`" " if you want a new python Tensor object.", 1) return self * other def __idiv__(self, other): logging.log_first_n( logging.WARN, "Variable /= will be deprecated. Use `var.assign(var / other)`" " if you want assignment to the variable value or `x = x / y`" " if you want a new python Tensor object.", 1) return self / other def __itruediv__(self, other): logging.log_first_n( logging.WARN, "Variable /= will be deprecated. Use `var.assign(var / other)`" " if you want assignment to the variable value or `x = x / y`" " if you want a new python Tensor object.", 1) return self / other def __irealdiv__(self, other): logging.log_first_n( logging.WARN, "Variable /= will be deprecated. 
Use `var.assign(var / other)`" " if you want assignment to the variable value or `x = x / y`" " if you want a new python Tensor object.", 1) return self / other def __ipow__(self, other): logging.log_first_n( logging.WARN, "Variable **= will be deprecated. Use `var.assign(var ** other)`" " if you want assignment to the variable value or `x = x ** y`" " if you want a new python Tensor object.", 1) return self**other def _serialize_to_tensors(self): """Implements Trackable._serialize_to_tensors.""" return {trackable.VARIABLE_VALUE_KEY: self} def _restore_from_tensors(self, restored_tensors): """Implements Trackable._restore_from_tensors.""" restored_tensor = restored_tensors[trackable.VARIABLE_VALUE_KEY] return state_ops.assign( self, restored_tensor, validate_shape=self.get_shape().is_fully_defined()) # Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. tensor_conversion_registry.register_tensor_conversion_function( RefVariable, RefVariable._TensorConversionFunction) # pylint: disable=protected-access variable_v1.set_variable_from_proto_fn(RefVariable)
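
# Usage sketch (illustrative, not part of the original module): RefVariable is
# the legacy graph-mode variable created when resource variables are disabled.
# The snippet below assumes TF 2.x running with v1 compatibility behavior and
# only shows how such a variable is normally reached through the public
# tf.compat.v1 API rather than by instantiating RefVariable directly.
#
#   import tensorflow as tf
#   tf.compat.v1.disable_eager_execution()
#   tf.compat.v1.disable_resource_variables()
#
#   v = tf.compat.v1.Variable([1.0, 2.0], name="legacy_var")  # -> RefVariable
#   update = v.assign_add([0.5, 0.5])
#   with tf.compat.v1.Session() as sess:
#       sess.run(tf.compat.v1.global_variables_initializer())
#       print(sess.run(update))  # [1.5 2.5]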
PypiClean
/pelican_theme_config-2.0.2.tar.gz/pelican_theme_config-2.0.2/README.md
Theme Configuration: A Plugin for Pelican
==========================================

[![Build Status](https://img.shields.io/github/actions/workflow/status/pelican-plugins/theme-config/main.yml?branch=main)](https://github.com/pelican-plugins/theme-config/actions)
[![PyPI Version](https://img.shields.io/pypi/v/pelican-theme-config)](https://pypi.org/project/pelican-theme-config/)

This package provides a plugin for the Pelican static website generator and
adds support for themes to adjust Pelican's configuration using the
`themeconf.py` file located in the root directory of the theme.

Installation
------------

This plugin can be installed via:

    python -m pip install pelican-theme-config

Usage
-----

Add `theme_config` to the list of plugins in the `pelicanconf.py` file, e.g.

    PLUGINS = [
        "theme_config"
    ]

From that point on, Pelican will try to load `themeconf.py` from the theme's
directory.

Overview
--------

This plugin allows theme authors to create more self-contained themes, since
everything that a theme requires can be configured within the theme itself:

* themes can be shipped with their own plugins
* themes can provide their static content (e.g. a theme that implements
  Google's PWA can provide the `manifest.json` that should be put into the
  root of the website)
* basically, authors can do almost anything :) since this plugin hands control
  to the theme

The code is hooked up early in Pelican's start-up sequence, leveraging the
"initialized" Pelican event, so almost every configuration option can be
safely redefined and will take effect.

However, since the plugin hooks up after Pelican has run its sanity checks on
the provided configuration, this brings both opportunities and risks.
Basically, theme authors should be careful to adhere to Pelican's conventions
for the configuration directives, otherwise they may confuse their users.

This plugin protects the following configuration options from being modified
by the theme:

- BIND
- CACHE_PATH
- PATH
- PELICAN_CLASS
- OUTPUT_PATH
- SITEURL
- THEME
- THEME_CONFIG
- THEME_CONFIG_PROTECTED
- PORT

This list can be configured by the end user in `pelicanconf.py` if they want
to restrict it even further or make it more relaxed. The goal is to give the
user the ability to define the expected behaviour for their configuration.

The plugin introduces the following configuration options one can specify in
the primary Pelican configuration file:

    # The name of the file to look up in the theme's directory
    THEME_CONFIG = "themeconf.py"

    # The list of configuration options to be protected from modification
    THEME_CONFIG_PROTECTED = ["PATH", "OUTPUT_PATH"]

Contributing
------------

Contributions are welcome and much appreciated. Every little bit helps. You
can contribute by improving the documentation, adding missing features, and
fixing bugs. You can also help out by reviewing and commenting on
[existing issues][].

To start contributing to this plugin, review the [Contributing to Pelican][]
documentation, beginning with the **Contributing Code** section.

Credits
-------

Authored by [Dmitry Khlebnikov](https://dmitry.khlebnikov.net/).

[existing issues]: https://github.com/pelican-plugins/theme-config/issues
[Contributing to Pelican]: https://docs.getpelican.com/en/latest/contribute.html
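
Example
-------

For illustration, a theme might ship a `themeconf.py` along these lines (a
hypothetical sketch — the plugin name and setting values below are made up and
only show the kind of configuration a theme could provide):

    # themeconf.py, located in the root directory of the theme
    import os

    THEME_DIR = os.path.dirname(os.path.abspath(__file__))

    # Load a plugin bundled with the theme ("my_theme_helper" is a
    # hypothetical name); keep "theme_config" in the list so the user's
    # setting is not lost when this file overrides PLUGINS.
    PLUGIN_PATHS = [os.path.join(THEME_DIR, "plugins")]
    PLUGINS = ["theme_config", "my_theme_helper"]

    # Theme-specific defaults picked up by the templates.
    DEFAULT_PAGINATION = 10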
PypiClean
/bis-arelle-2022.6.14.151921.tar.gz/bis-arelle-2022.6.14.151921/src/arelle/TableStructure.py
try: import regex as re except ImportError: import re from collections import defaultdict import os, io, json from datetime import datetime, timedelta from arelle import XbrlConst from arelle.ModelDtsObject import ModelConcept from arelle.XmlValidate import VALID # regular expression components STMT = r".* - statement - " notDET = r"(?!.*details)" notCMPRH = r"(?!.*comprehensive)" isCMPRH = r"(?=.*comprehensive)" ''' common mis-spellings of parenthetical to match successfully (from 2013 SEC filings) paranthetical parenthical parentheical parenthtical parenthethical parenthentical prenthetical parenethetical use a regular expression that is forgiving on at least the above and doens't match variations of parent, transparent, etc. ''' rePARENTHETICAL = r"pa?r[ae]ne?th\w?[aei]+\w?t?h?i?c" notPAR = "(?!.*" + rePARENTHETICAL + ")" isPAR = "(?=.*" + rePARENTHETICAL + ")" UGT_TOPICS = None def RE(*args): return re.compile(''.join(args), re.IGNORECASE) # NOTE: This is an early experimental implementation of statement detection # it is not in a finished status at this time. EFMtableCodes = [ # ELRs are parsed for these patterns in sort order until there is one match per code # sheet(s) may be plural # statement detection including root element of presentation link role ("BS", RE(STMT, notDET, notPAR), ("StatementOfFinancialPositionAbstract",)), ("BSP", RE(STMT, notDET, isPAR), ("StatementOfFinancialPositionAbstract",)), ("IS", RE(STMT, notDET, notPAR), ("IncomeStatementAbstract",)), ("ISP", RE(STMT, notDET, isPAR), ("IncomeStatementAbstract",)), ("CI", RE(STMT, notDET, notPAR), ("StatementOfIncomeAndComprehensiveIncomeAbstract",)), ("CIP", RE(STMT, notDET, isPAR), ("StatementOfIncomeAndComprehensiveIncomeAbstract",)), ("EQ", RE(STMT, notDET, notPAR), ("StatementOfStockholdersEquityAbstract","StatementOfPartnersCapitalAbstract")), ("EQP", RE(STMT, notDET, isPAR), ("StatementOfStockholdersEquityAbstract","StatementOfPartnersCapitalAbstract")), ("CF", RE(STMT, notDET, notPAR), ("StatementOfCashFlowsAbstract",)), ("CFP", RE(STMT, notDET, isPAR), ("StatementOfCashFlowsAbstract",)), ("CA", RE(STMT, notDET, notPAR), ("CapitalizationLongtermDebtAndEquityAbstract",)), ("CAP", RE(STMT, notDET, isPAR), ("CapitalizationLongtermDebtAndEquityAbstract",)), ("IN", RE(STMT, notDET, notPAR), ("ScheduleOfInvestmentsAbstract",)), ("INP", RE(STMT, notDET, isPAR), ("ScheduleOfInvestmentsAbstract",)), # statement detection without considering root elements ("DEI", RE(r".* - (document|statement) - .*document\W+.*entity\W+.*information"), None), ("BS", RE(STMT, notDET, notPAR, r".*balance\W+sheet"), None), ("BSP", RE(STMT, notDET, isPAR, r".*balance\W+sheet"), None), ("CF", RE(STMT, notDET, notPAR, r".*cash\W*flow"), None), ("IS", RE(STMT, notDET, notPAR, notCMPRH, r".*(income|loss)"), None), ("ISP", RE(STMT, notDET, isPAR, notCMPRH, r".*(income|loss)"), None), ("CI", RE(STMT, notDET, notPAR, isCMPRH, r".*(income|loss|earnings)"), None), ("CIP", RE(STMT, notDET, isPAR, isCMPRH, r".*(income|loss|earnings)"), None), ("CA", RE(STMT, notDET, notPAR, r".*capitali[sz]ation"), None), ("CAP", RE(STMT, notDET, isPAR, r".*capitali[sz]ation"), None), ("EQ", RE(STMT, notDET, notPAR, r".*(equity|capital)"), None), ("EQP", RE(STMT, notDET, isPAR, r".*(equity|capital)"), None), ("IS", RE(STMT, notDET, notPAR, r".*(income|operations|earning)"), None), ("EQ", RE(STMT, notDET, notPAR, r".*def[ei][cs]it"), None), ("ISP", RE(STMT, notDET, isPAR, r".*(income|operations|earning)"), None), ("CFP", RE(STMT, notDET, isPAR, r".*cash\W*flow.*"), 
None), ("IS", RE(STMT, notDET, notPAR, r".*loss"), None), ("ISP", RE(STMT, notDET, isPAR, r".*loss"), None), ("BS", RE(STMT, notDET, notPAR, r".*(position|condition)"), None), ("BSP", RE(STMT, notDET, isPAR, r".*(position|condition)"), None), ("SE", RE(STMT, notDET, notPAR, r"(?=.*equity).*comprehensive"), None), ("EQ", RE(STMT, notDET, notPAR, r".*shareholder[']?s[']?\W+investment"), None), ("EQP", RE(STMT, notDET, isPAR, r".*shareholder[']?s[']?\W+investment"), None), ("EQ", RE(STMT, notDET, notPAR, r".*retained\W+earning"), None), ("IN", RE(STMT, notDET, notPAR, r".*investment"), None), ("INP", RE(STMT, notDET, isPAR, r".*investment"), None), ("LA", RE(STMT, notDET, notPAR, r"(?!.*changes)(?=.*assets).*liquidati"), None), ("LC", RE(STMT, notDET, notPAR, r"(?=.*changes)(?=.*assets).*liquidati"), None), ("IS", RE(STMT, notDET, notPAR, r"(?=.*disc).*operation"), None), ("BS", RE(STMT, notDET, notPAR, r"(?!.*changes).*assets"), None), ("BSP", RE(STMT, notDET, isPAR, r"(?!.*changes).*assets"), None), ("EQ", RE(STMT, notDET, notPAR, r"(?=.*changes).*assets"), None), ("EQP", RE(STMT, notDET, isPAR, r"(?=.*changes).*assets"), None), ("FH", RE(STMT, notDET, notPAR, r"(?=.*financial).*highlight"), None), ("FHP", RE(STMT, notDET, isPAR, r"(?=.*financial).*highlight"), None), ("EQ", RE(STMT, notDET, notPAR, r"(?=.*reserve).*trust"), None), ("EQP", RE(STMT, notDET, isPAR, r"(?=.*reserve).*trust"), None), ("LC", RE(STMT, notDET, notPAR, r"(?=.*activities).*liquidati"), None), ("EQP", RE(STMT, notDET, isPAR, r".*def[ei][cs]it"), None), ("BSV", RE(STMT, notDET,notPAR, r".*net\W+asset\W+value"), None), ("CFS", RE(STMT, notDET,notPAR, r".*cash\W*flows\W+supplemental"), None), ("LAP", RE(STMT, notDET, isPAR, r".*(?!.*changes)(?=.*assets).*liquidati"), None) ] HMRCtableCodes = [ # ELRs are parsed for these patterns in sort order until there is one match per code # sheet(s) may be plural ("DEI", RE(r".*entity\W+.*information.*"), None), ("BS", RE(r".*balance\W+sheet.*"), None), ("IS", RE(r".*loss"), None), ("CF", RE(r".*cash\W*flow.*"), None), ("SE", RE(r".*(shareholder|equity).*"), None), ] def evaluateRoleTypesTableCodes(modelXbrl): disclosureSystem = modelXbrl.modelManager.disclosureSystem if disclosureSystem.validationType in ("EFM", "HMRC"): detectMultipleOfCode = False if disclosureSystem.validationType == "EFM": tableCodes = list( EFMtableCodes ) # separate copy of list so entries can be deleted # for Registration and resubmission allow detecting multiple of code detectMultipleOfCode = any(v and any(v.startswith(dt) for dt in ('S-', 'F-', '8-K', '6-K')) for docTypeConcept in modelXbrl.nameConcepts.get('DocumentType', ()) for docTypeFact in modelXbrl.factsByQname.get(docTypeConcept.qname, ()) for v in (docTypeFact.value,)) elif disclosureSystem.validationType == "HMRC": tableCodes = list( HMRCtableCodes ) # separate copy of list so entries can be deleted codeRoleURI = {} # lookup by code for roleURI roleURICode = {} # lookup by roleURI # resolve structural model roleTypes = [roleType for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris for roleType in modelXbrl.roleTypes.get(roleURI,())] roleTypes.sort(key=lambda roleType: roleType.definition) # assign code to table link roles (Presentation ELRs) for roleType in roleTypes: definition = roleType.definition rootConcepts = None for i, tableCode in enumerate(tableCodes): code, pattern, rootConceptNames = tableCode if (detectMultipleOfCode or code not in codeRoleURI) and pattern.match(definition): if rootConceptNames and 
rootConcepts is None: rootConcepts = modelXbrl.relationshipSet(XbrlConst.parentChild, roleType.roleURI).rootConcepts if (not rootConceptNames or any(rootConcept.name in rootConceptNames for rootConcept in rootConcepts)): codeRoleURI[code] = roleType.roleURI roleURICode[roleType.roleURI] = code if not detectMultipleOfCode: del tableCodes[i] # done with looking at this code break # find defined non-default axes in pre hierarchy for table for roleTypes in modelXbrl.roleTypes.values(): for roleType in roleTypes: roleType._tableCode = roleURICode.get(roleType.roleURI) else: for roleTypes in modelXbrl.roleTypes.values(): for roleType in roleTypes: roleType._tableCode = None def evaluateTableIndex(modelXbrl, lang=None): usgaapRoleDefinitionPattern = re.compile(r"([0-9]+) - (Statement|Disclosure|Schedule|Document) - (.+)") ifrsRoleDefinitionPattern = re.compile(r"\[([0-9]+)\] (.+)") # build EFM rendering-compatible index definitionElrs = dict((modelXbrl.roleTypeDefinition(roleURI, lang), roleType) for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris for roleType in modelXbrl.roleTypes.get(roleURI,())) sortedRoleTypes = sorted(definitionElrs.items(), key=lambda item: item[0]) disclosureSystem = modelXbrl.modelManager.disclosureSystem _usgaapStyleELRs = _isJpFsa = _ifrsStyleELRs = False if disclosureSystem.validationType == "EFM": _usgaapStyleELRs = True elif "jp-fsa" in modelXbrl.modelManager.disclosureSystem.names: _isJpFsa = True else: # attempt to determine type if any(usgaapRoleDefinitionPattern.match(r[0]) for r in sortedRoleTypes if r[0]): _usgaapStyleELRs = True elif any(ifrsRoleDefinitionPattern.match(r[0]) for r in sortedRoleTypes if r[0]): _ifrsStyleELRs = True if _usgaapStyleELRs: COVER = "1Cover" STMTS = "2Financial Statements" NOTES = "3Notes to Financial Statements" POLICIES = "4Accounting Policies" TABLES = "5Notes Tables" DETAILS = "6Notes Details" UNCATEG = "7Uncategorized" isRR = any(ns.startswith("http://xbrl.sec.gov/rr/") for ns in modelXbrl.namespaceDocs.keys() if ns) tableGroup = None firstTableLinkroleURI = None firstDocumentLinkroleURI = None for roleDefinition, roleType in sortedRoleTypes: roleType._tableChildren = [] match = usgaapRoleDefinitionPattern.match(roleDefinition) if roleDefinition else None if not match: roleType._tableIndex = (UNCATEG, "", roleType.roleURI) continue seq, tblType, tblName = match.groups() if isRR: tableGroup = COVER elif not tableGroup: tableGroup = ("Paren" in tblName and COVER or tblType == "Statement" and STMTS or "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or COVER) elif tableGroup == COVER: tableGroup = (tblType == "Statement" and STMTS or "Paren" in tblName and COVER or "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or NOTES) elif tableGroup == STMTS: tableGroup = ((tblType == "Statement" or "Paren" in tblName) and STMTS or "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or NOTES) elif tableGroup == NOTES: tableGroup = ("(Polic" in tblName and POLICIES or "(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or tblType == "Disclosure" and NOTES or UNCATEG) elif tableGroup == POLICIES: tableGroup = ("(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or ("Paren" in tblName or "(Polic" in tblName) and POLICIES or UNCATEG) elif tableGroup == TABLES: tableGroup = ("(Detail" in tblName and DETAILS or ("Paren" in tblName or "(Table" in 
tblName) and TABLES or UNCATEG) elif tableGroup == DETAILS: tableGroup = (("Paren" in tblName or "(Detail" in tblName) and DETAILS or UNCATEG) else: tableGroup = UNCATEG if firstTableLinkroleURI is None and tableGroup == COVER: firstTableLinkroleURI = roleType.roleURI if tblType == "Document" and not firstDocumentLinkroleURI: firstDocumentLinkroleURI = roleType.roleURI roleType._tableIndex = (tableGroup, seq, tblName) # flow allocate facts to roles (SEC presentation groups) if not modelXbrl.qnameDimensionDefaults: # may not have run validatino yet from arelle import ValidateXbrlDimensions ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl) reportedFacts = set() # facts which were shown in a higher-numbered ELR table factsByQname = modelXbrl.factsByQname reportingPeriods = set() nextEnd = None deiFact = {} for conceptName in ("DocumentPeriodEndDate", "DocumentType", "CurrentFiscalPeriodEndDate"): for concept in modelXbrl.nameConcepts[conceptName]: for fact in factsByQname[concept.qname]: deiFact[conceptName] = fact if fact.context is not None and fact.context.endDatetime is not None: reportingPeriods.add((None, fact.context.endDatetime)) # for instant reportingPeriods.add((fact.context.startDatetime, fact.context.endDatetime)) # for startEnd nextEnd = fact.context.startDatetime duration = (fact.context.endDatetime - fact.context.startDatetime).days + 1 break if "DocumentType" in deiFact: fact = deiFact["DocumentType"] if fact.xValid >= VALID and "-Q" in (fact.xValue or ""): # fact may be invalid # need quarterly and yr to date durations endDatetime = fact.context.endDatetime # if within 2 days of end of month use last day of month endDatetimeMonth = endDatetime.month if (endDatetime + timedelta(2)).month != endDatetimeMonth: # near end of month endOfMonth = True while endDatetime.month == endDatetimeMonth: endDatetime += timedelta(1) # go forward to next month else: endOfMonth = False startYr = endDatetime.year startMo = endDatetime.month - 3 if startMo <= 0: startMo += 12 startYr -= 1 start_datetime_day = endDatetime.day # check if we are in Feb past the 28th if startMo == 2 and start_datetime_day > 28: import calendar if endOfMonth: # reset day to the last day of Feb. 
start_datetime_day = 29 if calendar.isleap(startYr) else 28 else: # probably 52-53 weeks fiscal year, 2 cases only, current qtr ends on May 29th or 30th (31st then endOfMonth is True) if start_datetime_day == 29: if not calendar.isleap(startYr): start_datetime_day = 1 # step into March startMo +=1 elif start_datetime_day == 30: start_datetime_day = 1 if calendar.isleap(startYr) else 2 # step into March startMo +=1 startDatetime = datetime(startYr, startMo, start_datetime_day, endDatetime.hour, endDatetime.minute, endDatetime.second) if endOfMonth: startDatetime -= timedelta(1) endDatetime -= timedelta(1) reportingPeriods.add((startDatetime, endDatetime)) duration = 91 # find preceding compatible default context periods while (nextEnd is not None): thisEnd = nextEnd prevMaxStart = thisEnd - timedelta(duration * .9) prevMinStart = thisEnd - timedelta(duration * 1.1) nextEnd = None for cntx in modelXbrl.contexts.values(): if (cntx.isStartEndPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime and prevMinStart <= cntx.startDatetime <= prevMaxStart): reportingPeriods.add((None, cntx.endDatetime)) reportingPeriods.add((cntx.startDatetime, cntx.endDatetime)) nextEnd = cntx.startDatetime break elif (cntx.isInstantPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime): reportingPeriods.add((None, cntx.endDatetime)) stmtReportingPeriods = set(reportingPeriods) sortedRoleTypes.reverse() # now in descending order for i, roleTypes in enumerate(sortedRoleTypes): roleDefinition, roleType = roleTypes # find defined non-default axes in pre hierarchy for table tableFacts = set() tableGroup, tableSeq, tableName = roleType._tableIndex roleURIdims, priItemQNames = EFMlinkRoleURIstructure(modelXbrl, roleType.roleURI) for priItemQName in priItemQNames: for fact in factsByQname.get(priItemQName,()): cntx = fact.context # non-explicit dims must be default if (cntx is not None and all(dimQn in modelXbrl.qnameDimensionDefaults for dimQn in (roleURIdims.keys() - cntx.qnameDims.keys())) and all(mdlDim.memberQname in roleURIdims[dimQn] for dimQn, mdlDim in cntx.qnameDims.items() if dimQn in roleURIdims)): # the flow-up part, drop cntxStartDatetime = cntx.startDatetime cntxEndDatetime = cntx.endDatetime if (tableGroup != STMTS or (cntxStartDatetime, cntxEndDatetime) in stmtReportingPeriods and (fact not in reportedFacts or all(dimQn not in cntx.qnameDims # unspecified dims are all defaulted if reported elsewhere for dimQn in (cntx.qnameDims.keys() - roleURIdims.keys())))): tableFacts.add(fact) reportedFacts.add(fact) roleType._tableFacts = tableFacts # find parent if any closestParentType = None closestParentMatchLength = 0 for _parentRoleDefinition, parentRoleType in sortedRoleTypes[i+1:]: matchLen = parentNameMatchLen(tableName, parentRoleType) if matchLen > closestParentMatchLength: closestParentMatchLength = matchLen closestParentType = parentRoleType if closestParentType is not None: closestParentType._tableChildren.insert(0, roleType) # remove lesser-matched children if there was a parent match unmatchedChildRoles = set() longestChildMatchLen = 0 numChildren = 0 for childRoleType in roleType._tableChildren: matchLen = parentNameMatchLen(tableName, childRoleType) if matchLen < closestParentMatchLength: unmatchedChildRoles.add(childRoleType) elif matchLen > longestChildMatchLen: longestChildMatchLen = matchLen numChildren += 1 if numChildren > 1: # remove children that don't have the full match pattern length to parent for childRoleType in roleType._tableChildren: if (childRoleType not in 
unmatchedChildRoles and parentNameMatchLen(tableName, childRoleType) < longestChildMatchLen): unmatchedChildRoles.add(childRoleType) for unmatchedChildRole in unmatchedChildRoles: roleType._tableChildren.remove(unmatchedChildRole) for childRoleType in roleType._tableChildren: childRoleType._tableParent = roleType unmatchedChildRoles = None # dereference global UGT_TOPICS if UGT_TOPICS is None: try: from arelle import FileSource fh = FileSource.openFileStream(modelXbrl.modelManager.cntlr, os.path.join(modelXbrl.modelManager.cntlr.configDir, "ugt-topics.zip/ugt-topics.json"), 'r', 'utf-8') UGT_TOPICS = json.load(fh) fh.close() for topic in UGT_TOPICS: topic[6] = set(topic[6]) # change concept abstracts list into concept abstracts set topic[7] = set(topic[7]) # change concept text blocks list into concept text blocks set topic[8] = set(topic[8]) # change concept names list into concept names set except Exception as ex: UGT_TOPICS = None if UGT_TOPICS is not None: def roleUgtConcepts(roleType): roleConcepts = set() for rel in modelXbrl.relationshipSet(XbrlConst.parentChild, roleType.roleURI).modelRelationships: if isinstance(rel.toModelObject, ModelConcept): roleConcepts.add(rel.toModelObject.name) if isinstance(rel.fromModelObject, ModelConcept): roleConcepts.add(rel.fromModelObject.name) if hasattr(roleType, "_tableChildren"): for _tableChild in roleType._tableChildren: roleConcepts |= roleUgtConcepts(_tableChild) return roleConcepts topicMatches = {} # topicNum: (best score, roleType) for roleDefinition, roleType in sortedRoleTypes: roleTopicType = 'S' if roleDefinition.startswith('S') else 'D' if getattr(roleType, "_tableParent", None) is None: # rooted tables in reverse order concepts = roleUgtConcepts(roleType) for i, ugtTopic in enumerate(UGT_TOPICS): if ugtTopic[0] == roleTopicType: countAbstracts = len(concepts & ugtTopic[6]) countTextBlocks = len(concepts & ugtTopic[7]) countLineItems = len(concepts & ugtTopic[8]) if countAbstracts or countTextBlocks or countLineItems: _score = (10 * countAbstracts + 1000 * countTextBlocks + countLineItems / len(concepts)) if i not in topicMatches or _score > topicMatches[i][0]: topicMatches[i] = (_score, roleType) for topicNum, scoredRoleType in topicMatches.items(): _score, roleType = scoredRoleType if _score > getattr(roleType, "_tableTopicScore", 0): ugtTopic = UGT_TOPICS[topicNum] roleType._tableTopicScore = _score roleType._tableTopicType = ugtTopic[0] roleType._tableTopicName = ugtTopic[3] roleType._tableTopicCode = ugtTopic[4] # print ("Match score {:.2f} topic {} preGrp {}".format(_score, ugtTopic[3], roleType.definition)) return (firstTableLinkroleURI or firstDocumentLinkroleURI), None # no restriction on contents linkroles elif _isJpFsa: # find ELR with only iod:identifierItem subs group concepts roleElrs = dict((roleURI, roleType) for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris for roleType in modelXbrl.roleTypes.get(roleURI,())) roleIdentifierItems = {} for roleURI, roleType in roleElrs.items(): roleType._tableChildren = [] relSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI) for rootConcept in relSet.rootConcepts: if rootConcept.substitutionGroupQname and rootConcept.substitutionGroupQname.localName == "identifierItem": roleIdentifierItems[rootConcept] = roleType linkroleUri = None for roleURI, roleType in roleElrs.items(): relSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI) def addRoleIdentifiers(fromConcept, parentRoleType, visited): for rel in 
relSet.fromModelObject(fromConcept): _fromConcept = rel.fromModelObject _toConcept = rel.toModelObject if isinstance(_fromConcept, ModelConcept) and isinstance(_toConcept, ModelConcept): _fromSubQn = _fromConcept.substitutionGroupQname _toSubQn = _toConcept.substitutionGroupQname if ((parentRoleType is not None or (_fromSubQn and _fromSubQn.localName == "identifierItem" and _fromConcept in roleIdentifierItems )) and _toSubQn and _toSubQn.localName == "identifierItem" and _toConcept in roleIdentifierItems): if parentRoleType is None: parentRoleType = roleIdentifierItems[_fromConcept] _toRoleType = roleIdentifierItems[_toConcept] if _toConcept not in parentRoleType._tableChildren: parentRoleType._tableChildren.append(_toRoleType) if _toConcept not in visited: visited.add(_toConcept) addRoleIdentifiers(_toConcept, _toRoleType, visited) visited.discard(_toConcept) elif _toConcept not in visited: visited.add(_toConcept) addRoleIdentifiers(_toConcept, parentRoleType, visited) visited.discard(_toConcept) for rootConcept in relSet.rootConcepts: addRoleIdentifiers(rootConcept, None, set()) if not linkroleUri and len(roleType._tableChildren) > 0: linkroleUri = roleURI return linkroleUri, linkroleUri # only show linkroleUri in index table elif _ifrsStyleELRs: for roleType in definitionElrs.values(): roleType._tableChildren = [] return sortedRoleTypes[0][1].roleURI, None # first link role in order return None, None def parentNameMatchLen(tableName, parentRoleType): lengthOfMatch = 0 parentName = parentRoleType._tableIndex[2] parentNameLen = len(parentName.partition('(')[0]) fullWordFound = False for c in tableName.partition('(')[0]: fullWordFound |= c.isspace() if lengthOfMatch >= parentNameLen or c != parentName[lengthOfMatch]: break lengthOfMatch += 1 return fullWordFound and lengthOfMatch def EFMlinkRoleURIstructure(modelXbrl, roleURI): relSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI) dimMems = {} # by dimension qname, set of member qnames priItems = set() for rootConcept in relSet.rootConcepts: EFMlinkRoleDescendants(relSet, rootConcept, dimMems, priItems) return dimMems, priItems def EFMlinkRoleDescendants(relSet, concept, dimMems, priItems): if concept is not None: if concept.isDimensionItem: dimMems[concept.qname] = EFMdimMems(relSet, concept, set()) else: if not concept.isAbstract: priItems.add(concept.qname) for rel in relSet.fromModelObject(concept): EFMlinkRoleDescendants(relSet, rel.toModelObject, dimMems, priItems) def EFMdimMems(relSet, concept, memQNames): for rel in relSet.fromModelObject(concept): dimConcept = rel.toModelObject if isinstance(dimConcept, ModelConcept) and dimConcept.isDomainMember: memQNames.add(dimConcept.qname) EFMdimMems(relSet, dimConcept, memQNames) return memQNames
PypiClean
/echarts-china-counties-pypkg-0.0.2.tar.gz/echarts-china-counties-pypkg-0.0.2/echarts_china_counties_pypkg/resources/echarts-china-counties-js/d6993b4b7bb8a084e79e9f15feab972b.js
(function (root, factory) {if (typeof define === 'function' && define.amd) {define(['exports', 'echarts'], factory);} else if (typeof exports === 'object' && typeof exports.nodeName !== 'string') {factory(exports, require('echarts'));} else {factory({}, root.echarts);}}(this, function (exports, echarts) {var log = function (msg) {if (typeof console !== 'undefined') {console && console.error && console.error(msg);}};if (!echarts) {log('ECharts is not Loaded');return;}if (!echarts.registerMap) {log('ECharts Map is not loaded');return;}echarts.registerMap('平阳县', {"type":"FeatureCollection","features":[{"type":"Feature","id":"330326","properties":{"name":"平阳县","cp":[120.565793,27.661918],"childNum":13},"geometry":{"type":"MultiPolygon","coordinates":[["@@@@@@A@@@@@@@@@@@B@@@@@@@@@@@@@"],["@@A@@@@B@@@@@@@@@@B@@@@@@B@@@A@@@@@@@@@@@A@@"],["@@@@@@A@@@@@@@@@@@@@A@@BB@@@B@@@@@@@@@@@@A@@@@@@"],["@@@@@@@@@@@@@AA@@@@B@@A@@@B@@@@B@@@@@@@@@@@@@@B@@@@@@@@@BAA@@@@@@@@@@@"],["@@@A@@@@A@@@@@@@@@@@@@A@@@A@@@AB@@@@BB@@@B@@B@@@@@@A@@B@@@@@B@@A@@@@"],["@@C@EBEB@BABA@@B@@B@F@D@BAB@BA@ABA@A@@"],["@@@@@@@@@AA@@@@@A@@@@@@B@@@@A@@@A@@AABA@@@AB@@@@A@A@@B@@@B@@@@@@@BA@ABA@@@BBB@B@B@B@@@BA@@B@B@@@@@B@@@BA@@A@@@@@@@@A@@@@@@B@@@@@BAA@@@@@@@@A@@@@@@B@@@BA"],["@@FAD@@A@C@ACAA@ABABA@A@A@ABAB@B@BB@@B@@B@@@BAB@B@"],["@@GDGFADHDJADCDACECC"],["@@FADC@C@ECAE@GHEDC@CF@BDBFAFAF@"],["@@GDBD@DCBAFBBD@FAHCHCFEAAECG@E@"],["@@HIDIAAIBIJED@BCDC@KHAB@DDDBBDBJCD@ADADAHAFBBH@FAFCJA@DIDADBBB@F@HEF@FADAFABABC@CDCBEFCF@D@D@DABAFGBACC@AHEFEAAE@KFIJCB@@A@@CA@C@GDQHWJA@AAACFE@A"],["@@@@@A@@A@@AA@AA@@@@@@@@@@A@@A@@A@@BA@A@@@A@@@@@@BB@@@A@@BB@@@@@@BB@@@@@@B@@@B@BA@@@@@@@A@@B@@@@@AA@@@@@A@@@CBA@@@@@@@@@@BA@@@@@@@A@@A@@A@A@@@C@@AA@@@CBA@@B@@@@@B@@BB@@ABA@@@@@ABA@AAAA@@EC@A@@BAB@AAAA@@@@@B@A@BAA@@@@@@@@@@@@A@@@@AABAA@@A@@@A@@@@@@@@@@A@@@A@@AB@@@@@@AA@@A@@@@@@@A@@@CA@@@@@@A@@@@@A@@@@B@@@@A@@@@@B@@@@@AB@A@B@@@@@@@@@B@@@@A@@@@@@@@@@@@BA@AA@@A@@B@@@@A@@@@@A@@@@@@B@@A@@@AAA@@@@@@@AA@BA@@@@@@B@@@BA@@@@B@@B@@@B@@B@@@BA@@BB@B@@@@@@B@@@@B@@@BB@@@@@@@@@@@DA@@@@@@@@@@@A@CB@@@B@@@@@@@@A@AA@@A@@AAA@AA@@@@B@@@BAB@BCA@@AA@B@B@B@@@@@@@@@@@@@BA@@@@@@BA@@AA@@B@B@FA@@AAD@@@@@@@@@B@@@@@@@@@BA@@@@@@@@@@B@@A@@@@@@@@@@@AB@@@@AB@A@@@B@@@@A@@@@@@@A@@A@@@@A@@@@@C@@@AAABAA@@A@A@BA@@@@A@@@A@EAAA@@AAB@A@@@C@@AABA@@@A@C@A@@@EB@B@@B@B@B@@B@@@@AB@@AB@@A@A@ABA@A@@@AB@@@@@AACABA@A@AAAA@@@B@A@@@@A@A@@@@@@A@@AAA@@@AB@@A@@@A@@@A@@@A@A@@@@@@@@A@@@@@A@@BAD@BAA@@@A@@@AA@@A@@@@@@A@@@@@@A@AA@@AC@@@AABA@A@A@@@A@@@@AA@@A@@AA@@E@C@A@A@A@@@@B@B@@@@@@A@@@A@@B@@AA@BAAB@@@A@@@@@@AA@A@@A@@@@B@@ABA@@@@@@@@@@@A@@@@@@@@@@@@A@@@@@@A@@@@A@@@@@A@@@@@@@@@@@@@@@@@AAA@@@@A@@@AA@A@@@@@A@A@@@A@A@@@AAA@@@A@CDA@@@@@A@@B@@@@A@@@A@@@A@@AA@A@@@AB@@B@@@@B@@A@@@A@@@@@@A@@@AAA@@@@@A@@A@A@A@@@CC@@@A@@@@AAA@@@@AA@@@@@AAB@@AAAA@A@AA@@@A@@A@A@@AAA@@A@@A@AA@@@@@AA@@@@A@@@A@@@AA@@C@@A@@@@A@@@@@A@@AA@@@@@@B@@A@@@A@@@@@@AA@@AAB@AA@@A@A@@@ABA@@@@@@ABEBA@AAA@@AA@@@@@A@@@@@@@A@A@A@@@A@@BA@@@@@@B@@A@AB@@A@@AAB@A@@AB@@@@@@@B@@A@@@A@@@@@@@A@A@@@A@@@@B@@A@A@@@@@@@A@@@@B@@@@@@AB@@@@@@@B@@@@@B@@@@@@AB@@@B@@@@@B@@@@AB@@@@@AA@A@A@@@A@A@@@A@@@A@A@AA@@@@@@AAAA@@ABA@@@AB@@@@@@AB@@@@A@AA@@@AACA@@@@@A@@@@@A@@@@AA@A@@@A@A@@A@@@B@@@@A@@@@@@@@A@B@@A@@@@@A@@A@@@@@@@@AB@@A@@B@@A@@@@@@@AB@@@BA@@@@@AB@@AA@@ABAA@@@@@B@@@@@BA@@@A@@B@@@@@@AA@@@AA@@@BA@@B@@A@@A@@@@@@AB@@AA@B@@A@ABA@@A@@@A@@@@@@@AAA@@@A@AB@B@B@@A@@@A@A@@@A@@@AA@@AA@@@@@@@A@@A@@ACAA@@@A@A@AAA@@@A@@@@@A@@@A@@@A@A@A@@@A@@@A@@@A@A@@@ABA@A@@B@@AB@@B@@@@B@@@B@@@@@@@B@@@@@B@@@B@@@B@@@BA@@@@B@@C@@@A@A@ABBB@@A@@@@@A@@@@@@@A@@B@@@AA@B@@AA@@@AA@@@@AAC@@@A@A@A@@@A@@@@@AB@@A@BA@@@@B@@AB@@@BAB@@@@A@@@@
A@@AA@@@B@@A@@@@@A@@@@@@A@@@@@AAA@@@AA@@@@AA@@AA@@ABA@AB@@@@@@@@@BA@A@@@@@@@AB@@A@@BA@@@AB@@@@@@@@@B@@A@A@@AA@@@B@@A@@A@C@@@@@@@@B@@@B@@@@@@@AA@A@AB@@A@@@AB@@A@@AA@@@@@@@A@@@@@@@AB@BA@A@@BABAB@@B@@B@@@@@BA@@@@@BB@@@B@@@@BB@@@@@@@@AB@@@B@B@@@BAB@@@@@@@@@@@@@@@@@@@@@@A@@@@@@@@@@@A@@@@@@@@@@@@@A@@@@@@@@B@@@@@@@@A@@@@@@@@@@@A@@@@@@@@@@@@@@@A@@@@@@@@@@A@@@@@@@@AB@@@@@@@@@@@@A@@A@@@@@@@@@@@@@@@@A@@@@@@@@@@@A@@@@@@@@@@@A@@@@@@@@@A@@@@@@@@@@A@@@@@@@@@@A@@B@@@@@@@@@@@@@@@A@@@@@@@@@@@@@@@@@@A@@@@@@@A@@@@@@@@@@@@@A@@@@@@@@@@@@B@@@@@@@@A@@@@@@@@@@@@@@@@@@@A@@@@@@@@@@@@@A@@@@@@@@@@@@@@@@B@@@@A@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@A@@B@@@@@@@@@@@@@@A@@@@@@@@@@@A@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@AB@@@@@@B@@@@@@@A@@@@B@@@@@@@@@@@@@@@@@@@@C@@@@BA@@@AB@@A@A@@@A@A@A@@BC@@@@@AACA@@@AA@@@@A@@@@@@@AA@@@@A@B@@A@@B@@@@A@@B@@A@@@@BA@A@@BA@@BA@@@A@@BA@@@@BA@@B@@@@A@@@A@B@@BB@@B@ABBB@@BB@BB@@AA@B@@AB@BAB@@A@@B@BA@BB@BB@@BBB@@A@@@A@@@AA@@@@A@A@A@@@@AA@@A@A@@@@@@AA@@A@@BA@@B@@@@@B@@@@A@@@@BA@A@@@A@A@AAAA@@@BA@@A@A@@ACABA@B@@BA@AB@@AB@@@BA@@H@@@B@@@@A@@@@@@B@A@B@@A@BB@@@@@@@@A@@@@B@@A@@BB@@@@@@B@B@@@B@@BB@@@@@A@A@A@@B@@@@@B@@A@@@@B@@B@@@@@BBB@@D@@@@@BA@@BAB@BA@@B@BB@@@@A@@@AB@@@@B@@@B@@@@@@@B@@B@@@@B@@A@@B@BA@@D@@@@@@@@@B@@@B@@@@@@@A@@B@@@@B@DA@@B@@B@@@@ADB@@B@BB@@@B@@@@BB@DBBB@@@@AB@B@@C@AB@@@@DB@B@@@@@B@@@@@@BB@@@B@@B@@@B@@@B@@@B@@@@@B@D@@@@BD@@@DB@BB@@@@@@@@BA@ABA@@@@@@@A@@@@BBB@B@@@@@BA@@@@@@B@@@@BAB@B@@@@@@ABABA@@@@B@@@@@@@B@B@@B@@B@B@@@@BB@@@ABABB@@@@B@@@@@BABAB@@@BBB@@@BA@@@@@AB@@@@ABABAB@@@B@@A@A@@@ABA@ABABA@A@@B@@@@@D@@@BA@@@@BB@@@@@@@@B@BB@@B@@BB@@@B@@@@AB@@@@ABAB@@BD@@@B@B@@BB@B@B@@@B@B@@@BA@@B@@@B@@AF@@B@B@B@@@BDB@@@@B@B@@B@@@B@@@BAB@@@B@@@B@B@@@@@BBB@B@B@@BB@@@A@@B@B@BBD@@@B@@B@BB@BBDA@@B@B@@A@@B@@AB@@A@@@A@A@@BA@@@@B@@@B@B@@@@@D@B@@AB@@@@BB@B@B@B@@B@@BB@@@@B@@B@@BBA@@@BB@@@BB@@@BB@B@@@@@@B@@AB@@@@@BB@BBBD@@A@@@@@@B@@@BBA@@@@@@@BB@@@@@B@@@B@@@BA@AB@@@@A@@DAB@@@BA@@@@AAB@BA@@BA@@B@BA@@B@BA@BB@@BBA@B@@BBB@@B@@@@BBB@@A@@DBB@B@B@@@B@B@@A@@B@@@BBB@@@@BDB@B@@BBB@@@@BB@BB@BB@@@@@BA@A@@@@AABA@@B@@@@@@A@@B@@@BA@@B@@A@@B@@@@@B@@@B@@@B@BABB@@B@@@B@@@B@B@B@BB@@BB@BB@@BB@@@@@D@DB@@@@@@@@@@@@@@@@@A@B@@B@@@@@@@@@@@@@@@@@@A@@@@@@@@@B@@@@B@@@@@@@@@@@@@@@@B@@@@@@@@@@@@B@@@@@@B@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@B@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@B@@B@@@@@@@@@@@@@@@@@@@@@@@BB@@@@@B@B@@@B@@@D@@C@@@@@@@@B@BB@A@A@@B@@@@@@@@BD@@BB@BB@@@@@B@@@@BB@B@@@@@@B@@@@B@@@@B@B@BBBAB@B@B@B@B@@@@AD@@@D@@BB@@B@@@BBB@@AB@@@@@B@B@@@BBBB@@B@@@B@@@BB@@B@@B@@BB@@@B@@@B@@@BB@@@BB@@@BB@@@@@B@@A@@B@@@@@B@@AB@@@@A@@B@@BB@@B@DBB@@@BB@@B@@@B@@@@@@@B@@@@B@@@@@@@B@@BB@@@BB@BD@B@@D@DBDA@@BB@@B@B@B@B@@@@A@A@A@@@A@AA@@@@@@@@A@@@A@@@@@@@@@@@@DABAB@BAB@@@B@@@@@@BB@@B@@@@B@@B@@@@@@BB@@BB@@@@@@@@@@@@B@@@B@@@@@@@B@@@@@@A@@BB@@BA@@B@B@B@@@B@@@@BAB@@@@A@BB@@@@@@@BB@B@@@B@@@B@@@@AB@@B@@B@A@@B@@@@B@@BBA@@@@B@@@@@B@@@@BBB@@B@@B@@@@@@B@@@@@B@@@@B@@A@A@@@@@ABABBB@@BBB@B@@@@@BABA@@@@BA@@@@B@@@@@@@@@@@@@@B@@ABB@@@@@B@@@@@@@@@@@@@@A@@@@@@@A@@B@@@BA@@B@B@@@@@@ABA@@DCB@@ADA@@@@@A@A@@BA@@@@@AB@B@@@@AB@B@@A@ABAB@@@BABBBA@@BA@@@@@@B@@@@B@@@BBB@@@@B@@@BB@B@@BB@@@@@BA@@@@BA@@@@@@B@@@@@@A@@@@@A@@@@B@@@@B@@@@@@@A@@@@B@@@@B@@@@@A@@B@@B@@@@@AB@B@@@@B@B@@BA@A@@BC@@B@B@@@B@@B@BBBAB@@@D@@ABA@B@@@BB@@@B@@@@B@@@@@@@@B@@B@@B@@@@@@@@A@@@@B@@@B@@@@BB@B@BB@@@@@B@@B@@@@@@@@@BB@B@B@@@@@@@B@@@@@@@@@B@@@@@@@@A@@@@@@B@@@@@@@BA@B@@B@@@@@@AB@@B@@@@BB@@BD@@@@AB@@@B@@ABABA@@@ABA@@@ABA@@@AB@BCB@BAB@B@@@BA@BB@@@@@BAA@B@@@@@BA@@@@@A@@@A@@@AA@@@BA@@B@@AB@@A@@@@@A@@@@@A@@B@@@@@DB@AD@@@B@@BB@B@B@@@@AB@@@@@@@@AB@@AA@@@@@@
@BA@BB@@@B@B@@@@@@A@C@@B@B@B@B@B@@BB@@@B@@@D@DB@B@@@@@@BA@@@@BA@@D@BA@@BA@@B@B@B@@A@@B@BA@@BA@BBA@@B@@@B@@@@@@@@@@AB@@@B@@@@@@A@@@@A@@ABAB@@@@@BABCB@@@A@@A@@@@@A@@@@@A@A@@@@A@A@AA@B@@AA@A@AA@B@@@@AB@@@B@B@@@B@@@BA@@@@B@@@@@@A@@BA@@@@@@D@@A@A@@@A@@A@@AA@@@A@@ABA@@@@B@@@@@@@B@@A@@@@@@@@@@BB@@@@@@@@B@@@@@@A@@@@@A@@@@@@@@@A@@@AB@@@B@B@B@@A@@@@@C@@@A@@@@@@@AB@@@B@@@@AB@@A@ABA@@A@@A@@B@BAB@@@@@BAB@@@@@B@@B@@@@@AB@@@@@@@@CB@@@@B@@@B@@@@@@@@B@@@B@@@B@@@A@@B@@AB@@@@A@@@@BA@@@ABA@@@@B@@@@@BB@B@@@AB@@@@BB@@@@B@@@D@@@BA@@@@BB@@@@@@B@BABADAB@@@B@@@BD@@A@@B@B@BBBA@@B@BBAB@@@B@@@@A@@@@BA@@B@@AB@B@BCB@@@@@@@@B@B@@@@B@BA@@@@B@@@@@@@B@@@@@BAB@F@@@@@BA@@@A@B@A@@BABABA@@BAB@@@@@B@@@BA@@BA@@@@@A@@@B@A@@@@B@@@@@B@@A@@@@@@BA@@AA@@@A@@BA@@@@@@@@@@A@@@@@@A@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@A@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@B@@@@A@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@A@@@@@@B@@@@B@@@@@@@@@@@A@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@A@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@BA@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@A@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@AB@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@B@@@BBBBB@B@BB@@BBB@@AB@BBB@@@BBB@B@D@BB@@@@@B@@@B@B@@@@A@BB@@B@@@B@@@@AB@@@@@@@@@@@BB@@B@@@@@@@@@@DB@@@@@BB@@@AB@@B@@@B@@@@BB@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@BA@B@@@@@@@@@@@@@@@@@B@@@@@@@@@@@A@@@@B@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@A@@@@@@@@@A@@@@@@@@@@@@@A@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@B@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@B@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@BB@@@@@@@A@@B@@@B@BB@@@@BB@@@ABAB@@@@@@@B@@A@@B@@B@@@BBBDA@AB@B@@@B@@@B@@@@@@@B@@@B@BAB@@BBBBAB@@B@@BB@@B@@@B@@@@@BB@@@@@BB@BB@@@@ABAB@@@@@@AB@@@@@D@@A@@@AB@@@@@B@@@@@B@@@@@BA@@@B@@@@BB@@@B@@@@@@@@@B@@@B@@@@@@@BB@@@@@@@@BB@@@@@@@B@B@B@@@B@B@BBB@B@@@BB@@@@@@B@@B@@@@@B@B@@@@@@@B@B@@@@@BB@@B@@@@@B@@BBBBB@@@@@BBB@@@@B@@@@BB@@ABAB@@@B@@@@BB@@@@@@BBDB@B@@@BB@BB@B@@@B@@@@@@B@@@@@@@@@@@@@@@@@@@@@B@@B@@@@@@@@@@@@BB@@@@@@@@@@@@@@@@A@@@@@@@@@@@B@@@@B@@@@@@@@@@@@A@@@@@@@@@@@BB@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@B@@B@@@B@@@@@@@@@@B@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@B@@@@B@@@@@@@@@@@B@@@@@@@@@@@@@B@@@@@@B@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@B@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@BA@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@@@@@@@BB@@@@@@@@@@@@@@@B@@B@B@@@@@B@@B@@B@@BBA@B@@B@B@B@@@B@@@@@B@@A@@@@@A@@B@BB@@@@BBDAB@@@B@@@@@BA@@@A@@BABC@AB@@@B@@A@A@@@@@A@@BAB@@B@@@@BA@A@@B@@@BA@@@A@@@@AA@@@@BA@@@@B@@@@BDCB@@ABA@@@@@@@@@@@A@@@@@@B@@BB@ABBBBB@A@@BB@@@@BB@@@B@@@@@BB@@@@B@@B@@@@B@@BBD@@@@BB@@B@A@@B@@@@@@@@B@@BBB@@@B@@A@@@@@A@@BB@A@@@@B@B@@ABA@@@@@@BAB@BCBABCB@@@@AB@@A@@B@@ABBB@@@@@@@@@@A@ABC@@BAFE@@BA@A@@BA@@@ABA@@@@B@@@@@@A@@@@@@B@@AB@@@@ABA@@B@B@@@@@A@@A@@@A@@@AD@@@AA@@B@BA@@A@AEA@@CA@@B@@AA@@A@@@@@AA@@@@@C@A@@@C@@A@@@A@@@@@@A@B@@A@@@AABA@AA@@@@@@@@@@@A@@AAB@@@@@@A@@ABA@@BA@@@@A@@@AA@@@A@@AA@@@@@AB@@@@@@@B@@A@@@AA@@@@A@@BA@A@@@@@A@@@@B@B@B@@@@AB@@@@@B@@@@@B@@A@@@@@@@A@@B@B@@@@@@@@A@@@@@AB@@@@@@@@AD@@@@D@B@D@@B@D@FAB@@@@A@@@@@@@A@@B@B@DA@@@A@@@@F@B@@@@@B@@@@B@@@@@BA@@BAB@B@B@B@BB@DAD@B@@@@@B@B@@AB@@A@@B@B@@@@@B@D@B@@B@@D@B@@A@@B@@@@@B@@B@A@@B@AA@@D@BAAA@@@@@A@@B@B@B@@AA@B@@@DAB@HEB@@@BADADA@@DADC@@FA@AJEVMVBS{EUEMOUCEgeCCCCE@GCGAGCCGBUCBEDSLABEDABA@EFED
CBABA@A@ABA@A@C@C@E@A@A@EAA@@@A@@A@@@@@@@@@@A@@@@@@@@@@@@@A@@@@@@@@A@@A@@@@@@@@@@@@@A@@@@@@A@@@@A@@@@@@@@@A@@@@@@A@@@@@@@@@@@@A@@@@A@@A@@@@@@@@@@@AA@@@@@@A@@@@A@@@@AA@@@@@@A@AACA@@C@@AA@E@@@CBA@@@CB@@ABAB@@AB@@@B@@AB@@@B@@@@@BAB@BAB@@@B@B@@CD@@@@ABAB@@A@A@A@A@A@A@@AA@AA@@@@@AA@@@@@@@AA@@AAAACCA@EA@@C@A@C@@@CB@@EBA@CBA@C@A@A@AACAAAA@@AA@@A@@AC@ABC@ADC@ABABA@@@A@A@@@AAA@AA@ECAACAA@A@@@AB@@A@@BAD@BAB@DAD@BABADEB@BA@A@@@A@A@A@CAC@E@EBC@A@@@@@@@@@@A@@BA@@BA@@@A@@B@@A@@@@B@@AB@@ABAB@BA@@B@B@BAB@BABAB@@A@@@@@@@CB@@A@@@ABA@@BA@A@@@AA@@A@@CB@@C@C@C@@@A@@A@@@A@@@AB@DAB@B@@A@@AA@A@AA@CAAA@@@@@AB@@A@A@A"]],"encodeOffsets":[[[123640,28233]],[[123652,28244]],[[123639,28232]],[[123633,28224]],[[123621,28215]],[[123973,28139]],[[123979,28097]],[[124038,28132]],[[124001,28146]],[[123992,28091]],[[124020,28123]],[[123972,28114]],[[123380,28206]]]}}],"UTF8Encoding":true});}));
PypiClean
/topiary-3.0.6.tar.gz/topiary-3.0.6/README.md
<a href="https://travis-ci.org/openvax/topiary">
    <img src="https://travis-ci.org/openvax/topiary.svg?branch=master" alt="Build Status" />
</a>
<a href="https://coveralls.io/github/openvax/topiary?branch=master">
    <img src="https://coveralls.io/repos/openvax/topiary/badge.svg?branch=master&service=github" alt="Coverage Status" />
</a>
<a href="https://pypi.python.org/pypi/topiary/">
    <img src="https://img.shields.io/pypi/v/topiary.svg?maxAge=1000" alt="PyPI" />
</a>

# Topiary

Predict mutation-derived cancer T-cell epitopes from (1) somatic variants, (2) tumor RNA expression data, and (3) patient HLA type.

## Example

```sh
./topiary \
  --vcf somatic.vcf \
  --mhc-predictor netmhcpan \
  --mhc-alleles HLA-A*02:01,HLA-B*07:02 \
  --ic50-cutoff 500 \
  --percentile-cutoff 2.0 \
  --mhc-epitope-lengths 8-11 \
  --rna-gene-fpkm-tracking-file genes.fpkm_tracking \
  --rna-min-gene-expression 4.0 \
  --rna-transcript-fpkm-tracking-file isoforms.fpkm_tracking \
  --rna-min-transcript-expression 1.5 \
  --output-csv epitopes.csv \
  --output-html epitopes.html
```

## Installation

You can install Topiary and all of the libraries it depends on by running:

```
pip install topiary
```

You'll need to download the reference genome sequences and annotations for a
recent Ensembl release (e.g. 81) by running:

```
pyensembl install --release 81 --species human
```

If you want to work with variants which were aligned against the older reference
GRCh37, you will need to also download its annotation data, which is contained in
Ensembl release 75:

```
pyensembl install --release 75 --species human
```

## Commandline Arguments

### Genomic Variants

Specify some variants by giving at least one of the following options. They can
be used in combination and repeated.

* `--vcf VCF_FILENAME`: Load a [VCF](http://www.1000genomes.org/wiki/analysis/variant%20call%20format/vcf-variant-call-format-version-41) file
* `--maf MAF_FILENAME`: Load a TCGA [MAF](https://wiki.nci.nih.gov/display/TCGA/Mutation+Annotation+Format+%28MAF%29+Specification) file
* `--variant CHR POS REF ALT`: Specify an individual variant (requires --ensembl-version)

### Output Format

* `--output-csv OUTPUT_CSV_FILENAME`: Path to an output CSV file
* `--output-html OUTPUT_HTML_FILENAME`: Path to an output HTML file

### RNA Expression Filtering

Optional flags to use Cufflinks expression estimates for dropping epitopes
arising from genes or transcripts that are not highly expressed.

* `--rna-gene-fpkm-tracking-file RNA_GENE_FPKM_TRACKING_FILE`: Cufflinks FPKM tracking file containing gene expression estimates.
* `--rna-min-gene-expression RNA_MIN_GENE_EXPRESSION`: Minimum FPKM for genes
* `--rna-transcript-fpkm-tracking-file RNA_TRANSCRIPT_FPKM_TRACKING_FILE`: Cufflinks FPKM tracking file containing transcript expression estimates.
* `--rna-min-transcript-expression RNA_MIN_TRANSCRIPT_EXPRESSION`: Minimum FPKM for transcripts
* `--rna-transcript-fpkm-gtf-file RNA_TRANSCRIPT_FPKM_GTF_FILE`: StringTie GTF file containing transcript expression estimates.

### Choose an MHC Binding Predictor

You *must* choose an MHC binding predictor using one of the following values for
the `--mhc-predictor` flag:

* `netmhc`: Local [NetMHC](http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?netMHC) predictor (Topiary will attempt to automatically detect whether NetMHC 3.x or 4.0 is available)
* `netmhcpan`: Local [NetMHCpan](http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?netMHCpan) predictor
* `netmhciipan`: Local [NetMHCIIpan](http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?netMHCIIpan) predictor
* `netmhccons`: Local [NetMHCcons](http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?netMHCcons)
* `random`: Random IC50 values
* `smm`: Local [SMM](http://www.mhc-pathway.net/smm) predictor
* `smm-pmbec`: Local [SMM-PMBEC](http://www.mhc-pathway.net/smmpmbec) predictor
* `netmhcpan-iedb`: Use NetMHCpan via the IEDB web API
* `netmhccons-iedb`: Use NetMHCcons via the IEDB web API
* `smm-iedb`: Use SMM via the IEDB web API
* `smm-pmbec-iedb`: Use SMM-PMBEC via the IEDB web API

### MHC Alleles

You must specify the alleles to perform binding prediction for using one of the
following flags:

* `--mhc-alleles-file MHC_ALLELES_FILE`: Text file containing one allele name per line
* `--mhc-alleles MHC_ALLELES`: Comma separated list of allele names, e.g. "HLA-A02:01,HLA-B07:02"

### Peptide Length

* `--mhc-epitope-lengths MHC_EPITOPE_LENGTHS`: comma separated list of integers specifying which peptide lengths to use for MHC binding prediction

### Binding Prediction Filtering

* `--only-novel-epitopes`: Topiary will normally keep all predicted epitopes, even those which occur in a given self-ligandome or don't overlap a mutated region of a protein. Use this flag to drop any epitopes which don't contain mutations or that occur elsewhere in the self-ligandome.
* `--ic50-cutoff IC50_CUTOFF`: Drop peptides with predicted IC50 nM greater than this value (typical value is 500.0)
* `--percentile-cutoff PERCENTILE_CUTOFF`: Drop peptides whose percentile rank of predicted IC50 (among predictions for a particular allele) falls below this threshold (lower values are stricter filters, typical value is 2.0)

### Misc

* `--padding-around-mutation PADDING_AROUND_MUTATION`: Include more unmutated residues around the mutation (useful when not using `--only-novel-epitopes`)
* `--self-filter-directory SELF_FILTER_DIRECTORY`: Directory of files named by MHC allele containing a self peptide ligandome (peptides which should be excluded from results)
* `--skip-variant-errors`: If a particular mutation causes an exception to be raised during annotation, you can skip it using this flag.
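
### Example: Alleles File

For illustration (the file names below are placeholders, not files shipped with
Topiary), the `--mhc-alleles-file` option expects a plain-text file with one
allele name per line; a minimal run against a local NetMHCpan install could look
like this:

```sh
cat > alleles.txt <<'EOF'
HLA-A*02:01
HLA-B*07:02
EOF

./topiary \
  --vcf somatic.vcf \
  --mhc-predictor netmhcpan \
  --mhc-alleles-file alleles.txt \
  --mhc-epitope-lengths 9 \
  --output-csv epitopes.csv
```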
PypiClean
/zopyx.check_ssl_domains-1.0.5.tar.gz/zopyx.check_ssl_domains-1.0.5/README.rst
zopyx.check-ssl-domains
=======================

Check a list of host/domain names for the expiration dates of their SSL
certificates.

Usage
-----

- create a file domains.txt containing a number of domain names - one per line, like::

    www.example.com
    www.example2.com

- install `zopyx.check-ssl-domains` using pip
- run `check-ssl-domains domains.txt`

Requirements
------------

- Python 3.6 or higher

Homepage
--------

- https://github.com/zopyx/ssl-cert-check

Author
------

Andreas Jung/ZOPYX

www.zopyx.com

[email protected]
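How such a check works (illustration)
-------------------------------------

For readers curious about the mechanics, the sketch below shows one way to read
a certificate's expiration date with only the Python standard library. It is an
illustration of the general approach, not the actual implementation of
`zopyx.check-ssl-domains`; the host name and timeout are placeholder values::

    import ssl
    import socket
    from datetime import datetime, timezone

    def cert_expiry(host, port=443, timeout=10.0):
        """Return the notAfter timestamp of the certificate served by host."""
        context = ssl.create_default_context()
        with socket.create_connection((host, port), timeout=timeout) as sock:
            with context.wrap_socket(sock, server_hostname=host) as tls:
                cert = tls.getpeercert()
        # notAfter looks like 'Jun  1 12:00:00 2030 GMT'
        expires = datetime.strptime(cert["notAfter"], "%b %d %H:%M:%S %Y %Z")
        return expires.replace(tzinfo=timezone.utc)

    print(cert_expiry("www.example.com"))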
PypiClean
/nomad_camels-0.2.0-py3-none-any.whl/nomad_camels/nomad_integration/sample_selection.py
import os.path

from PySide6.QtWidgets import QApplication, QDialog, QLabel, QLineEdit, QGridLayout, QDialogButtonBox, QComboBox, QTextEdit
from PySide6.QtCore import Qt

import yaml

from nomad_camels.ui_widgets.path_button_edit import Path_Button_Edit
from nomad_camels.nomad_integration import nomad_communication


class Sample_Selector(QDialog):
    def __init__(self, parent=None):
        super().__init__(parent)
        entries = nomad_communication.get_entries(parent)['data']
        if not entries:
            raise Exception('No Entries found!')
        self.entry_metadata = []
        self.entry_names = []
        self.entry_uploads = []
        self.entry_types = []
        self.entry_data = []
        for entry in entries:
            if 'archive' not in entry:
                continue
            arch = entry['archive']
            if 'data' not in arch:
                continue
            self.entry_data.append(arch['data'])
            self.entry_metadata.append(arch['metadata'])
            self.entry_names.append(arch['metadata']['entry_name'])
            self.entry_types.append(arch['metadata']['entry_type'])
            if 'upload_name' in arch['metadata']:
                self.entry_uploads.append(arch['metadata']['upload_name'])
            else:
                self.entry_uploads.append(arch['metadata']['upload_id'])

        label_upload = QLabel('Upload:')
        self.upload_box = QComboBox()
        self.upload_box.addItems(sorted(list(set(self.entry_uploads))))

        label_entry_type = QLabel('Entry Type:')
        self.entry_type_box = QComboBox()
        self.entry_type_box.addItems(sorted(list(set(self.entry_types))))

        label_entry = QLabel('Entry:')
        self.entry_box = QComboBox()
        self.entry_box.addItems(sorted(self.entry_names))

        self.entry_info = QTextEdit()
        self.entry_info.setTextInteractionFlags(Qt.TextSelectableByKeyboard | Qt.TextSelectableByMouse)

        self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)

        layout = QGridLayout()
        layout.addWidget(label_upload, 0, 0)
        layout.addWidget(self.upload_box, 0, 1)
        layout.addWidget(label_entry_type, 1, 0)
        layout.addWidget(self.entry_type_box, 1, 1)
        layout.addWidget(label_entry, 10, 0)
        layout.addWidget(self.entry_box, 10, 1)
        layout.addWidget(self.entry_info, 0, 2, 12, 1)
        layout.addWidget(self.button_box, 20, 0, 1, 3)
        self.setLayout(layout)

        self.entry_filtering()
        self.entry_change()
        self.upload_box.currentTextChanged.connect(self.entry_filtering)
        self.entry_type_box.currentTextChanged.connect(self.entry_filtering)
        self.entry_box.currentTextChanged.connect(self.entry_change)
        self.sample_data = {}
        self.adjustSize()

    def entry_filtering(self):
        upload = self.upload_box.currentText()
        entry_type = self.entry_type_box.currentText()
        entries = []
        for i, entry in enumerate(self.entry_names):
            if upload == self.entry_uploads[i] and entry_type == self.entry_types[i]:
                entries.append(entry)
        self.entry_box.clear()
        self.entry_box.addItems(entries)

    def entry_change(self):
        self.entry_info.setText(yaml.dump(self.get_current_entry_data()))

    def get_current_entry_data(self):
        entry = self.entry_box.currentText()
        for i, ent in enumerate(self.entry_names):
            if ent == entry:
                return self.entry_data[i]
        return {}

    def accept(self):
        self.sample_data = self.get_current_entry_data()
        if 'name' not in self.sample_data and 'Name' not in self.sample_data:
            self.sample_data['name'] = self.entry_box.currentText().split('.')[0]
        super().accept()
PypiClean
/v2/model/glance_show_image_member_schemas_request.py
import pprint
import re

import six


class GlanceShowImageMemberSchemasRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):
        """GlanceShowImageMemberSchemasRequest - a model defined in huaweicloud sdk"""
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GlanceShowImageMemberSchemasRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
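# Illustrative usage sketch (added for clarity; not part of the generated SDK
# file). The request model above carries no attributes of its own, so building
# and serializing it is trivial:
#
#     request = GlanceShowImageMemberSchemasRequest()
#     print(request.to_dict())   # -> {}
#     print(request.to_str())    # pprint-formatted representation
#     print(request == GlanceShowImageMemberSchemasRequest())  # -> True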
PypiClean
/justbackoff-0.6.0.tar.gz/justbackoff-0.6.0/README.md
# justbackoff

[![Build Status](https://travis-ci.org/admiralobvious/justbackoff.svg?branch=master)](https://travis-ci.org/admiralobvious/justbackoff)

A simple backoff algorithm for Python >3.6.

### Install

```shell script
$ pip install justbackoff
```

### Usage

Backoff is a counter. It starts at `min_ms`. After every call to `duration()`, it is multiplied by `factor`.
It is capped at `max_ms`. It returns to `min_ms` on every call to `reset()`.
`jitter` adds randomness ([see below](#example-using-jitter)).

---

#### Simple example

```python
from justbackoff import Backoff

b = Backoff(min_ms=100, max_ms=10000, factor=2, jitter=False)

print(b.duration())
print(b.duration())
print(b.duration())

print("Reset!")
b.reset()

print(b.duration())
```

```shell script
0.1
0.2
0.4
Reset!
0.1
```

---

#### Example using `socket` package

```python
import socket
import time

from justbackoff import Backoff

sock = socket.socket()
b = Backoff()

while True:
    try:
        sock.connect(("127.0.0.1", 1337))
    except Exception as e:
        d = b.duration()
        print("{}, reconnecting in {} seconds".format(e, d))
        time.sleep(d)
        continue

    b.reset()
    sock.send("Hello, world!")
    sock.close()
```

---

#### Example using `jitter`

Enabling `jitter` adds some randomization to the backoff durations.
[See Amazon's writeup of performance gains using jitter](http://www.awsarchitectureblog.com/2015/03/backoff.html).
Seeding is not necessary but doing so gives repeatable results.

```python
import random

from justbackoff import Backoff

b = Backoff(min_ms=100, max_ms=10000, factor=2, jitter=True)

random.seed(42)

print(b.duration())
print(b.duration())
print(b.duration())

print("Reset!")
b.reset()

print(b.duration())
print(b.duration())
print(b.duration())
```

```shell script
0.1
0.102501075522
0.182508795511
Reset!
0.1
0.173647121416
0.303009846227
```

#### Credits

Ported from Go [backoff](https://github.com/jpillora/backoff)
PypiClean
/LanguageProcessor-1.0.0.tar.gz/LanguageProcessor-1.0.0/LanguageProcessor.py
from ChemTagger import ChemTagger from NaiveBayes import NaiveBayes import pickle import os.path as op from math import log10 from random import shuffle class LanguageProcessor(): def __init__(self, decisions=None, processors=None): self.tagger = ChemTagger() self.decisions = decisions self.processors = processors if not self.processors == None: for tag_type in self.processors.keys(): for process in self.processors[tag_type].keys(): file = self.processors[tag_type][process] self.processors[tag_type][process] = self.loadLanguageModel(file) def extractAttributes(self, item_features, selected_attributes): attributes = {} for item in selected_attributes: if item in item_features: attributes[item] = 'True' else: attributes[item] = 'False' return(attributes) def loadLanguageModel(self, file): if not op.isfile(file): raise Exception("\'" + file + "\' is not a file") with open(file, 'rb') as fh: lang_model = pickle.load(fh) if not isinstance(lang_model, NaiveBayes): raise Exception("\'" + file + "\' is not a valid language model") return(lang_model) def identify_type(self, label): l_type = set() if self.decisions == None: raise Exception("No decisions defined") for decision in self.decisions: if not decision[0].match(label) == None: l_type.add(decision[1]) return(l_type) def formatNumber(self, number): return("{0:.6f}".format(number)) def addNumbers(self, number1, number2): return(self.formatNumber(float(number1) + float(number2))) def prodNumbers(self, numbers): prod = 1 for number in numbers: prod *= float(number) return(self.formatNumber(prod)) def compilePositionProxies(self, left, right): missing1 = 3 - len(left) missing2 = 3 - len(right) for i in range(missing1): left.add("#S" + str(i + 1)) for i in range(missing2): right.add("#E" + str(i + 1)) return(";".join(left) + ";" + ";".join(right)) def defaultScoring(self, catagories, samples=1000): max_length = None c = len(catagories.keys()) for catagory in catagories.keys(): length = len(catagories[catagory]) if max_length == None: max_length = length if max_length > length: max_length = length N = c * max_length * samples feature_counts = {} for i in range(samples): for catagory in catagories.keys(): if not catagory in feature_counts.keys(): feature_counts[catagory] = {} shuffle(catagories[catagory]) for j in range(max_length): for word in set(catagories[catagory][j]): if not word in feature_counts[catagory].keys(): feature_counts[catagory][word] = 0 feature_counts[catagory][word] += 1 contribution = {} for catagory in feature_counts.keys(): for word in feature_counts[catagory].keys(): if not word in contribution.keys(): contribution[word] = 0 contribution[word] += feature_counts[catagory][word] scores = {} #statistics = {} for word in contribution.keys(): scores[word] = (contribution[word] / N) * log10(N / (1 + contribution[word])) #statistics[word] = [contribution[word], N] sorted_features = sorted(scores, key=scores.get, reverse=True) return(sorted_features, scores) def getConfusionMatrix(self, actual, predicted): confusion_matrix = {} if not len(predicted) == len(actual): raise Exception("Actual and predicted not of the same size") length = len(predicted) for i in range(length): if not actual[i] in confusion_matrix.keys(): confusion_matrix[actual[i]] = {} if not predicted[i] in confusion_matrix[actual[i]].keys(): confusion_matrix[actual[i]][predicted[i]] = 0 confusion_matrix[actual[i]][predicted[i]] += 1 return(confusion_matrix) def accuracy(self, confusion_matrix): true = 0 total = 0 measures = {} for actual in confusion_matrix.keys(): 
            l_true = 0
            l_total = 0
            for predicted in confusion_matrix[actual].keys():
                if actual == predicted:
                    true += confusion_matrix[actual][predicted]
                    l_true += confusion_matrix[actual][predicted]
                l_total += confusion_matrix[actual][predicted]
                total += confusion_matrix[actual][predicted]
            # accuracy for this label
            l_ac = l_true / l_total
            measures[actual] = l_ac
        # overall accuracy across all labels
        measures['model'] = true / total
        return(measures)

    def macroF1(self, confusion_matrix):
        # Macro-averaged F1: per-label tp/fn/fp counts are collected from the
        # confusion matrix, then precision and recall are averaged over labels.
        measures = {}
        fp = {}
        for actual in confusion_matrix.keys():
            tp = 0
            fn = 0
            for prediction in confusion_matrix[actual].keys():
                if actual == prediction:
                    tp = confusion_matrix[actual][prediction]
                else:
                    fn += confusion_matrix[actual][prediction]
                    if not prediction in fp.keys():
                        fp[prediction] = 0
                    fp[prediction] += confusion_matrix[actual][prediction]
            measures[actual] = {'tp': tp, 'fn': fn, 'fp': 0}
        for prediction in fp.keys():
            if not prediction in measures.keys():
                measures[prediction] = {'tp': 0, 'fn': 0}
            measures[prediction]['fp'] = fp[prediction]
        precision = 0
        recall = 0
        for label in measures.keys():
            precision += measures[label]['tp'] / (measures[label]['tp'] + measures[label]['fp'])
            recall += measures[label]['tp'] / (measures[label]['tp'] + measures[label]['fn'])
        precision /= len(measures.keys())
        recall /= len(measures.keys())
        return(2 * (precision * recall) / (precision + recall))

    def processTagText(self, ref, text):
        paragraph_tags = self.tagger.tagParagraph(text)
        output_tags = []
        for tag in paragraph_tags:
            labels = ';'.join(set(tag[3]))
            output_tags.append([ref, str(tag[0]), tag[1], tag[2], labels, text[tag[1]:tag[2]]])
        return(output_tags)

    def compileFeatures(self, text, tags):
        tagged_sentences = self.tagger.getTaggedSentences(text, tags)
        proxi_features = self.tagger.getProximityFeatures(tagged_sentences)
        if not len(tagged_sentences) == len(proxi_features):
            raise Exception("Feature discontinuation detected")
        proxi_dictionary = {}
        for tag in proxi_features:
            proxi_dictionary[tag[0]] = set(self.compilePositionProxies(tag[4], tag[5]).split(";"))
        features = {}
        for tag in tagged_sentences:
            features[tag[0]] = [self.identify_type(tag[3]), self.tagger.getWordFeatures(tag[4]), proxi_dictionary[tag[0]]]
        return([self.tagger.getWordFeatures(text), features])

    def compileFeatureVector(self, text, tags, labels=None):
        (abstract, positions) = self.compileFeatures(text, tags)
        # prefix each feature with the scope it came from ('A', 'S', 'P')
        lv = lambda label, vector: [label + '#' + v for v in vector]
        vectors = []
        for p in positions:
            label = None
            if not labels == None:
                if p in labels.keys():
                    label = labels[p]
            l_type = positions[p][0]
            features = lv('A', abstract)
            for item in lv('S', positions[p][1]):
                features.append(item)
            for item in lv('P', positions[p][2]):
                features.append(item)
            vectors.append([l_type, p, label, features])
        return(vectors)

    def compilePairFeatureVectors(self, pairs, text, tags):
        vectors = {}
        for vector in self.compileFeatureVector(text, tags):
            if not vector[1] in vectors.keys():
                vectors[vector[1]] = []
            vectors[vector[1]].append(vector[3])
        label_to_positions = {}
        for tag in tags:
            if not tag[4] in label_to_positions.keys():
                label_to_positions[tag[4]] = set()
            label_to_positions[tag[4]].add(str(tag[2]) + '-' + str(tag[3]))

        def getFeatureVector(prefix, label):
            output_vectors = []
            for position in label_to_positions[label]:
                for v in vectors[position]:
                    for i in range(len(v)):
                        r_prefix = prefix + '#'
                        if not r_prefix in v[i]:
                            v[i] = r_prefix + v[i]
                    output_vectors.append(v)
            return(output_vectors)

        all_combinations = {}
        for pair in pairs:
            ref = pair[0] + "=>" + pair[1]
            all_combinations[ref] = []
            source = getFeatureVector("source", pair[0])
            target = getFeatureVector("target", pair[1])
            for t in target:
                target_space = set(t)
                for s in source:
                    # copy so features from one source vector do not leak into
                    # the next source/target combination
                    feature_space = set(target_space)
                    for e in s:
                        feature_space.add(e)
                    all_combinations[ref].append(feature_space)
        return(all_combinations, label_to_positions)

    def determinePairLabelValue(self, source_positions, target_positions, label_values):
        def getMostUsedLabel(positions):
            label_count = {}
            for p in positions:
                if not p in label_values.keys():
                    return(None)
                value = label_values[p]
                if not value in label_count.keys():
                    label_count[value] = 0
                label_count[value] += 1
            labelValues = sorted(label_count.keys(), key=lambda x: label_count[x], reverse=True)
            return(labelValues[0])

        s = getMostUsedLabel(source_positions)
        t = getMostUsedLabel(target_positions)
        if s == None or t == None:
            return(None)
        return(sorted([s, t])[0])

    def processPairLangauge(self, process_path, pairs, text, tags):
        if self.processors == None:
            raise Exception("No processors defined")
        (features, label_to_positions) = self.compilePairFeatureVectors(pairs, text, tags)
        scores = {}
        for pair in pairs:
            ref = pair[0] + "=>" + pair[1]
            if not ref in scores.keys():
                scores[ref] = []
            model = self.processors[process_path[0]][process_path[1]]
            for feature in features[ref]:
                attributes = self.extractAttributes(feature, model.attributes)
                scores[ref].append(model.preferredLabel(model.predict({'attributes': attributes})))
        return(scores)

    def processTagLangauge(self, text, tags):
        if self.processors == None:
            raise Exception("No processors defined")
        scores = {}
        for vector in self.compileFeatureVector(text, tags):
            if not vector[1] in scores.keys():
                scores[vector[1]] = {}
            for processor in self.processors[vector[0]]:
                model = self.processors[vector[0]][processor]
                attributes = self.extractAttributes(vector[3], model.attributes)
                if not processor in scores[vector[1]].keys():
                    scores[vector[1]][processor] = model.preferredLabel(model.predict({'attributes': attributes}))
        return(scores)

    def split_seq(self, seq, size):
        newseq = []
        splitsize = 1.0 / size * len(seq)
        for i in range(size):
            newseq.append(seq[int(round(i * splitsize)):int(round((i + 1) * splitsize))])
        return newseq

    def balanced_execution(self, data, features, cross_fold=10, smoothing=1 / 3):
        # Down-sample every category to the size of the smallest one, run a
        # cross-fold evaluation, then train a final model on the full subset.
        max_length = None
        for catagory in data.keys():
            length = len(data[catagory])
            if max_length == None:
                max_length = length
            if max_length > length:
                max_length = length
        data_subset = []
        for catagory in data.keys():
            shuffle(data[catagory])
            for i in range(max_length):
                data_subset.append([data[catagory][i], catagory])
        shuffle(data_subset)
        parts = self.split_seq(data_subset, cross_fold)
        actual_labels = []
        predicted_labels = []
        for i in range(cross_fold):
            eval_set = parts[i]
            train_set = []
            for j in range(cross_fold):
                if not i == j:
                    for item in parts[j]:
                        train_set.append(item)
            model = NaiveBayes()
            for f in features:
                model.set_smoothing({f: smoothing})
            for item in train_set:
                attributes = self.extractAttributes(item[0], features)
                model.add_instances({'attributes': attributes, 'label': item[1], 'cases': 1})
            model.train()
            for item in eval_set:
                attributes = self.extractAttributes(item[0], features)
                predicted_labels.append(model.preferredLabel(model.predict({'attributes': attributes})))
                actual_labels.append(item[1])
        model = NaiveBayes()
        for f in features:
            model.set_smoothing({f: smoothing})
        for item in data_subset:
            attributes = self.extractAttributes(item[0], features)
            model.add_instances({'attributes': attributes, 'label': item[1], 'cases': 1})
        model.train()
        return(self.getConfusionMatrix(actual_labels, predicted_labels), model)

    def one_feature_reduction(self, data, features, cross_fold=10, smoothing=1 / 3):
        # Evaluate the macro-F1 change caused by removing each feature in turn.
        (start_confusion, start_model) = self.balanced_execution(data, features, cross_fold, smoothing)
        start_f1 = self.macroF1(start_confusion)
        remove_penalty = {}
        for feature in features:
            test_features = set(features)
            test_features.remove(feature)
            (confusion_matrix, run_model) = self.balanced_execution(data, list(test_features), cross_fold, smoothing)
            f1 = self.macroF1(confusion_matrix)
            remove_penalty[feature] = f1 - start_f1
        remove_feature = sorted(remove_penalty, key=lambda x: remove_penalty[x])[0]
        return(remove_feature, remove_penalty[remove_feature])
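

# --- Illustrative sketch (editor's addition; not part of the original module) ---
# The methods above consume a confusion matrix shaped as a nested dict,
# confusion_matrix[actual_label][predicted_label] -> count. The standalone
# snippet below builds a tiny hand-made matrix and mirrors the macro-F1
# bookkeeping (tp/fn/fp per label); the labels and counts are arbitrary.
if __name__ == "__main__":
    toy_matrix = {
        'positive': {'positive': 8, 'negative': 2},
        'negative': {'positive': 1, 'negative': 9},
    }
    counts = {}
    for actual, row in toy_matrix.items():
        for predicted, n in row.items():
            counts.setdefault(actual, {'tp': 0, 'fn': 0, 'fp': 0})
            counts.setdefault(predicted, {'tp': 0, 'fn': 0, 'fp': 0})
            if actual == predicted:
                counts[actual]['tp'] += n
            else:
                counts[actual]['fn'] += n
                counts[predicted]['fp'] += n
    precision = sum(c['tp'] / (c['tp'] + c['fp']) for c in counts.values()) / len(counts)
    recall = sum(c['tp'] / (c['tp'] + c['fn']) for c in counts.values()) / len(counts)
    print("macro F1:", 2 * precision * recall / (precision + recall))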
PypiClean
/netmiko_mishki-4.1.2.5.tar.gz/netmiko_mishki-4.1.2.5/src/netmiko/cisco/cisco_ios.py
from typing import Any, Optional, Callable, Type from types import TracebackType import time import re import os import hashlib import io from netmiko.cisco_base_connection import CiscoBaseConnection, CiscoFileTransfer from netmiko.base_connection import BaseConnection class CiscoIosBase(CiscoBaseConnection): """Common Methods for IOS (both SSH and telnet).""" def session_preparation(self) -> None: """Prepare the session after the connection has been established.""" cmd = "terminal width 511" self.set_terminal_width(command=cmd, pattern=cmd) self.disable_paging() self.set_base_prompt() def set_base_prompt( self, pri_prompt_terminator: str = "#", alt_prompt_terminator: str = ">", delay_factor: float = 1.0, pattern: Optional[str] = None, ) -> str: """ Cisco IOS/IOS-XE abbreviates the prompt at 20-chars in config mode. Consequently, abbreviate the base_prompt """ base_prompt = super().set_base_prompt( pri_prompt_terminator=pri_prompt_terminator, alt_prompt_terminator=alt_prompt_terminator, delay_factor=delay_factor, pattern=pattern, ) self.base_prompt = base_prompt[:16] return self.base_prompt def check_config_mode( self, check_string: str = ")#", pattern: str = r"[>#]", force_regex: bool = False, ) -> bool: """ Checks if the device is in configuration mode or not. Cisco IOS devices abbreviate the prompt at 20 chars in config mode """ return super().check_config_mode(check_string=check_string, pattern=pattern) def save_config( self, cmd: str = "write mem", confirm: bool = False, confirm_response: str = "" ) -> str: """Saves Config Using Copy Run Start""" return super().save_config( cmd=cmd, confirm=confirm, confirm_response=confirm_response ) class CiscoIosSSH(CiscoIosBase): """Cisco IOS SSH driver.""" pass class CiscoIosTelnet(CiscoIosBase): """Cisco IOS Telnet driver.""" pass class CiscoIosSerial(CiscoIosBase): """Cisco IOS Serial driver.""" pass class CiscoIosFileTransfer(CiscoFileTransfer): """Cisco IOS SCP File Transfer driver.""" pass class InLineTransfer(CiscoIosFileTransfer): """Use TCL on Cisco IOS to directly transfer file.""" def __init__( self, ssh_conn: BaseConnection, source_file: str = "", dest_file: str = "", file_system: Optional[str] = None, direction: str = "put", source_config: Optional[str] = None, socket_timeout: float = 10.0, progress: Optional[Callable[..., Any]] = None, progress4: Optional[Callable[..., Any]] = None, hash_supported: bool = True, ) -> None: if not dest_file: raise ValueError( "Destination file must be specified for InlineTransfer operations." ) if hash_supported is False: raise ValueError("hash_supported=False is not supported for InLineTransfer") if source_file and source_config: msg = "Invalid call to InLineTransfer both source_file and source_config specified." raise ValueError(msg) if direction != "put": raise ValueError("Only put operation supported by InLineTransfer.") if progress is not None or progress4 is not None: raise NotImplementedError( "Progress bar is not supported on inline transfers." 
) else: self.progress = progress self.progress4 = progress4 self.ssh_ctl_chan = ssh_conn self.source_file = source_file if source_file: self.source_config = None self.source_md5 = self.file_md5(source_file) self.file_size = os.stat(source_file).st_size elif source_config: self.source_config = source_config self.source_md5 = self.config_md5(source_config) self.file_size = len(source_config.encode("UTF-8")) self.dest_file = dest_file self.direction = direction if not file_system: self.file_system = self.ssh_ctl_chan._autodetect_fs() else: self.file_system = file_system self.socket_timeout = socket_timeout @staticmethod def _read_file(file_name: str) -> str: with io.open(file_name, "rt", encoding="utf-8") as f: return f.read() @staticmethod def _tcl_newline_rationalize(tcl_string: str) -> str: r""" When using put inside a TCL {} section the newline is considered a new TCL statement and causes a missing curly-brace message. Convert "\n" to "\r". TCL will convert the "\r" to a "\n" i.e. you will see a "\n" inside the file on the Cisco IOS device. """ NEWLINE = r"\n" CARRIAGE_RETURN = r"\r" tmp_string = re.sub(NEWLINE, CARRIAGE_RETURN, tcl_string) if re.search(r"[{}]", tmp_string): msg = "Curly brace detected in string; TCL requires this be escaped." raise ValueError(msg) return tmp_string def __enter__(self) -> "InLineTransfer": self._enter_tcl_mode() return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: self._exit_tcl_mode() def _enter_tcl_mode(self) -> str: TCL_ENTER = "tclsh" cmd_failed = ['Translating "tclsh"', "% Unknown command", "% Bad IP address"] output = self.ssh_ctl_chan._send_command_str( TCL_ENTER, expect_string=r"\(tcl\)#", strip_prompt=False, strip_command=False, ) for pattern in cmd_failed: if pattern in output: raise ValueError(f"Failed to enter tclsh mode on router: {output}") return output def _exit_tcl_mode(self) -> str: TCL_EXIT = "tclquit" self.ssh_ctl_chan.write_channel("\r") time.sleep(1) output = self.ssh_ctl_chan.read_channel() if "(tcl)" in output: self.ssh_ctl_chan.write_channel(TCL_EXIT + "\r") time.sleep(1) output += self.ssh_ctl_chan.read_channel() return output def establish_scp_conn(self) -> None: raise NotImplementedError def close_scp_chan(self) -> None: raise NotImplementedError def local_space_available(self) -> bool: raise NotImplementedError def file_md5(self, file_name: str, add_newline: bool = False) -> str: """Compute MD5 hash of file.""" if add_newline is True: raise ValueError( "add_newline argument is not supported for inline transfers." 
) file_contents = self._read_file(file_name) file_contents = file_contents + "\n" # Cisco IOS automatically adds this file_contents_bytes = file_contents.encode("UTF-8") return hashlib.md5(file_contents_bytes).hexdigest() def config_md5(self, source_config: str) -> str: """Compute MD5 hash of text.""" file_contents = source_config + "\n" # Cisco IOS automatically adds this file_contents_bytes = file_contents.encode("UTF-8") return hashlib.md5(file_contents_bytes).hexdigest() def put_file(self) -> None: curlybrace = r"{" TCL_FILECMD_ENTER = 'puts [open "{}{}" w+] {}'.format( self.file_system, self.dest_file, curlybrace ) TCL_FILECMD_EXIT = "}" if self.source_file: file_contents = self._read_file(self.source_file) elif self.source_config: file_contents = self.source_config file_contents = self._tcl_newline_rationalize(file_contents) # Try to remove any existing data self.ssh_ctl_chan.clear_buffer() self.ssh_ctl_chan.write_channel(TCL_FILECMD_ENTER) time.sleep(0.25) self.ssh_ctl_chan.write_channel(file_contents) self.ssh_ctl_chan.write_channel(TCL_FILECMD_EXIT + "\r") # This operation can be slow (depends on the size of the file) read_timeout = 100 sleep_time = 4 if self.file_size >= 2500: read_timeout = 300 sleep_time = 12 elif self.file_size >= 7500: read_timeout = 600 sleep_time = 25 # Initial delay time.sleep(sleep_time) # File paste and TCL_FILECMD_exit should be indicated by "router(tcl)#" output = self.ssh_ctl_chan.read_until_pattern( pattern=r"\(tcl\).*$", re_flags=re.M, read_timeout=read_timeout ) # The file doesn't write until tclquit TCL_EXIT = "tclquit" self.ssh_ctl_chan.write_channel(TCL_EXIT + "\r") time.sleep(1) # Read all data remaining from the TCLSH session pattern = rf"tclquit.*{self.ssh_ctl_chan.base_prompt}.*$" re_flags = re.DOTALL | re.M output += self.ssh_ctl_chan.read_until_pattern( pattern=pattern, re_flags=re_flags, read_timeout=read_timeout ) return None def get_file(self) -> None: raise NotImplementedError def enable_scp(self, cmd: str = "") -> None: raise NotImplementedError def disable_scp(self, cmd: str = "") -> None: raise NotImplementedError
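

# --- Usage sketch (editor's addition; not part of the original module) ---
# A hedged example of driving InLineTransfer from this module: the host,
# credentials, file system and config text below are hypothetical placeholders,
# and a reachable IOS device is assumed. Only "put" of small text payloads is
# supported, as enforced in __init__ above.
if __name__ == "__main__":
    from netmiko import ConnectHandler  # assumes the package's usual entry point

    connection = ConnectHandler(
        device_type="cisco_ios",
        host="192.0.2.10",
        username="admin",
        password="secret",
    )
    with InLineTransfer(
        ssh_conn=connection,
        source_config="hostname LAB-RTR-1\n",
        dest_file="lab_config.txt",
        file_system="flash:",
    ) as transfer:
        transfer.put_file()
        print("local md5 of pushed config:", transfer.source_md5)
    connection.disconnect()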
PypiClean
/zohocrmsdk2_0-5.1.0.tar.gz/zohocrmsdk2_0-5.1.0/zcrmsdk/src/com/zoho/crm/api/taxes/preference.py
try:
    from zcrmsdk.src.com.zoho.crm.api.exception import SDKException
    from zcrmsdk.src.com.zoho.crm.api.util import Constants
except Exception:
    from ..exception import SDKException
    from ..util import Constants


class Preference(object):
    def __init__(self):
        """Creates an instance of Preference"""

        self.__auto_populate_tax = None
        self.__modify_tax_rates = None
        self.__key_modified = dict()

    def get_auto_populate_tax(self):
        """
        The method to get the auto_populate_tax

        Returns:
            bool: A bool representing the auto_populate_tax
        """

        return self.__auto_populate_tax

    def set_auto_populate_tax(self, auto_populate_tax):
        """
        The method to set the value to auto_populate_tax

        Parameters:
            auto_populate_tax (bool) : A bool representing the auto_populate_tax
        """

        if auto_populate_tax is not None and not isinstance(auto_populate_tax, bool):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: auto_populate_tax EXPECTED TYPE: bool', None, None)

        self.__auto_populate_tax = auto_populate_tax
        self.__key_modified['auto_populate_tax'] = 1

    def get_modify_tax_rates(self):
        """
        The method to get the modify_tax_rates

        Returns:
            bool: A bool representing the modify_tax_rates
        """

        return self.__modify_tax_rates

    def set_modify_tax_rates(self, modify_tax_rates):
        """
        The method to set the value to modify_tax_rates

        Parameters:
            modify_tax_rates (bool) : A bool representing the modify_tax_rates
        """

        if modify_tax_rates is not None and not isinstance(modify_tax_rates, bool):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modify_tax_rates EXPECTED TYPE: bool', None, None)

        self.__modify_tax_rates = modify_tax_rates
        self.__key_modified['modify_tax_rates'] = 1

    def is_key_modified(self, key):
        """
        The method to check if the user has modified the given key

        Parameters:
            key (string) : A string representing the key

        Returns:
            int: An int representing the modification
        """

        if key is not None and not isinstance(key, str):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)

        if key in self.__key_modified:
            return self.__key_modified.get(key)

        return None

    def set_key_modified(self, key, modification):
        """
        The method to mark the given key as modified

        Parameters:
            key (string) : A string representing the key
            modification (int) : An int representing the modification
        """

        if key is not None and not isinstance(key, str):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)

        if modification is not None and not isinstance(modification, int):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None)

        self.__key_modified[key] = modification
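

# --- Usage sketch (editor's addition; not part of the original module) ---
# Shows the setter validation and the key-modification bookkeeping of the
# Preference class above; the values chosen here are arbitrary.
if __name__ == "__main__":
    preference = Preference()
    preference.set_auto_populate_tax(True)
    preference.set_modify_tax_rates(False)
    print(preference.get_auto_populate_tax())               # True
    print(preference.is_key_modified('modify_tax_rates'))   # 1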
PypiClean
/colour_hdri-0.2.2-py3-none-any.whl/colour_hdri/distortion/vignette.py
from __future__ import annotations import numpy as np from dataclasses import dataclass from scipy.interpolate import RBFInterpolator, RectBivariateSpline from scipy.ndimage import center_of_mass, gaussian_filter from scipy.optimize import curve_fit from colour.algebra import ( LinearInterpolator, linear_conversion, polar_to_cartesian, ) from colour.hints import ( ArrayLike, Callable, Literal, NDArrayFloat, Tuple, cast, ) from colour.utilities import ( CanonicalMapping, MixinDataclassIterable, as_float_array, as_int_array, ones, tsplit, tstack, validate_method, zeros, ) __author__ = "Colour Developers" __copyright__ = "Copyright 2015 Colour Developers" __license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "[email protected]" __status__ = "Production" __all__ = [ "apply_radial_gradient", "vignette_principal_point", "parabolic_2D_function", "hyperbolic_cosine_2D_function", "FunctionVignetteCharacterisation", "VIGNETTE_CHARACTERISATION_2D_FUNCTIONS", "DataVignetteCharacterisation", "characterise_vignette_2D_function", "correct_vignette_2D_function", "characterise_vignette_bivariate_spline", "correct_vignette_bivariate_spline", "radial_sampling_function", "vignette_sampling_coordinates", "characterise_vignette_RBF", "correct_vignette_RBF", "VIGNETTE_CHARACTERISATION_METHODS", "characterise_vignette", "VIGNETTE_CORRECTION_METHODS", "correct_vignette", ] def apply_radial_gradient( image: ArrayLike, scale: ArrayLike = (1, 1), offset: ArrayLike = (0.5, 0.5), intensity: float = 1, bias: float = 1, noise: float = 0, ) -> NDArrayFloat: """ Apply a radial gradient on given image. Parameters ---------- image Image to apply the radial gradient onto. scale Radial gradient scale as a ratio of the image height. offset Radial gradiant offset from the image center and as a ratio of image dimensions. intensity Radial gradient intensity where a value of 1 produces black at the top and bottom corners. bias Power function applied on the gradient. noise Noise factor. Returns ------- :class:`numpy.ndarray` Image with radial gradient applied. Examples -------- >>> np.around(apply_radial_gradient(np.ones([5, 7])), 3) array([[ 0. , 0.023, 0.212, 0.286, 0.212, 0.023, 0. ], [ 0. , 0.244, 0.511, 0.643, 0.511, 0.244, 0. ], [ 0. , 0.333, 0.667, 1. , 0.667, 0.333, 0. ], [ 0. , 0.244, 0.511, 0.643, 0.511, 0.244, 0. ], [ 0. , 0.023, 0.212, 0.286, 0.212, 0.023, 0. ]]) """ image = as_float_array(np.atleast_3d(image)) scale_x, scale_y = tsplit(scale) offset_x, offset_y = tsplit(offset) height, width = cast(Tuple, image.shape)[0:2] ratio = height / width samples_x = np.linspace(-1, 1, height) samples_x *= (1 / scale_x) * ratio samples_x += offset_x - 0.5 samples_y = np.linspace(-1, 1, width) samples_y *= 1 / scale_y samples_y += offset_y - 0.5 distance = cast( NDArrayFloat, np.sqrt((samples_x**2)[..., None] + (samples_y**2)[None, ...]), ) image *= 1 - distance[..., None] * intensity image **= bias image += np.random.random(image.shape) * noise return np.squeeze(np.nan_to_num(np.clip(image, 0, 1))) def vignette_principal_point( image: ArrayLike, threshold: float = 0.99 ) -> NDArrayFloat: """ Return the vignette principal point for given image. Parameters ---------- image Vignette image to return the principal point of. threshold Pixels threshold before finding the vignette principal point. Returns ------- :class:`numpy.ndarray` Vignette principal point. Examples -------- >>> vignette_principal_point( # doctest: +ELLIPSIS ... 
apply_radial_gradient(np.ones([5, 7, 3])) ... ) array([ 0.4 , 0.4285714...]) """ image = as_float_array(image) shape_x, shape_y, _ = image.shape M = np.median(image, axis=-1) thresholded = zeros(M.shape) thresholded[np.max(M) * threshold < M] = 1 return center_of_mass(thresholded) / as_float_array([shape_x, shape_y]) def parabolic_2D_function( x_y: Tuple, a_x2: float, a_x1: float, a_x0: float, a_y2: float, a_y1: float, a_y0: float, ): """ Evaluate a parabolic 2D function on given coordinate matrices from coordinate vectors. The parabolic 2D function adopts the following form as given by :cite:`Kordecki2016`: :math:`I_v(x, y) = \\cfrac{1}{2}(a_{x2}x^2 + a_{x1}x + a_{x0}) + \ \\cfrac{1}{2}(a_{y2}y^2 + a_{y1}y + a_{y0})` Parameters ---------- x_y Coordinate matrices from coordinate vectors to evaluate the parabolic 2d function on. The coordinate matrices can be generated with the :func:`numpy.meshgrid` definition. a_x2 Coefficient :math:`a_{x2}` for the parabolic equation. a_x1 Coefficient :math:`a_{x1}` for the parabolic equation. a_x0 Coefficient :math:`a_{x0}` for the parabolic equation. a_y2 Coefficient :math:`a_{y2}` for the parabolic equation. a_y1 Coefficient :math:`a_{y1}` for the parabolic equation. a_y0 Coefficient :math:`a_{y0}` for the parabolic equation. Returns ------- :class:`numpy.ndarray` Coordinate matrices with evaluated parabolic 2D function. References ---------- :cite:`Kordecki2016` Examples -------- >>> x_1, y_1 = np.meshgrid(np.linspace(0, 1, 4), np.linspace(0, 1, 3)) >>> parabolic_2D_function( # doctest: +ELLIPSIS ... (x_1, y_1), -0.5, 0, 1, -0.5, 0, 1 ... ) array([[ 1. , 0.9722222..., 0.8888888..., 0.75 ], [ 0.9375 , 0.9097222..., 0.8263888..., 0.6875 ], [ 0.75 , 0.7222222..., 0.6388888..., 0.5 ]]) """ x, y = x_y I_v = (a_x2 * x**2 + a_x1 * x + a_x0) / 2 I_v += (a_y2 * y**2 + a_y1 * y + a_y0) / 2 return I_v def hyperbolic_cosine_2D_function( x_y: Tuple, r_x: float, x_0: float, r_y: float, y_0: float, c: float, ): """ Evaluate a hyperbolic cosine 2D function on given coordinate matrices from coordinate vectors. The hyperbolic cosine 2D function adopts the following form: :math:`I_v(x, y) = 1 - (cosh(r_x * (x - x_0)) * cosh(r_y * (y - y_0))) + c` Parameters ---------- x_y Coordinate matrices from coordinate vectors to evaluate the parabolic 2d function on. The coordinate matrices can be generated with the :func:`numpy.meshgrid` definition. r_x Coefficient :math:`r_x` for the hyperbolic cosine equation. x_0 Coefficient :math:`x_0` for the hyperbolic cosine equation. r_y Coefficient :math:`r_y` for the hyperbolic cosine equation. y_0 Coefficient :math:`y_0` for the hyperbolic cosine equation. c_y Coefficient :math:`c_y` for the hyperbolic cosine equation. c Coefficient :math:`c` for the hyperbolic cosine equation. Returns ------- :class:`numpy.ndarray` Coordinate matrices with evaluated hyperbolic cosine 2D function. References ---------- :cite:`WonpilYu2004` Examples -------- >>> x_1, y_1 = np.meshgrid(np.linspace(0, 1, 4), np.linspace(0, 1, 3)) >>> hyperbolic_cosine_2D_function( # doctest: +ELLIPSIS ... (x_1, y_1), 1, -0.5, 1, -0.5, 1 ... ) array([[ 1. 
..., 0.9439281..., 0.7694244..., 0.4569193...], [ 0.8723740..., 0.8091459..., 0.6123710..., 0.2599822...], [ 0.4569193..., 0.3703959..., 0.1011226..., -0.3810978...]]) """ x, y = x_y x = linear_conversion(x, (0, 1), (-0.5, 0.5)) y = linear_conversion(y, (0, 1), (-0.5, 0.5)) I_v = 1 - (np.cosh(r_x * (x - x_0)) * np.cosh(r_y * (y - y_0))) + c return I_v @dataclass class FunctionVignetteCharacterisation(MixinDataclassIterable): """ Define a vignette characterisation function and the required data for fitting it to an image. Parameters ---------- function Vignette characterisation function. p0 Initial guess for the function fitting, passed to :func:`scipy.optimize.curve_fit` definition. bounds Lower and upper bounds for the function fitting, passed to :func:`scipy.optimize.curve_fit` definition. """ function: Callable p0: NDArrayFloat bounds: NDArrayFloat VIGNETTE_CHARACTERISATION_2D_FUNCTIONS: CanonicalMapping = CanonicalMapping( { "Parabolic": FunctionVignetteCharacterisation( parabolic_2D_function, np.array([0, 0, 1, 0, 0, 1]), np.array( [ (-5.0, -0.5, 0.9, -5.0, -0.5, 0.9), (+0.0, +0.5, 1.1, +0.0, +0.5, 1.1), ] ), ), "Hyperbolic Cosine": FunctionVignetteCharacterisation( hyperbolic_cosine_2D_function, np.array([1, 0, 1, 0, 0]), np.array( [ (0.5, -1.0, 0.5, -1.0, 0.0), (5.0, +0.0, 5.0, +0.0, 1.5), ] ), ), } ) VIGNETTE_CHARACTERISATION_2D_FUNCTIONS.__doc__ = """ Supported vignette characterisation 2D functions. References ---------- :cite:`Kordecki2016`, :cite:`WonpilYu2004` """ @dataclass class DataVignetteCharacterisation(MixinDataclassIterable): """ Define the data of a vignette characterisation process. Parameters ---------- parameters Vignette characterisation parameters. principal_point Vignette principal point. """ # noqa: D405, D407, D410, D411, D414 parameters: ArrayLike principal_point: ArrayLike def characterise_vignette_2D_function( image: ArrayLike, function: Literal["Parabolic", "Hyperbolic Cosine"] | str = "Parabolic", ) -> DataVignetteCharacterisation: """ Characterise the vignette of given image using a given 2D function. Parameters ---------- image Image to characterise the vignette of. function Characterisation function. Returns ------- :class:`DataVignetteCharacterisation` Vignette characterisation. Examples -------- >>> characterise_vignette_2D_function( # doctest: +ELLIPSIS ... apply_radial_gradient(np.ones([5, 7])) ... ) DataVignetteCharacterisation(parameters=array([[-5. 
, 0.5 , \ 0.9 , -4.4699758..., 0.5 , 0.9 ]]), principal_point=array([ 0.4 , 0.4285714...])) """ image = np.atleast_3d(image) function = validate_method( function, tuple(VIGNETTE_CHARACTERISATION_2D_FUNCTIONS.keys()), '"{0}" function is invalid, it must be one of {1}!', ) ( vignette_characterisation_function, p0, bounds, ) = VIGNETTE_CHARACTERISATION_2D_FUNCTIONS[function].values height, width, channels = image.shape x_1, y_1 = np.meshgrid( np.linspace(0, 1, width), np.linspace(0, 1, height), ) principal_point = vignette_principal_point(image) parameters = [] for i in range(channels): parameters.append( curve_fit( vignette_characterisation_function, ( np.ravel(x_1 - principal_point[0]), np.ravel(y_1 - principal_point[1]), ), np.ravel(np.nan_to_num(image[..., i])), p0=p0, bounds=bounds, )[0] ) return DataVignetteCharacterisation( as_float_array(parameters), principal_point ) def correct_vignette_2D_function( image: ArrayLike, characterisation_data: DataVignetteCharacterisation, function: Literal["Parabolic", "Hyperbolic Cosine"] | str = "Parabolic", ) -> NDArrayFloat: """ Correct the vignette of given image using given characterisation for a 2D function. Parameters ---------- image Image to correct the vignette of. characterisation_data Vignette characterisation data for given function. function Correction function. Returns ------- :class:`numpy.ndarray` Vignette corrected image. Examples -------- >>> image = apply_radial_gradient(np.ones([5, 7])) >>> characterisation_data = characterise_vignette_2D_function(image) >>> np.around( ... correct_vignette_2D_function(image, characterisation_data), 3 ... ) array([[-0. , 0.122, 0.597, 0.747, 0.781, 1.08 , -0. ], [ 0. , 0.413, 0.676, 0.82 , 0.76 , 0.576, 0. ], [ 0. , 0.468, 0.759, 1.103, 0.838, 0.611, 0. ], [ 0. , 0.439, 0.709, 0.858, 0.801, 0.628, -0. ], [-0. , 0.193, 0.742, 0.913, 1.049, -0.477, -0. ]]) """ image = np.copy(np.atleast_3d(image)) function = validate_method( function, tuple(VIGNETTE_CHARACTERISATION_2D_FUNCTIONS.keys()), '"{0}" function is invalid, it must be one of {1}!', ) vignette_characterisation_function = ( VIGNETTE_CHARACTERISATION_2D_FUNCTIONS[function] ) parameters, principal_point = characterisation_data.values height, width, channels = image.shape x_1, y_1 = np.meshgrid( np.linspace(0, 1, width), np.linspace(0, 1, height), ) for i in range(channels): image[..., i] /= vignette_characterisation_function.function( (x_1 - principal_point[0], y_1 - principal_point[1]), *parameters[i], ) return np.squeeze(image) def characterise_vignette_bivariate_spline( image: ArrayLike, pre_denoise_sigma: float = 6, post_denoise_sigma: float = 1, samples: int = 50, degree: int = 3, ) -> DataVignetteCharacterisation: """ Characterise the vignette of given image using a bivariate spline. Parameters ---------- image Image to characterise the vignette of. pre_denoise_sigma Standard deviation of the gaussian filtering kernel applied on the image. post_denoise_sigma Standard deviation of the gaussian filtering kernel applied on the resampled image at given ``samples`` count. samples Samples count of the resampled image on the long edge. degree Degree of the bivariate spline. Returns ------- :class:`DataVignetteCharacterisation` Vignette characterisation. Examples -------- >>> parameters, principal_point = characterise_vignette_bivariate_spline( ... apply_radial_gradient(np.ones([300, 400])) ... 
).values >>> parameters.shape (37, 50, 1) >>> principal_point # doctest: +ELLIPSIS array([ 0.4983333..., 0.49875 ]) """ image = np.copy(np.atleast_3d(image)) principal_point = vignette_principal_point(image) height, width, channels = image.shape ratio = samples / max(height, width) height_n, width_n = int(height * ratio), int(width * ratio) x_1, y_1 = np.linspace(0, 1, height), np.linspace(0, 1, width) x_1_n, y_1_n = np.linspace(0, 1, height_n), np.linspace(0, 1, width_n) # NOTE: Here "parameters" represent a lower resolution version of the # image, i.e. the "I_v" function directly. parameters = zeros((height_n, width_n, channels)) for i in range(channels): image[..., i] = gaussian_filter( image[..., i], pre_denoise_sigma, truncate=pre_denoise_sigma, mode="nearest", ) interpolator = RectBivariateSpline( x_1, y_1, image[..., i], kx=degree, ky=degree ) parameters[..., i] = interpolator(x_1_n, y_1_n) parameters[..., i] = gaussian_filter( parameters[..., i], post_denoise_sigma, truncate=pre_denoise_sigma, mode="nearest", ) return DataVignetteCharacterisation(parameters, principal_point) def correct_vignette_bivariate_spline( image: ArrayLike, characterisation_data: DataVignetteCharacterisation, degree: int = 3, ) -> NDArrayFloat: """ Correct the vignette of given image using given characterisation for a bivariate spline. Parameters ---------- image Image to correct the vignette of. characterisation_data Vignette characterisation data for given function. degree Degree of the bivariate spline. Returns ------- :class:`numpy.ndarray` Vignette corrected image. Examples -------- >>> image = apply_radial_gradient(np.ones([5, 7])) >>> characterisation_data = characterise_vignette_bivariate_spline(image) >>> np.around( ... correct_vignette_bivariate_spline(image, characterisation_data), 3 ... ) array([[ 0. , 0.345, 3.059, 4.072, 3.059, 0.345, 0. ], [ 0. , 3.624, 7.304, 9.058, 7.304, 3.624, 0. ], [ 0. , 4.936, 9.481, 14.032, 9.481, 4.936, 0. ], [ 0. , 3.624, 7.304, 9.058, 7.304, 3.624, 0. ], [ 0. , 0.345, 3.059, 4.072, 3.059, 0.345, 0. ]]) """ image = np.copy(np.atleast_3d(image)) parameters, principal_point = characterisation_data.values height, width, channels = image.shape height_I_v, width_I_v, channels_I_v = parameters.shape x_1, y_1 = np.linspace(0, 1, height), np.linspace(0, 1, width) x_I_v, y_I_v = np.linspace(0, 1, height_I_v), np.linspace(0, 1, width_I_v) for i in range(channels): interpolator = RectBivariateSpline( x_I_v, y_I_v, parameters[..., i], kx=degree, ky=degree ) image[..., i] /= interpolator(x_1, y_1) return np.squeeze(image) def radial_sampling_function( samples_rho: int = 7, samples_phi: int = 21, radius: float = 1, radial_bias: float = 1, ) -> NDArrayFloat: """ Return a series of radial samples. Parameters ---------- samples_rho Sample count along the radial coordinate. samples_phi Sample count along the angular coordinate. radius Sample distribution radius. radial_bias Sample distribution bias, i.e. an exponent affecting the radial distribution. Returns ------- :class:`numpy.ndarray` Radial samples. 
Examples -------- >>> radial_sampling_function().shape (21, 7, 2) """ rho, phi = np.meshgrid( np.linspace(0, radius, samples_rho) ** radial_bias, np.linspace(-np.pi, np.pi, samples_phi), ) return polar_to_cartesian(tstack([rho, phi])) def vignette_sampling_coordinates( principal_point: ArrayLike = np.array([0.5, 0.5]), aspect_ratio: float = 1, diagonal_samples: int = 10, diagonal_selection: int = 2, edge_samples: int = 10, samples_rho: int = 7, samples_phi: int = 21, radius: float = 0.9, radial_bias: float = 1, ) -> NDArrayFloat: """ Return a series of sampling coordinates appropriate for radial basis function (RBF) interpolation of a vignette function. Parameters ---------- principal_point Principal point of the vignette function to sample. aspect_ratio Aspect ratio of the image storing the vignette function to sample. diagonal_samples Sample count along the diagonals. diagonal_selection Sample count to retain along the diagonals ends. Given a series of 6 ``diagonal_samples`` as follows: `[0, 1, 2, 3, 4, 5]`, a ``diagonal_selection`` of 2 would retain the following samples: `[0, 1, 4, 5]`. edge_samples Sample count along the edges. samples_rho Sample count along the radial coordinate. samples_phi Sample count along the angular coordinate. radius Sample distribution radius. radial_bias Sample distribution bias, i.e. an exponent affecting the radial distribution. Returns ------- :class:`numpy.ndarray` Radial samples. Examples -------- >>> vignette_sampling_coordinates().shape (187, 2) """ principal_point = as_float_array(principal_point) samples = [] diagonal = np.linspace(0, 1, diagonal_samples) diagonal = np.hstack( [diagonal[1:diagonal_selection], diagonal[-diagonal_selection:-1]] ) samples.append(tstack([diagonal, diagonal])) samples.append(tstack([diagonal, 1 - diagonal])) edge = np.linspace(0, 1, edge_samples) samples.append(tstack([edge, zeros(edge_samples)])) samples.append(tstack([edge, ones(edge_samples)])) samples.append(tstack([zeros(edge_samples), edge])[1:-1]) samples.append(tstack([ones(edge_samples), edge])[1:-1]) coordinates = np.vstack(samples) coordinates[..., 0] = LinearInterpolator( [0, 0.5, 1], [0, principal_point[0], 1] )(coordinates[..., 0]) coordinates[..., 1] = LinearInterpolator( [0, 0.5, 1], [0, principal_point[1], 1] )(coordinates[..., 1]) radial_samples = radial_sampling_function( samples_rho, samples_phi, cast(float, 1 + (np.max(principal_point - 0.5) * 2)), radial_bias, ) # NOTE: Some randomisation is required to avoid a # "LinAlgError: Singular matrix" exception raised by # "scipy.interpolate.RBFInterpolator" definition. radial_samples += ( np.random.default_rng(8).random(radial_samples.shape) - 0.5 ) / 1000 radial_samples = np.reshape(radial_samples / (2 * 1 / radius), [-1, 2]) radial_samples[..., 1] *= aspect_ratio radial_samples += principal_point coordinates = np.vstack([coordinates, radial_samples]) coordinates = coordinates[ np.logical_and( np.all(coordinates >= 0, axis=-1), np.all(coordinates <= 1, axis=-1), ) ] return coordinates def characterise_vignette_RBF( image: ArrayLike, denoise_sigma: float = 6 ) -> DataVignetteCharacterisation: """ Characterise the vignette of given image using a series of sampling coordinates appropriate for radial basis function (RBF) interpolation of a vignette function. Parameters ---------- image Image to characterise the vignette of. denoise_sigma Standard deviation of the gaussian filtering kernel applied on the image. Returns ------- :class:`DataVignetteCharacterisation` Vignette characterisation. 
Examples -------- >>> parameters, principal_point = characterise_vignette_RBF( ... apply_radial_gradient(np.ones([300, 400])) ... ).values >>> parameters.shape (180, 1) >>> principal_point # doctest: +ELLIPSIS array([ 0.4983333..., 0.49875 ]) """ image = np.copy(np.atleast_3d(image)) height, width, channels = image.shape principal_point = vignette_principal_point(image) sampling_coordinates = vignette_sampling_coordinates( principal_point, width / height ) x_indices = as_int_array(sampling_coordinates[..., 0] * (height - 1)) y_indices = as_int_array(sampling_coordinates[..., 1] * (width - 1)) parameters = [] for i in range(channels): filtered = gaussian_filter( image[..., i], denoise_sigma, truncate=denoise_sigma ) parameters.append(filtered[x_indices, y_indices]) return DataVignetteCharacterisation( np.transpose(parameters), principal_point ) def correct_vignette_RBF( image: ArrayLike, characterisation_data: DataVignetteCharacterisation, smoothing: float = 0.001, kernel: Literal[ "linear", "thin_plate_spline", "cubic", "quintic", "multiquadric", "inverse_multiquadric", "inverse_quadratic", "gaussian", ] = "cubic", epsilon: float = 1, ) -> NDArrayFloat: """ Correct the vignette of given image using given characterisation for radial basis function (RBF) interpolation. Parameters ---------- image Image to correct the vignette of. characterisation_data Vignette characterisation data for given function. smoothing Smoothing parameter, see :class:`scipy.interpolate.RBFInterpolator` class. kernel Type of RBF, see :class:`scipy.interpolate.RBFInterpolator` class. epsilon Shape parameter that scales the input to the RBF, see :class:`scipy.interpolate.RBFInterpolator` class. Returns ------- :class:`numpy.ndarray` Vignette corrected image. Examples -------- >>> image = apply_radial_gradient(np.ones([5, 7])) >>> characterisation_data = characterise_vignette_RBF(image) >>> np.around(correct_vignette_RBF(image, characterisation_data), 3) array([[ 0. , 0.091, 0.841, 1.134, 0.841, 0.091, 0. ], [ 0. , 0.967, 2.03 , 2.552, 2.03 , 0.967, 0. ], [ 0. , 1.323, 2.647, 3.97 , 2.647, 1.323, 0. ], [ 0. , 0.967, 2.03 , 2.552, 2.03 , 0.967, 0. ], [ 0. , 0.091, 0.841, 1.134, 0.841, 0.091, 0. ]]) """ image = np.copy(np.atleast_3d(image)) height, width, channels = image.shape parameters, principal_point = characterisation_data.values sampling_coordinates = vignette_sampling_coordinates( principal_point, width / height ) x_1, y_1 = np.meshgrid( np.linspace(0, 1, width), np.linspace(0, 1, height), ) for i in range(channels): interpolator = RBFInterpolator( sampling_coordinates, parameters[..., i], kernel=kernel, smoothing=smoothing, epsilon=epsilon, ) I_v = interpolator(tstack([y_1, x_1]).reshape([-1, 2])).reshape( height, width ) image[..., i] /= I_v return np.squeeze(image) VIGNETTE_CHARACTERISATION_METHODS: CanonicalMapping = CanonicalMapping( { "2D Function": characterise_vignette_2D_function, "Bivariate Spline": characterise_vignette_bivariate_spline, "RBF": characterise_vignette_RBF, } ) VIGNETTE_CHARACTERISATION_METHODS.__doc__ = """ Supported vignette characterisation methods. """ def characterise_vignette( image: ArrayLike, method: Literal["2D Function", "Bivariate Spline", "RBF"] | str = "RBF", **kwargs, ) -> DataVignetteCharacterisation: """ Characterise the vignette of given image using given method. Parameters ---------- image Image to characterise the vignette of. method Vignette characterisation method. 
Other Parameters ---------------- function {:func:`colour_hdri.distortion.characterise_vignette_2D_function`}, Characterisation function. pre_denoise_sigma {:func:`colour_hdri.distortion.characterise_vignette_bivariate_spline`}, Standard deviation of the gaussian filtering kernel applied on the image. post_denoise_sigma {:func:`colour_hdri.distortion.characterise_vignette_bivariate_spline`}, Standard deviation of the gaussian filtering kernel applied on the resampled image at given ``samples`` count. samples {:func:`colour_hdri.distortion.characterise_vignette_bivariate_spline`}, Samples count of the resampled image on the long edge. degree {:func:`colour_hdri.distortion.characterise_vignette_bivariate_spline`}, Degree of the bivariate spline. denoise_sigma {:func:`colour_hdri.distortion.characterise_vignette_RBF`}, Standard deviation of the gaussian filtering kernel applied on the image. Returns ------- :class:`DataVignetteCharacterisation` Vignette characterisation. Examples -------- >>> image = apply_radial_gradient(np.ones([300, 400])) >>> parameters, principal_point = characterise_vignette(image).values >>> parameters.shape (180, 1) >>> principal_point # doctest: +ELLIPSIS array([ 0.4983333..., 0.49875 ]) >>> parameters, principal_point = characterise_vignette( ... image, method="RBF" ... ).values >>> parameters.shape (180, 1) >>> principal_point # doctest: +ELLIPSIS array([ 0.4983333..., 0.49875 ]) >>> parameters, principal_point = characterise_vignette( ... image, method="2D Function" ... ).values >>> parameters.shape (1, 6) >>> principal_point # doctest: +ELLIPSIS array([ 0.4983333..., 0.49875 ]) >>> parameters, principal_point = characterise_vignette( ... image, method="Bivariate Spline" ... ).values >>> parameters.shape (37, 50, 1) >>> principal_point # doctest: +ELLIPSIS array([ 0.4983333..., 0.49875 ]) """ method = validate_method( method, tuple(VIGNETTE_CHARACTERISATION_METHODS.keys()) ) return VIGNETTE_CHARACTERISATION_METHODS[method](image, **kwargs) VIGNETTE_CORRECTION_METHODS: CanonicalMapping = CanonicalMapping( { "2D Function": correct_vignette_2D_function, "Bivariate Spline": correct_vignette_bivariate_spline, "RBF": correct_vignette_RBF, } ) VIGNETTE_CHARACTERISATION_METHODS.__doc__ = """ Supported vignette correction methods. """ def correct_vignette( image: ArrayLike, characterisation_data: DataVignetteCharacterisation, method: Literal["2D Function", "Bivariate Spline", "RBF"] | str = "RBF", **kwargs, ) -> NDArrayFloat: """ Correct the vignette of given image using given method. Parameters ---------- image Image to correct the vignette of. characterisation_data Vignette characterisation data for given function. method Vignette characterisation method. Other Parameters ---------------- function {:func:`colour_hdri.distortion.correct_vignette_2D_function`}, Characterisation function. degree {:func:`colour_hdri.distortion.correct_vignette_bivariate_spline`}, Degree of the bivariate spline. smoothing {:func:`colour_hdri.distortion.correct_vignette_RBF`}, Smoothing parameter, see :class:`scipy.interpolate.RBFInterpolator` class. kernel {:func:`colour_hdri.distortion.correct_vignette_RBF`}, Type of RBF, see :class:`scipy.interpolate.RBFInterpolator` class. epsilon {:func:`colour_hdri.distortion.correct_vignette_RBF`}, Shape parameter that scales the input to the RBF, see :class:`scipy.interpolate.RBFInterpolator` class. Returns ------- :class:`numpy.ndarray` Vignette corrected image. 
Examples -------- >>> image = apply_radial_gradient(np.ones([5, 7])) >>> characterisation_data = characterise_vignette(image) >>> np.around(correct_vignette_RBF(image, characterisation_data), 3) array([[ 0. , 0.091, 0.841, 1.134, 0.841, 0.091, 0. ], [ 0. , 0.967, 2.03 , 2.552, 2.03 , 0.967, 0. ], [ 0. , 1.323, 2.647, 3.97 , 2.647, 1.323, 0. ], [ 0. , 0.967, 2.03 , 2.552, 2.03 , 0.967, 0. ], [ 0. , 0.091, 0.841, 1.134, 0.841, 0.091, 0. ]]) >>> characterisation_data = characterise_vignette(image, method="RBF") >>> np.around( ... correct_vignette(image, characterisation_data, method="RBF"), 3 ... ) array([[ 0. , 0.091, 0.841, 1.134, 0.841, 0.091, 0. ], [ 0. , 0.967, 2.03 , 2.552, 2.03 , 0.967, 0. ], [ 0. , 1.323, 2.647, 3.97 , 2.647, 1.323, 0. ], [ 0. , 0.967, 2.03 , 2.552, 2.03 , 0.967, 0. ], [ 0. , 0.091, 0.841, 1.134, 0.841, 0.091, 0. ]]) >>> characterisation_data = characterise_vignette( ... image, method="2D Function" ... ) >>> np.around( ... correct_vignette( ... image, characterisation_data, method="2D Function" ... ), ... 3, ... ) array([[-0. , 0.122, 0.597, 0.747, 0.781, 1.08 , -0. ], [ 0. , 0.413, 0.676, 0.82 , 0.76 , 0.576, 0. ], [ 0. , 0.468, 0.759, 1.103, 0.838, 0.611, 0. ], [ 0. , 0.439, 0.709, 0.858, 0.801, 0.628, -0. ], [-0. , 0.193, 0.742, 0.913, 1.049, -0.477, -0. ]]) >>> characterisation_data = characterise_vignette( ... image, method="Bivariate Spline" ... ) >>> np.around( ... correct_vignette( ... image, characterisation_data, method="Bivariate Spline" ... ), ... 3, ... ) array([[ 0. , 0.345, 3.059, 4.072, 3.059, 0.345, 0. ], [ 0. , 3.624, 7.304, 9.058, 7.304, 3.624, 0. ], [ 0. , 4.936, 9.481, 14.032, 9.481, 4.936, 0. ], [ 0. , 3.624, 7.304, 9.058, 7.304, 3.624, 0. ], [ 0. , 0.345, 3.059, 4.072, 3.059, 0.345, 0. ]]) """ method = validate_method(method, tuple(VIGNETTE_CORRECTION_METHODS.keys())) return VIGNETTE_CORRECTION_METHODS[method]( image, characterisation_data, **kwargs )
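

# --- Usage sketch (editor's addition; not part of the original module) ---
# A minimal end-to-end run with the definitions above: a synthetic vignetted
# image is generated, characterised with the default RBF method and corrected.
# The image size is arbitrary and the printed shape is the only output checked.
if __name__ == "__main__":
    image = apply_radial_gradient(np.ones([300, 400, 3]))
    characterisation = characterise_vignette(image, method="RBF")
    corrected = correct_vignette(image, characterisation, method="RBF")
    print(corrected.shape)  # (300, 400, 3)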
PypiClean
/UW_RestClients_GWS-2.3.7-py3-none-any.whl/uw_gws/models.py
from restclients_core import models import json import time class GWSModel(models.Model): def __str__(self): return json.dumps(self.json_data(), indent=4) class GroupReference(GWSModel): name = models.CharField(max_length=500) uwregid = models.CharField(max_length=32) display_name = models.CharField(max_length=500) url = models.CharField(max_length=200) def json_data(self): return { "id": self.name, "regid": self.uwregid, "displayName": self.display_name, "url": self.url, } def __init__(self, *args, **kwargs): super(GroupReference, self).__init__(*args, **kwargs) class Group(GWSModel): CLASSIFICATION_NONE = "u" CLASSIFICATION_RESTRICTED = "r" CLASSIFICATION_CONFIDENTIAL = "c" CLASSIFICATION_TYPES = ( (CLASSIFICATION_NONE, "Unclassified"), (CLASSIFICATION_RESTRICTED, "Restricted"), (CLASSIFICATION_CONFIDENTIAL, "Confidential") ) name = models.CharField(max_length=500) uwregid = models.CharField(max_length=32) display_name = models.CharField(max_length=500) description = models.CharField(max_length=2000, null=True) contact = models.CharField(max_length=120, null=True) last_modified = models.DateTimeField(null=True) membership_modified = models.DateTimeField(null=True) authnfactor = models.PositiveSmallIntegerField( choices=((0, ""), (1, ""), (2, "")), default=1) classification = models.CharField( max_length=1, choices=CLASSIFICATION_TYPES, null=True) dependson = models.CharField(max_length=500, null=True) def __init__(self, *args, **kwargs): super(Group, self).__init__(*args, **kwargs) self.admins = [] self.creators = [] self.optins = [] self.optouts = [] self.readers = [] self.updaters = [] self.affiliates = [] def _to_timestamp(self, dt): if dt is not None: return int(time.mktime(dt.timetuple())*1000 + dt.microsecond/1000) def has_regid(self): return self.uwregid is not None and len(self.uwregid) == 32 def json_data(self, is_put_req=False): data = { "id": self.name, "regid": self.uwregid, "displayName": self.display_name, "description": self.description, "lastModified": self._to_timestamp(self.last_modified), "lastMemberModified": self._to_timestamp(self.membership_modified), "contact": self.contact, "authnfactor": int(self.authnfactor), "classification": self.classification, "dependson": self.dependson, "admins": [e.json_data(is_put_req=is_put_req) for e in self.admins], "updaters": [e.json_data(is_put_req=is_put_req) for e in self.updaters], "creators": [e.json_data(is_put_req=is_put_req) for e in self.creators], "readers": [e.json_data(is_put_req=is_put_req) for e in self.readers], "optins": [e.json_data(is_put_req=is_put_req) for e in self.optins], "optouts": [e.json_data(is_put_req=is_put_req) for e in self.optouts], "affiliates": [a.json_data() for a in self.affiliates], } if is_put_req is False: return data return {k: v for k, v in data.items() if v is not None and v != ''} class CourseGroup(Group): SPRING = "spring" SUMMER = "summer" AUTUMN = "autumn" WINTER = "winter" QUARTERNAME_CHOICES = ( (SPRING, "Spring"), (SUMMER, "Summer"), (AUTUMN, "Autumn"), (WINTER, "Winter"), ) curriculum_abbr = models.CharField(max_length=8) course_number = models.PositiveSmallIntegerField() year = models.PositiveSmallIntegerField() quarter = models.CharField(max_length=6, choices=QUARTERNAME_CHOICES) section_id = models.CharField(max_length=2, db_index=True) sln = models.PositiveIntegerField() def __init__(self, *args, **kwargs): super(CourseGroup, self).__init__(*args, **kwargs) self.instructors = [] def json_data(self): data = super(CourseGroup, self).json_data() data["course"] = { "quarter": 
self.quarter[:3], "year": int(self.year), "curriculum": self.curriculum_abbr.lower(), "number": int(self.course_number), "section": self.section_id.lower(), "sln": self.sln, "instructors": [i.json_data() for i in self.instructors], } return data class GroupEntity(GWSModel): UWNETID_TYPE = "uwnetid" EPPN_TYPE = "eppn" GROUP_TYPE = "group" DNS_TYPE = "dns" SET_TYPE = "set" UWWI_TYPE = "uwwi" TYPE_CHOICES = ( (UWNETID_TYPE, "UWNetID"), (EPPN_TYPE, "ePPN"), (GROUP_TYPE, "Group ID"), (DNS_TYPE, "Hostname"), (SET_TYPE, "Set"), (UWWI_TYPE, "UWWI"), ) name = models.CharField(max_length=50) display_name = models.CharField(max_length=500, null=True) type = models.SlugField(max_length=8, choices=TYPE_CHOICES) def is_uwnetid(self): return self.type == self.UWNETID_TYPE def is_eppn(self): return self.type == self.EPPN_TYPE def is_group(self): return self.type == self.GROUP_TYPE def json_data(self, is_put_req=False): data = {"id": self.name, "type": self.type} if is_put_req is False: data["name"] = self.display_name return data def __eq__(self, other): return self.name == other.name and self.type == other.type def __init__(self, *args, **kwargs): super(GroupEntity, self).__init__(*args, **kwargs) class GroupMember(GroupEntity): DIRECT_MTYPE = "direct" INDIRECT_MTYPE = "indirect" MTYPE_CHOICES = ( (DIRECT_MTYPE, DIRECT_MTYPE), (INDIRECT_MTYPE, INDIRECT_MTYPE), ) mtype = models.SlugField( max_length=10, choices=MTYPE_CHOICES, default=DIRECT_MTYPE) source = models.CharField(max_length=1000, null=True) def json_data(self, is_put_req=False): data = {"id": self.name, "type": self.type} if is_put_req is False: data["mtype"] = self.mtype data["source"] = self.source return data def __init__(self, *args, **kwargs): super(GroupMember, self).__init__(*args, **kwargs) class GroupAffiliate(GWSModel): UWNETID_NAME = "uwnetid" GOOGLE_NAME = "google" EMAIL_NAME = "email" RADIUS_NAME = "radius" NAME_CHOICES = ( (UWNETID_NAME, "UWNetID"), (GOOGLE_NAME, "Google"), (EMAIL_NAME, "Email"), (RADIUS_NAME, "Radius"), ) ACTIVE_STATUS = "active" INACTIVE_STATUS = "inactive" STATUS_CHOICES = ( (ACTIVE_STATUS, ACTIVE_STATUS), (INACTIVE_STATUS, INACTIVE_STATUS), ) name = models.CharField(max_length=20, choices=NAME_CHOICES) status = models.CharField(max_length=16, choices=STATUS_CHOICES) forward = models.CharField(max_length=50) def __init__(self, *args, **kwargs): super(GWSModel, self).__init__(*args, **kwargs) self.senders = [] def is_active(self): return self.status == self.ACTIVE_STATUS def json_data(self): return { "name": self.name, "status": self.status, "forward": self.forward, "sender": [s.json_data() for s in self.senders], } class GroupHistory(GWSModel): description = models.CharField(max_length=512) activity = models.CharField(max_length=64) member_uwnetid = models.CharField(max_length=128, null=True) member_action = models.CharField(max_length=32, null=True) timestamp = models.IntegerField() # Epoch timestamp in milliseconds def is_add_member(self): return self.member_action and self.member_action == "add member" def is_delete_member(self): return self.member_action and self.member_action == "delete member" def json_data(self): return { "description": self.description, "activity": self.activity, "timestamp": self.timestamp, "member_uwnetid": self.member_uwnetid, "member_action": self.member_action, "is_add_member": self.is_add_member(), "is_delete_member": self.is_delete_member(), } def __init__(self, *args, **kwargs): data = kwargs.get("data") if data is None: return super(GroupHistory, self).__init__(*args, **kwargs) 
self.activity = data.get("activity") self.description = data.get("description") self.timestamp = int(data.get("timestamp")) if self.activity == "membership": self.member_action, name = self.description.split(": ") self.member_uwnetid = name.replace("'", "")
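

# --- Usage sketch (editor's addition; not part of the original module) ---
# Shows how GroupHistory parses a membership activity entry; the description
# and timestamp are made-up values in the format the parser above expects.
if __name__ == "__main__":
    history = GroupHistory(data={
        "activity": "membership",
        "description": "add member: 'javerage'",
        "timestamp": 1650000000000,
    })
    print(json.dumps(history.json_data(), indent=2))  # member_uwnetid == "javerage"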
PypiClean