def enumerate_query_by_limit(q, limit=1000):
    """
    Enumerate over SQLAlchemy query object ``q`` and yield individual results
    fetched in batches of size ``limit`` using SQL LIMIT and OFFSET.
    """
    for offset in count(0, limit):
        r = q.offset(offset).limit(limit).all()
        for row in r:
            yield row
        if len(r) < limit:
            break
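A minimal usage sketch (the ``Session``, ``User``, and ``process`` names are hypothetical; the function body requires ``itertools.count`` to be imported at module level):

from itertools import count  # required by enumerate_query_by_limit

# Hypothetical SQLAlchemy setup: a session and a mapped User model.
# A stable ORDER BY keeps OFFSET-based paging consistent between batches.
query = session.query(User).order_by(User.id)
for user in enumerate_query_by_limit(query, limit=500):
    process(user)  # hypothetical per-row handler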
def validate_many(d, schema):
    """Validate a dictionary of data against the provided schema.

    Returns a list of values positioned in the same order as given in
    ``schema``, each value is validated with the corresponding validator.
    Raises formencode.Invalid if validation failed.

    Similar to get_many but using formencode validation.

    :param d: A dictionary of data to read values from.
    :param schema: A list of (key, validator) tuples. The key will be used to
        fetch a value from ``d`` and the validator will be applied to it.

    Example::

        from formencode import validators

        email, password, password_confirm = validate_many(request.params, [
            ('email', validators.Email(not_empty=True)),
            ('password', validators.String(min=4)),
            ('password_confirm', validators.String(min=4)),
        ])
    """
    return [validator.to_python(d.get(key), state=key)
            for key, validator in schema]
def assert_hashable(*args, **kw):
    """
    Verify that each argument is hashable.

    Passes silently if successful. Raises descriptive TypeError otherwise.

    Example::

        >>> assert_hashable(1, 'foo', bar='baz')
        >>> assert_hashable(1, [], baz='baz')
        Traceback (most recent call last):
          ...
        TypeError: Argument in position 1 is not hashable: []
        >>> assert_hashable(1, 'foo', bar=[])
        Traceback (most recent call last):
          ...
        TypeError: Keyword argument 'bar' is not hashable: []
    """
    try:
        for i, arg in enumerate(args):
            hash(arg)
    except TypeError:
        raise TypeError('Argument in position %d is not hashable: %r' % (i, arg))
    try:
        for key, val in iterate_items(kw):
            hash(val)
    except TypeError:
        raise TypeError('Keyword argument %r is not hashable: %r' % (key, val))
def memoized(fn=None, cache=None):
    """ Memoize a function into an optionally-specified cache container.

    If the `cache` container is not specified, then the instance container is
    accessible from the wrapped function's `memoize_cache` property.

    Example::

        >>> @memoized
        ... def foo(bar):
        ...     print("Not cached.")
        >>> foo(1)
        Not cached.
        >>> foo(1)
        >>> foo(2)
        Not cached.

    Example with a specific cache container (in this case, the
    ``RecentlyUsedContainer``, which will only store the ``maxsize`` most
    recently accessed items)::

        >>> from unstdlib.standard.collections_ import RecentlyUsedContainer
        >>> lru_container = RecentlyUsedContainer(maxsize=2)
        >>> @memoized(cache=lru_container)
        ... def baz(x):
        ...     print("Not cached.")
        >>> baz(1)
        Not cached.
        >>> baz(1)
        >>> baz(2)
        Not cached.
        >>> baz(3)
        Not cached.
        >>> baz(2)
        >>> baz(1)
        Not cached.
        >>> # Notice that the '2' key remains, but the '1' key was evicted from
        >>> # the cache.
    """
    if fn:
        # This is a hack to support both @memoized and @memoized(...)
        return memoized(cache=cache)(fn)

    if cache is None:
        cache = {}

    def decorator(fn):
        wrapped = wraps(fn)(partial(_memoized_call, fn, cache))
        wrapped.memoize_cache = cache
        return wrapped

    return decorator
def memoized_method(method=None, cache_factory=None):
    """ Memoize a class's method.

    Arguments are similar to `memoized`, except that the cache container is
    specified with `cache_factory`: a function called with no arguments to
    create the caching container for the instance.

    Note that, unlike `memoized`, the result cache will be stored on the
    instance, so cached results will be deallocated along with the instance.

    Example::

        >>> class Person(object):
        ...     def __init__(self, name):
        ...         self._name = name
        ...     @memoized_method
        ...     def get_name(self):
        ...         print("Calling get_name on %r" % (self._name, ))
        ...         return self._name
        >>> shazow = Person("shazow")
        >>> shazow.get_name()
        Calling get_name on 'shazow'
        'shazow'
        >>> shazow.get_name()
        'shazow'
        >>> shazow._get_name_cache
        {((), ()): 'shazow'}

    Example with a specific cache container::

        >>> from unstdlib.standard.collections_ import RecentlyUsedContainer
        >>> class Foo(object):
        ...     @memoized_method(cache_factory=lambda: RecentlyUsedContainer(maxsize=2))
        ...     def add(self, a, b):
        ...         print("Calling add with %r and %r" % (a, b))
        ...         return a + b
        >>> foo = Foo()
        >>> foo.add(1, 1)
        Calling add with 1 and 1
        2
        >>> foo.add(1, 1)
        2
        >>> foo.add(2, 2)
        Calling add with 2 and 2
        4
        >>> foo.add(3, 3)
        Calling add with 3 and 3
        6
        >>> foo.add(1, 1)
        Calling add with 1 and 1
        2
    """
    if method is None:
        return lambda f: memoized_method(f, cache_factory=cache_factory)

    cache_factory = cache_factory or dict

    @wraps(method)
    def memoized_method_property(self):
        cache = cache_factory()
        cache_attr = "_%s_cache" % (method.__name__, )
        setattr(self, cache_attr, cache)
        result = partial(
            _memoized_call,
            partial(method, self),
            cache
        )
        result.memoize_cache = cache
        return result

    return memoized_property(memoized_method_property)
def deprecated(message, exception=PendingDeprecationWarning):
    """Throw a warning when a function/method will soon be deprecated.

    Supports passing a ``message`` and an ``exception`` class (uses
    ``PendingDeprecationWarning`` by default). This is useful if you want to
    alternatively pass a ``DeprecationWarning`` exception for already
    deprecated functions/methods.

    Example::

        >>> import warnings
        >>> from functools import wraps
        >>> message = "this function will be deprecated in the near future"
        >>> @deprecated(message)
        ... def foo(n):
        ...     return n+n
        >>> with warnings.catch_warnings(record=True) as w:
        ...     warnings.simplefilter("always")
        ...     foo(4)
        ...     assert len(w) == 1
        ...     assert issubclass(w[-1].category, PendingDeprecationWarning)
        ...     assert message == str(w[-1].message)
        ...     assert foo.__name__ == 'foo'
        8
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(message, exception, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def groupby_count(i, key=None, force_keys=None):
    """ Aggregate iterator values into buckets based on how frequently the
    values appear.

    Example::

        >>> list(groupby_count([1, 1, 1, 2, 3]))
        [(1, 3), (2, 1), (3, 1)]
    """
    counter = defaultdict(lambda: 0)

    if not key:
        key = lambda o: o

    for k in i:
        counter[key(k)] += 1

    if force_keys:
        for k in force_keys:
            counter[k] += 0

    return counter.items()
def is_iterable(maybe_iter, unless=(string_types, dict)):
    """ Return whether ``maybe_iter`` is an iterable, unless it's an instance
    of the base class, or tuple of base classes, given in ``unless``.

    Example::

        >>> is_iterable('foo')
        False
        >>> is_iterable(['foo'])
        True
        >>> is_iterable(['foo'], unless=list)
        False
        >>> is_iterable(xrange(5))
        True
    """
    try:
        iter(maybe_iter)
    except TypeError:
        return False
    return not isinstance(maybe_iter, unless)
def iterate(maybe_iter, unless=(string_types, dict)):
    """ Always return an iterable.

    Returns ``maybe_iter`` if it is an iterable, otherwise it returns a
    single element iterable containing ``maybe_iter``.

    By default, strings and dicts are treated as non-iterable. This can be
    overridden by passing in a type or tuple of types for ``unless``.

    :param maybe_iter: A value to return as an iterable.
    :param unless: A type or tuple of types (same as ``isinstance``) to be
        treated as non-iterable.

    Example::

        >>> iterate('foo')
        ['foo']
        >>> iterate(['foo'])
        ['foo']
        >>> iterate(['foo'], unless=list)
        [['foo']]
        >>> list(iterate(xrange(5)))
        [0, 1, 2, 3, 4]
    """
    if is_iterable(maybe_iter, unless=unless):
        return maybe_iter
    return [maybe_iter]
def iterate_items(dictish):
    """ Return a consistent (key, value) iterable on dict-like objects,
    including lists of tuple pairs.

    Example:

        >>> list(iterate_items({'a': 1}))
        [('a', 1)]
        >>> list(iterate_items([('a', 1), ('b', 2)]))
        [('a', 1), ('b', 2)]
    """
    if hasattr(dictish, 'iteritems'):
        return dictish.iteritems()
    if hasattr(dictish, 'items'):
        return dictish.items()
    return dictish
def iterate_chunks(i, size=10):
    """
    Iterate over an iterator ``i`` in ``size`` chunks, yield chunks.
    Similar to pagination.

    Example::

        >>> list(iterate_chunks([1, 2, 3, 4], size=2))
        [[1, 2], [3, 4]]
    """
    accumulator = []

    # Use a separate name for the current element so the iterator ``i``
    # is not shadowed inside the loop.
    for n, item in enumerate(i):
        accumulator.append(item)
        if (n + 1) % size == 0:
            yield accumulator
            accumulator = []

    if accumulator:
        yield accumulator
def listify(fn=None, wrapper=list):
    """
    A decorator which wraps a function's return value in ``list(...)``.

    Useful when an algorithm can be expressed more cleanly as a generator but
    the function should return a list.

    Example::

        >>> @listify
        ... def get_lengths(iterable):
        ...     for i in iterable:
        ...         yield len(i)
        >>> get_lengths(["spam", "eggs"])
        [4, 4]
        >>>
        >>> @listify(wrapper=tuple)
        ... def get_lengths_tuple(iterable):
        ...     for i in iterable:
        ...         yield len(i)
        >>> get_lengths_tuple(["foo", "bar"])
        (3, 3)
    """
    def listify_return(fn):
        @wraps(fn)
        def listify_helper(*args, **kw):
            return wrapper(fn(*args, **kw))
        return listify_helper
    if fn is None:
        return listify_return
    return listify_return(fn)
def is_subclass(o, bases):
    """
    Similar to the ``issubclass`` builtin, but does not raise a ``TypeError``
    if either ``o`` or ``bases`` is not an instance of ``type``.

    Example::

        >>> is_subclass(IOError, Exception)
        True
        >>> is_subclass(Exception, None)
        False
        >>> is_subclass(None, Exception)
        False
        >>> is_subclass(IOError, (None, Exception))
        True
        >>> is_subclass(Exception, (None, 42))
        False
    """
    try:
        return _issubclass(o, bases)
    except TypeError:
        pass

    if not isinstance(o, type):
        return False
    if not isinstance(bases, tuple):
        return False

    bases = tuple(b for b in bases if isinstance(b, type))
    return _issubclass(o, bases)
def get_many(d, required=[], optional=[], one_of=[]):
    """
    Returns a predictable number of elements out of ``d`` in a list for
    auto-expanding.

    Keys in ``required`` will raise KeyError if not found in ``d``.
    Keys in ``optional`` will return None if not found in ``d``.
    Keys in ``one_of`` will raise KeyError if none exist, otherwise return the
    first in ``d``.

    Example::

        uid, action, limit, offset = get_many(request.params,
            required=['uid', 'action'], optional=['limit', 'offset'])

    Note: This function has been added to the webhelpers package.
    """
    d = d or {}
    r = [d[k] for k in required]
    r += [d.get(k) for k in optional]

    if one_of:
        for k in (k for k in one_of if k in d):
            return r + [d[k]]

        raise KeyError("Missing a one_of value.")

    return r
def random_string(length=6, alphabet=string.ascii_letters + string.digits):
    """
    Return a random string of given length and alphabet.

    Default alphabet is url-friendly (base62).
    """
    return ''.join([random.choice(alphabet) for i in xrange(length)])
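A quick usage sketch (outputs shown are illustrative, since results are random):

token = random_string(12)                               # e.g. 'h3XkP0qLzN8b'
hex_id = random_string(8, alphabet='0123456789abcdef')  # lowercase hex string

Note that ``random.choice`` is not cryptographically secure; for security-sensitive tokens, the stdlib ``secrets`` module is the usual choice.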
def number_to_string(n, alphabet):
    """
    Given a non-negative integer ``n``, convert it to a string composed of
    the given ``alphabet`` mapping, where the position of each element in
    ``alphabet`` is its radix value.

    Examples::

        >>> number_to_string(12345678, '01')
        '101111000110000101001110'

        >>> number_to_string(12345678, 'ab')
        'babbbbaaabbaaaababaabbba'

        >>> number_to_string(12345678, string.ascii_letters + string.digits)
        'ZXP0'

        >>> number_to_string(12345, ['zero ', 'one ', 'two ', 'three ', 'four ', 'five ', 'six ', 'seven ', 'eight ', 'nine '])
        'one two three four five '
    """
    result = ''
    base = len(alphabet)
    current = int(n)
    if current < 0:
        raise ValueError("invalid n (must be non-negative): %s" % n)
    while current:
        result = alphabet[current % base] + result
        current = current // base

    return result
def string_to_number(s, alphabet):
    """
    Given a string ``s``, convert it to an integer composed of the given
    ``alphabet`` mapping, where the position of each element in ``alphabet``
    is its radix value.

    Examples::

        >>> string_to_number('101111000110000101001110', '01')
        12345678

        >>> string_to_number('babbbbaaabbaaaababaabbba', 'ab')
        12345678

        >>> string_to_number('ZXP0', string.ascii_letters + string.digits)
        12345678
    """
    base = len(alphabet)
    inverse_alphabet = dict(zip(alphabet, xrange(0, base)))
    n = 0
    exp = 0
    for i in reversed(s):
        n += inverse_alphabet[i] * (base ** exp)
        exp += 1

    return n
def bytes_to_number(b, endian='big'):
    """
    Convert a string to an integer.

    :param b: String or bytearray to convert.
    :param endian: Byte order to convert into ('big' or 'little' endian-ness,
        default 'big')

    Assumes bytes are 8 bits.

    This is a special-case version of string_to_number with a full base-256
    ASCII alphabet. It is the reverse of ``number_to_bytes(n)``.

    Examples::

        >>> bytes_to_number(b'*')
        42
        >>> bytes_to_number(b'\\xff')
        255
        >>> bytes_to_number(b'\\x01\\x00')
        256
        >>> bytes_to_number(b'\\x00\\x01', endian='little')
        256
    """
    if endian == 'big':
        b = reversed(b)

    n = 0
    for i, ch in enumerate(bytearray(b)):
        n ^= ch << i * 8

    return n
def number_to_bytes(n, endian='big'):
    """
    Convert an integer to a corresponding string of bytes.

    :param n: Integer to convert.
    :param endian: Byte order to convert into ('big' or 'little' endian-ness,
        default 'big')

    Assumes bytes are 8 bits.

    This is a special-case version of number_to_string with a full base-256
    ASCII alphabet. It is the reverse of ``bytes_to_number(b)``.

    Examples::

        >>> r(number_to_bytes(42))
        b'*'
        >>> r(number_to_bytes(255))
        b'\\xff'
        >>> r(number_to_bytes(256))
        b'\\x01\\x00'
        >>> r(number_to_bytes(256, endian='little'))
        b'\\x00\\x01'
    """
    res = []
    while n:
        n, ch = divmod(n, 256)
        if PY3:
            res.append(ch)
        else:
            res.append(chr(ch))

    if endian == 'big':
        res.reverse()

    if PY3:
        return bytes(res)
    else:
        return ''.join(res)
def to_str(obj, encoding='utf-8', **encode_args):
    r"""
    Returns a ``str`` of ``obj``, encoding using ``encoding`` if necessary.

    For example::

        >>> some_str = b"\xff"
        >>> some_unicode = u"\u1234"
        >>> some_exception = Exception(u'Error: ' + some_unicode)

        >>> r(to_str(some_str))
        b'\xff'
        >>> r(to_str(some_unicode))
        b'\xe1\x88\xb4'
        >>> r(to_str(some_exception))
        b'Error: \xe1\x88\xb4'
        >>> r(to_str([42]))
        b'[42]'

    See source code for detailed semantics.
    """
    # Note: On py3, ``b'x'.__str__()`` returns ``"b'x'"``, so we need to do
    # the explicit check first.
    if isinstance(obj, binary_type):
        return obj

    # We coerce to unicode if '__unicode__' is available because there is no
    # way to specify encoding when calling ``str(obj)``, so, e.g.,
    # ``str(Exception(u'\u1234'))`` will explode.
    if isinstance(obj, text_type) or hasattr(obj, text_type_magicmethod):
        # Note: unicode(u'foo') is O(1) (by experimentation)
        return text_type(obj).encode(encoding, **encode_args)

    return binary_type(obj)
def to_unicode(obj, encoding='utf-8', fallback='latin1', **decode_args):
    r"""
    Returns a ``unicode`` of ``obj``, decoding using ``encoding`` if
    necessary. If decoding fails, the ``fallback`` encoding (default
    ``latin1``) is used.

    Examples::

        >>> r(to_unicode(b'\xe1\x88\xb4'))
        u'\u1234'
        >>> r(to_unicode(b'\xff'))
        u'\xff'
        >>> r(to_unicode(u'\u1234'))
        u'\u1234'
        >>> r(to_unicode(Exception(u'\u1234')))
        u'\u1234'
        >>> r(to_unicode([42]))
        u'[42]'

    See source code for detailed semantics.
    """
    # Note: on py3, the `bytes` type defines an unhelpful "__str__" function,
    # so we need to do this check (see comments in ``to_str``).
    if not isinstance(obj, binary_type):
        if isinstance(obj, text_type) or hasattr(obj, text_type_magicmethod):
            return text_type(obj)
        obj_str = binary_type(obj)
    else:
        obj_str = obj

    try:
        return text_type(obj_str, encoding, **decode_args)
    except UnicodeDecodeError:
        return text_type(obj_str, fallback, **decode_args)
def to_float(s, default=0.0, allow_nan=False):
    """
    Return input converted into a float. If failed, then return ``default``.

    Note that, by default, ``allow_nan=False``, so ``to_float`` will not
    return ``nan``, ``inf``, or ``-inf``.

    Examples::

        >>> to_float('1.5')
        1.5
        >>> to_float(1)
        1.0
        >>> to_float('')
        0.0
        >>> to_float('nan')
        0.0
        >>> to_float('inf')
        0.0
        >>> to_float('-inf', allow_nan=True)
        -inf
        >>> to_float(None)
        0.0
        >>> to_float(0, default='Empty')
        0.0
        >>> to_float(None, default='Empty')
        'Empty'
    """
    try:
        f = float(s)
    except (TypeError, ValueError):
        return default

    if not allow_nan:
        if f != f or f in _infs:
            return default

    return f
def format_int(n, singular=_Default, plural=_Default):
    """
    Return `singular.format(n)` if n is 1, or `plural.format(n)` otherwise.
    If plural is not specified, then it is assumed to be same as singular
    but suffixed with an 's'.

    :param n:
        Integer which determines pluralness.

    :param singular:
        String with a format() placeholder for n. (Default: `u"{:,}"`)

    :param plural:
        String with a format() placeholder for n. (Default: If singular is
        not default, then it's `singular + u"s"`. Otherwise it's same as
        singular.)

    Example: ::

        >>> r(format_int(1000))
        u'1,000'
        >>> r(format_int(1, u"{} day"))
        u'1 day'
        >>> r(format_int(2, u"{} day"))
        u'2 days'
        >>> r(format_int(2, u"{} box", u"{} boxen"))
        u'2 boxen'
        >>> r(format_int(20000, u"{:,} box", u"{:,} boxen"))
        u'20,000 boxen'
    """
    n = int(n)

    if singular in (None, _Default):
        if plural is _Default:
            plural = None
        singular = u'{:,}'
    elif plural is _Default:
        plural = singular + u's'

    if n == 1 or not plural:
        return singular.format(n)

    return plural.format(n)
def dollars_to_cents(s, allow_negative=False):
    """
    Given a string or integer representing dollars, return an integer of
    equivalent cents, in an input-resilient way.

    This works by stripping any non-numeric characters before attempting to
    cast the value.

    Examples::

        >>> dollars_to_cents('$1')
        100
        >>> dollars_to_cents('1')
        100
        >>> dollars_to_cents(1)
        100
        >>> dollars_to_cents('1e2')
        10000
        >>> dollars_to_cents('-1$', allow_negative=True)
        -100
        >>> dollars_to_cents('1 dollar')
        100
    """
    # TODO: Implement cents_to_dollars
    if not s:
        return

    if isinstance(s, string_types):
        s = ''.join(RE_NUMBER.findall(s))

    dollars = int(round(float(s) * 100))
    if not allow_negative and dollars < 0:
        raise ValueError('Negative values not permitted.')

    return dollars
def slugify(s, delimiter='-'):
    """
    Normalize `s` into ASCII and replace non-word characters with `delimiter`.
    """
    s = unicodedata.normalize('NFKD', to_unicode(s)).encode('ascii', 'ignore').decode('ascii')
    return RE_SLUG.sub(delimiter, s).strip(delimiter).lower()
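A quick sketch of the expected behavior (hedged: the exact output depends on the module-level RE_SLUG pattern, assumed here to replace runs of non-word characters):

slugify(u'H\xe9llo, W\xf6rld!')       # -> 'hello-world' (accents dropped by NFKD + ASCII encode)
slugify(u'some file.txt', '_')        # -> 'some_file_txt', under the same assumption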
def get_cache_buster(src_path, method='importtime'):
    """ Return a string that can be used as a parameter for cache-busting
    URLs for this asset.

    :param src_path:
        Filesystem path to the file we're generating a cache-busting value
        for.

    :param method:
        Method for cache-busting. Supported values: importtime, mtime, md5

    The default is 'importtime', because it requires the least processing.

    Note that the mtime and md5 cache busting methods' results are cached on
    the src_path.

    Example::

        >>> SRC_PATH = os.path.join(os.path.dirname(__file__), 'html.py')
        >>> get_cache_buster(SRC_PATH) is _IMPORT_TIME
        True
        >>> get_cache_buster(SRC_PATH, method='mtime') == _cache_key_by_mtime(SRC_PATH)
        True
        >>> get_cache_buster(SRC_PATH, method='md5') == _cache_key_by_md5(SRC_PATH)
        True
    """
    try:
        fn = _BUST_METHODS[method]
    except KeyError:
        raise KeyError('Unsupported busting method value: %s' % method)

    return fn(src_path)
def _generate_dom_attrs(attrs, allow_no_value=True):
    """ Yield compiled DOM attribute key-value strings.

    If the value is `True`, then it is treated as no-value. If `None`, then it
    is skipped.
    """
    for attr in iterate_items(attrs):
        if isinstance(attr, basestring):
            attr = (attr, True)

        key, value = attr
        if value is None:
            continue  # Skipped entirely

        if value is True and not allow_no_value:
            value = key  # E.g. <option checked="true" />

        if value is True:
            yield key  # No-value attribute, e.g. <option checked />
        else:
            yield '%s="%s"' % (key, value.replace('"', '\\"'))
def tag(tagname, content='', attrs=None):
    """ Helper for programmatically building HTML tags.

    Note that this barely does any escaping, and will happily spit out
    dangerous user input if used as such.

    :param tagname:
        Tag name of the DOM element we want to return.

    :param content:
        Optional content of the DOM element. If `None`, then the element is
        self-closed. By default, the content is an empty string. Supports
        iterables like generators.

    :param attrs:
        Optional dictionary-like collection of attributes for the DOM
        element.

    Example::

        >>> tag('div', content='Hello, world.')
        u'<div>Hello, world.</div>'
        >>> tag('script', attrs={'src': '/static/js/core.js'})
        u'<script src="/static/js/core.js"></script>'
        >>> tag('script', attrs=[('src', '/static/js/core.js'), ('type', 'text/javascript')])
        u'<script src="/static/js/core.js" type="text/javascript"></script>'
        >>> tag('meta', content=None, attrs=dict(content='"quotedquotes"'))
        u'<meta content="\\\\"quotedquotes\\\\"" />'
        >>> tag('ul', (tag('li', str(i)) for i in xrange(3)))
        u'<ul><li>0</li><li>1</li><li>2</li></ul>'
    """
    attrs_str = attrs and ' '.join(_generate_dom_attrs(attrs))
    open_tag = tagname
    if attrs_str:
        open_tag += ' ' + attrs_str

    if content is None:
        return literal('<%s />' % open_tag)

    content = ''.join(iterate(content, unless=(basestring, literal)))
    return literal('<%s>%s</%s>' % (open_tag, content, tagname))
def javascript_link(src_url, src_path=None, cache_bust=None, content='',
                    extra_attrs=None):
    """ Helper for programmatically building HTML JavaScript source include
    links, with optional cache busting.

    :param src_url:
        Goes into the `src` attribute of the `<script src="...">` tag.

    :param src_path:
        Optional filesystem path to the source file, used when `cache_bust`
        is enabled.

    :param content:
        Optional content of the DOM element. If `None`, then the element is
        self-closed.

    :param cache_bust:
        Optional method to use for cache busting. Can be one of: importtime,
        md5, or mtime. If the value is md5 or mtime, then `src_path` must be
        supplied.

    Example::

        >>> javascript_link('/static/js/core.js')
        u'<script src="/static/js/core.js" type="text/javascript"></script>'
    """
    if cache_bust:
        append_suffix = get_cache_buster(src_path=src_path, method=cache_bust)
        delim = '&' if '?' in src_url else '?'
        src_url += delim + append_suffix

    attrs = {
        'src': src_url,
        'type': 'text/javascript',
    }
    if extra_attrs:
        attrs.update(extra_attrs)

    return tag('script', content=content, attrs=attrs)
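A hedged sketch of the cache-busting variant; the filesystem path is hypothetical, and the exact query-string suffix depends on the _BUST_METHODS implementations:

javascript_link('/static/js/core.js',
                src_path='/srv/app/static/js/core.js',  # hypothetical path on disk
                cache_bust='mtime')
# -> u'<script src="/static/js/core.js?<mtime-derived value>" type="text/javascript"></script>'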
def package_meta():
    """Read __init__.py for global package metadata.

    Do this without importing the package.
    """
    _version_re = re.compile(r'__version__\s+=\s+(.*)')
    _url_re = re.compile(r'__url__\s+=\s+(.*)')
    _license_re = re.compile(r'__license__\s+=\s+(.*)')

    with open('lambda_uploader/__init__.py', 'rb') as ffinit:
        initcontent = ffinit.read()
        version = str(ast.literal_eval(_version_re.search(
            initcontent.decode('utf-8')).group(1)))
        url = str(ast.literal_eval(_url_re.search(
            initcontent.decode('utf-8')).group(1)))
        licencia = str(ast.literal_eval(_license_re.search(
            initcontent.decode('utf-8')).group(1)))
    return {
        'version': version,
        'license': licencia,
        'url': url,
    }
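A sketch of how this is typically consumed from setup.py (the setup() keywords shown are standard setuptools arguments):

from setuptools import setup

meta = package_meta()  # parses lambda_uploader/__init__.py without importing it
setup(
    name='lambda-uploader',
    version=meta['version'],
    url=meta['url'],
    license=meta['license'],
)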
def create_subscriptions(config, profile_name):
    '''
    Adds supported subscriptions
    '''
    if 'kinesis' in config.subscription.keys():
        data = config.subscription['kinesis']
        function_name = config.name
        stream_name = data['stream']
        batch_size = data['batch_size']
        starting_position = data['starting_position']
        starting_position_ts = None
        if starting_position == 'AT_TIMESTAMP':
            ts = data.get('starting_position_timestamp')
            starting_position_ts = datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')

        s = KinesisSubscriber(config, profile_name,
                              function_name, stream_name, batch_size,
                              starting_position,
                              starting_position_ts=starting_position_ts)
        s.subscribe()
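For reference, a hypothetical fragment of what config.subscription is expected to contain for the Kinesis branch (field values are illustrative):

subscription = {
    'kinesis': {
        'stream': 'arn:aws:kinesis:us-east-1:123456789012:stream/example',  # hypothetical ARN
        'batch_size': 100,
        'starting_position': 'AT_TIMESTAMP',
        # Only read when starting_position == 'AT_TIMESTAMP':
        'starting_position_timestamp': '2017-01-01T00:00:00Z',
    },
}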
def subscribe(self):
    ''' Subscribes the lambda to the Kinesis stream '''
    try:
        LOG.debug('Creating Kinesis subscription')
        if self.starting_position_ts:
            self._lambda_client \
                .create_event_source_mapping(
                    EventSourceArn=self.stream_name,
                    FunctionName=self.function_name,
                    BatchSize=self.batch_size,
                    StartingPosition=self.starting_position,
                    StartingPositionTimestamp=self.starting_position_ts)
        else:
            self._lambda_client \
                .create_event_source_mapping(
                    EventSourceArn=self.stream_name,
                    FunctionName=self.function_name,
                    BatchSize=self.batch_size,
                    StartingPosition=self.starting_position)
        LOG.debug('Subscription created')
    except botocore.exceptions.ClientError as ex:
        response_code = ex.response['Error']['Code']
        if response_code == 'ResourceConflictException':
            LOG.debug('Subscription exists. Updating ...')
            resp = self._lambda_client \
                .list_event_source_mappings(
                    FunctionName=self.function_name,
                    EventSourceArn=self.stream_name)
            uuid = resp['EventSourceMappings'][0]['UUID']
            self._lambda_client \
                .update_event_source_mapping(
                    UUID=uuid,
                    FunctionName=self.function_name,
                    Enabled=True,
                    BatchSize=self.batch_size)
        else:
            LOG.error('Subscription failed, error=%s' % str(ex))
            raise ex
def _format_vpc_config(self):
    '''
    Returns a VPC config with empty subnet and security group lists if the
    VPC config is set to None by Config, returns the formatted config
    otherwise
    '''
    if self._config.raw['vpc']:
        return {
            'SubnetIds': self._config.raw['vpc']['subnets'],
            'SecurityGroupIds': self._config.raw['vpc']['security_groups'],
        }
    else:
        return {
            'SubnetIds': [],
            'SecurityGroupIds': [],
        }
def _upload_s3(self, zip_file):
    '''
    Uploads the lambda package to s3
    '''
    s3_client = self._aws_session.client('s3')
    transfer = boto3.s3.transfer.S3Transfer(s3_client)
    transfer.upload_file(zip_file, self._config.s3_bucket,
                         self._config.s3_package_name())
def main(argv=None):
    """lambda-uploader command line interface."""
    # Check for Python 2.7 or later
    if sys.version_info[0] < 3 and not sys.version_info[1] == 7:
        raise RuntimeError('lambda-uploader requires Python 2.7 or later')

    import argparse

    parser = argparse.ArgumentParser(
        description='Simple way to create and upload python lambda jobs')

    parser.add_argument('--version', '-v', action='version',
                        version=lambda_uploader.__version__)
    parser.add_argument('--no-upload', dest='no_upload',
                        action='store_const', help='dont upload the zipfile',
                        const=True)
    parser.add_argument('--no-clean', dest='no_clean', action='store_const',
                        help='dont cleanup the temporary workspace',
                        const=True)
    parser.add_argument('--publish', '-p', dest='publish',
                        action='store_const',
                        help='publish an upload to an immutable version',
                        const=True)
    parser.add_argument('--virtualenv', '-e',
                        help='use specified virtualenv instead of making one',
                        default=None)
    parser.add_argument('--extra-files', '-x', action='append',
                        help='include file or directory path in package',
                        default=[])
    parser.add_argument('--no-virtualenv', dest='no_virtualenv',
                        action='store_const',
                        help='do not create or include a virtualenv at all',
                        const=True)
    parser.add_argument('--role', dest='role',
                        default=getenv('LAMBDA_UPLOADER_ROLE'),
                        help=('IAM role to assign the lambda function, '
                              'can be set with $LAMBDA_UPLOADER_ROLE'))
    parser.add_argument('--variables', dest='variables',
                        help='add environment variables')
    parser.add_argument('--profile', dest='profile',
                        help='specify AWS cli profile')
    parser.add_argument('--requirements', '-r', dest='requirements',
                        help='specify a requirements.txt file')
    alias_help = 'alias for published version (WILL SET THE PUBLISH FLAG)'
    parser.add_argument('--alias', '-a', dest='alias', default=None,
                        help=alias_help)
    parser.add_argument('--alias-description', '-m', dest='alias_description',
                        default=None, help='alias description')
    parser.add_argument('--s3-bucket', '-s', dest='s3_bucket',
                        help='S3 bucket to store the lambda function in',
                        default=None)
    parser.add_argument('--s3-key', '-k', dest='s3_key',
                        help='Key name of the lambda function s3 object',
                        default=None)
    parser.add_argument('--config', '-c', help='Overrides lambda.json',
                        default='lambda.json')
    parser.add_argument('function_dir', default=getcwd(), nargs='?',
                        help='lambda function directory')
    parser.add_argument('--no-build', dest='no_build', action='store_const',
                        help='dont build the sourcecode', const=True)

    verbose = parser.add_mutually_exclusive_group()
    verbose.add_argument('-V', dest='loglevel', action='store_const',
                         const=logging.INFO,
                         help="Set log-level to INFO.")
    verbose.add_argument('-VV', dest='loglevel', action='store_const',
                         const=logging.DEBUG,
                         help="Set log-level to DEBUG.")
    parser.set_defaults(loglevel=logging.WARNING)

    args = parser.parse_args()
    logging.basicConfig(level=args.loglevel)

    try:
        _execute(args)
    except Exception:
        print(TRACEBACK_MESSAGE % (INTERROBANG,
                                   lambda_uploader.__version__,
                                   boto3_version,
                                   botocore_version), file=sys.stderr)
        traceback.print_exc()
        sys.stderr.flush()
        sys.exit(1)
def build_package(path, requires, virtualenv=None, ignore=None,
                  extra_files=None, zipfile_name=ZIPFILE_NAME, pyexec=None):
    '''Builds the zip file and creates the package with it'''
    pkg = Package(path, zipfile_name, pyexec)

    if extra_files:
        for fil in extra_files:
            pkg.extra_file(fil)
    if virtualenv is not None:
        pkg.virtualenv(virtualenv)
    pkg.requirements(requires)

    pkg.build(ignore)
    return pkg
def build(self, ignore=None):
    '''Calls all necessary methods to build the Lambda Package'''
    self._prepare_workspace()
    self.install_dependencies()
    self.package(ignore)
def clean_workspace(self):
    '''Clean up the temporary workspace if one exists'''
    if os.path.isdir(self._temp_workspace):
        shutil.rmtree(self._temp_workspace)
def clean_zipfile(self):
    '''remove existing zipfile'''
    if os.path.isfile(self.zip_file):
        os.remove(self.zip_file)
def requirements(self, requires):
    '''
    Sets the requirements for the package.

    It will take either a valid path to a requirements file or
    a list of requirements.
    '''
    if requires:
        if isinstance(requires, basestring) and \
                os.path.isfile(os.path.abspath(requires)):
            self._requirements_file = os.path.abspath(requires)
        else:
            # A plain string of requirements is split into a list.
            if isinstance(requires, basestring):
                requires = requires.split()
            self._requirements_file = None
            self._requirements = requires
    else:
        # If the default requirements file is found use that
        if os.path.isfile(self._requirements_file):
            return
        self._requirements, self._requirements_file = None, None
def virtualenv(self, virtualenv):
    '''
    Sets the virtual environment for the lambda package

    If this is not set then package_dependencies will create a new one.

    Takes a path to a virtualenv or a boolean if the virtualenv creation
    should be skipped.
    '''
    # If a boolean is passed then set the internal _skip_virtualenv flag
    if isinstance(virtualenv, bool):
        self._skip_virtualenv = virtualenv
    else:
        self._virtualenv = virtualenv
        if not os.path.isdir(self._virtualenv):
            raise Exception("virtualenv %s not found" % self._virtualenv)
        LOG.info("Using existing virtualenv at %s" % self._virtualenv)
        # use supplied virtualenv path
        self._pkg_venv = self._virtualenv
        self._skip_virtualenv = True
def install_dependencies(self):
    ''' Creates a virtualenv and installs requirements '''
    # If virtualenv is set to skip then do nothing
    if self._skip_virtualenv:
        LOG.info('Skip Virtualenv set ... nothing to do')
        return

    has_reqs = _isfile(self._requirements_file) or self._requirements
    if self._virtualenv is None and has_reqs:
        LOG.info('Building new virtualenv and installing requirements')
        self._build_new_virtualenv()
        self._install_requirements()
    elif self._virtualenv is None and not has_reqs:
        LOG.info('No requirements found, so no virtualenv will be made')
        self._pkg_venv = False
    else:
        raise Exception('Cannot determine what to do about virtualenv')
def _build_new_virtualenv(self):
    '''Build a new virtualenv if self._virtualenv is set to None'''
    if self._virtualenv is None:
        # virtualenv was "None" which means "do default"
        self._pkg_venv = os.path.join(self._temp_workspace, 'venv')
        self._venv_pip = 'bin/pip'
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            self._venv_pip = 'Scripts\\pip.exe'

        python_exe = self._python_executable()
        proc = Popen(["virtualenv", "-p", python_exe, self._pkg_venv],
                     stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        LOG.debug("Virtualenv stdout: %s" % stdout)
        LOG.debug("Virtualenv stderr: %s" % stderr)

        if proc.returncode != 0:
            raise Exception('virtualenv returned unsuccessfully')
    else:
        raise Exception('cannot build a new virtualenv when asked to omit')
def _install_requirements(self):
    '''
    Install requirements into the virtualenv, if there are any.
    '''
    if not hasattr(self, '_pkg_venv'):
        err = 'Must call build_new_virtualenv before install_requirements'
        raise Exception(err)

    cmd = None
    if self._requirements:
        LOG.debug("Installing requirements found %s in config"
                  % self._requirements)
        cmd = [os.path.join(self._pkg_venv, self._venv_pip),
               'install'] + self._requirements
    elif _isfile(self._requirements_file):
        # Pip install
        LOG.debug("Installing requirements from requirements.txt file")
        cmd = [os.path.join(self._pkg_venv, self._venv_pip),
               "install", "-r", self._requirements_file]

    if cmd is not None:
        prc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = prc.communicate()
        LOG.debug("Pip stdout: %s" % stdout)
        LOG.debug("Pip stderr: %s" % stderr)

        if prc.returncode != 0:
            raise Exception('pip returned unsuccessfully')
def package(self, ignore=None):
    """
    Create a zip file of the lambda script and its dependencies.

    :param list ignore: a list of regular expression strings to match paths
        of files in the source of the lambda script against and ignore
        those files when creating the zip file. The paths to be matched are
        local to the source root.
    """
    ignore = ignore or []
    package = os.path.join(self._temp_workspace, 'lambda_package')

    # Copy site packages into package base
    LOG.info('Copying site packages')

    if hasattr(self, '_pkg_venv') and self._pkg_venv:
        lib_dir = 'lib/python*/site-packages'
        lib64_dir = 'lib64/python*/site-packages'
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            lib_dir = 'lib\\site-packages'
            lib64_dir = 'lib64\\site-packages'

        # Look for the site packages
        lib_site_list = glob.glob(os.path.join(self._pkg_venv, lib_dir))
        if lib_site_list:
            utils.copy_tree(lib_site_list[0], package)
        else:
            LOG.debug("no lib site packages found")

        lib64_site_list = glob.glob(os.path.join(self._pkg_venv, lib64_dir))
        if lib64_site_list:
            lib64_site_packages = lib64_site_list[0]
            if not os.path.islink(lib64_site_packages):
                LOG.info('Copying lib64 site packages')
                utils.copy_tree(lib64_site_packages, package)
        else:
            LOG.debug("no lib64 site packages found")

    # Append the temp workspace to the ignore list:
    ignore.append(r"^%s/.*" % re.escape(TEMP_WORKSPACE_NAME))
    utils.copy_tree(self._path, package, ignore)

    # Add extra files
    for p in self._extra_files:
        LOG.info('Copying extra %s into package' % p)
        ignore.append(re.escape(p))
        if os.path.isdir(p):
            utils.copy_tree(p, package, ignore=ignore, include_parent=True)
        else:
            shutil.copy(p, package)

    self._create_zip(package)
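A hedged usage sketch of the ignore parameter via build_package (patterns are illustrative; they are matched relative to the source root):

pkg = build_package(
    '.',                      # lambda source directory
    ['requests'],             # requirements given as a list
    ignore=[
        r'.*\.pyc$',          # skip compiled files
        r'^tests/.*',         # skip the test tree
    ],
)
pkg.clean_workspace()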
def encode_properties(parameters):
    """
    Performs encoding of url parameters from dictionary to a string. It does
    not escape backslash because it is not needed.

    See: http://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-SetItemProperties
    """
    result = []

    for param in iter(sorted(parameters)):
        if isinstance(parameters[param], (list, tuple)):
            value = ','.join([escape_chars(x) for x in parameters[param]])
        else:
            value = escape_chars(parameters[param])

        result.append("%s=%s" % (param, value))

    return '|'.join(result)
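A small usage sketch (assumes escape_chars passes plain alphanumerics through unchanged):

encode_properties({'build': '42', 'qa': ['passed', 'smoke']})
# -> 'build=42|qa=passed,smoke'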
def splitroot(self, part, sep=sep):
    """
    Splits path string into drive, root and relative path

    Uses '/artifactory/' as a splitting point in URI. Everything before it,
    including '/artifactory/' itself is treated as drive. The next folder is
    treated as root, and everything else is taken for relative path.
    """
    drv = ''
    root = ''

    base = get_global_base_url(part)
    if base and without_http_prefix(part).startswith(without_http_prefix(base)):
        mark = without_http_prefix(base).rstrip(sep) + sep
        parts = part.split(mark)
    else:
        mark = sep + 'artifactory' + sep
        parts = part.split(mark)

    if len(parts) >= 2:
        drv = parts[0] + mark.rstrip(sep)
        rest = sep + mark.join(parts[1:])
    elif part.endswith(mark.rstrip(sep)):
        drv = part
        rest = ''
    else:
        rest = part

    if not rest:
        return drv, '', ''

    if rest == sep:
        return drv, '', ''

    if rest.startswith(sep):
        root, _, part = rest[1:].partition(sep)
        root = sep + root + sep

    return drv, root, part
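An illustrative split (the hostname is hypothetical; assumes no matching global base URL is configured, so the '/artifactory/' marker applies; shown as a bare call although this is a flavour method):

splitroot('http://example.com/artifactory/repo/folder/file.txt')
# -> ('http://example.com/artifactory', '/repo/', 'folder/file.txt')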
def rest_get(self, url, params=None, headers=None, auth=None, verify=True,
             cert=None):
    """ Perform a GET request to url with optional authentication """
    res = requests.get(url, params=params, headers=headers, auth=auth,
                       verify=verify, cert=cert)
    return res.text, res.status_code
def rest_put(self, url, params=None, headers=None, auth=None, verify=True,
             cert=None):
    """ Perform a PUT request to url with optional authentication """
    res = requests.put(url, params=params, headers=headers, auth=auth,
                       verify=verify, cert=cert)
    return res.text, res.status_code
def rest_post(self, url, params=None, headers=None, auth=None, verify=True,
              cert=None):
    """ Perform a POST request to url with optional authentication """
    res = requests.post(url, params=params, headers=headers, auth=auth,
                        verify=verify, cert=cert)
    return res.text, res.status_code
def rest_del(self, url, params=None, auth=None, verify=True, cert=None):
    """ Perform a DELETE request to url with optional authentication """
    res = requests.delete(url, params=params, auth=auth, verify=verify,
                          cert=cert)
    return res.text, res.status_code
def rest_get_stream(self, url, auth=None, verify=True, cert=None):
    """ Perform a chunked GET request to url with optional authentication
    This is specifically to download files.
    """
    res = requests.get(url, auth=auth, stream=True, verify=verify, cert=cert)
    return res.raw, res.status_code
def get_stat_json(self, pathobj):
    """
    Request remote file/directory status info
    Returns a json object as specified by Artifactory REST API
    """
    url = '/'.join([pathobj.drive,
                    'api/storage',
                    str(pathobj.relative_to(pathobj.drive)).strip('/')])

    text, code = self.rest_get(url, auth=pathobj.auth, verify=pathobj.verify,
                               cert=pathobj.cert)
    if code == 404 and "Unable to find item" in text:
        raise OSError(2, "No such file or directory: '%s'" % url)
    if code != 200:
        raise RuntimeError(text)

    return json.loads(text)
def open(self, pathobj):
    """
    Opens the remote file and returns a file-like object HTTPResponse
    Given the nature of HTTP streaming, this object doesn't support seek()
    """
    url = str(pathobj)
    raw, code = self.rest_get_stream(url, auth=pathobj.auth,
                                     verify=pathobj.verify,
                                     cert=pathobj.cert)

    if code != 200:
        raise RuntimeError("%d" % code)

    return raw
def deploy(self, pathobj, fobj, md5=None, sha1=None, parameters=None):
    """
    Uploads a given file-like object
    HTTP chunked encoding will be attempted
    """
    if isinstance(fobj, urllib3.response.HTTPResponse):
        fobj = HTTPResponseWrapper(fobj)

    url = str(pathobj)

    if parameters:
        url += ";%s" % encode_matrix_parameters(parameters)

    headers = {}

    if md5:
        headers['X-Checksum-Md5'] = md5
    if sha1:
        headers['X-Checksum-Sha1'] = sha1

    text, code = self.rest_put_stream(url,
                                      fobj,
                                      headers=headers,
                                      auth=pathobj.auth,
                                      verify=pathobj.verify,
                                      cert=pathobj.cert)

    if code not in [200, 201]:
        raise RuntimeError("%s" % text)
def move(self, src, dst):
    """
    Move artifact from src to dst
    """
    url = '/'.join([src.drive,
                    'api/move',
                    str(src.relative_to(src.drive)).rstrip('/')])

    params = {'to': str(dst.relative_to(dst.drive)).rstrip('/')}

    text, code = self.rest_post(url, params=params, auth=src.auth,
                                verify=src.verify, cert=src.cert)

    if code not in [200, 201]:
        raise RuntimeError("%s" % text)
def set_properties(self, pathobj, props, recursive):
    """
    Set artifact properties
    """
    url = '/'.join([pathobj.drive,
                    'api/storage',
                    str(pathobj.relative_to(pathobj.drive)).strip('/')])

    params = {'properties': encode_properties(props)}

    if not recursive:
        params['recursive'] = '0'

    text, code = self.rest_put(url, params=params, auth=pathobj.auth,
                               verify=pathobj.verify, cert=pathobj.cert)

    if code == 404 and "Unable to find item" in text:
        raise OSError(2, "No such file or directory: '%s'" % url)
    if code != 204:
        raise RuntimeError(text)
def del_properties(self, pathobj, props, recursive):
    """
    Delete artifact properties
    """
    if isinstance(props, str):
        props = (props,)

    url = '/'.join([pathobj.drive,
                    'api/storage',
                    str(pathobj.relative_to(pathobj.drive)).strip('/')])

    params = {'properties': ','.join(sorted(props))}

    if not recursive:
        params['recursive'] = '0'

    text, code = self.rest_del(url, params=params, auth=pathobj.auth,
                               verify=pathobj.verify, cert=pathobj.cert)

    if code == 404 and "Unable to find item" in text:
        raise OSError(2, "No such file or directory: '%s'" % url)
    if code != 204:
        raise RuntimeError(text)
Delete artifact properties
entailment
def relative_to(self, *other):
    """
    Return the relative path to another path identified by the passed
    arguments. If the operation is not possible (because this is not
    a subpath of the other path), raise ValueError.
    """
    obj = super(ArtifactoryPath, self).relative_to(*other)
    obj.auth = self.auth
    obj.verify = self.verify
    obj.cert = self.cert
    return obj
Return the relative path to another path identified by the passed arguments. If the operation is not possible (because this is not a subpath of the other path), raise ValueError.
entailment
def set_properties(self, properties, recursive=True):
    """
    Adds new or modifies existing properties listed in properties.

    properties - a dict which contains the property names and values to set.
                 Property values can be a list or tuple to set multiple
                 values for a key.
    recursive  - on folders, property attachment is recursive by default.
                 Pass recursive=False to disable it.
    """
    if not properties:
        return

    return self._accessor.set_properties(self, properties, recursive)
Adds new or modifies existing properties listed in properties.

properties - a dict which contains the property names and values to set.
             Property values can be a list or tuple to set multiple
             values for a key.
recursive  - on folders, property attachment is recursive by default.
             Pass recursive=False to disable it.
entailment
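A hedged usage sketch of the path-level call (URL and credentials hypothetical); a list value is encoded as a multi-valued property by encode_properties:

path = ArtifactoryPath('https://example.com/artifactory/repo/pkg.zip',
                       auth=('admin', 'password'))
# one single-valued and one multi-valued property, attached recursively
path.set_properties({'build.number': '42', 'qa': ['smoke', 'regression']})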
def getScreenDims(self):
    """returns a tuple that contains (screen_width,screen_height)
    """
    width = ale_lib.getScreenWidth(self.obj)
    height = ale_lib.getScreenHeight(self.obj)
    return (width, height)
returns a tuple that contains (screen_width,screen_height)
entailment
def getScreen(self, screen_data=None):
    """This function fills screen_data with the RAW Pixel data
    screen_data MUST be a numpy array of uint8/int8. This could be
    initialized like so: screen_data = np.empty(w*h, dtype=np.uint8)
    Notice that it must be width*height in size.
    If it is None, then this function will initialize it.
    Note: This is the raw pixel values from the atari, before any RGB
    palette transformation takes place
    """
    if screen_data is None:
        width = ale_lib.getScreenWidth(self.obj)
        height = ale_lib.getScreenHeight(self.obj)
        screen_data = np.zeros(width*height, dtype=np.uint8)
    ale_lib.getScreen(self.obj, as_ctypes(screen_data))
    return screen_data
This function fills screen_data with the RAW Pixel data
screen_data MUST be a numpy array of uint8/int8. This could be
initialized like so: screen_data = np.empty(w*h, dtype=np.uint8)
Notice that it must be width*height in size.
If it is None, then this function will initialize it.
Note: This is the raw pixel values from the atari, before any RGB
palette transformation takes place
entailment
def getScreenRGB(self, screen_data=None):
    """This function fills screen_data with the data
    screen_data MUST be a numpy array of uint32/int32. This can be
    initialized like so: screen_data = np.empty(w*h, dtype=np.uint32)
    Notice that it must be width*height in size.
    If it is None, then this function will initialize it.
    """
    if screen_data is None:
        width = ale_lib.getScreenWidth(self.obj)
        height = ale_lib.getScreenHeight(self.obj)
        screen_data = np.zeros(width*height, dtype=np.uint32)
    ale_lib.getScreenRGB(self.obj, as_ctypes(screen_data))
    return screen_data
This function fills screen_data with the data
screen_data MUST be a numpy array of uint32/int32. This can be
initialized like so: screen_data = np.empty(w*h, dtype=np.uint32)
Notice that it must be width*height in size.
If it is None, then this function will initialize it.
entailment
def getRAM(self, ram=None):
    """This function grabs the atari RAM.
    ram MUST be a numpy array of uint8/int8. This can be initialized like so:
    ram = np.empty(ram_size, dtype=np.uint8)
    Notice: It must be ram_size where ram_size can be retrieved via the
    getRAMSize function.
    If it is None, then this function will initialize it.
    """
    if ram is None:
        ram_size = ale_lib.getRAMSize(self.obj)
        ram = np.zeros(ram_size, dtype=np.uint8)
    ale_lib.getRAM(self.obj, as_ctypes(ram))
    return ram
This function grabs the atari RAM.
ram MUST be a numpy array of uint8/int8. This can be initialized like so:
ram = np.empty(ram_size, dtype=np.uint8)
Notice: It must be ram_size where ram_size can be retrieved via the
getRAMSize function.
If it is None, then this function will initialize it.
entailment
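A hedged sketch tying the accessors together (the ALEInterface construction and ROM path are hypothetical; buffer shapes follow the docstrings above):

import numpy as np

ale = ALEInterface()           # hypothetical setup
ale.loadROM('breakout.bin')    # hypothetical ROM path

width, height = ale.getScreenDims()
screen = np.zeros(width * height, dtype=np.uint8)  # reusable buffer
ale.getScreen(screen)          # fills the caller-supplied buffer in place

ram = ale.getRAM()             # with no argument the method allocates and returns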
def lowstrip(term):
    """Convert to lowercase and collapse runs of whitespace to single spaces"""
    term = re.sub(r'\s+', ' ', term)
    term = term.lower()
    return term
Convert to lowercase and collapse runs of whitespace to single spaces
entailment
def main(left_path, left_column, right_path, right_column,
         outfile, titles, join, minscore, count, warp):
    """Perform the similarity join"""
    right_file = csv.reader(open(right_path, 'r'))
    if titles:
        right_header = next(right_file)
    index = NGram((tuple(r) for r in right_file),
                  threshold=minscore,
                  warp=warp,
                  key=lambda x: lowstrip(x[right_column]))
    left_file = csv.reader(open(left_path, 'r'))
    out = csv.writer(open(outfile, 'w'), lineterminator='\n')
    if titles:
        left_header = next(left_file)
        out.writerow(left_header + ["Rank", "Similarity"] + right_header)
    for row in left_file:
        if not row:
            continue  # skip blank lines
        row = tuple(row)
        results = index.search(lowstrip(row[left_column]),
                               threshold=minscore)
        if results:
            # count may be None, meaning return all matches
            if count:
                results = results[:count]
            for rank, result in enumerate(results, 1):
                out.writerow(row + (rank, result[1]) + result[0])
        elif join == "outer":
            out.writerow(row)
Perform the similarity join
entailment
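A hedged programmatic call matching the signature above (file names hypothetical; column indices are zero-based):

# Join column 0 of left.csv against column 0 of right.csv, keep the five
# best matches per row, and emit unmatched left-hand rows (outer join).
main('left.csv', 0, 'right.csv', 0, 'joined.csv',
     titles=True, join='outer', minscore=0.24, count=5, warp=1.0)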
def console_main():
    """Process command-line arguments."""
    from argparse import ArgumentParser
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('-t', '--titles', action='store_true',
                        help='input files have column titles')
    parser.add_argument(
        '-j', '--join', choices=['inner', 'outer'],
        help=('The kind of left join to perform. Outer join outputs left-hand '
              'rows which have no right hand match, while inner join discards '
              'such rows. Default: %(default)s'))
    parser.add_argument('-m', '--minscore', type=float,
                        help='Minimum match score: %(default)s')
    parser.add_argument('-c', '--count', type=int,
                        help='Max number of rows to match (0 for all): %(default)s')
    parser.add_argument('-w', '--warp', type=float,
                        help='N-gram warp, higher helps short strings: %(default)s')
    parser.add_argument('left', nargs=1, help='First CSV file')
    parser.add_argument('leftcolumn', nargs=1, type=int,
                        help='Column in first CSV file')
    parser.add_argument('right', nargs=1, help='Second CSV file')
    parser.add_argument('rightcolumn', nargs=1, type=int,
                        help='Column in second CSV file')
    parser.add_argument('outfile', nargs=1, help='Output CSV file')
    parser.set_defaults(
        titles=False, join='outer', minscore=0.24, count=0, warp=1.0)
    args = parser.parse_args()
    for path in [args.left[0], args.right[0]]:
        if not os.path.isfile(path):
            parser.error('File "%s" does not exist.' % path)
    if not (0 <= args.minscore <= 1.0):
        parser.error("Minimum score must be between 0 and 1")
    if not args.count >= 0:
        parser.error("Maximum number of matches per row must be non-negative.")
    if args.count == 0:
        args.count = None  # to return all results
    main(args.left[0], args.leftcolumn[0], args.right[0],
         args.rightcolumn[0], args.outfile[0], args.titles, args.join,
         args.minscore, args.count, args.warp)
Process command-line arguments.
entailment
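A hedged way to exercise the parser without a shell (the script name and file paths placed in argv are hypothetical):

import sys

sys.argv = ['csvjoin.py', '--titles', '--join', 'outer', '--minscore', '0.3',
            '--count', '5', 'left.csv', '0', 'right.csv', '0', 'joined.csv']
console_main()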
def copy(self, items=None):
    """Return a new NGram object with the same settings, and
    referencing the same items. Copy is shallow in that each item
    is not recursively copied. Optionally specify alternate items
    to populate the copy.

    >>> from ngram import NGram
    >>> from copy import deepcopy
    >>> n = NGram(['eggs', 'spam'])
    >>> m = n.copy()
    >>> m.add('ham')
    >>> sorted(list(n))
    ['eggs', 'spam']
    >>> sorted(list(m))
    ['eggs', 'ham', 'spam']
    >>> p = n.copy(['foo', 'bar'])
    >>> sorted(list(p))
    ['bar', 'foo']
    """
    return NGram(items if items is not None else self,
                 self.threshold, self.warp, self._key,
                 self.N, self._pad_len, self._pad_char)
Return a new NGram object with the same settings, and referencing
the same items. Copy is shallow in that each item is not recursively
copied. Optionally specify alternate items to populate the copy.

>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
entailment
def _split(self, string):
    """Iterates over the ngrams of a string (no padding).

    >>> from ngram import NGram
    >>> n = NGram()
    >>> list(n._split("hamegg"))
    ['ham', 'ame', 'meg', 'egg']
    """
    for i in range(len(string) - self.N + 1):
        yield string[i:i + self.N]
Iterates over the ngrams of a string (no padding).

>>> from ngram import NGram
>>> n = NGram()
>>> list(n._split("hamegg"))
['ham', 'ame', 'meg', 'egg']
entailment
def add(self, item):
    """Add an item to the N-gram index (if it has not already been added).

    >>> from ngram import NGram
    >>> n = NGram()
    >>> n.add("ham")
    >>> list(n)
    ['ham']
    >>> n.add("spam")
    >>> sorted(list(n))
    ['ham', 'spam']
    """
    if item not in self:
        # Add the item to the base set
        super(NGram, self).add(item)
        # Record length of padded string
        padded_item = self.pad(self.key(item))
        self.length[item] = len(padded_item)
        for ngram in self._split(padded_item):
            # Add a new n-gram and string to index if necessary
            self._grams.setdefault(ngram, {}).setdefault(item, 0)
            # Increment number of times the n-gram appears in the string
            self._grams[ngram][item] += 1
Add an item to the N-gram index (if it has not already been added).

>>> from ngram import NGram
>>> n = NGram()
>>> n.add("ham")
>>> list(n)
['ham']
>>> n.add("spam")
>>> sorted(list(n))
['ham', 'spam']
entailment
def items_sharing_ngrams(self, query):
    """Retrieve the subset of items that share n-grams with the query
    string.

    :param query: look up items that share N-grams with this string.
    :return: mapping from matched string to the number of shared N-grams.

    >>> from ngram import NGram
    >>> n = NGram(["ham","spam","eggs"])
    >>> sorted(n.items_sharing_ngrams("mam").items())
    [('ham', 2), ('spam', 2)]
    """
    # From matched string to number of N-grams shared with query string
    shared = {}
    # Dictionary mapping n-gram to string to number of occurrences of that
    # ngram in the string that remain to be matched.
    remaining = {}
    for ngram in self.split(query):
        try:
            for match, count in self._grams[ngram].items():
                remaining.setdefault(ngram, {}).setdefault(match, count)
                # match as many occurrences as exist in matched string
                if remaining[ngram][match] > 0:
                    remaining[ngram][match] -= 1
                    shared.setdefault(match, 0)
                    shared[match] += 1
        except KeyError:
            pass
    return shared
Retrieve the subset of items that share n-grams with the query string.

:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.

>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
entailment
def searchitem(self, item, threshold=None):
    """Search the index for items whose key exceeds the threshold
    similarity to the key of the given item.

    :return: list of pairs of (item, similarity) by decreasing similarity.

    >>> from ngram import NGram
    >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
    ...     (3, "SPANN")], key=lambda x:x[1])
    >>> sorted(n.searchitem((2, "SPA"), 0.35))
    [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
    """
    return self.search(self.key(item), threshold)
Search the index for items whose key exceeds the threshold
similarity to the key of the given item.

:return: list of pairs of (item, similarity) by decreasing similarity.

>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
...     (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
entailment
def search(self, query, threshold=None):
    """Search the index for items whose key exceeds threshold
    similarity to the query string.

    :param query: returned items will have at least `threshold`
        similarity to the query string.
    :return: list of pairs of (item, similarity) by decreasing similarity.

    >>> from ngram import NGram
    >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
    >>> sorted(n.search("SPA"))
    [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
    >>> n.search("M")
    [((0, 'SPAM'), 0.125)]
    >>> n.search("EG")
    [((2, 'EG'), 1.0)]
    """
    threshold = threshold if threshold is not None else self.threshold
    results = []
    # Identify possible results
    for match, samegrams in self.items_sharing_ngrams(query).items():
        # Union size of the two n-gram multisets: each padded string of
        # length L contributes L - N + 1 n-grams, and shared n-grams are
        # counted once, giving Lq + Lm - 2N + 2 - samegrams.
        allgrams = (len(self.pad(query))
                    + self.length[match] - (2 * self.N) - samegrams + 2)
        similarity = self.ngram_similarity(samegrams, allgrams, self.warp)
        if similarity >= threshold:
            results.append((match, similarity))
    # Sort results by decreasing similarity
    results.sort(key=lambda x: x[1], reverse=True)
    return results
Search the index for items whose key exceeds threshold similarity
to the query string.

:param query: returned items will have at least `threshold`
    similarity to the query string.
:return: list of pairs of (item, similarity) by decreasing similarity.

>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
>>> sorted(n.search("SPA"))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
>>> n.search("M")
[((0, 'SPAM'), 0.125)]
>>> n.search("EG")
[((2, 'EG'), 1.0)]
entailment
def finditem(self, item, threshold=None):
    """Return most similar item to the provided one, or None if
    nothing exceeds the threshold.

    >>> from ngram import NGram
    >>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
    ...     key=lambda x:x[1].lower())
    >>> n.finditem((3, 'Hom'))
    (1, 'Ham')
    >>> n.finditem((4, "Oggsy"))
    (2, 'Eggsy')
    >>> n.finditem((4, "Oggsy"), 0.8)
    """
    results = self.searchitem(item, threshold)
    if results:
        return results[0][0]
    else:
        return None
Return most similar item to the provided one, or None if nothing
exceeds the threshold.

>>> from ngram import NGram
>>> n = NGram([(0, "Spam"), (1, "Ham"), (2, "Eggsy"), (3, "Egggsy")],
...     key=lambda x:x[1].lower())
>>> n.finditem((3, 'Hom'))
(1, 'Ham')
>>> n.finditem((4, "Oggsy"))
(2, 'Eggsy')
>>> n.finditem((4, "Oggsy"), 0.8)
entailment
def find(self, query, threshold=None):
    """Simply return the best match to the query, None on no match.

    >>> from ngram import NGram
    >>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
    >>> n.find('Hom')
    'Ham'
    >>> n.find("Spom")
    'Spam'
    >>> n.find("Spom", 0.8)
    """
    results = self.search(query, threshold)
    if results:
        return results[0][0]
    else:
        return None
Simply return the best match to the query, None on no match.

>>> from ngram import NGram
>>> n = NGram(["Spam","Eggs","Ham"], key=lambda x:x.lower(), N=1)
>>> n.find('Hom')
'Ham'
>>> n.find("Spom")
'Spam'
>>> n.find("Spom", 0.8)
entailment
def ngram_similarity(samegrams, allgrams, warp=1.0):
    """Similarity for two sets of n-grams.

    :note: ``similarity = (a**e - d**e)/a**e`` where `a` is "all n-grams",
        `d` is "different n-grams" and `e` is the warp.
    :param samegrams: number of n-grams shared by the two strings.
    :param allgrams: total of the distinct n-grams across the two strings.
    :return: similarity in the range 0.0 to 1.0.

    >>> from ngram import NGram
    >>> NGram.ngram_similarity(5, 10)
    0.5
    >>> NGram.ngram_similarity(5, 10, warp=2)
    0.75
    >>> NGram.ngram_similarity(5, 10, warp=3)
    0.875
    >>> NGram.ngram_similarity(2, 4, warp=2)
    0.75
    >>> NGram.ngram_similarity(3, 4)
    0.75
    """
    if abs(warp - 1.0) < 1e-9:
        similarity = float(samegrams) / allgrams
    else:
        diffgrams = float(allgrams - samegrams)
        similarity = ((allgrams ** warp - diffgrams ** warp)
                      / (allgrams ** warp))
    return similarity
Similarity for two sets of n-grams.

:note: ``similarity = (a**e - d**e)/a**e`` where `a` is "all n-grams",
    `d` is "different n-grams" and `e` is the warp.
:param samegrams: number of n-grams shared by the two strings.
:param allgrams: total of the distinct n-grams across the two strings.
:return: similarity in the range 0.0 to 1.0.

>>> from ngram import NGram
>>> NGram.ngram_similarity(5, 10)
0.5
>>> NGram.ngram_similarity(5, 10, warp=2)
0.75
>>> NGram.ngram_similarity(5, 10, warp=3)
0.875
>>> NGram.ngram_similarity(2, 4, warp=2)
0.75
>>> NGram.ngram_similarity(3, 4)
0.75
entailment
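As a worked check of the warp term: with samegrams=5, allgrams=10 and warp=2, diffgrams is 5, so similarity = (10**2 - 5**2) / 10**2 = 75/100 = 0.75, matching the second doctest above; with warp=1 the same counts give the plain ratio 5/10 = 0.5.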
def compare(s1, s2, **kwargs):
    """Compares two strings and returns their similarity.

    :param s1: first string
    :param s2: second string
    :param kwargs: additional keyword arguments passed to __init__.
    :return: similarity between 0.0 and 1.0.

    >>> from ngram import NGram
    >>> NGram.compare('spa', 'spam')
    0.375
    >>> NGram.compare('ham', 'bam')
    0.25
    >>> NGram.compare('spam', 'pam') #N=2
    0.375
    >>> NGram.compare('ham', 'ams', N=1)
    0.5
    """
    if s1 is None or s2 is None:
        if s1 == s2:
            return 1.0
        return 0.0
    try:
        return NGram([s1], **kwargs).search(s2)[0][1]
    except IndexError:
        return 0.0
Compares two strings and returns their similarity.

:param s1: first string
:param s2: second string
:param kwargs: additional keyword arguments passed to __init__.
:return: similarity between 0.0 and 1.0.

>>> from ngram import NGram
>>> NGram.compare('spa', 'spam')
0.375
>>> NGram.compare('ham', 'bam')
0.25
>>> NGram.compare('spam', 'pam') #N=2
0.375
>>> NGram.compare('ham', 'ams', N=1)
0.5
entailment
def clear(self):
    """Remove all elements from this set.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> sorted(list(n))
    ['eggs', 'spam']
    >>> n.clear()
    >>> list(n)
    []
    """
    super(NGram, self).clear()
    self._grams = {}
    self.length = {}
Remove all elements from this set.

>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> sorted(list(n))
['eggs', 'spam']
>>> n.clear()
>>> list(n)
[]
entailment
def union(self, *others):
    """Return the union of two or more sets as a new set.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> sorted(list(a.union(b)))
    ['eggs', 'ham', 'spam']
    """
    return self.copy(super(NGram, self).union(*others))
Return the union of two or more sets as a new set.

>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.union(b)))
['eggs', 'ham', 'spam']
entailment
def difference(self, *others):
    """Return the difference of two or more sets as a new set.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> list(a.difference(b))
    ['eggs']
    """
    return self.copy(super(NGram, self).difference(*others))
Return the difference of two or more sets as a new set.

>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
entailment
def intersection(self, *others):
    """Return the intersection of two or more sets as a new set.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> list(a.intersection(b))
    ['spam']
    """
    return self.copy(super(NGram, self).intersection(*others))
Return the intersection of two or more sets as a new set.

>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
entailment
def intersection_update(self, *others):
    """Update the set with the intersection of itself and other sets.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> other = set(['spam', 'ham'])
    >>> n.intersection_update(other)
    >>> list(n)
    ['spam']
    """
    self.difference_update(super(NGram, self).difference(*others))
Update the set with the intersection of itself and other sets.

>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.intersection_update(other)
>>> list(n)
['spam']
entailment
def symmetric_difference(self, other):
    """Return the symmetric difference of two sets as a new set.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> sorted(list(a.symmetric_difference(b)))
    ['eggs', 'ham']
    """
    return self.copy(super(NGram, self).symmetric_difference(other))
Return the symmetric difference of two sets as a new set.

>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> sorted(list(a.symmetric_difference(b)))
['eggs', 'ham']
entailment
def symmetric_difference_update(self, other):
    """Update the set with the symmetric difference of itself and `other`.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> other = set(['spam', 'ham'])
    >>> n.symmetric_difference_update(other)
    >>> sorted(list(n))
    ['eggs', 'ham']
    """
    intersection = super(NGram, self).intersection(other)
    self.update(other)  # add items present in other
    self.difference_update(intersection)
Update the set with the symmetric difference of itself and `other`.

>>> from ngram import NGram
>>> n = NGram(['spam', 'eggs'])
>>> other = set(['spam', 'ham'])
>>> n.symmetric_difference_update(other)
>>> sorted(list(n))
['eggs', 'ham']
entailment
def sub(value, arg):
    """Subtract the arg from the value."""
    try:
        nvalue, narg = handle_float_decimal_combinations(
            valid_numeric(value), valid_numeric(arg), '-')
        return nvalue - narg
    except (ValueError, TypeError):
        try:
            return value - arg
        except Exception:
            return ''
Subtract the arg from the value.
entailment
def absolute(value):
    """Return the absolute value."""
    try:
        return abs(valid_numeric(value))
    except (ValueError, TypeError):
        try:
            return abs(value)
        except Exception:
            return ''
Return the absolute value.
entailment
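The two filters are ordinary Python functions, so they can be sanity-checked outside a template; this sketch assumes valid_numeric rejects non-numeric input with ValueError or TypeError:

print(sub(10, 3))        # 7
print(absolute(-4.2))    # 4.2
print(absolute('oops'))  # '' : both the coercion and the abs() fallback fail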
def cli(config, server, api_key, all, credentials, project):
    """Create the cli command line."""
    # Check first for the .pybossa.cfg file to configure server and api-key
    home = expanduser("~")
    if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
        config.parser.read(os.path.join(home, '.pybossa.cfg'))
        config.server = config.parser.get(credentials, 'server')
        config.api_key = config.parser.get(credentials, 'apikey')
        try:
            config.all = config.parser.get(credentials, 'all')
        except ConfigParser.NoOptionError:
            config.all = None
    if server:
        config.server = server
    if api_key:
        config.api_key = api_key
    if all:
        config.all = all
    try:
        config.project = json.loads(project.read())
    except JSONDecodeError as e:
        click.secho("Error: invalid JSON format in project.json:", fg='red')
        if e.msg == 'Expecting value':
            e.msg += " (if string enclose it with double quotes)"
        click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg,
                                                  e.lineno, e.colno))
        raise click.Abort()
    try:
        project_schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "short_name": {"type": "string"},
                "description": {"type": "string"},
            },
        }
        jsonschema.validate(config.project, project_schema)
    except jsonschema.exceptions.ValidationError as e:
        click.secho("Error: invalid type in project.json", fg='red')
        click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
        click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
        raise click.Abort()
    config.pbclient = pbclient
    config.pbclient.set('endpoint', config.server)
    config.pbclient.set('api_key', config.api_key)
Create the cli command line.
entailment
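A hedged example of the ~/.pybossa.cfg file read above (the [default] section name is an assumption and must match the --credentials option; the keys are those the code looks up):

[default]
server = https://pybossa.example.com
apikey = your-api-key
all = 0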
def version():
    """Show pbs version."""
    try:
        import pkg_resources
        click.echo(pkg_resources.get_distribution('pybossa-pbs').version)
    except ImportError:
        click.echo("pybossa-pbs package not found!")
Show pbs version.
entailment
def update_project(config, task_presenter, results,
                   long_description, tutorial, watch):  # pragma: no cover
    """Update project templates and information."""
    if watch:
        res = _update_project_watch(config, task_presenter, results,
                                    long_description, tutorial)
    else:
        res = _update_project(config, task_presenter, results,
                              long_description, tutorial)
    click.echo(res)
Update project templates and information.
entailment
def add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project."""
    res = _add_tasks(config, tasks_file, tasks_type, priority, redundancy)
    click.echo(res)
Add tasks to a project.
entailment
def add_helpingmaterials(config, helping_materials_file, helping_type):
    """Add helping materials to a project."""
    res = _add_helpingmaterials(config, helping_materials_file, helping_type)
    click.echo(res)
Add helping materials to a project.
entailment
def delete_tasks(config, task_id):
    """Delete tasks from a project."""
    if task_id is None:
        msg = ("Are you sure you want to delete all the tasks and "
               "associated task runs?")
        if click.confirm(msg):
            res = _delete_tasks(config, task_id)
            click.echo(res)
        else:
            click.echo("Aborting.")
    else:
        res = _delete_tasks(config, task_id)
        click.echo(res)
Delete tasks from a project.
entailment
def update_task_redundancy(config, task_id, redundancy):
    """Update task redundancy for a project."""
    if task_id is None:
        msg = ("Are you sure you want to update all the tasks redundancy?")
        if click.confirm(msg):
            res = _update_tasks_redundancy(config, task_id, redundancy)
            click.echo(res)
        else:
            click.echo("Aborting.")
    else:
        res = _update_tasks_redundancy(config, task_id, redundancy)
        click.echo(res)
Update task redundancy for a project.
entailment
def _create_project(config):
    """Create a project in a PyBossa server."""
    try:
        response = config.pbclient.create_project(config.project['name'],
                                                  config.project['short_name'],
                                                  config.project['description'])
        check_api_error(response)
        return ("Project: %s created!" % config.project['short_name'])
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" %
                config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
Create a project in a PyBossa server.
entailment
def _update_project_watch(config, task_presenter, results,
                          long_description, tutorial):  # pragma: no cover
    """Update a project in a loop."""
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    path = os.getcwd()
    event_handler = PbsHandler(config, task_presenter, results,
                               long_description, tutorial)
    observer = Observer()
    # We only want the current folder, not sub-folders
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Update a project in a loop.
entailment
def _update_task_presenter_bundle_js(project):
    """Append to template a distribution bundle js."""
    if os.path.isfile('bundle.min.js'):
        with open('bundle.min.js') as f:
            js = f.read()
        project.info['task_presenter'] += "<script>\n%s\n</script>" % js
        return
    if os.path.isfile('bundle.js'):
        with open('bundle.js') as f:
            js = f.read()
        project.info['task_presenter'] += "<script>\n%s\n</script>" % js
Append to template a distribution bundle js.
entailment
def _update_project(config, task_presenter, results,
                    long_description, tutorial):
    """Update a project."""
    try:
        # Get project
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        # Update attributes
        project.name = config.project['name']
        project.short_name = config.project['short_name']
        project.description = config.project['description']
        # Update long_description
        with open(long_description, 'r') as f:
            project.long_description = f.read()
        # Update task presenter
        with open(task_presenter, 'r') as f:
            project.info['task_presenter'] = f.read()
        _update_task_presenter_bundle_js(project)
        # Update results
        with open(results, 'r') as f:
            project.info['results'] = f.read()
        # Update tutorial
        with open(tutorial, 'r') as f:
            project.info['tutorial'] = f.read()
        response = config.pbclient.update_project(project)
        check_api_error(response)
        return ("Project %s updated!" % config.project['short_name'])
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" %
                config.server)
    except ProjectNotFound:
        return ("Project not found! The project: %s is missing. "
                "Use the flag --all=1 to search across the whole server."
                % config.project['short_name'])
    except TaskNotFound:
        raise
Update a project.
entailment
def _load_data(data_file, data_type):
    """Load data from CSV, JSON, Excel, ..., formats."""
    raw_data = data_file.read()
    if data_type is None:
        data_type = data_file.name.split('.')[-1]
    # Data list to process
    data = []
    # JSON type
    if data_type == 'json':
        data = json.loads(raw_data)
        return data
    # CSV type
    elif data_type == 'csv':
        csv_data = StringIO(raw_data)
        reader = csv.DictReader(csv_data, delimiter=',')
        for line in reader:
            data.append(line)
        return data
    elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
        excel_data = StringIO(raw_data)
        wb = openpyxl.load_workbook(excel_data)
        ws = wb.active
        # First headers
        headers = []
        for row in ws.iter_rows(max_row=1):
            for cell in row:
                tmp = '_'.join(cell.value.split(" ")).lower()
                headers.append(tmp)
        # Simulate DictReader
        for row in ws.iter_rows(row_offset=1):
            values = []
            for cell in row:
                values.append(cell.value)
            # zip works on both Python 2 and 3 (itertools.izip is Python 2 only)
            tmp = dict(zip(headers, values))
            if len(values) == len(headers) and not row_empty(values):
                data.append(tmp)
        return data
    # PO type
    elif data_type == 'po':
        po = polib.pofile(raw_data)
        for entry in po.untranslated_entries():
            data.append(entry.__dict__)
        return data
    # PROPERTIES type (used in Java and Firefox extensions)
    elif data_type == 'properties':
        lines = raw_data.split('\n')
        for l in lines:
            if l:
                # split only on the first '=' so values may contain '='
                var_id, string = l.split('=', 1)
                tmp = dict(var_id=var_id, string=string)
                data.append(tmp)
        return data
    else:
        return data
Load data from CSV, JSON, Excel, ..., formats.
entailment
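A hedged sketch of the .properties branch at the end (the input lines and the rows they yield are illustrative):

# A file containing the two lines below:
#   greeting=Hello World
#   farewell=Goodbye
# would make _load_data return:
# [{'var_id': 'greeting', 'string': 'Hello World'},
#  {'var_id': 'farewell', 'string': 'Goodbye'}]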
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
    """Add tasks to a project."""
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(tasks_file, tasks_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Check if for the data we have to auto-throttle task creation
        sleep, msg = enable_auto_throttling(config, data)
        # If true, warn user
        if sleep:  # pragma: no cover
            click.secho(msg, fg='yellow')
        # Show progress bar
        with click.progressbar(data, label="Adding Tasks") as pgbar:
            for d in pgbar:
                task_info = create_task_info(d)
                response = config.pbclient.create_task(project_id=project.id,
                                                       info=task_info,
                                                       n_answers=redundancy,
                                                       priority_0=priority)
                check_api_error(response)
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s tasks added to project: %s" %
                (len(data), config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" %
                config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
Add tasks to a project.
entailment
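A hedged programmatic sketch of the helper (config is assumed to be an initialized pbs Config with a working pbclient; the file name is hypothetical):

with open('tasks.json') as tasks_file:
    # priority maps to the task's priority_0 field; redundancy to n_answers
    print(_add_tasks(config, tasks_file, 'json', priority=0.5, redundancy=3))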
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project."""
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Check if for the data we have to auto-throttle creation
        sleep, msg = enable_auto_throttling(config, data,
                                            endpoint='/api/helpingmaterial')
        # If true, warn user
        if sleep:  # pragma: no cover
            click.secho(msg, fg='yellow')
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" %
                (len(data), config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" %
                config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
Add helping materials to a project.
entailment