def encode(self) -> str:
    """
    Create a token based on the data held in the class.

    :return: A new token
    :rtype: str
    """
    payload = {}
    payload.update(self.registered_claims)
    payload.update(self.payload)
    return encode(self.secret, payload, self.alg, self.header)
def decode(secret: Union[str, bytes], token: Union[str, bytes],
           alg: str = default_alg) -> 'Jwt':
    """
    Decodes the given token into an instance of `Jwt`.

    :param secret: The secret used to decode the token. Must match the
        secret used when creating the token.
    :type secret: Union[str, bytes]
    :param token: The token to decode.
    :type token: Union[str, bytes]
    :param alg: The algorithm used to decode the token. Must match the
        algorithm used when creating the token.
    :type alg: str
    :return: The decoded token.
    :rtype: `Jwt`
    """
    header, payload = decode(secret, token, alg)
    return Jwt(secret, payload, alg, header)
def compare(self, jwt: 'Jwt', compare_dates: bool = False) -> bool:
    """
    Compare against another `Jwt`.

    :param jwt: The token to compare against.
    :type jwt: Jwt
    :param compare_dates: Should the comparison take dates into account?
    :type compare_dates: bool
    :return: Are the two Jwts the same?
    :rtype: bool
    """
    if self.secret != jwt.secret:
        return False
    if self.payload != jwt.payload:
        return False
    if self.alg != jwt.alg:
        return False
    if self.header != jwt.header:
        return False

    expected_claims = self.registered_claims
    actual_claims = jwt.registered_claims

    if not compare_dates:
        strip = ['exp', 'nbf', 'iat']
        # Null out the date claims so they are ignored in the comparison.
        expected_claims = {k: (v if k not in strip else None)
                           for k, v in expected_claims.items()}
        actual_claims = {k: (v if k not in strip else None)
                         for k, v in actual_claims.items()}

    if expected_claims != actual_claims:
        return False

    return True
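A minimal round-trip sketch of how the three methods above fit together, assuming the surrounding `Jwt` class and its module-level `encode`/`decode` helpers (not shown here):

original = Jwt('my-secret', {'user_id': 42})
token = original.encode()                # sign the claims into a compact token
parsed = Jwt.decode('my-secret', token)  # verify the signature and parse
assert parsed.payload['user_id'] == 42
parsed.compare(original)  # True when secret, payload, alg and header all match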
def get(self, request, hash, filename):
    """Download a file."""
    if _ws_download is True:
        return HttpResponseForbidden()
    upload = Upload.objects.uploaded().get(hash=hash, name=filename)
    return FileResponse(upload.file, content_type=upload.type)
def b64_encode(data: bytes) -> bytes:
    """
    :param data: Data to encode.
    :type data: bytes
    :return: Base 64 encoded data with padding removed.
    :rtype: bytes
    """
    encoded = urlsafe_b64encode(data)
    return encoded.replace(b'=', b'')
def b64_decode(data: bytes) -> bytes:
    """
    :param data: Base 64 encoded data to decode.
    :type data: bytes
    :return: Base 64 decoded data.
    :rtype: bytes
    """
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += b'=' * (4 - missing_padding)
    return urlsafe_b64decode(data)
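A quick round trip through the two helpers above; `urlsafe_b64encode`/`urlsafe_b64decode` come from the standard library's `base64` module:

from base64 import urlsafe_b64decode, urlsafe_b64encode

raw = b'any carnal pleasure'        # 19 bytes, so standard base64 would need '=' padding
stripped = b64_encode(raw)          # b'YW55IGNhcm5hbCBwbGVhc3VyZQ' -- padding removed
assert b64_decode(stripped) == raw  # padding is restored before decoding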
def to_bytes(data: Union[str, bytes]) -> bytes:
    """
    :param data: Data to convert to bytes.
    :type data: Union[str, bytes]
    :return: `data` encoded to UTF8.
    :rtype: bytes
    """
    if isinstance(data, bytes):
        return data
    return data.encode('utf-8')
def from_bytes(data: Union[str, bytes]) -> str:
    """
    :param data: A UTF8 byte string.
    :type data: Union[str, bytes]
    :return: `data` decoded from UTF8.
    :rtype: str
    """
    if isinstance(data, str):
        return data
    return str(data, 'utf-8')
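Both converters pass values already in the target type straight through, so they are safe on inputs that may be either str or bytes:

assert to_bytes('héllo') == b'h\xc3\xa9llo'
assert to_bytes(b'raw') == b'raw'  # bytes pass through untouched
assert from_bytes(b'h\xc3\xa9llo') == 'héllo'
assert from_bytes('already a str') == 'already a str'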
def camelize_classname(base, tablename, table):
    """Produce a 'camelized' class name, e.g.
    'words_and_underscores' -> 'WordsAndUnderscores'"""
    return str(tablename[0].upper() +
               re.sub(r'_([a-z])',
                      lambda m: m.group(1).upper(),
                      tablename[1:]))
def pluralize_collection(base, local_cls, referred_cls, constraint):
    """Produce an 'uncamelized', 'pluralized' class name, e.g.
    'SomeTerm' -> 'some_terms'"""
    referred_name = referred_cls.__name__
    uncamelized = re.sub(r'[A-Z]',
                         lambda m: "_%s" % m.group(0).lower(),
                         referred_name)[1:]
    pluralized = _pluralizer.plural(uncamelized)
    return pluralized
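These two hooks match the signatures SQLAlchemy's automap extension expects, so they can be wired in when reflecting a database; a sketch assuming `_pluralizer` is an `inflect.engine()` and a throwaway SQLite URL:

import re

import inflect
from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base

_pluralizer = inflect.engine()
Base = automap_base()
engine = create_engine('sqlite:///mydatabase.db')  # hypothetical database
Base.prepare(engine, reflect=True,
             classname_for_table=camelize_classname,
             name_for_collection_relationship=pluralize_collection)
# A 'word_items' table now maps to Base.classes.WordItems, and related
# collections get pluralized snake_case names.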
def is_compressed_json_file(abspath):
    """Test whether a file is a valid json file.

    - *.json: uncompressed, utf-8 encoded json file
    - *.js: uncompressed, utf-8 encoded json file
    - *.gz: compressed, utf-8 encoded json file
    """
    abspath = abspath.lower()
    fname, ext = os.path.splitext(abspath)
    if ext in [".json", ".js"]:
        is_compressed = False
    elif ext == ".gz":
        is_compressed = True
    else:
        raise ValueError(
            "'%s' is not a valid json file. "
            "extension has to be '.json' or '.js' for uncompressed, '.gz' "
            "for compressed." % abspath)
    return is_compressed
def dump_set(self, obj, class_name=set_class_name):
    """
    ``set`` dumper.
    """
    return {"$" + class_name: [self._json_convert(item) for item in obj]}
def dump_deque(self, obj, class_name="collections.deque"):
    """
    ``collections.deque`` dumper.
    """
    return {"$" + class_name: [self._json_convert(item) for item in obj]}
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"):
    """
    ``collections.OrderedDict`` dumper.
    """
    return {
        "$" + class_name: [
            (key, self._json_convert(value))
            for key, value in iteritems(obj)
        ]
    }
def dump_nparray(self, obj, class_name=numpy_ndarray_class_name):
    """
    ``numpy.ndarray`` dumper.
    """
    return {"$" + class_name: self._json_convert(obj.tolist())}
def _invalidates_cache(f):
    """
    Decorator for rruleset methods which may invalidate the
    cached length.
    """
    def inner_func(self, *args, **kwargs):
        rv = f(self, *args, **kwargs)
        self._invalidate_cache()
        return rv

    return inner_func
def before(self, dt, inc=False):
    """ Returns the last recurrence before the given datetime instance. The
    inc keyword defines what happens if dt is an occurrence. With inc=True,
    if dt itself is an occurrence, it will be returned. """
    if self._cache_complete:
        gen = self._cache
    else:
        gen = self
    last = None
    if inc:
        for i in gen:
            if i > dt:
                break
            last = i
    else:
        for i in gen:
            if i >= dt:
                break
            last = i
    return last
def after(self, dt, inc=False):
    """ Returns the first recurrence after the given datetime instance. The
    inc keyword defines what happens if dt is an occurrence. With inc=True,
    if dt itself is an occurrence, it will be returned.  """
    if self._cache_complete:
        gen = self._cache
    else:
        gen = self
    if inc:
        for i in gen:
            if i >= dt:
                return i
    else:
        for i in gen:
            if i > dt:
                return i
    return None
def xafter(self, dt, count=None, inc=False):
    """
    Generator which yields up to `count` recurrences after the given
    datetime instance, equivalent to `after`.

    :param dt:
        The datetime at which to start generating recurrences.

    :param count:
        The maximum number of recurrences to generate. If `None` (default),
        dates are generated until the recurrence rule is exhausted.

    :param inc:
        If `dt` is an instance of the rule and `inc` is `True`, it is
        included in the output.

    :yields: Yields a sequence of `datetime` objects.
    """
    if self._cache_complete:
        gen = self._cache
    else:
        gen = self

    # Select the comparison function
    if inc:
        def comp(dc, dtc):
            return dc >= dtc
    else:
        def comp(dc, dtc):
            return dc > dtc

    # Generate dates
    n = 0
    for d in gen:
        if comp(d, dt):
            if count is not None:
                n += 1
                if n > count:
                    break

            yield d
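These three lookup methods are available on dateutil's rrule and rruleset objects, so their behavior can be seen with a plain daily rule:

from datetime import datetime
from dateutil.rrule import DAILY, rrule

rule = rrule(DAILY, dtstart=datetime(2024, 1, 1), count=10)
probe = datetime(2024, 1, 5)
rule.before(probe)                 # datetime(2024, 1, 4, 0, 0)
rule.after(probe)                  # datetime(2024, 1, 6, 0, 0)
rule.before(probe, inc=True)       # datetime(2024, 1, 5, 0, 0) -- probe is an occurrence
list(rule.xafter(probe, count=3))  # the next three occurrences after probe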
def replace(self, **kwargs):
    """Return new rrule with same attributes except for those attributes
    given new values by whichever keyword arguments are specified."""
    new_kwargs = {"interval": self._interval,
                  "count": self._count,
                  "dtstart": self._dtstart,
                  "freq": self._freq,
                  "until": self._until,
                  "wkst": self._wkst,
                  "cache": False if self._cache is None else True}
    new_kwargs.update(self._original_rule)
    new_kwargs.update(kwargs)
    return rrule(**new_kwargs)
def __construct_byset(self, start, byxxx, base):
    """
    If a `BYXXX` sequence is passed to the constructor at the same level as
    `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
    specifications which cannot be reached given some starting conditions.

    This occurs whenever the interval is not coprime with the base of a
    given unit and the difference between the starting position and the
    ending position is not coprime with the greatest common divisor between
    the interval and the base. For example, with a FREQ of hourly starting
    at 17:00 and an interval of 4, the only valid values for BYHOUR would
    be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not coprime.

    :param start:
        Specifies the starting position.
    :param byxxx:
        An iterable containing the list of allowed values.
    :param base:
        The largest allowable value for the specified frequency (e.g.
        24 hours, 60 minutes).

    This does not preserve the type of the iterable, returning a set
    instead, since the values should be unique and the order is irrelevant;
    this will also speed up later lookups.

    In the event of an empty set, raises a :exception:`ValueError`, as this
    results in an empty rrule.
    """
    cset = set()

    # Support a single byxxx value.
    if isinstance(byxxx, integer_types):
        byxxx = (byxxx, )

    for num in byxxx:
        i_gcd = gcd(self._interval, base)
        # Use divmod rather than % because we need to wrap negative nums.
        if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
            cset.add(num)

    if len(cset) == 0:
        raise ValueError("Invalid rrule byxxx generates an empty set.")

    return cset
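A worked check of the docstring's example, using the same gcd/divmod filter the method applies (plain Python, independent of rrule internals):

from math import gcd

interval, base, start = 4, 24, 17  # FREQ=HOURLY;INTERVAL=4, starting at 17:00
step = gcd(interval, base)         # 4 -- interval and base are not coprime
reachable = {h for h in range(base) if divmod(h - start, step)[1] == 0}
print(sorted(reachable))           # [1, 5, 9, 13, 17, 21]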
def __mod_distance(self, value, byxxx, base):
    """
    Calculates the next value in a sequence where the `FREQ` parameter is
    specified along with a `BYXXX` parameter at the same "level"
    (e.g. `HOURLY` specified with `BYHOUR`).

    :param value:
        The old value of the component.
    :param byxxx:
        The `BYXXX` set, which should have been generated by
        `rrule._construct_byset`, or something else which checks that a
        valid rule is present.
    :param base:
        The largest allowable value for the specified frequency (e.g.
        24 hours, 60 minutes).

    If a valid value is not found after `base` iterations (the maximum
    number before the sequence would start to repeat), this raises a
    :exception:`ValueError`, as no valid values were found.

    This returns a tuple of `divmod(n*interval, base)`, where `n` is the
    smallest number of `interval` repetitions until the next specified
    value in `byxxx` is found.
    """
    accumulator = 0
    for ii in range(1, base + 1):
        # Using divmod() over % to account for negative intervals
        div, value = divmod(value + self._interval, base)
        accumulator += div
        if value in byxxx:
            return (accumulator, value)
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
    """
    Two ways to specify this: +1MO or MO(+1)
    """
    l = []
    for wday in value.split(','):
        if '(' in wday:
            # If it's of the form TH(+1), etc.
            splt = wday.split('(')
            w = splt[0]
            n = int(splt[1][:-1])
        elif len(wday):
            # If it's of the form +1MO
            for i in range(len(wday)):
                if wday[i] not in '+-0123456789':
                    break
            n = wday[:i] or None
            w = wday[i:]
            if n:
                n = int(n)
        else:
            raise ValueError("Invalid (empty) BYDAY specification.")

        l.append(weekdays[self._weekday_map[w]](n))
    rrkwargs["byweekday"] = l
def get_data_for_root(project_root):
    """This is the only API function of the projectfile module. It parses
    the Projectfiles from the given path and assembles the flattened command
    data structure.

    Returned data:
    {
        'min-version': (1, 0, 0),
        'description': 'Optional main description.',
        'commands': {
            'command_1': {
                'description': 'Optional command level description for command_1.',
                'script': [
                    'flattened',
                    'out command',
                    'list for',
                    'command_1',
                    ...
                ]
            }
            ...
        }
    }

    Raises:
        ProjectfileError with descriptive error message in the format of:
        {
            'path': 'Optional path for the corresponding Projectfile.',
            'line': 'Optional line number for the error in the Projectfile.',
            'error': 'Mandatory descriptive error message.'
        }

    :param project_root: path to the project root directory
    :return: {dict} parsed and flattened commands with descriptions
    """
    raw_nodes = file_handler.get_node_list(project_root)
    command_tree = command_processor.generate_command_tree(raw_nodes)
    command_processor.flatten_commands(command_tree)
    command_processor.process_variables(command_tree)
    return command_tree
def run_get_percentage():
    """
    Calculate what percentage a given number is of another,
    e.g. 50 is 50% of 100.
    """
    description = run_get_percentage.__doc__
    parser = argparse.ArgumentParser(
        prog='get_percentage',
        description=description,
        epilog="Example use: get_percentage 25 100",
    )
    parser.add_argument(
        'a',
        help='Integer or floating point number that is a percent of another number'
    )
    parser.add_argument(
        'b',
        help='Integer or floating point number of which the first number is a percent',
    )
    args = parser.parse_args()
    print(sm.get_percentage(float(args.a), float(args.b)))
def run_excel_to_html():
    """
    Run the excel_to_html function from the command-line.

    Args:
        -p path to file
        -s name of the sheet to convert
        -css classes to apply
        -m attempt to combine merged cells
        -c caption for accessibility
        -d summary and details for accessibility, separated by '|'
        -r the table has row headers

    Example use:
        excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true
    """
    # Capture commandline arguments. prog='' argument must
    # match the command name in setup.py entry_points
    parser = argparse.ArgumentParser(prog='excel_to_html')
    parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.')
    parser.add_argument(
        '-s', nargs='?',
        help='The name of a sheet in our excel file. Defaults to "Sheet1".',
    )
    parser.add_argument(
        '-css', nargs='?', help='Space separated css classes to append to the table.'
    )
    parser.add_argument(
        '-m', action='store_true', help='Merge, attempt to combine merged cells.'
    )
    parser.add_argument(
        '-c', nargs='?', help='Caption for creating an accessible table.'
    )
    parser.add_argument(
        '-d', nargs='?',
        help='Two strings separated by a | character. The first string '
             'is for the html "summary" attribute and the second string is '
             'for the html "details" attribute. Both values must be provided '
             'and nothing more.',
    )
    parser.add_argument(
        '-r', action='store_true', help='Row headers. Does the table have row headers?'
    )
    args = parser.parse_args()
    inputs = {
        'p': args.p,
        's': args.s,
        'css': args.css,
        'm': args.m,
        'c': args.c,
        'd': args.d,
        'r': args.r,
    }

    p = inputs['p']
    s = inputs['s'] if inputs['s'] else 'Sheet1'
    css = inputs['css'] if inputs['css'] else ''
    m = inputs['m'] if inputs['m'] else False
    c = inputs['c'] if inputs['c'] else ''
    d = inputs['d'].split('|') if inputs['d'] else []
    r = inputs['r'] if inputs['r'] else False

    html = fp.excel_to_html(
        p, sheetname=s, css_classes=css, caption=c, details=d,
        row_headers=r, merge=m
    )
    print(html)
def get_built_in(self, language, level, data):
    """
    Gets the return string for a language that's supported by python.
    Used in cases when python provides support for the conversion.

    Args:
        language: string, the language to return for.
        level: integer, the indentation level.
        data: python data structure being converted (list of tuples)

    Returns:
        None, updates self.data_structure
    """
    # Language is python
    pp = pprint.PrettyPrinter(indent=level)
    lookup = {
        'python': pp.pformat(data),
        'json': str(json.dumps(data, sort_keys=True, indent=level,
                               separators=(',', ': '))),
    }
    self.data_structure = lookup[language]
def get_inner_template(self, language, template_type, indentation, key, val):
    """
    Gets the requested template for the given language.

    Args:
        language: string, the language of the template to look for.
        template_type: string, 'iterable' or 'singular'. An iterable
            template is needed when the value is an iterable and needs more
            unpacking, e.g. list, tuple. A singular template is needed when
            unpacking is complete and the value is singular, e.g. string,
            int, float.
        indentation: int, the indentation level.
        key: multiple types, the array key.
        val: multiple types, the array values

    Returns:
        string, template formatting for arrays by language.
    """
    # Language specific inner templates
    inner_templates = {
        'php': {
            'iterable': '%s%s => array \n%s( \n%s%s),\n'
                        % (indentation, key, indentation, val, indentation),
            'singular': '%s%s => %s, \n' % (indentation, key, val)
        },
        'javascript': {
            'iterable': '%s%s : {\n%s\n%s},\n'
                        % (indentation, key, val, indentation),
            'singular': '%s%s: %s,\n' % (indentation, key, val)
        },
        'ocaml': {
            'iterable': '%s[| (%s, (\n%s\n%s))|] ;;\n'
                        % (indentation, key, val, indentation),
            'singular': '%s(%s, %s);\n' % (indentation, key, val)
        },
    }
    return inner_templates[language][template_type]
def translate_array(self, string, language, level=3, retdata=False):
    """Unserializes a serialized php array and prints it to the console as a
    data structure in the specified language. Used to translate or convert a
    php array into a data structure in another language. Currently supports
    PHP, Python, Javascript, and JSON.

    Args:
        string: a string of serialized php
        language: a string representing the desired output format for the array.
        level: integer, indentation level in spaces. Defaults to 3.
        retdata: boolean, the method will return the string in addition to
            printing it if set to True. Defaults to False.

    Returns:
        None but prints a string to the console if retdata is False,
        otherwise returns a string.
    """
    language = language.lower()
    assert self.is_built_in(language) or language in self.outer_templates, \
        "Sorry, " + language + " is not a supported language."

    # Serialized data converted to a python data structure (list of tuples)
    data = phpserialize.loads(bytes(string, 'utf-8'),
                              array_hook=list, decode_strings=True)

    # If language conversion is supported by python avoid recursion entirely
    # and use a built in library
    if self.is_built_in(language):
        self.get_built_in(language, level, data)
        print(self)
        return self.data_structure if retdata else None

    # The language is not supported. Use recursion to build a data structure.
    def loop_print(iterable, level=3):
        """
        Loops over a python representation of a php array (list of tuples)
        and constructs a representation in another language. Translates a
        php array into another structure.

        Args:
            iterable: list or tuple to unpack.
            level: integer, number of spaces to use for indentation
        """
        retval = ''
        indentation = ' ' * level

        # Base case - variable is not an iterable
        if not self.is_iterable(iterable) or isinstance(iterable, str):
            non_iterable = str(iterable)
            return str(non_iterable)

        # Recursive case
        for item in iterable:
            # If item is a tuple it should be a key, value pair
            if isinstance(item, tuple) and len(item) == 2:
                # Get the key value pair
                key = item[0]
                val = loop_print(item[1], level=level + 3)

                # Translate special values
                val = self.translate_val(language, val) \
                    if language in self.lang_specific_values \
                    and val in self.lang_specific_values[language] else val

                # Convert keys to their properly formatted strings
                # Integers are not quoted as array keys
                key = str(key) if isinstance(key, int) else '\'' + str(key) + '\''

                # The first item is a key and the second item is an iterable, boolean
                needs_unpacking = hasattr(item[0], '__iter__') == False \
                    and hasattr(item[1], '__iter__') == True

                # The second item is an iterable
                if needs_unpacking:
                    retval += self.get_inner_template(language, 'iterable',
                                                      indentation, key, val)
                # The second item is not an iterable
                else:
                    # Convert values to their properly formatted strings
                    # Integers and booleans are not quoted as array values
                    val = str(val) if val.isdigit() \
                        or val in self.lang_specific_values[language].values() \
                        else '\'' + str(val) + '\''
                    retval += self.get_inner_template(language, 'singular',
                                                      indentation, key, val)

        return retval

    # Execute the recursive call in language specific wrapper template
    self.data_structure = self.outer_templates[language] % (loop_print(data))
    print(self)
    return self.data_structure if retdata else None
def pc(self):
    """ e.g. 1000 x 2  U[:, :npc] * d[:npc], to plot etc. """
    n = self.npc
    return self.U[:, :n] * self.d[:n]
def get():
    """
    Only API function for the config module.

    :return: {dict} loaded validated configuration.
    """
    config = {}
    try:
        config = _load_config()
    except IOError:
        try:
            _create_default_config()
            config = _load_config()
        except IOError as e:
            raise ConfigError(_FILE_CREATION_ERROR.format(e.args[0]))
    except SyntaxError as e:
        raise ConfigError(_JSON_SYNTAX_ERROR.format(e.args[0]))
    except Exception:
        raise ConfigError(_JSON_SYNTAX_ERROR.format('Yaml syntax error..'))

    try:
        _validate(config)
    except KeyError as e:
        raise ConfigError(_MANDATORY_KEY_ERROR.format(e.args[0]))
    except SyntaxError as e:
        raise ConfigError(_INVALID_KEY_ERROR.format(e.args[0]))
    except ValueError as e:
        raise ConfigError(_INVALID_VALUE_ERROR.format(e.args[0]))

    config['projects-path'] = os.path.expanduser(config['projects-path'])
    _complete_config(config)
    return config
def _validate(config):
    """
    Config validation

    Raises:
        KeyError on missing mandatory key
        SyntaxError on invalid key
        ValueError on invalid value for key

    :param config: {dict} config to validate
    :return: None
    """
    for mandatory_key in _mandatory_keys:
        if mandatory_key not in config:
            raise KeyError(mandatory_key)
    for key in config.keys():
        if key not in _mandatory_keys and key not in _optional_keys:
            raise SyntaxError(key)
        if not isinstance(config[key], _default_config[key].__class__):
            raise ValueError(key)
def _create_default_config():
    """
    Writes the full default configuration to the appropriate place.

    Raises:
        IOError - on unsuccessful file write

    :return: None
    """
    config_path = _get_config_path()
    with open(config_path, 'w+') as f:
        yaml.dump(_default_config, f, default_flow_style=False)
def reusable(func):
    """Create a reusable class from a generator function

    Parameters
    ----------
    func: GeneratorCallable[T_yield, T_send, T_return]
        the function to wrap

    Note
    ----
    * the callable must have an inspectable signature
    * If bound to a class, the new reusable generator is callable as a method.
      To opt out of this, add a :func:`staticmethod` decorator above
      this decorator.
    """
    sig = signature(func)
    origin = func
    while hasattr(origin, '__wrapped__'):
        origin = origin.__wrapped__
    return type(
        origin.__name__,
        (ReusableGenerator, ),
        dict([
            ('__doc__', origin.__doc__),
            ('__module__', origin.__module__),
            ('__signature__', sig),
            ('__wrapped__', staticmethod(func)),
        ] + [
            (name, property(compose(itemgetter(name),
                                    attrgetter('_bound_args.arguments'))))
            for name in sig.parameters
        ] + ([
            ('__qualname__', origin.__qualname__),
        ] if sys.version_info > (3, ) else [])))
def sendreturn(gen, value):
    """Send an item into a generator expecting a final return value

    Parameters
    ----------
    gen: ~typing.Generator[T_yield, T_send, T_return]
        the generator to send the value to
    value: T_send
        the value to send

    Raises
    ------
    RuntimeError
        if the generator did not return as expected

    Returns
    -------
    T_return
        the generator's return value
    """
    try:
        gen.send(value)
    except StopIteration as e:
        return stopiter_value(e)
    else:
        raise RuntimeError('generator did not return as expected')
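A sketch of sendreturn with a generator that returns a value, assuming `stopiter_value` (from the same module) extracts `StopIteration.value`:

def confirm():
    answer = yield 'proceed?'  # receives the value passed to send()
    return answer.upper()      # carried back to the caller via StopIteration

gen = confirm()
next(gen)               # advance to the first yield
sendreturn(gen, 'yes')  # 'YES'; a generator that kept yielding would raise RuntimeError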
def imap_send(func, gen):
    """Apply a function to all ``send`` values of a generator

    Parameters
    ----------
    func: ~typing.Callable[[T_send], T_mapped]
        the function to apply
    gen: Generable[T_yield, T_mapped, T_return]
        the generator iterable.

    Returns
    -------
    ~typing.Generator[T_yield, T_send, T_return]
        the mapped generator
    """
    gen = iter(gen)
    assert _is_just_started(gen)
    yielder = yield_from(gen)
    for item in yielder:
        with yielder:
            yielder.send(func((yield item)))
    return_(yielder.result)
def irelay(gen, thru):
    """Create a new generator by relaying yield/send interactions
    through another generator

    Parameters
    ----------
    gen: Generable[T_yield, T_send, T_return]
        the original generator
    thru: ~typing.Callable[[T_yield], ~typing.Generator]
        the generator callable through which each interaction is relayed

    Returns
    -------
    ~typing.Generator
        the relayed generator
    """
    gen = iter(gen)
    assert _is_just_started(gen)
    yielder = yield_from(gen)
    for item in yielder:
        with yielder:
            subgen = thru(item)
            subyielder = yield_from(subgen)
            for subitem in subyielder:
                with subyielder:
                    subyielder.send((yield subitem))

            yielder.send(subyielder.result)

    return_(yielder.result)
def _data_integrity_check(data):
    """Checks if all command dependencies refer to an existing command.
    If not, a ProjectfileError will be raised with the problematic dependency
    and its command.

    :param data: parsed raw data set.
    :return: None
    """
    deps = []
    for command in data['commands']:
        if 'dependencies' in data['commands'][command]:
            for d in data['commands'][command]['dependencies']:
                deps.append({
                    'd': d,
                    'c': command
                })
    for d in deps:
        if d['d'] not in data['commands']:
            raise error.ProjectfileError({
                'error': error.PROJECTFILE_INVALID_DEPENDENCY.format(d['d'], d['c'])
            })
def _link_rels(obj, fields=None, save=False, overwrite=False):
    """Populate any database related fields (ForeignKeyField, OneToOneField)
    that have `_get`ters to populate them with"""
    if not fields:
        meta = obj._meta
        fields = [f.name for f in meta.fields
                  if hasattr(f, 'do_related_class') and not f.primary_key
                  and hasattr(meta, '_get_' + f.name) and hasattr(meta, '_' + f.name)]
    for field in fields:
        # skip fields if they contain non-null data and `overwrite` option wasn't set
        if not overwrite and not isinstance(getattr(obj, field, None), NoneType):
            # print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None))
            continue
        if hasattr(obj, field):
            setattr(obj, field, getattr(obj, '_' + field, None))
    if save:
        obj.save()
    return obj
def _update(obj, fields=None, save=False, overwrite=False):
    """Update/populate any database fields that have `_get`ters to populate
    them with, regardless of whether they are data fields or related fields"""
    if not fields:
        meta = obj._meta
        fields = [f.name for f in meta.fields
                  if not f.primary_key
                  and hasattr(meta, '_get_' + f.name) and hasattr(meta, '_' + f.name)]
    # print fields
    fields_updated = []
    for field in fields:
        # skip fields if they contain non-null data and `overwrite` option wasn't set
        if not overwrite and not getattr(obj, field, None) == None:
            # print 'skipping %s which already has a value of %s' % (field, getattr(obj, field, None))
            continue
        # print field
        if hasattr(obj, field):
            # print field, getattr(obj, '_' + field, None)
            setattr(obj, field, getattr(obj, '_' + field, None))
            if getattr(obj, field, None) != None:
                fields_updated += [field]
    if save:
        obj.save()
    return fields_updated
def bug_info(exc_type, exc_value, exc_trace):
    """Prints the traceback and invokes the ipython debugger on any exception

    Only invokes ipydb if you are outside ipython or python interactive session.
    So scripts must be called from OS shell in order for exceptions to ipy-shell-out.

    Dependencies:
        Needs `pip install ipdb`

    Arguments:
        exc_type (type): The exception type/class (e.g. RuntimeError)
        exc_value (Exception): The exception instance (e.g. the error message passed to the Exception constructor)
        exc_trace (Traceback): The traceback instance

    References:
        http://stackoverflow.com/a/242531/623735

    Example Usage:
        $ python -c 'from pug import debug;x=[];x[0]'
        Traceback (most recent call last):
          File "<string>", line 1, in <module>
        IndexError: list index out of range
        > <string>(1)<module>()

        ipdb> x
        []
        ipdb> locals()
        {'__builtins__': <module '__builtin__' (built-in)>, '__package__': None, 'x': [], 'debug': <module 'pug.debug' from 'pug/debug.py'>, '__name__': '__main__', '__doc__': None}
        ipdb>
    """
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # We are in interactive mode or don't have a tty-like device, so we call the default hook
        sys.__excepthook__(exc_type, exc_value, exc_trace)
    else:
        # Need to import non-built-ins here, so if dependencies haven't been installed, both tracebacks will print
        # (e.g. the ImportError and the Exception that got you here)
        import ipdb
        # We are NOT in interactive mode, print the exception
        traceback.print_exception(exc_type, exc_value, exc_trace)
        print()  # bare `print` was a Python 2 statement; call it under Python 3
        # Start the debugger in post-mortem mode.
        ipdb.post_mortem(exc_trace)
def copy_web_file_to_local(file_path, target_path):
    """Copies a file from its location on the web to a designated
    place on the local machine.

    Args:
        file_path: Complete url of the file to copy, string (e.g. http://fool.com/input.css).
        target_path: Path and name of file on the local machine, string. (e.g. /directory/output.css)

    Returns:
        None.
    """
    response = urllib.request.urlopen(file_path)
    # urlopen returns bytes, so the target must be opened in binary mode.
    f = open(target_path, 'wb')
    f.write(response.read())
    f.close()
def get_line_count(fname):
    """Counts the number of lines in a file.

    Args:
        fname: string, name of the file.

    Returns:
        integer, the number of lines in the file.
    """
    i = 0
    with open(fname) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
def indent_css(f, output):
    """Indents css that has not been indented and saves it to a new file.
    A new file is created if the output destination does not already exist.

    Args:
        f: string, path to file.
        output: string, path/name of the output file (e.g. /directory/output.css).

    Returns:
        None.
    """
    line_count = get_line_count(f)
    f = open(f, 'r+')
    output = open(output, 'r+')
    for line in range(line_count):
        string = f.readline().rstrip()
        if len(string) > 0:
            if string[-1] == ";":
                output.write("    " + string + "\n")
            else:
                output.write(string + "\n")
    output.close()
    f.close()
def add_newlines(f, output, char):
    """Adds line breaks after every occurrence of a given character in a file.

    Args:
        f: string, path to input file.
        output: string, path to output file.
        char: string, the character after which a line break is inserted.

    Returns:
        None.
    """
    line_count = get_line_count(f)
    f = open(f, 'r+')
    output = open(output, 'r+')
    for line in range(line_count):
        string = f.readline()
        # Escape the character so regex metacharacters are matched literally.
        string = re.sub(re.escape(char), char + '\n', string)
        output.write(string)
    output.close()
    f.close()
def add_whitespace_before(char, input_file, output_file):
    """Adds a space before a character if there isn't one already.

    Args:
        char: string, character that needs a space before it.
        input_file: string, path to file to parse.
        output_file: string, path to destination file.

    Returns:
        None.
    """
    line_count = get_line_count(input_file)
    input_file = open(input_file, 'r')
    output_file = open(output_file, 'r+')
    for line in range(line_count):
        string = input_file.readline()
        # If there's not already a space before the character, add one
        if re.search(r'[a-zA-Z0-9]' + char, string) is not None:
            string = re.sub(char, ' ' + char, string)
        output_file.write(string)
    input_file.close()
    output_file.close()
def reformat_css(input_file, output_file):
    """Reformats poorly written css. This function does not validate or fix
    errors in the code. It only gives code the proper indentation.

    Args:
        input_file: string, path to the input file.
        output_file: string, path to where the reformatted css should be
            saved. If the target file doesn't exist, a new file is created.

    Returns:
        None.
    """
    # Number of lines in the file.
    line_count = get_line_count(input_file)

    # Open source and target files.
    f = open(input_file, 'r+')
    output = open(output_file, 'w')

    # Loop over every line in the file.
    for line in range(line_count):
        # Eliminate whitespace at the beginning and end of lines.
        string = f.readline().strip()
        # New lines after {
        string = re.sub(r'\{', '{\n', string)
        # New lines after ;
        string = re.sub('; ', ';', string)
        string = re.sub(';', ';\n', string)
        # Eliminate whitespace before comments
        string = re.sub(r'\} /\*', '}/*', string)
        # New lines after }
        string = re.sub(r'\}', '}\n', string)
        # New lines at the end of comments
        string = re.sub(r'\*/', '*/\n', string)
        # Write to the output file.
        output.write(string)

    # Close the files.
    output.close()
    f.close()

    # Indent the css.
    indent_css(output_file, output_file)

    # Make sure there's a space before every {
    add_whitespace_before("{", output_file, output_file)
def is_int(string):
    """
    Checks if a string is an integer. If the string value is an integer
    return True, otherwise return False.

    Args:
        string: a string to test.

    Returns:
        boolean
    """
    try:
        a = float(string)
        b = int(a)
    except ValueError:
        return False
    else:
        return a == b
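Note that is_int also accepts float-formatted strings, as long as the value is integral:

assert is_int('42')
assert is_int('42.0')       # float-formatted but integral
assert not is_int('42.5')
assert not is_int('forty two')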
def total_hours(input_files):
    """
    Totals the hours for a given project. Takes a list of input files for
    which to total the hours. Each input file represents a project. There
    are only multiple files for the same project when the duration was more
    than a year.

    A typical entry in an input file might look like this:

    8/24/14
    9:30-12:00 wrote foobar code for x, wrote a unit test for foobar code, tested.
    2.5 hours

    Args:
        input_files: a list of files to parse.

    Returns:
        float: the total number of hours spent on the project.
    """
    hours = 0
    # Look for singular and plural forms of the word
    # and allow typos.
    allow = set(['hours', 'hour', 'huors', 'huor'])
    for input_file in input_files:
        doc = open(input_file, 'r')
        for line in doc:
            line = line.rstrip()
            data = line.split(' ')
            if (len(data) == 2) and (is_numeric(data[0])) and (data[1].lower() in allow):
                hours += float(data[0])
        doc.close()
    return hours
def clean_strings(iterable):
    """
    Take a list of strings and clear whitespace on each one. If a value in
    the list is not a string pass it through untouched.

    Args:
        iterable: mixed list

    Returns:
        mixed list
    """
    retval = []
    for val in iterable:
        try:
            retval.append(val.strip())
        except AttributeError:
            retval.append(val)
    return retval
def excel_to_html(path, sheetname='Sheet1', css_classes='',
                  caption='', details=[], row_headers=False, merge=False):
    """
    Convert an excel spreadsheet to an html table.

    This function supports the conversion of merged cells. It can be used in
    code or run from the command-line. If passed the correct arguments it
    can generate fully accessible html.

    Args:
        path: string, path to the spreadsheet.
        sheetname: string, name of the sheet to convert.
        css_classes: string, space separated classnames to append to the table.
        caption: string, a short heading-like description of the table.
        details: list of strings, where the first item in the list is a
            string for the html summary element and the second item is a
            string for the details element. The summary should be very
            short, e.g. "Help", where as the details element should be a
            long description regarding the purpose or how to navigate the
            table.
        row_headers: boolean, defaults to False. Does the table have row
            headers? If set to True, the first element in each row will be
            a <th scope="row"> element instead of a <td> element.
        merge: boolean, whether or not to combine cells that were merged
            in the spreadsheet.

    Returns:
        string, html table
    """
    def get_data_on_merged_cells():
        """
        Build a datastructure with data on merged cells.
        """
        # Use this to build support for merged columns and rows????
        merged_cells = xls.book.sheet_by_name(sheetname).merged_cells
        ds = {}
        for crange in merged_cells:
            rlo, rhi, clo, chi = crange
            for rowx in range(rlo, rhi):
                for colx in range(clo, chi):
                    # Cell (rlo, clo) (the top left one) will carry the data and
                    # formatting info. The remainder will be recorded as blank cells,
                    # but a renderer will apply the formatting info for the top left
                    # cell (e.g. border, pattern) to all cells in the range.
                    parent_cell = (rlo, clo)
                    child_cell = (rowx, colx)
                    if parent_cell not in ds:
                        # Return data structure is a dictionary with numeric tuples
                        # as keys. Each tuple holds the x, y coordinates of the cell.
                        # The dictionary holds two values:
                        # 1. A list with two numbers which represent the x/y count
                        #    starting at 1 for the current cell.
                        # 2. A set describing which direction the cells are merged.
                        ds[parent_cell] = [[1, 1], set([])]
                    else:
                        if parent_cell != child_cell and child_cell[0] == parent_cell[0]:
                            ds[parent_cell][0][0] += 1
                            ds[parent_cell][1].add('right')
                        elif parent_cell != child_cell and child_cell[0] > parent_cell[0]:
                            if child_cell[1] == parent_cell[1]:
                                ds[parent_cell][0][1] += 1
                                ds[parent_cell][1].add('down')
                        else:
                            raise RuntimeError('Something went wrong')
        return ds

    def mark_cells_going_right(cell, curr_cell, merged_cells):
        """
        Add a "colspan" attribute and mark empty table columns for deletion
        if they are part of a merged cell going right.

        Args:
            cell: BeautifulSoup element tag object representation of the current cell.
            curr_cell: tuple, numeric representation of the current cell.
            merged_cells: dictionary of data about merged cells.
        """
        # if curr_cell in merged_cells and merged_cells[curr_cell][1] == set(['right']):
        try:
            xcount = merged_cells[curr_cell][0][0]
            if xcount > 1:  # No colspans on 1
                cell['colspan'] = xcount
                col_count = xcount - 1
                while col_count > 0:
                    cell = cell.find_next_sibling()
                    cell['class'] = 'delete'
                    col_count -= 1
        except:
            pass

    def mark_cells_going_down(cell, curr_cell, merged_cells):
        """
        Add a "rowspan" attribute and mark empty table columns for deletion
        if they are part of a merged cell going down.

        Args:
            cell: BeautifulSoup element tag object representation of the current cell.
            curr_cell: tuple, numeric representation of the current cell.
            merged_cells: dictionary of data about merged cells.
        """
        if curr_cell in merged_cells and merged_cells[curr_cell][1] == set(['down']):
            ycount = merged_cells[curr_cell][0][1]
            cell['rowspan'] = ycount
            row_count = ycount
            for child_row in cell.parent.find_next_siblings(limit=row_count - 1):
                i = 0
                for child in child_row.find_all('td'):
                    if i == curr_cell[1]:
                        child['class'] = 'delete'
                    i += 1

    def mark_cells_going_down_and_right(cell, curr_cell, merged_cells):
        """
        Add "rowspan" and "colspan" attributes and mark empty columns for
        deletion if they are part of a merged cell going down and to the
        right diagonally.

        Args:
            cell: BeautifulSoup element tag object representation of the current cell.
            curr_cell: tuple, numeric representation of the current cell.
            merged_cells: dictionary of data about merged cells.
        """
        if curr_cell in merged_cells and \
                ('down' in merged_cells[curr_cell][1] and
                 'right' in merged_cells[curr_cell][1]):
            xcount = merged_cells[curr_cell][0][0]
            ycount = merged_cells[curr_cell][0][1]
            row_count = ycount
            col_count = xcount
            mark_cells_going_right(cell, curr_cell, merged_cells)
            flag = False
            for child_row in [cell.parent] + cell.parent.find_all_next('tr', limit=row_count - 1):
                i = 0
                for child in child_row.find_all('td'):
                    if i == curr_cell[1]:
                        mark_cells_going_right(child, curr_cell, merged_cells)
                        if not flag:
                            child['colspan'] = col_count
                            child['rowspan'] = row_count
                            flag = True
                        else:
                            child['class'] = 'delete'
                    i += 1

    def is_empty_th(string):
        """
        Detects if a table cell is left empty (is a merged cell).

        Args:
            string: string
        """
        if string[:8] == 'Unnamed:':
            data = string.split(' ')
            if is_numeric(data[1]):
                return True
        return False

    def mark_header_cells(html):
        """
        Mark header cells for deletion if they need to be merged.
        Also, add colspan and scope attributes.

        Args:
            html: string
        """
        th = html.find_all('th')
        for header in th:
            txt = header.string
            if not is_empty_th(txt):
                header['scope'] = 'col'
                count = 1
                for sibling in header.find_next_siblings():
                    if is_empty_th(sibling.string):
                        count += 1
                        sibling['class'] = 'delete'
                    else:
                        break
                if count > 1:
                    header['colspan'] = count
                    header['scope'] = 'colgroup'

    def create_caption(html, caption):
        """
        Create a caption element for an accessible table and append it
        to the right part of the tree.

        Args:
            html: string
            caption: string
        """
        ctag = html.new_tag('caption')
        ctag.insert(0, caption)
        html.table.insert(0, ctag)

    def create_summary_and_details(html, details):
        """
        Create a summary and details element for an accessible table and
        insert it into the right part of the tree.

        Args:
            html: string
            details: string
        """
        if len(details) != 2:
            msg = 'The "details" argument should be a list with two items. ' \
                  + 'The first item should be a string for the html summary ' \
                  + 'and the second should be a long description for the details ' \
                  + 'element. Both of those must be included and nothing else.'
            raise RuntimeError(msg)
        summary = details[0]
        details = details[1]
        if not caption:
            create_caption(html, caption)
        dtag = html.new_tag('details')
        stag = html.new_tag('summary')
        ptag = html.new_tag('p')
        stag.insert(0, summary)
        ptag.insert(0, details)
        dtag.insert(0, stag)
        dtag.append(ptag)
        html.table.caption.insert(1, dtag)

    def format_properly(html):
        """
        Fix bad formatting from beautifulsoup.

        Args:
            html: string of html representing a table.
        """
        return html.replace('\n ', '').replace('\n </td>', '</td>') \
            .replace('\n </th>', '</th>') \
            .replace('\n </summary>', '</summary>') \
            .replace('\n </p>', '</p>')

    def add_row_headers(html):
        """
        Convert <td>s to <th>s if row_headers is set to True.

        Args:
            html: string, table.
        """
        for row in html.tbody.find_all('tr'):
            spans_rows = 'rowspan' in row.td.attrs
            spans_columns = 'colspan' in row.td.attrs
            new_tag = html.new_tag('th')
            new_tag['scope'] = 'row'
            new_tag.string = row.td.string
            if spans_rows:
                new_tag['rowspan'] = row.td.attrs['rowspan']
                new_tag['scope'] = 'rowgroup'
            if spans_columns:
                new_tag['colspan'] = row.td.attrs['colspan']
            row.td.replace_with(new_tag)

    def beautify(html):
        """
        Beautify the html from pandas.

        Args:
            html: table markup from pandas.
        """
        table = html.find('table')
        first_tr = table.find('tr')
        del table['border']
        del first_tr['style']
        return format_properly(html.prettify(formatter='minimal'))

    def parse_html(html, caption, details):
        """
        Use BeautifulSoup to correct the html for merged columns and rows.
        What could possibly go wrong?

        Args:
            html: string
            caption: string
            details: list of strings, length of two

        Returns:
            string, modified html
        """
        new_html = BeautifulSoup(html, 'html.parser')
        if merge:
            row_num = 1
            # e.g. {(4, 3): [1, 'right'], (2, 1): [1, 'down']}
            merged_cells = get_data_on_merged_cells()
            rows = new_html.find('table').find('tbody').find_all('tr')
            for row in rows:
                # Why are we off by 1? Maybe because we set index to False in to_html?
                cell_num = 0
                cells = row.find_all('td')
                for cell in cells:
                    # cell['class'] = str(row_num) + ' ' + str(cell_num)  # DEBUG
                    curr_cell = (row_num, cell_num)
                    # Mark merged cells for deletion
                    mark_cells_going_right(cell, curr_cell, merged_cells)
                    mark_cells_going_down(cell, curr_cell, merged_cells)
                    mark_cells_going_down_and_right(cell, curr_cell, merged_cells)
                    cell_num += 1
                row_num += 1
            # Mark header cells for deletion
            mark_header_cells(new_html)
            # Delete all the renegade cells at once
            destroy = new_html.find_all(attrs={'class': 'delete'})
            for item in destroy:
                item.extract()
        # Convert <td>s to <th>s if needed.
        if row_headers:
            add_row_headers(new_html)
        # Add caption if applicable
        if caption:
            create_caption(new_html, caption)
        # Add summary and details if possible
        if details:
            create_summary_and_details(new_html, details)
        return beautify(new_html)

    # Set options for pandas and load the excel file
    pd.options.display.max_colwidth = -1
    xls = pd.ExcelFile(path)
    # Parse the sheet you're interested in, results in a Dataframe
    df = xls.parse(sheetname)
    # Convert the dataframe to html
    panda_html = df.to_html(classes=css_classes, index=False, na_rep='')
    # Parse the panda html to merge cells and beautify the markup
    return parse_html(panda_html, caption, details)
def future_value(present_value, annual_rate, periods_per_year, years):
    """
    Calculates the future value of money invested at an annual interest
    rate, x times per year, for a given number of years.

    Args:
        present_value: int or float, the current value of the money (principal).
        annual_rate: float 0 to 1 (e.g., .5 = 50%), the interest rate paid out.
        periods_per_year: int, the number of times money is invested per year.
        years: int, the number of years invested.

    Returns:
        Float, the future value of the money invested with compound interest.
    """
    # The nominal interest rate per period (rate) is how much interest you earn during a
    # particular length of time, before accounting for compounding. This is typically
    # expressed as a percentage.
    rate_per_period = annual_rate / float(periods_per_year)

    # How many periods in the future the calculation is for.
    periods = periods_per_year * years

    return present_value * (1 + rate_per_period) ** periods
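A quick sanity check of the formula: $1,000 invested at 5% annual interest, compounded monthly, for 10 years:

fv = future_value(1000, 0.05, 12, 10)
# rate_per_period = 0.05 / 12, periods = 12 * 10 = 120
# 1000 * (1 + 0.05 / 12) ** 120
print(round(fv, 2))  # 1647.01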
def triangle_area(point1, point2, point3):
    """
    Uses Heron's formula to find the area of a triangle
    based on the coordinates of three points.

    Args:
        point1: list or tuple, the x y coordinate of point one.
        point2: list or tuple, the x y coordinate of point two.
        point3: list or tuple, the x y coordinate of point three.

    Returns:
        The area of a triangle as a floating point number.

    Requires:
        The math module, point_distance().
    """
    # Lengths of the three sides of the triangle
    a = point_distance(point1, point2)
    b = point_distance(point1, point3)
    c = point_distance(point2, point3)

    # Where s is the semiperimeter
    s = (a + b + c) / 2.0

    # Return the area of the triangle (using Heron's formula)
    return math.sqrt(s * (s - a) * (s - b) * (s - c))
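Heron's formula on a 3-4-5 right triangle; this relies on the point_distance() helper the docstring mentions:

area = triangle_area((0, 0), (3, 0), (0, 4))
# sides are 3, 4 and 5, so s = 6 and sqrt(6 * 3 * 2 * 1) = 6.0
print(area)  # 6.0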
def regular_polygon_area(number_of_sides, length_of_sides):
    """
    Calculates the area of a regular polygon (with sides of equal length).

    Args:
        number_of_sides: Integer, the number of sides of the polygon
        length_of_sides: Integer or floating point number, the length of the sides

    Returns:
        The area of a regular polygon as an integer or floating point number

    Requires:
        The math module
    """
    return (0.25 * number_of_sides * length_of_sides ** 2) / math.tan(
        math.pi / number_of_sides
    )
def median(data):
    """
    Calculates the median of a list of integers or floating point numbers.

    Args:
        data: A list of integers or floating point numbers

    Returns:
        Sorts the list numerically and returns the middle number if the list
        has an odd number of items. If the list contains an even number of
        items the mean of the two middle numbers is returned.
    """
    ordered = sorted(data)
    length = len(ordered)
    if length % 2 == 0:
        return (
            ordered[math.floor(length / 2) - 1] + ordered[math.floor(length / 2)]
        ) / 2.0
    elif length % 2 != 0:
        return ordered[math.floor(length / 2)]
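Even- and odd-length behavior in one place:

assert median([3, 1, 2]) == 2        # odd count: the middle value
assert median([4, 1, 3, 2]) == 2.5   # even count: mean of the two middle values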
def average(numbers, numtype='float'):
    """
    Calculates the average or mean of a list of numbers

    Args:
        numbers: a list of integers or floating point numbers.
        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        The average (mean) of the numbers as a floating point number
        or a Decimal object.

    Requires:
        The math module
    """
    # Compare against the numtype argument, not the `type` built-in.
    if numtype == 'decimal':
        return Decimal(sum(numbers)) / len(numbers)
    else:
        return float(sum(numbers)) / len(numbers)
def variance(numbers, type='population'):
    """
    Calculates the population or sample variance of a list of numbers.
    A large number means the results are all over the place, while a
    small number means the results are comparatively close to the average.

    Args:
        numbers: a list of integers or floating point numbers to compare.
        type: string, 'population' or 'sample', the kind of variance to be computed.

    Returns:
        The computed population or sample variance.
        Defaults to population variance.

    Requires:
        The math module, average()
    """
    mean = average(numbers)
    variance = 0
    for number in numbers:
        variance += (mean - number) ** 2

    if type == 'population':
        return variance / len(numbers)
    else:
        return variance / (len(numbers) - 1)
def get_percentage(a, b, i=False, r=False):
    """
    Finds the percentage of one number over another.

    Args:
        a: The number that is a percent, int or float.
        b: The base number that a is a percent of, int or float.
        i: Optional boolean integer. True if the user wants the result
            returned as a whole number. Assumes False.
        r: Optional boolean round. True if the user wants the result
            rounded. Rounds to the second decimal point on floating point
            numbers. Assumes False.

    Returns:
        The argument a as a percentage of b. Throws a warning if integer is
        set to True and round is set to False.
    """
    # Round to the second decimal
    if i is False and r is True:
        percentage = round(100.0 * (float(a) / b), 2)

    # Round to the nearest whole number
    elif (i is True and r is True) or (i is True and r is False):
        percentage = int(round(100 * (float(a) / b)))

        # A rounded number and an integer were requested
        if r is False:
            warnings.warn(
                "If integer is set to True and Round is set to False, you will still get a rounded number if you pass floating point numbers as arguments."
            )

    # A precise unrounded decimal
    else:
        percentage = 100.0 * (float(a) / b)

    return percentage
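How the i and r flags interact:

print(get_percentage(50, 100))       # 50.0 -- exact float
print(get_percentage(1, 3, r=True))  # 33.33 -- rounded to two decimal places
print(get_percentage(1, 3, i=True))  # 33 -- whole number; also emits the rounding warning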
def take_home_pay(gross_pay, employer_match, taxes_and_fees, numtype='float'):
    """
    Calculate net take-home pay including employer retirement savings match
    using the formula laid out by Mr. Money Mustache:
    http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/

    Args:
        gross_pay: float or int, gross monthly pay.
        employer_match: float or int, the 401(k) match from your employer.
        taxes_and_fees: list, taxes and fees that are deducted from your paycheck.
        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        your monthly take-home pay.
    """
    if numtype == 'decimal':
        return (Decimal(gross_pay) + Decimal(employer_match)) - Decimal(
            sum(taxes_and_fees)
        )
    else:
        return (float(gross_pay) + float(employer_match)) - sum(taxes_and_fees)
def savings_rate(take_home_pay, spending, numtype='float'):
    """
    Calculate the monthly savings rate from take-home pay and spending
    using the formula laid out by Mr. Money Mustache:
    http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/

    Args:
        take_home_pay: float or int, monthly take-home pay
        spending: float or int, monthly spending
        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        your monthly savings rate expressed as a percentage.
    """
    if numtype == 'decimal':
        try:
            return (
                (Decimal(take_home_pay) - Decimal(spending))
                / (Decimal(take_home_pay))
            ) * Decimal(100.0)
        # Leave InvalidOperation for backwards compatibility
        except (InvalidOperation, DivisionByZero):
            return Decimal(0.0)
    else:
        try:
            return (
                (float(take_home_pay) - float(spending))
                / (float(take_home_pay))
            ) * 100.0
        except ZeroDivisionError:
            return 0.0
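Putting the two money helpers together, in the spirit of the linked article (the dollar amounts are made up):

pay = take_home_pay(5000, 250, [600, 150, 80])  # (5000 + 250) - 830 = 4420.0
rate = savings_rate(pay, 2500)                  # (4420 - 2500) / 4420 * 100
print(round(rate, 1))                           # 43.4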
def get_variable(relpath, keyword='__version__'):
    """Read __version__ or other properties from a python file without
    importing it

    from gist.github.com/technonik/406623 but with added keyword kwarg
    """
    for line in open(os.path.join(os.path.dirname(__file__), relpath),
                     encoding='cp437'):
        if keyword in line:
            if '"' in line:
                return line.split('"')[1]
            elif "'" in line:
                return line.split("'")[1]
def datetime_str_to_timestamp(datetime_str):
    '''
    '2018-01-01 00:00:00' (str) --> 1514736000

    :param str datetime_str: datetime string
    :return: unix timestamp (int) or None
    :rtype: int or None
    '''
    try:
        dtf = DTFormat()
        struct_time = time.strptime(datetime_str, dtf.datetime_format)
        return time.mktime(struct_time)
    except:
        return None
def get_datetime_string(datetime_obj):
    '''
    Get datetime string from datetime object

    :param datetime datetime_obj: datetime object
    :return: datetime string
    :rtype: str
    '''
    if isinstance(datetime_obj, datetime):
        dft = DTFormat()
        return datetime_obj.strftime(dft.datetime_format)
    return None
def timestamp_to_datetime(timestamp):
    '''
    1514736000 --> datetime object

    :param int timestamp: unix timestamp (int)
    :return: datetime object or None
    :rtype: datetime or None
    '''
    if isinstance(timestamp, (int, float, str)):
        try:
            timestamp = float(timestamp)
            if timestamp.is_integer():
                timestamp = int(timestamp)
        except:
            return None
        temp = str(timestamp).split('.')[0]
        if len(temp) == 13:
            timestamp = timestamp / 1000.0
        if len(temp) < 10:
            return None
    else:
        return None
    return datetime.fromtimestamp(timestamp)
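The helper normalizes second- and millisecond-precision inputs, including numeric strings:

timestamp_to_datetime(1514736000)     # datetime for 2018-01-01 (seconds precision)
timestamp_to_datetime(1514736000000)  # same instant -- 13 digits are treated as milliseconds
timestamp_to_datetime('1514736000')   # numeric strings are accepted too
timestamp_to_datetime(123)            # None -- too short to be a unix timestamp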
def attr(prev, attr_name):
    """attr pipe can extract attribute value of object.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_name: The name of attribute
    :type attr_name: str
    :returns: generator
    """
    for obj in prev:
        if hasattr(obj, attr_name):
            yield getattr(obj, attr_name)
def attrs(prev, attr_names):
    """attrs pipe can extract attribute values of object.

    If attr_names is a list and one of its items is not a valid attribute
    of prev's object, that item will be excluded from the yielded list.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_names: The list of attribute names
    :type attr_names: str of list
    :returns: generator
    """
    for obj in prev:
        attr_values = []
        for name in attr_names:
            if hasattr(obj, name):
                attr_values.append(getattr(obj, name))
        yield attr_values
def attrdict(prev, attr_names):
    """attrdict pipe can extract attribute values of object into a dict.

    The argument attr_names can be a list or a dict.

    If attr_names is a list and one of its items is not a valid attribute
    of prev's object, that item will be excluded from the yielded dict.

    If attr_names is a dict and a key doesn't exist in prev's object, the
    value of the corresponding attr_names key will be copied to the yielded
    dict as a default.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_names: The list or dict of attribute names
    :type attr_names: str of list or dict
    :returns: generator
    """
    if isinstance(attr_names, dict):
        for obj in prev:
            attr_values = dict()
            for name in attr_names.keys():
                if hasattr(obj, name):
                    attr_values[name] = getattr(obj, name)
                else:
                    attr_values[name] = attr_names[name]
            yield attr_values
    else:
        for obj in prev:
            attr_values = dict()
            for name in attr_names:
                if hasattr(obj, name):
                    attr_values[name] = getattr(obj, name)
            yield attr_values
def flatten(prev, depth=sys.maxsize):
    """flatten pipe extracts nested item from previous pipe.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param depth: The deepest nested level to be extracted. 0 means no extraction.
    :type depth: integer
    :returns: generator
    """
    def inner_flatten(iterable, curr_level, max_levels):
        for i in iterable:
            if hasattr(i, '__iter__') and curr_level < max_levels:
                for j in inner_flatten(i, curr_level + 1, max_levels):
                    yield j
            else:
                yield i

    for d in prev:
        if hasattr(d, '__iter__') and depth > 0:
            for inner_d in inner_flatten(d, 1, depth):
                yield inner_d
        else:
            yield d
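Called directly as the plain generator shown above (outside the pipe framework these functions are written for), flatten behaves like this:

nested = [1, [2, [3, [4]]], 5]
list(flatten(iter(nested)))           # [1, 2, 3, 4, 5]
list(flatten(iter(nested), depth=1))  # [1, 2, [3, [4]], 5] -- only one level unpacked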
def values(prev, *keys, **kw):
    """values pipe extracts values from the previous pipe.

    If previous pipe sends a dictionary to values pipe, keys should contain
    the keys of the dictionary which you want to get. If previous pipe
    sends a list or tuple, keys should contain the indexes of the items
    which you want to get.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :returns: generator
    """
    d = next(prev)
    if isinstance(d, dict):
        yield [d[k] for k in keys if k in d]
        for d in prev:
            yield [d[k] for k in keys if k in d]
    else:
        yield [d[i] for i in keys if 0 <= i < len(d)]
        for d in prev:
            yield [d[i] for i in keys if 0 <= i < len(d)]
def pack(prev, n, rest=False, **kw):
    """pack pipe takes n elements from previous generator and yield one
    list to next.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param rest: Set True to allow to output the rest part of last elements.
    :type rest: boolean
    :param padding: Specify the padding element for the rest part of last elements.
    :type padding: any
    :returns: generator

    :Example:
    >>> result([1,2,3,4,5,6,7] | pack(3))
    [[1, 2, 3], [4, 5, 6]]
    >>> result([1,2,3,4,5,6,7] | pack(3, rest=True))
    [[1, 2, 3], [4, 5, 6], [7,]]
    >>> result([1,2,3,4,5,6,7] | pack(3, padding=None))
    [[1, 2, 3], [4, 5, 6], [7, None, None]]
    """
    if 'padding' in kw:
        use_padding = True
        padding = kw['padding']
    else:
        use_padding = False
        padding = None

    items = []
    for i, data in enumerate(prev, 1):
        items.append(data)
        if (i % n) == 0:
            yield items
            items = []
    if len(items) != 0 and rest:
        if use_padding:
            items.extend([padding, ] * (n - (i % n)))
        yield items
def grep(prev, pattern, *args, **kw):
    """The pipe greps the data passed from previous generator according to
    given regular expression.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which used to filter out data.
    :type pattern: str|unicode|re pattern object
    :param inv: If true, invert the match condition.
    :type inv: boolean
    :param kw:
    :type kw: dict
    :returns: generator
    """
    inv = False if 'inv' not in kw else kw.pop('inv')
    pattern_obj = re.compile(pattern, *args, **kw)
    for data in prev:
        if bool(inv) ^ bool(pattern_obj.match(data)):
            yield data
def match(prev, pattern, *args, **kw):
    """The pipe greps the data passed from previous generator according to
    given regular expression. The data passed to next pipe is a MatchObject,
    dict, or tuple, determined by the 'to' keyword argument.

    By default, match pipe yields MatchObject. Use 'to' in keyword argument
    to change the type of match result.

    If 'to' is dict, yield MatchObject.groupdict().
    If 'to' is tuple, yield MatchObject.groups().
    If 'to' is list, yield list(MatchObject.groups()).

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which used to filter data.
    :type pattern: str|unicode
    :param to: What data type the result should be stored. dict|tuple|list
    :type to: type
    :returns: generator
    """
    to = 'to' in kw and kw.pop('to')
    pattern_obj = re.compile(pattern, *args, **kw)

    if to is dict:
        for data in prev:
            match = pattern_obj.match(data)
            if match is not None:
                yield match.groupdict()
    elif to is tuple:
        for data in prev:
            match = pattern_obj.match(data)
            if match is not None:
                yield match.groups()
    elif to is list:
        for data in prev:
            match = pattern_obj.match(data)
            if match is not None:
                yield list(match.groups())
    else:
        for data in prev:
            match = pattern_obj.match(data)
            if match is not None:
                yield match
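The 'to' switch in action, again calling the plain generator directly rather than through the pipe operator:

lines = iter(['2018-01-01 12:30'])
list(match(lines, r'(?P<date>\S+) (?P<time>\S+)', to=dict))
# [{'date': '2018-01-01', 'time': '12:30'}]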
def resplit(prev, pattern, *args, **kw):
    """The resplit pipe splits the previous pipe input by regular expression.

    Use 'maxsplit' keyword argument to limit the number of splits.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which used to split string.
    :type pattern: str|unicode
    """
    maxsplit = 0 if 'maxsplit' not in kw else kw.pop('maxsplit')
    pattern_obj = re.compile(pattern, *args, **kw)
    for s in prev:
        yield pattern_obj.split(s, maxsplit=maxsplit)
def sub(prev, pattern, repl, *args, **kw):
    """sub pipe is a wrapper of re.sub method.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern string.
    :type pattern: str|unicode
    :param repl: Check repl argument in re.sub method.
    :type repl: str|unicode|callable
    """
    count = 0 if 'count' not in kw else kw.pop('count')
    pattern_obj = re.compile(pattern, *args, **kw)
    for s in prev:
        yield pattern_obj.sub(repl, s, count=count)
def wildcard(prev, pattern, *args, **kw):
    """wildcard pipe greps data passed from previous generator according to
    given regular expression.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The wildcard string which used to filter data.
    :type pattern: str|unicode|re pattern object
    :param inv: If true, invert the match condition.
    :type inv: boolean
    :returns: generator
    """
    import fnmatch

    inv = 'inv' in kw and kw.pop('inv')
    pattern_obj = re.compile(fnmatch.translate(pattern), *args, **kw)

    if not inv:
        for data in prev:
            if pattern_obj.match(data):
                yield data
    else:
        for data in prev:
            if not pattern_obj.match(data):
                yield data
def stdout(prev, endl='\n', thru=False): """This pipe reads data from the previous iterator and writes it to stdout. :param prev: The previous iterator of pipe. :type prev: Pipe :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will be passed to the next generator. If false, data will be dropped. :type thru: bool :returns: generator """ for i in prev: sys.stdout.write(str(i) + endl) if thru: yield i
def readline(prev, filename=None, mode='r', trim=str.rstrip, start=1, end=sys.maxsize): """This pipe gets filenames or file objects from the previous pipe and reads the content of each file, sending it line by line to the next pipe. The start and end parameters limit the range of lines read from each file. :param prev: The previous iterator of pipe. :type prev: Pipe :param filename: The files to be read. If None, use the previous pipe input as filenames. :type filename: None|str|unicode|list|tuple :param mode: The mode used to open files. The default is 'r'. :type mode: str :param trim: The function used to trim each line before it is sent to the next pipe. :type trim: function object. :param start: If start is specified, only lines numbered start or higher will be sent. :type start: integer :param end: The last line number to read. :type end: integer :returns: generator """ if prev is None: if filename is None: raise Exception('No input available for readline.') elif is_str_type(filename): file_list = [filename, ] else: file_list = filename else: file_list = prev for fn in file_list: if isinstance(fn, file_type): fd = fn else: fd = open(fn, mode) try: if start <= 1 and end == sys.maxsize: for line in fd: yield trim(line) else: for line_no, line in enumerate(fd, 1): if line_no < start: continue yield trim(line) if line_no >= end: break finally: if fd != fn: fd.close()
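A sketch of reading a line range directly; 'notes.txt' is a hypothetical file, and the call assumes the module's is_str_type/file_type helpers are available: # Read only lines 2 to 4 of notes.txt, right-stripped. for line in readline(None, filename='notes.txt', start=2, end=4): print(line)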
def fileobj(prev, file_handle, endl='', thru=False): """This pipe reads data from, or writes data to, the file object specified by file_handle. :param prev: The previous iterator of pipe. :type prev: Pipe :param file_handle: The file object to read or write. :type file_handle: file object :param endl: The end-of-line symbol for each output. :type endl: str :param thru: If true, data will be passed to the next generator. If false, data will be dropped. :type thru: bool :returns: generator """ if prev is not None: for i in prev: file_handle.write(str(i)+endl) if thru: yield i else: for data in file_handle: yield data
def sh(prev, *args, **kw): """sh pipe executes the shell command specified by args. If a previous pipe exists, it reads data from it and writes the data to the stdin of the shell process. The stdout of the shell process is passed to the next pipe object line by line. An optional keyword argument 'trim' can pass a function into the sh pipe, used to trim the output of the shell process. The default trim function is str.rstrip, so trailing whitespace on each output line is removed. The optional keyword argument 'endl' sets the line terminator written after each stdin item (the default is a newline). For example: py_files = result(sh('ls') | strip | wildcard('*.py')) :param prev: The previous iterator of pipe. :type prev: Pipe :param args: The command line arguments. They are joined by the space character. :type args: list of string. :param kw: Remaining keyword arguments are passed to subprocess.Popen. :type kw: dictionary of options. :returns: generator """ endl = '\n' if 'endl' not in kw else kw.pop('endl') trim = None if 'trim' not in kw else kw.pop('trim') if trim is None: trim = bytes.rstrip if is_py3 else str.rstrip cmdline = ' '.join(args) if not cmdline: if prev is not None: for i in prev: yield i else: while True: yield None # Without a command line there is no process to spawn. return process = subprocess.Popen(cmdline, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kw) if prev is not None: stdin_buffer = StringIO() for i in prev: stdin_buffer.write(i) if endl: stdin_buffer.write(endl) if is_py3: process.stdin.write(stdin_buffer.getvalue().encode('utf-8')) else: process.stdin.write(stdin_buffer.getvalue()) process.stdin.flush() process.stdin.close() stdin_buffer.close() for line in process.stdout: yield trim(line) process.wait()
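A direct-call sketch piping data through a system command (assumes a POSIX environment with 'sort' on the PATH; on Python 3 the yielded lines are bytes because the default trim is bytes.rstrip): >>> list(sh(iter(['b', 'a', 'c']), 'sort')) [b'a', b'b', b'c']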
def walk(prev, initial_path, *args, **kw): """This pipe wraps os.walk and yields the joined file paths one by one (absolute if initial_path is absolute). :param prev: The previous iterator of pipe. :type prev: Pipe :param initial_path: The root path that os.walk starts from. :type initial_path: str :param args: Additional positional arguments; accepted but unused. :param kw: Additional keyword arguments; accepted but unused. :returns: generator """ for dir_path, dir_names, filenames in os.walk(initial_path): for filename in filenames: yield os.path.join(dir_path, filename)
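A one-line direct-call sketch of walk: >>> paths = list(walk(None, '.'))  # every file path under the current directory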
def join(prev, sep, *args, **kw): '''alias of str.join''' yield sep.join(prev, *args, **kw)
def substitute(prev, *args, **kw): '''alias of string.Template.substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.substitute(data)
def safe_substitute(prev, *args, **kw): '''alias of string.Template.safe_substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.safe_substitute(data)
def to_str(prev, encoding=None): """Convert data from the previous pipe to str with the specified encoding. Bytes input is decoded (using sys.stdout's encoding, or utf-8, when no encoding is given); str input is passed through unchanged unless an encoding is given, in which case it is encoded to bytes.""" try: first = next(prev) except StopIteration: # Empty input: nothing to convert (avoids PEP 479 RuntimeError). return if isinstance(first, str): if encoding is None: yield first for s in prev: yield s else: yield first.encode(encoding) for s in prev: yield s.encode(encoding) else: if encoding is None: encoding = sys.stdout.encoding or 'utf-8' yield first.decode(encoding) for s in prev: yield s.decode(encoding)
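A direct-call sketch decoding UTF-8 bytes: >>> list(to_str(iter([b'caf\xc3\xa9']), encoding='utf-8')) ['café']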
def register_default_types(): """Register all default type-to-pipe converters.""" register_type(type, pipe.map) register_type(types.FunctionType, pipe.map) register_type(types.MethodType, pipe.map) register_type(tuple, seq) register_type(list, seq) register_type(types.GeneratorType, seq) register_type(string_type, sh) register_type(unicode_type, sh) register_type(file_type, fileobj) if is_py3: register_type(range, seq) register_type(map, seq)
def get_dict(self): ''' Convert Paginator instance to dict :return: Paging data :rtype: dict ''' return dict( current_page=self.current_page, total_page_count=self.total_page_count, items=self.items, total_item_count=self.total_item_count, page_size=self.page_size )
def write_log(log_directory, log_filename, header, logline, debug, require_latest, latest_directory, latest_filename): """This function logs a line of data to both a 'log' file and a 'latest' file. The 'latest' file is optional, and is requested by the boolean parameter 'require_latest'. So the 2 log directories and filenames are: a. (REQUIRED): log_directory + log_filename b. (OPTIONAL): latest_directory + latest_filename The 'latest' directory and filename are provided so as to have a consistent file of the latest events. This is usually the latest day of events. The way this function works with the 'latest' log_dir is as follows: a. It checks for the existence of log_directory + log_filename b. If (a) doesn't exist, then any 'latest' file is removed and a new one created c. If (a) already exists, logs are written to any existing 'latest' file. If one doesn't exist, it will be created For both the 'log' and 'latest' files, a header line will be written if a new file is created. Please note that a header must start with the '#' symbol, so the Ardexa agent can interpret this line as a header and will not send it to the cloud. """ create_new_file = False # Make sure the logging directory exists. The following will create all the necessary subdirs, # if the subdirs exist in part or in full if not os.path.exists(log_directory): os.makedirs(log_directory) full_path_log = os.path.join(log_directory, log_filename) if debug > 1: print("Full path of log directory: ", full_path_log) # If the file doesn't exist, annotate that a new 'latest' file is to be created # and that a header is to be created if not os.path.isfile(full_path_log): if debug > 1: print("Log file doesn't exist: ", full_path_log) create_new_file = True # Repeat for the 'latest', if it doesn't exist if require_latest: if not os.path.exists(latest_directory): os.makedirs(latest_directory) full_path_latest = os.path.join(latest_directory, latest_filename) if debug > 1: print("Full path of latest directory: ", full_path_latest) # If the 'create_new_file' tag is set AND the file exists, then remove it if create_new_file and os.path.isfile(full_path_latest): # then remove the file os.remove(full_path_latest) # Now create both (or open both) and write to them if debug > 1: print("##########################################") print("Writing the line to", full_path_log) print(logline) print("##########################################") # Write the logline to the log file with open(full_path_log, "a") as output_file: if create_new_file: output_file.write(header) output_file.write(logline) # And write it to the 'latest' if required if require_latest: with open(full_path_latest, "a") as write_latest: if create_new_file: write_latest.write(header) write_latest.write(logline)
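A usage sketch with hypothetical paths and CSV-style lines; note the header's leading '#': write_log('/tmp/logs', 'inverter.csv', '# Datetime, Power (W)\n', '2024-01-02T03:04:05, 1500\n', debug=0, require_latest=True, latest_directory='/tmp/latest', latest_filename='latest.csv')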
def check_pidfile(pidfile, debug): """Check that a process is not running more than once, using PIDFILE""" # Check PID exists and see if the PID is running if os.path.isfile(pidfile): # try and read the PID file. If no luck, remove it try: with open(pidfile, 'r') as pidfile_handle: pid = int(pidfile_handle.read()) if check_pid(pid, debug): return True except (ValueError, OSError): pass # PID is not active, remove the PID file os.unlink(pidfile) # Create a PID file, to ensure this script is only run once (at a time) pid = str(os.getpid()) with open(pidfile, 'w') as pidfile_handle: pidfile_handle.write(pid) return False
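A sketch of guarding a script with check_pidfile; the PID file path is hypothetical: import sys if check_pidfile('/tmp/myscript.pid', debug=0): sys.exit('Another instance is already running')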
def check_pid(pid, debug): """This function will check whether a PID is currently running""" try: # Sending signal 0 checks whether the PID is active; it won't kill # the process os.kill(pid, 0) if debug > 1: print("Script has a PIDFILE where the process is still running") return True except OSError: if debug > 1: print("Script does not appear to be running") return False
def convert_words_to_uint(high_word, low_word): """Convert two 16-bit words to an unsigned 32-bit integer""" try: low_num = int(low_word) # low_word might arrive as a signed 16-bit number. Mask it back to # its unsigned two's-complement representation if low_num < 0: low_num &= 0xFFFF number = (int(high_word) << 16) | low_num return number, True except (TypeError, ValueError): return 0, False
def convert_words_to_float(high_word, low_word): """Convert two 16-bit words to an IEEE 754 floating point number""" number, retval = convert_words_to_uint(high_word, low_word) if not retval: return 0.0, False try: # Pack as an unsigned 32-bit integer ('>L') so values with the high # bit set don't overflow the signed range, then reinterpret as float packed_float = struct.pack('>L', number) return struct.unpack('>f', packed_float)[0], True except struct.error: return 0.0, False
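A worked example: the IEEE 754 encoding of 12.5 is 0x41480000, so the high word is 0x4148 and the low word is 0x0000: >>> convert_words_to_float(0x4148, 0x0000) (12.5, True)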
def disown(debug): """This function will disown, so the Ardexa service can be restarted""" # Get the current PID pid = os.getpid() cgroup_file = "/proc/" + str(pid) + "/cgroup" try: infile = open(cgroup_file, "r") except IOError: print("Could not open cgroup file: ", cgroup_file) return False # Read each line for line in infile: # Check if the line contains "ardexa.service" if line.find("ardexa.service") == -1: continue # If the line contains "name=", remove it line = line.replace("name=", "") # Split the line by colons items_list = line.split(':') accounts = items_list[1] dir_str = accounts + "/ardexa.disown" # If accounts is empty, continue if not accounts: continue # Create the dir and all subdirs full_dir = "/sys/fs/cgroup/" + dir_str if not os.path.exists(full_dir): os.makedirs(full_dir) if debug >= 1: print("Making directory: ", full_dir) else: if debug >= 1: print("Directory already exists: ", full_dir) # Add the PID to the file full_path = full_dir + "/cgroup.procs" prog_list = ["echo", str(pid), ">", full_path] run_program(prog_list, debug, True) # If this item contains a comma, then separate it, and reverse; # some OSes will need cpuacct,cpu reversed to actually work if accounts.find(",") != -1: acct_list = accounts.split(',') accounts = acct_list[1] + "," + acct_list[0] dir_str = accounts + "/ardexa.disown" # Create the dir and all subdirs. But it may not work. So use a TRY full_dir = "/sys/fs/cgroup/" + dir_str try: if not os.path.exists(full_dir): os.makedirs(full_dir) except OSError: continue # Add the PID to the file full_path = full_dir + "/cgroup.procs" prog_list = ["echo", str(pid), ">", full_path] run_program(prog_list, debug, True) infile.close() # For debug purposes only if debug >= 1: prog_list = ["cat", cgroup_file] run_program(prog_list, debug, False) # If any "ardexa.service" entries remain in the cgroup file, exit with # an error prog_list = ["grep", "-q", "ardexa.service", cgroup_file] if run_program(prog_list, debug, False): # There are entries still left in the file return False return True
def run_program(prog_list, debug, shell): """Run a program and check the program return code. Note that some commands don't work well with Popen. So if this function is specifically called with 'shell=True', then it will run the old 'os.system'. In which case, there is no program output """ try: if not shell: process = Popen(prog_list, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() retcode = process.returncode if debug >= 1: print("Program : ", " ".join(prog_list)) print("Return Code: ", retcode) print("Stdout: ", stdout) print("Stderr: ", stderr) # A return code of 0 means success return retcode == 0 else: command = " ".join(prog_list) return os.system(command) == 0 except Exception: return False
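A direct usage sketch (assumes a POSIX 'ls'): >>> ok = run_program(['ls', '-l'], debug=0, shell=False)  # True if ls exited with 0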
def parse_address_list(addrs): """Yield each integer from a complex range string like "1-9,12,15-20,23" >>> list(parse_address_list('1-9,12,15-20,23')) [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23] >>> list(parse_address_list('1-9,12,15-20,2-3-4')) Traceback (most recent call last): ... ValueError: format error in 2-3-4 """ for addr in addrs.split(','): elem = addr.split('-') if len(elem) == 1: # a number yield int(elem[0]) elif len(elem) == 2: # a range inclusive start, end = list(map(int, elem)) for i in range(start, end+1): yield i else: # more than one hyphen raise ValueError('format error in %s' % addr)
def _encode_ids(*args): """URL-encode resource ids and join them with ';'.""" ids = [] for v in args: if isinstance(v, basestring): qv = v.encode('utf-8') if isinstance(v, unicode) else v else: qv = str(v) ids.append(urllib.quote(qv)) return ';'.join(ids)
def random_string(length): ''' Generate a random alphanumeric string of the given length. Example: >>> from eggit.egg_string import random_string >>> random_string(8) 'q4f2eaT4' ''' str_list = [random.choice(string.digits + string.ascii_letters) for _ in range(length)] return ''.join(str_list)
def get_item_creator(item_type): """Get the item creator according to the registered item type. :param item_type: The type of the item to be checked. :type item_type: types.TypeType. :returns: Creator function. None if the type is not found. """ if item_type not in Pipe.pipe_item_types: for registered_type in Pipe.pipe_item_types: if issubclass(item_type, registered_type): return Pipe.pipe_item_types[registered_type] return None else: return Pipe.pipe_item_types[item_type]
def clone(self): """Self-cloning. All its next Pipe objects are cloned too. :returns: cloned object """ new_object = copy.copy(self) if new_object.next: new_object.next = new_object.next.clone() return new_object
def append(self, next): """Append next object to pipe tail. :param next: The Pipe object to be appended to tail. :type next: Pipe object. """ next.chained = True if self.next: self.next.append(next) else: self.next = next
def iter(self, prev=None): """Return a generator as the iterator object. :param prev: The previous Pipe object, used for data input. :returns: A generator for iteration. """ if self.next: generator = self.next.iter(self.func(prev, *self.args, **self.kw)) else: generator = self.func(prev, *self.args, **self.kw) return generator