def _getCallingContext():
    """
    Utility function for the RedisLogRecord.

    Returns the module, function, and lineno of the function
    that called the logger.

    We look way up in the stack. The stack at this point is:
    [0] logger.py _getCallingContext (hey, that's me!)
    [1] logger.py __init__
    [2] logger.py makeRecord
    [3] _log
    [4] <logging method>
    [5] caller of logging method
    """
    frames = inspect.stack()

    if len(frames) > 4:
        context = frames[5]
    else:
        context = frames[0]

    modname = context[1]
    lineno = context[2]

    if context[3]:
        funcname = context[3]
    else:
        funcname = ""

    # python docs say you don't want references to
    # frames lying around. Bad things can happen.
    del context
    del frames

    return modname, funcname, lineno
def format(self, record):
    """
    JSON-encode a record for serializing through redis.

    Convert date to iso format, and stringify any exceptions.
    """
    data = record._raw.copy()

    # serialize the datetime date as utc string
    data['time'] = data['time'].isoformat()

    # stringify exception data
    if data.get('traceback'):
        data['traceback'] = self.formatException(data['traceback'])

    return json.dumps(data)
def emit(self, record):
    """
    Publish record to redis logging channel
    """
    try:
        self.redis_client.publish(self.channel, self.format(record))
    except redis.RedisError:
        pass
def emit(self, record):
    """
    Publish record to redis logging list
    """
    try:
        if self.max_messages:
            p = self.redis_client.pipeline()
            p.rpush(self.key, self.format(record))
            p.ltrim(self.key, -self.max_messages, -1)
            p.execute()
        else:
            self.redis_client.rpush(self.key, self.format(record))
    except redis.RedisError:
        pass
def require_template_debug(f):
    """Decorated function is a no-op if TEMPLATE_DEBUG is False"""
    def _(*args, **kwargs):
        TEMPLATE_DEBUG = getattr(settings, 'TEMPLATE_DEBUG', False)
        return f(*args, **kwargs) if TEMPLATE_DEBUG else ''
    return _
def _display_details(var_data):
    """
    Given a dictionary of variable attribute data from get_details,
    display the data in the terminal.
    """
    meta_keys = (key for key in list(var_data.keys())
                 if key.startswith('META_'))
    for key in meta_keys:
        display_key = key[5:].capitalize()
        pprint('{0}: {1}'.format(display_key, var_data.pop(key)))
    pprint(var_data)
def set_trace(context): """ Start a pdb set_trace inside of the template with the context available as 'context'. Uses ipdb if available. """ try: import ipdb as pdb except ImportError: import pdb print("For best results, pip install ipdb.") print("Variables that are available in the current context:") render = lambda s: template.Template(s).render(context) availables = get_variables(context) pprint(availables) print('Type `availables` to show this list.') print('Type <variable_name> to access one.') print('Use render("template string") to test template rendering') # Cram context variables into the local scope for var in availables: locals()[var] = context[var] pdb.set_trace() return ''
def pydevd(context): """ Start a pydev settrace """ global pdevd_not_available if pdevd_not_available: return '' try: import pydevd except ImportError: pdevd_not_available = True return '' render = lambda s: template.Template(s).render(context) availables = get_variables(context) for var in availables: locals()[var] = context[var] #catch the case where no client is listening try: pydevd.settrace() except socket.error: pdevd_not_available = True return ''
def _flatten(iterable):
    """
    Given an iterable with nested iterables, generate a flat iterable
    """
    for i in iterable:
        if isinstance(i, Iterable) and not isinstance(i, string_types):
            for sub_i in _flatten(i):
                yield sub_i
        else:
            yield i
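A quick standalone check of the guard `_flatten` depends on (illustrative sketch, not part of the original source): strings are themselves iterable, so without the extra `isinstance` test the recursion would never terminate.

from collections.abc import Iterable

for value in [1, "abc", [1, 2], (3, 4)]:
    # only non-string iterables should be descended into
    print(value, isinstance(value, Iterable) and not isinstance(value, str))
# 1 False / abc False / [1, 2] True / (3, 4) True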
def get_details(var): """ Given a variable inside the context, obtain the attributes/callables, their values where possible, and the module name and class name if possible """ var_data = {} # Obtain module and class details if available and add them in module = getattr(var, '__module__', '') kls = getattr(getattr(var, '__class__', ''), '__name__', '') if module: var_data['META_module_name'] = module if kls: var_data['META_class_name'] = kls for attr in get_attributes(var): value = _get_detail_value(var, attr) if value is not None: var_data[attr] = value return var_data
def _get_detail_value(var, attr):
    """
    Given a variable and one of its attributes that are available inside of
    a template, return its 'method' if it is a callable, its class name if
    it is a model manager, otherwise return its value
    """
    value = getattr(var, attr)
    # Rename common Django class names
    kls = getattr(getattr(value, '__class__', ''), '__name__', '')
    if kls in ('ManyRelatedManager', 'RelatedManager', 'EmptyManager'):
        return kls
    if callable(value):
        return 'routine'
    return value
def get_attributes(var):
    """
    Given a variable, return the list of attributes that are available
    inside of a template
    """
    is_valid = partial(is_valid_in_template, var)
    return list(filter(is_valid, dir(var)))
def is_valid_in_template(var, attr):
    """
    Given a variable and one of its attributes, determine if the attribute is
    accessible inside of a Django template and return True or False accordingly
    """
    # Remove private variables or methods
    if attr.startswith('_'):
        return False
    # Remove any attributes that raise an exception when read
    try:
        value = getattr(var, attr)
    except:
        return False
    if isroutine(value):
        # Remove any routines that are flagged with 'alters_data'
        if getattr(value, 'alters_data', False):
            return False
        else:
            # Remove any routines that require arguments
            try:
                argspec = getargspec(value)
                num_args = len(argspec.args) if argspec.args else 0
                num_defaults = len(argspec.defaults) if argspec.defaults else 0
                if num_args - num_defaults > 1:
                    return False
            except TypeError:
                # C extension callables are routines, but getargspec fails with
                # a TypeError when these are passed.
                pass
    return True
def version_bump(self, version, type="bug"):
    """
    Increment version number string 'version'.

    Type can be one of: major, minor, or bug
    """
    parsed_version = LooseVersion(version).version
    total_components = max(3, len(parsed_version))

    bits = []
    for bit in parsed_version:
        try:
            bit = int(bit)
        except ValueError:
            continue
        bits.append(bit)

    indexes = {
        "major": 0,
        "minor": 1,
        "bug": 2,
    }

    bits += [0] * (3 - len(bits))  # pad to 3 digits

    # Increment the version
    bits[indexes[type]] += 1

    # Set the subsequent digits to 0
    for i in range(indexes[type] + 1, 3):
        bits[i] = 0

    return ".".join(map(str, bits))
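The non-obvious step in version_bump is what `LooseVersion(...).version` produces; a small standalone sketch using only the standard library (note that distutils is deprecated in recent Python releases):

from distutils.version import LooseVersion

print(LooseVersion("1.2.3").version)   # [1, 2, 3]
print(LooseVersion("1.2rc1").version)  # [1, 2, 'rc', 1] -- the 'rc' part fails int() and is skipped above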
def parse_log_messages(self, text):
    """Will parse git log messages in the 'short' format"""
    regex = r"commit ([0-9a-f]+)\nAuthor: (.*?)\n\n(.*?)(?:\n\n|$)"
    messages = re.findall(regex, text, re.DOTALL)
    parsed = []
    for commit, author, message in messages:
        parsed.append((
            commit[:10],
            re.sub(r"\s*<.*?>", "", author),  # Remove email address if present
            message.strip()
        ))
    return parsed
def determine_paths(self, package_name=None, create_package_dir=False, dry_run=False): """Determine paths automatically and a little intelligently""" # Give preference to the environment variable here as it will not # derefrence sym links self.project_dir = Path(os.getenv('PWD') or os.getcwd()) # Try and work out the project name distribution = self.get_distribution() if distribution: # Get name from setup.py self.project_name = distribution.get_name() else: # ...failing that, use the current directory name self.project_name = self.project_dir.name # Descend into the 'src' directory to find the package # if necessary if os.path.isdir(self.project_dir / "src"): package_search_dir = self.project_dir / "src" else: package_search_dir = self.project_dir created_package_dir = False if not package_name: # Lets try and work out the package_name from the project_name package_name = self.project_name.replace("-", "_") # Now do some fuzzy matching def get_matches(name): possibles = [n for n in os.listdir(package_search_dir) if os.path.isdir(package_search_dir / n)] return difflib.get_close_matches(name, possibles, n=1, cutoff=0.8) close = get_matches(package_name) # If no matches, try removing the first part of the package name # (e.g. django-guardian becomes guardian) if not close and "_" in package_name: short_package_name = "_".join(package_name.split("_")[1:]) close = get_matches(short_package_name) if not close: if create_package_dir: package_dir = package_search_dir / package_name # Gets set to true even during dry run created_package_dir = True if not dry_run: print("Creating package directory at %s" % package_dir) os.mkdir(package_dir) else: print("Would have created package directory at %s" % package_dir) else: raise CommandError("Could not guess the package name. Specify it using --name.") else: package_name = close[0] self.package_name = package_name self.package_dir = package_search_dir / package_name if not os.path.exists(self.package_dir) and not created_package_dir: raise CommandError("Package directory did not exist at %s. Perhaps specify it using --name" % self.package_dir)
def check_integrity(sakefile, settings): """ Checks the format of the sakefile dictionary to ensure it conforms to specification Args: A dictionary that is the parsed Sakefile (from sake.py) The setting dictionary (for print functions) Returns: True if the Sakefile is conformant False if not """ sprint = settings["sprint"] error = settings["error"] sprint("Call to check_integrity issued", level="verbose") if not sakefile: error("Sakefile is empty") return False # checking for duplicate targets if len(sakefile.keys()) != len(set(sakefile.keys())): error("Sakefile contains duplicate targets") return False for target in sakefile: if target == "all": if not check_target_integrity(target, sakefile["all"], all=True): error("Failed to accept target 'all'") return False continue if "formula" not in sakefile[target]: if not check_target_integrity(target, sakefile[target], meta=True): errmes = "Failed to accept meta-target '{}'".format(target) error(errmes) return False for atom_target in sakefile[target]: if atom_target == "help": continue if not check_target_integrity(atom_target, sakefile[target][atom_target], parent=target): errmes = "Failed to accept target '{}'\n".format( atom_target) error(errmes) return False continue if not check_target_integrity(target, sakefile[target]): errmes = "Failed to accept target '{}'\n".format(target) error(errmes) return False return True
def check_target_integrity(key, values, meta=False, all=False, parent=None): """ Checks the integrity of a specific target. Gets called multiple times from check_integrity() Args: The target name The dictionary values of that target A boolean representing whether it is a meta-target A boolean representing whether it is the "all" target A string representing name of parent (default None) Returns: True is the target is conformant False if not """ # logic to audit "all" target if all: if not values: print("Warning: target 'all' is empty") # will check if it has unrecognized target later return True errmes = "target '{}' is not allowed to be missing a help message\n" # logic to audit a meta-target if meta: # check if help is missing if "help" not in values: sys.stderr.write(errmes.format(key)) return False # checking if empty if len(values.keys()) == 1: sys.stderr.write("Meta-target '{}' is empty\n".format(key)) return False return True # logic to audit any other target expected_fields = ["dependencies", "help", "output", "formula"] expected_fields = set(expected_fields) try: our_keys_set = set(values.keys()) except: sys.stderr.write("Error processing target '{}'\n".format(key)) sys.stderr.write("Are you sure '{}' is a meta-target?\n".format( parent)) sys.stderr.write("If it's not, it's missing a formula\n") return False ignored_fields = set([field for field in our_keys_set\ if field.strip().startswith("(ignore)")]) difference = our_keys_set - expected_fields - ignored_fields if difference: print("The following fields were not recognized and will be ignored") for item in difference: print(" - " + item) if "help" not in values: sys.stderr.write(errmes.format(key)) return False # can't be missing formula either if "formula" not in values: sys.stderr.write("Target '{}' is missing formula\n".format(key)) return False return True
def check_shastore_version(from_store, settings): """ This function gives us the option to emit errors or warnings after sake upgrades """ sprint = settings["sprint"] error = settings["error"] sprint("checking .shastore version for potential incompatibilities", level="verbose") if not from_store or 'sake version' not in from_store: errmes = ["Since you've used this project last, a new version of ", "sake was installed that introduced backwards incompatible", " changes. Run 'sake clean', and rebuild before continuing\n"] errmes = " ".join(errmes) error(errmes) sys.exit(1)
def get_sha(a_file, settings=None): """ Returns sha1 hash of the file supplied as an argument """ if settings: error = settings["error"] else: error = ERROR_FN try: BLOCKSIZE = 65536 hasher = hashlib.sha1() with io.open(a_file, "rb") as fh: buf = fh.read(BLOCKSIZE) while len(buf) > 0: hasher.update(buf) buf = fh.read(BLOCKSIZE) the_hash = hasher.hexdigest() except IOError: errmes = "File '{}' could not be read! Exiting!".format(a_file) error(errmes) sys.exit(1) except: errmes = "Unspecified error returning sha1 hash. Exiting!" error(errmes) sys.exit(1) return the_hash
def write_shas_to_shastore(sha_dict): """ Writes a sha1 dictionary stored in memory to the .shastore file """ if sys.version_info[0] < 3: fn_open = open else: fn_open = io.open with fn_open(".shastore", "w") as fh: fh.write("---\n") fh.write('sake version: {}\n'.format(constants.VERSION)) if sha_dict: fh.write(yaml.dump(sha_dict)) fh.write("...")
def take_shas_of_all_files(G, settings): """ Takes sha1 hash of all dependencies and outputs of all targets Args: The graph we are going to build The settings dictionary Returns: A dictionary where the keys are the filenames and the value is the sha1 hash """ global ERROR_FN sprint = settings["sprint"] error = settings["error"] ERROR_FN = error sha_dict = {} all_files = [] for target in G.nodes(data=True): sprint("About to take shas of files in target '{}'".format(target[0]), level="verbose") if 'dependencies' in target[1]: sprint("It has dependencies", level="verbose") deplist = [] for dep in target[1]['dependencies']: glist = glob.glob(dep) if glist: for oneglob in glist: deplist.append(oneglob) else: deplist.append(dep) target[1]['dependencies'] = list(deplist) for dep in target[1]['dependencies']: sprint(" - {}".format(dep), level="verbose") all_files.append(dep) if 'output' in target[1]: sprint("It has outputs", level="verbose") for out in acts.get_all_outputs(target[1]): sprint(" - {}".format(out), level="verbose") all_files.append(out) if len(all_files): sha_dict['files'] = {} # check if files exist and de-dupe extant_files = [] for item in all_files: if item not in extant_files and os.path.isfile(item): extant_files.append(item) pool = Pool() results = pool.map(get_sha, extant_files) pool.close() pool.join() for fn, sha in zip(extant_files, results): sha_dict['files'][fn] = {'sha': sha} return sha_dict sprint("No dependencies", level="verbose")
def needs_to_run(G, target, in_mem_shas, from_store, settings): """ Determines if a target needs to run. This can happen in two ways: (a) If a dependency of the target has changed (b) If an output of the target is missing Args: The graph we are going to build The name of the target The dictionary of the current shas held in memory The dictionary of the shas from the shastore The settings dictionary Returns: True if the target needs to be run False if not """ force = settings["force"] sprint = settings["sprint"] if(force): sprint("Target rebuild is being forced so {} needs to run".format(target), level="verbose") return True node_dict = get_the_node_dict(G, target) if 'output' in node_dict: for output in acts.get_all_outputs(node_dict): if not os.path.isfile(output): outstr = "Output file '{}' is missing so it needs to run" sprint(outstr.format(output), level="verbose") return True if 'dependencies' not in node_dict: # if it has no dependencies, it always needs to run sprint("Target {} has no dependencies and needs to run".format(target), level="verbose") return True for dep in node_dict['dependencies']: # because the shas are updated after all targets build, # its possible that the dependency's sha doesn't exist # in the current "in_mem" dictionary. If this is the case, # then the target needs to run if ('files' in in_mem_shas and dep not in in_mem_shas['files'] or 'files' not in in_mem_shas): outstr = "Dep '{}' doesn't exist in memory so it needs to run" sprint(outstr.format(dep), level="verbose") return True now_sha = in_mem_shas['files'][dep]['sha'] if ('files' in from_store and dep not in from_store['files'] or 'files' not in from_store): outst = "Dep '{}' doesn't exist in shastore so it needs to run" sprint(outst.format(dep), level="verbose") return True old_sha = from_store['files'][dep]['sha'] if now_sha != old_sha: outstr = "There's a mismatch for dep {} so it needs to run" sprint(outstr.format(dep), level="verbose") return True sprint("Target '{}' doesn't need to run".format(target), level="verbose") return False
def run_commands(commands, settings): """ Runs the commands supplied as an argument It will exit the program if the commands return a non-zero code Args: the commands to run The settings dictionary """ sprint = settings["sprint"] quiet = settings["quiet"] error = settings["error"] enhanced_errors = True the_shell = None if settings["no_enhanced_errors"]: enhanced_errors = False if "shell" in settings: the_shell = settings["shell"] windows_p = sys.platform == "win32" STDOUT = None STDERR = None if quiet: STDOUT = PIPE STDERR = PIPE commands = commands.rstrip() sprint("About to run commands '{}'".format(commands), level="verbose") if not quiet: sprint(commands) if the_shell: tmp = shlex.split(the_shell) the_shell = tmp[0] tmp = tmp[1:] if enhanced_errors and not windows_p: tmp.append("-e") tmp.append(commands) commands = tmp else: if enhanced_errors and not windows_p: commands = ["-e", commands] p = Popen(commands, shell=True, stdout=STDOUT, stderr=STDERR, executable=the_shell) out, err = p.communicate() if p.returncode: if quiet: error(err.decode(locale.getpreferredencoding())) error("Command failed to run") sys.exit(1)
def run_the_target(G, target, settings):
    """
    Wrapper function that sends the commands in a target's 'formula'
    to run_commands()

    Args:
        The graph we are going to build
        The target to run
        The settings dictionary
    """
    sprint = settings["sprint"]
    sprint("Running target {}".format(target))
    the_formula = get_the_node_dict(G, target)["formula"]
    run_commands(the_formula, settings)
def get_the_node_dict(G, name):
    """
    Helper function that returns the node data
    of the node with the name supplied
    """
    for node in G.nodes(data=True):
        if node[0] == name:
            return node[1]
def get_direct_ancestors(G, list_of_nodes):
    """
    Returns a list of nodes that are the parents
    from all of the nodes given as an argument.
    This is for use in the parallel topo sort
    """
    parents = []
    for item in list_of_nodes:
        anc = G.predecessors(item)
        for one in anc:
            parents.append(one)
    return parents
def get_sinks(G):
    """
    A sink is a node with no children.
    This means that this is the end of the line,
    and it should be run last in topo sort.
    This returns a list of all sinks in a graph
    """
    sinks = []
    for node in G:
        if not len(list(G.successors(node))):
            sinks.append(node)
    return sinks
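A toy demonstration of the same sink test on a three-node build graph (hypothetical example, assuming networkx is available):

import networkx as nx

G = nx.DiGraph()
G.add_edges_from([("compile", "link"), ("link", "package")])

# a sink has no successors, i.e. no other target depends on it
sinks = [n for n in G if not list(G.successors(n))]
print(sinks)  # ['package']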
def get_levels(G):
    """
    For the parallel topo sort to work, the targets have
    to be executed in layers such that there is no
    dependency relationship between any nodes in a layer.
    What is returned is a list of lists representing all
    the layers, or levels
    """
    levels = []
    ends = get_sinks(G)
    levels.append(ends)
    while get_direct_ancestors(G, ends):
        ends = get_direct_ancestors(G, ends)
        levels.append(ends)
    levels.reverse()
    return levels
def remove_redundancies(levels):
    """
    There are repeats in the output from get_levels(). We
    want only the earliest occurrence (after it's reversed)
    """
    seen = []
    final = []
    for line in levels:
        new_line = []
        for item in line:
            if item not in seen:
                seen.append(item)
                new_line.append(item)
        final.append(new_line)
    return final
def parallel_run_these(G, list_of_targets, in_mem_shas, from_store, settings, dont_update_shas_of): """ The parallel equivalent of "run_this_target()" It receives a list of targets to execute in parallel. Unlike "run_this_target()" it has to update the shas (in memory and in the store) within the function. This is because one of the targets may fail but many can succeed, and those outputs need to be updated Args: G A graph A list of targets that we need to build in parallel The dictionary containing the in-memory sha store The dictionary containing the contents of the .shastore file The settings dictionary A list of outputs to not update shas of """ verbose = settings["verbose"] quiet = settings["quiet"] error = settings["error"] sprint = settings["sprint"] if len(list_of_targets) == 1: target = list_of_targets[0] sprint("Going to run target '{}' serially".format(target), level="verbose") run_the_target(G, target, settings) node_dict = get_the_node_dict(G, target) if "output" in node_dict: for output in acts.get_all_outputs(node_dict): if output not in dont_update_shas_of: in_mem_shas['files'][output] = {"sha": get_sha(output, settings)} in_mem_shas[output] = get_sha(output, settings) write_shas_to_shastore(in_mem_shas) if "dependencies" in node_dict: for dep in acts.get_all_dependencies(node_dict): if dep not in dont_update_shas_of: in_mem_shas['files'][dep] = {"sha": get_sha(dep, settings)} write_shas_to_shastore(in_mem_shas) return True a_failure_occurred = False out = "Going to run these targets '{}' in parallel" sprint(out.format(", ".join(list_of_targets))) info = [(target, get_the_node_dict(G, target)) for target in list_of_targets] commands = [item[1]['formula'].rstrip() for item in info] if not quiet: procs = [Popen(command, shell=True) for command in commands] else: procs = [Popen(command, shell=True, stdout=PIPE, stderr=PIPE) for command in commands] for index, process in enumerate(procs): if process.wait(): error("Target '{}' failed!".format(info[index][0])) a_failure_occurred = True else: if "output" in info[index][1]: for output in acts.get_all_outputs(info[index][1]): if output not in dont_update_shas_of: in_mem_shas['files'][output] = {"sha": get_sha(output, settings)} write_shas_to_shastore(in_mem_shas) if "dependencies" in info[index][1]: for dep in acts.get_all_dependencies(info[index][1]): if dep not in dont_update_shas_of: in_mem_shas['files'][dep] = {"sha": get_sha(dep, settings)} write_shas_to_shastore(in_mem_shas) if a_failure_occurred: error("A command failed to run") sys.exit(1) return True
def merge_from_store_and_in_mems(from_store, in_mem_shas, dont_update_shas_of): """ If we don't merge the shas from the sha store and if we build a subgraph, the .shastore will only contain the shas of the files from the subgraph and the rest of the graph will have to be rebuilt """ if not from_store: for item in dont_update_shas_of: if item in in_mem_shas['files']: del in_mem_shas['files'][item] return in_mem_shas for key in from_store['files']: if key not in in_mem_shas['files'] and key not in dont_update_shas_of: in_mem_shas['files'][key] = from_store['files'][key] for item in dont_update_shas_of: if item in in_mem_shas['files']: del in_mem_shas['files'][item] return in_mem_shas
def build_this_graph(G, settings, dont_update_shas_of=None): """ This is the master function that performs the building. Args: A graph (often a subgraph) The settings dictionary An optional list of files to not update the shas of (needed when building specific targets) Returns: 0 if successful UN-success results in a fatal error so it will return 0 or nothing """ verbose = settings["verbose"] quiet = settings["quiet"] force = settings["force"] recon = settings["recon"] parallel = settings["parallel"] error = settings["error"] sprint = settings["sprint"] if not dont_update_shas_of: dont_update_shas_of = [] sprint("Checking that graph is directed acyclic", level="verbose") if not nx.is_directed_acyclic_graph(G): errmes = "Dependency resolution is impossible; " errmes += "graph is not directed and acyclic" errmes += "\nCheck the Sakefile\n" error(errmes) sys.exit(1) sprint("Dependency resolution is possible", level="verbose") in_mem_shas = take_shas_of_all_files(G, settings) from_store = {} if not os.path.isfile(".shastore"): write_shas_to_shastore(in_mem_shas) in_mem_shas = {} in_mem_shas['files'] = {} with io.open(".shastore", "r") as fh: shas_on_disk = fh.read() from_store = yaml.load(shas_on_disk) check_shastore_version(from_store, settings) if not from_store: write_shas_to_shastore(in_mem_shas) in_mem_shas = {} in_mem_shas['files'] = {} with io.open(".shastore", "r") as fh: shas_on_disk = fh.read() from_store = yaml.load(shas_on_disk) # parallel if parallel: for line in parallel_sort(G): line = sorted(line) out = "Checking if targets '{}' need to be run" sprint(out.format(", ".join(line)), level="verbose") to_build = [] for item in line: if needs_to_run(G, item, in_mem_shas, from_store, settings): to_build.append(item) if to_build: if recon: if len(to_build) == 1: out = "Would run target '{}'" sprint(out.format(to_build[0])) else: out = "Would run targets '{}' in parallel" sprint(out.format(", ".join(to_build))) continue parallel_run_these(G, to_build, in_mem_shas, from_store, settings, dont_update_shas_of) # not parallel else: # still have to use parallel_sort to make # build order deterministic (by sorting targets) targets = [] for line in parallel_sort(G): for item in sorted(line): targets.append(item) for target in targets: outstr = "Checking if target '{}' needs to be run" sprint(outstr.format(target), level="verbose") if needs_to_run(G, target, in_mem_shas, from_store, settings): if recon: sprint("Would run target: {}".format(target)) continue run_the_target(G, target, settings) node_dict = get_the_node_dict(G, target) if "output" in node_dict: for output in acts.get_all_outputs(node_dict): if output not in dont_update_shas_of: in_mem_shas['files'][output] = {"sha": get_sha(output, settings)} write_shas_to_shastore(in_mem_shas) if "dependencies" in node_dict: for dep in acts.get_all_dependencies(node_dict): if dep not in dont_update_shas_of: in_mem_shas['files'][dep] = {"sha": get_sha(dep, settings)} write_shas_to_shastore(in_mem_shas) if recon: return 0 in_mem_shas = take_shas_of_all_files(G, settings) if in_mem_shas: in_mem_shas = merge_from_store_and_in_mems(from_store, in_mem_shas, dont_update_shas_of) write_shas_to_shastore(in_mem_shas) sprint("Done", color=True) return 0
def get_print_functions(settings): """ This returns the appropriate print functions in a tuple The print function are: - sprint - for standard printing - warn - for warnings - error - for errors This will all be the same if color is False. The returned print functions will contain an optional parameter that specifies the output level (verbose or not). If not verbose, the print function will ignore the message. """ verbose = settings["verbose"] # the regular print doesn't use color by default # (even if color is True) def sprint(message, level=None, color=False): if level=="verbose" and not verbose: return # for colors prepend = "" postfix = "" if settings["color"] and color: prepend = "\033[92m" postfix = "\033[0m" print("{}{}{}".format(prepend, message, postfix)) sys.stdout.flush() def warn(message, level=None, color=True): if level=="verbose" and not verbose: return # for colors prepend = "" postfix = "" if settings["color"] and color: prepend = "\033[93m" postfix = "\033[0m" print("{}{}{}".format(prepend, message, postfix)) sys.stdout.flush() def error(message, level=None, color=True): # this condition does really make any sense but w/e if level=="verbose" and not verbose: return # for colors prepend = "" postfix = "" if settings["color"] and color: prepend = "\033[91m" postfix = "\033[0m" print("{}{}{}".format(prepend, message, postfix), file=sys.stderr) sys.stderr.flush() return sprint, warn, error
def find_standard_sakefile(settings):
    """Returns the filename of the appropriate sakefile"""
    error = settings["error"]
    if settings["customsake"]:
        custom = settings["customsake"]
        if not os.path.isfile(custom):
            error("Specified sakefile '{}' doesn't exist".format(custom))
            sys.exit(1)
        return custom
    # no custom specified, going over defaults in order
    for name in ["Sakefile", "Sakefile.yaml", "Sakefile.yml"]:
        if os.path.isfile(name):
            return name
    error("Error: there is no Sakefile to read")
    sys.exit(1)
def clean_path(a_path, force_os=None, force_start=None):
    """
    This function is used to normalize the path
    (of an output or dependency) and also provide
    the path in relative form. It is relative to the
    current working directory
    """
    if not force_start:
        force_start = os.curdir
    if force_os == "windows":
        import ntpath
        return ntpath.relpath(ntpath.normpath(a_path),
                              start=force_start)
    if force_os == "posix":
        import posixpath
        return posixpath.relpath(posixpath.normpath(a_path),
                                 start=force_start)
    return os.path.relpath(os.path.normpath(a_path),
                           start=force_start)
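The actual normalization above is delegated to the standard library; a minimal sketch of the normpath-then-relpath behaviour on an invented POSIX-style path:

import os

a_path = "./build//output/../output/report.txt"  # hypothetical input
print(os.path.normpath(a_path))
# build/output/report.txt (on POSIX; Windows would use backslashes)
print(os.path.relpath(os.path.normpath(a_path), start=os.curdir))
# build/output/report.txt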
def get_help(sakefile): """ Returns the prettily formatted help strings (for printing) Args: A dictionary that is the parsed Sakefile (from sake.py) NOTE: the list sorting in this function is required for this function to be deterministic """ full_string = "You can 'sake' one of the following...\n\n" errmes = "target '{}' is not allowed to not have help message\n" outerlines = [] for target in sakefile: if target == "all": # this doesn't have a help message continue middle_lines = [] if "formula" not in sakefile[target]: # this means it's a meta-target innerstr = "{}:\n - {}\n\n".format(escp(target), sakefile[target]["help"]) inner = [] for atom_target in sakefile[target]: if atom_target == "help": continue inner.append(" {}:\n - {}\n\n".format(escp(atom_target), sakefile[target][atom_target]["help"])) if inner: innerstr += '\n'.join(sorted(inner)) middle_lines.append(innerstr) else: middle_lines.append("{}:\n - {}\n\n".format(escp(target), sakefile[target]["help"])) if middle_lines: outerlines.append('\n'.join(sorted(middle_lines))) if outerlines: full_string += '\n'.join(sorted(outerlines)) what_clean_does = "remove all targets' outputs and start from scratch" full_string += "\nclean:\n - {}\n\n".format(what_clean_does) what_visual_does = "output visual representation of project's dependencies" full_string += "visual:\n - {}\n".format(what_visual_does) full_string = re.sub("\n{3,}", "\n\n", full_string) return full_string
def parse_defines(args):
    """
    This parses a list of define arguments in the form of
    -DNAME=VALUE or -DNAME (which is treated as -DNAME=1).
    """
    macros = {}
    for arg in args:
        try:
            var, val = arg.split('=', 1)
        except ValueError:
            var = arg
            val = '1'
        macros[var] = val
    return macros
def expand_macros(raw_text, macros): """ this gets called before the sakefile is parsed. it looks for macros defined anywhere in the sakefile (the start of the line is '#!') and then replaces all occurences of '$variable' with the value defined in the macro. it then returns the contents of the file with the macros expanded. """ includes = {} result = [] pattern = re.compile("#!\s*(\w+)\s*(?:(\??\s*)=\s*(.*$)|or\s*(.*))", re.UNICODE) ipattern = re.compile("#<\s*(\S+)\s*(optional|or\s+(.+))?$", re.UNICODE) for line in raw_text.split("\n"): line = string.Template(line).safe_substitute(macros) # note that the line is appended to result before it is checked for macros # this prevents macros expanding into themselves result.append(line) if line.startswith("#!"): match = pattern.match(line) try: var, opt, val, or_ = match.group(1, 2, 3, 4) except: raise InvalidMacroError("Failed to parse macro {}\n".format(line)) if or_: if var not in macros: raise InvalidMacroError("Macro {} is not defined: {}\n".format(var, or_)) elif not (opt and var in macros): macros[var] = val elif line.startswith("#<"): match = ipattern.match(line) try: filename = match.group(1) except: error("Failed to parse include {}\n".format(line)) sys.exit(1) try: with io.open(filename, 'r') as f: includes[filename] = expand_macros(f.read(), macros) except IOError: if match.group(2): if match.group(2).startswith('or '): sprint(match.group(3)) else: error("Nonexistent include {}\n".format(filename)) sys.exit(1) return "\n".join(result), includes
def check_for_dep_in_outputs(dep, verbose, G):
    """
    Function to help construct_graph() identify dependencies

    Args:
        A dependency
        A flag indicating verbosity
        A (populated) NetworkX DiGraph

    Returns:
        A list of targets that build given dependency
    """
    if verbose:
        print("checking dep {}".format(dep))
    ret_list = []
    for node in G.nodes(data=True):
        if "output" not in node[1]:
            continue
        for out in node[1]['output']:
            if fnmatch.fnmatch(out, dep):
                ret_list.append(node[0])
                break
    return ret_list
def get_ties(G): """ If you specify a target that shares a dependency with another target, both targets need to be updated. This is because running one will resolve the sha mismatch and sake will think that the other one doesn't have to run. This is called a "tie". This function will find such ties. """ # we are going to make a dictionary whose keys are every dependency # and whose values are a list of all targets that use that dependency. # after making the dictionary, values whose length is above one will # be called "ties" ties = [] dep_dict = {} for node in G.nodes(data=True): if 'dependencies' in node[1]: for item in node[1]['dependencies']: if item not in dep_dict: dep_dict[item] = [] dep_dict[item].append(node[0]) for item in dep_dict: if len(list(set(dep_dict[item]))) > 1: ties.append(list(set(dep_dict[item]))) return ties
def get_tied_targets(original_targets, the_ties): """ This function gets called when a target is specified to ensure that all 'tied' targets also get included in the subgraph to be built """ my_ties = [] for original_target in original_targets: for item in the_ties: if original_target in item: for thing in item: my_ties.append(thing) my_ties = list(set(my_ties)) if my_ties: ties_message = "" ties_message += "The following targets share dependencies and must be run together:" for item in sorted(my_ties): ties_message += "\n - {}".format(item) return list(set(my_ties+original_targets)), ties_message return original_targets, ""
def construct_graph(sakefile, settings): """ Takes the sakefile dictionary and builds a NetworkX graph Args: A dictionary that is the parsed Sakefile (from sake.py) The settings dictionary Returns: A NetworkX graph """ verbose = settings["verbose"] sprint = settings["sprint"] G = nx.DiGraph() sprint("Going to construct Graph", level="verbose") for target in sakefile: if target == "all": # we don't want this node continue if "formula" not in sakefile[target]: # that means this is a meta target for atomtarget in sakefile[target]: if atomtarget == "help": continue sprint("Adding '{}'".format(atomtarget), level="verbose") data_dict = sakefile[target][atomtarget] data_dict["parent"] = target G.add_node(atomtarget, **data_dict) else: sprint("Adding '{}'".format(target), level="verbose") G.add_node(target, **sakefile[target]) sprint("Nodes are built\nBuilding connections", level="verbose") for node in G.nodes(data=True): sprint("checking node {} for dependencies".format(node[0]), level="verbose") # normalize all paths in output for k, v in node[1].items(): if v is None: node[1][k] = [] if "output" in node[1]: for index, out in enumerate(node[1]['output']): node[1]['output'][index] = clean_path(node[1]['output'][index]) if "dependencies" not in node[1]: continue sprint("it has dependencies", level="verbose") connects = [] # normalize all paths in dependencies for index, dep in enumerate(node[1]['dependencies']): dep = os.path.normpath(dep) shrt = "dependencies" node[1]['dependencies'][index] = clean_path(node[1][shrt][index]) for node in G.nodes(data=True): connects = [] if "dependencies" not in node[1]: continue for dep in node[1]['dependencies']: matches = check_for_dep_in_outputs(dep, verbose, G) if not matches: continue for match in matches: sprint("Appending {} to matches".format(match), level="verbose") connects.append(match) if connects: for connect in connects: G.add_edge(connect, node[0]) return G
def get_all_outputs(node_dict):
    """
    This function takes a node dictionary and returns a list
    of the node's output files. Some of the entries in the
    'output' attribute may be globs, and without this function,
    sake won't know how to handle that. This will unglob
    all globs and return the true list of *all* outputs.
    """
    outlist = []
    for item in node_dict['output']:
        glist = glob.glob(item)
        if glist:
            for oneglob in glist:
                outlist.append(oneglob)
        else:
            outlist.append(item)
    return outlist
def get_all_dependencies(node_dict): """ ............................... """ deplist = [] for item in node_dict['dependencies']: glist = glob.glob(item) if glist: for oneglob in glist: deplist.append(oneglob) else: deplist.append(item) return deplist
def clean_all(G, settings):
    """
    Removes all the output files from all targets.

    Args:
        The networkx graph object
        The settings dictionary

    Returns:
        0 if successful
        1 if removing even one file failed
    """
    quiet = settings["quiet"]
    recon = settings["recon"]
    sprint = settings["sprint"]
    error = settings["error"]
    all_outputs = []
    for node in G.nodes(data=True):
        if "output" in node[1]:
            for item in get_all_outputs(node[1]):
                all_outputs.append(item)
    all_outputs.append(".shastore")
    retcode = 0
    for item in sorted(all_outputs):
        if os.path.isfile(item):
            if recon:
                sprint("Would remove file: {}".format(item))
                continue
            sprint("Attempting to remove file '{}'".format(item),
                   level="verbose")
            try:
                os.remove(item)
                sprint("Removed file", level="verbose")
            except:
                errmes = "Error: file '{}' failed to be removed"
                error(errmes.format(item))
                retcode = 1
    if not retcode and not recon:
        sprint("All clean", color=True)
    return retcode
def write_dot_file(G, filename): """ Writes the graph G in dot file format for graphviz visualization. Args: a Networkx graph A filename to name the dot files """ with io.open(filename, "w") as fh: fh.write("strict digraph DependencyDiagram {\n") edge_list = G.edges() node_list = set(G.nodes()) if edge_list: for edge in sorted(edge_list): source, targ = edge node_list = node_list - set(source) node_list = node_list - set(targ) line = '"{}" -> "{}";\n' fh.write(line.format(source, targ)) # draw nodes with no links if node_list: for node in sorted(node_list): line = '"{}"\n'.format(node) fh.write(line) fh.write("}")
def visualize(G, settings, filename="dependencies", no_graphviz=False):
    """
    Uses networkX to draw a graphviz dot file and either (a) calls the
    graphviz command "dot" to turn it into an SVG and removes the
    dotfile (default), or (b) if no_graphviz is True, just outputs
    the graphviz dot file

    Args:
        a NetworkX DiGraph
        the settings dictionary
        a filename (a default is provided)
        a flag indicating whether graphviz should *not* be called

    Returns:
        0 if everything worked
        will cause fatal error on failure
    """
    error = settings["error"]
    if no_graphviz:
        write_dot_file(G, filename)
        return 0
    write_dot_file(G, "tempdot")
    renderer = "svg"
    if re.search(r"\.jpg$", filename, re.IGNORECASE):
        renderer = "jpg"
    elif re.search(r"\.jpeg$", filename, re.IGNORECASE):
        renderer = "jpg"
    elif re.search(r"\.svg$", filename, re.IGNORECASE):
        renderer = "svg"
    elif re.search(r"\.png$", filename, re.IGNORECASE):
        renderer = "png"
    elif re.search(r"\.gif$", filename, re.IGNORECASE):
        renderer = "gif"
    elif re.search(r"\.ps$", filename, re.IGNORECASE):
        renderer = "ps"
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        renderer = "pdf"
    else:
        renderer = "svg"
        filename += ".svg"
    command = "dot -T{} tempdot -o {}".format(renderer, filename)
    p = Popen(command, shell=True)
    p.communicate()
    if p.returncode:
        errmes = "Either graphviz is not installed, or it's not on PATH"
        os.remove("tempdot")
        error(errmes)
        sys.exit(1)
    os.remove("tempdot")
    return 0
def itertable(table):
    """Auxiliary function for iterating over a data table."""
    for item in table:
        res = {
            k.lower(): nfd(v) if isinstance(v, text_type) else v
            for k, v in item.items()}
        for extra in res.pop('extra', []):
            k, _, v = extra.partition(':')
            res[k.strip()] = v.strip()
        yield res
def _make_package(args): # pragma: no cover """Prepare transcriptiondata from the transcription sources.""" from lingpy.sequence.sound_classes import token2class from lingpy.data import Model columns = ['LATEX', 'FEATURES', 'SOUND', 'IMAGE', 'COUNT', 'NOTE'] bipa = TranscriptionSystem('bipa') for src, rows in args.repos.iter_sources(type='td'): args.log.info('TranscriptionData {0} ...'.format(src['NAME'])) uritemplate = URITemplate(src['URITEMPLATE']) if src['URITEMPLATE'] else None out = [['BIPA_GRAPHEME', 'CLTS_NAME', 'GENERATED', 'EXPLICIT', 'GRAPHEME', 'URL'] + columns] graphemes = set() for row in rows: if row['GRAPHEME'] in graphemes: args.log.warn('skipping duplicate grapheme: {0}'.format(row['GRAPHEME'])) continue graphemes.add(row['GRAPHEME']) if not row['BIPA']: bipa_sound = bipa[row['GRAPHEME']] explicit = '' else: bipa_sound = bipa[row['BIPA']] explicit = '+' generated = '+' if bipa_sound.generated else '' if is_valid_sound(bipa_sound, bipa): bipa_grapheme = bipa_sound.s bipa_name = bipa_sound.name else: bipa_grapheme, bipa_name = '<NA>', '<NA>' url = uritemplate.expand(**row) if uritemplate else row.get('URL', '') out.append( [bipa_grapheme, bipa_name, generated, explicit, row['GRAPHEME'], url] + [ row.get(c, '') for c in columns]) found = len([o for o in out if o[0] != '<NA>']) args.log.info('... {0} of {1} graphemes found ({2:.0f}%)'.format( found, len(out), found / len(out) * 100)) with UnicodeWriter( pkg_path('transcriptiondata', '{0}.tsv'.format(src['NAME'])), delimiter='\t' ) as writer: writer.writerows(out) count = 0 with UnicodeWriter(pkg_path('soundclasses', 'lingpy.tsv'), delimiter='\t') as writer: writer.writerow(['CLTS_NAME', 'BIPA_GRAPHEME'] + SOUNDCLASS_SYSTEMS) for grapheme, sound in sorted(bipa.sounds.items()): if not sound.alias: writer.writerow( [sound.name, grapheme] + [token2class( grapheme, Model(cls)) for cls in SOUNDCLASS_SYSTEMS]) count += 1 args.log.info('SoundClasses: {0} written to file.'.format(count))
def is_valid_sound(sound, ts):
    """Check the consistency of a given transcription system conversion"""
    if isinstance(sound, (Marker, UnknownSound)):
        return False
    s1 = ts[sound.name]
    s2 = ts[sound.s]
    return s1.name == s2.name and s1.s == s2.s
def resolve_sound(self, sound):
    """Function tries to identify a sound in the data.

    Notes
    -----
    The function tries to resolve sounds to take a sound with less complex
    features in order to yield the next approximate sound class, if the
    transcription data are sound classes.
    """
    sound = sound if isinstance(sound, Sound) else self.system[sound]
    if sound.name in self.data:
        return '//'.join([x['grapheme'] for x in self.data[sound.name]])
    raise KeyError(":td:resolve_sound: No sound could be found.")
def _norm(self, string):
    """Extended normalization: normalize by list of norm-characters, split
    by character "/"."""
    nstring = norm(string)
    if "/" in string:
        s, t = string.split('/')
        nstring = t
    return self.normalize(nstring)
def normalize(self, string):
    """Normalize the string according to normalization list"""
    return ''.join([self._normalize.get(x, x) for x in nfd(string)])
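nfd here presumably wraps Unicode NFD decomposition; a standalone illustration of that step using only the standard library (the per-character mapping then operates on the decomposed code points):

import unicodedata

s = "é"  # one precomposed code point, U+00E9
decomposed = unicodedata.normalize("NFD", s)
print(len(s), len(decomposed))  # 1 2 -- 'e' followed by a combining acute accent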
def _from_name(self, string): """Parse a sound from its name""" components = string.split(' ') if frozenset(components) in self.features: return self.features[frozenset(components)] rest, sound_class = components[:-1], components[-1] if sound_class in ['diphthong', 'cluster']: if string.startswith('from ') and 'to ' in string: extension = {'diphthong': 'vowel', 'cluster': 'consonant'}[sound_class] string_ = ' '.join(string.split(' ')[1:-1]) from_, to_ = string_.split(' to ') v1, v2 = frozenset(from_.split(' ') + [extension]), frozenset( to_.split(' ') + [extension]) if v1 in self.features and v2 in self.features: s1, s2 = (self.features[v1], self.features[v2]) if sound_class == 'diphthong': return Diphthong.from_sounds(s1 + s2, s1, s2, self) # noqa: F405 else: return Cluster.from_sounds(s1 + s2, s1, s2, self) # noqa: F405 else: # try to generate the sounds if they are not there s1, s2 = self._from_name(from_ + ' ' + extension), self._from_name( to_ + ' ' + extension) if not (isinstance( s1, UnknownSound) or isinstance(s2, UnknownSound)): # noqa: F405 if sound_class == 'diphthong': return Diphthong.from_sounds( # noqa: F405 s1 + s2, s1, s2, self) return Cluster.from_sounds(s1 + s2, s1, s2, self) # noqa: F405 raise ValueError('components could not be found in system') else: raise ValueError('name string is erroneously encoded') if sound_class not in self.sound_classes: raise ValueError('no sound class specified') args = {self._feature_values.get(comp, '?'): comp for comp in rest} if '?' in args: raise ValueError('string contains unknown features') args['grapheme'] = '' args['ts'] = self sound = self.sound_classes[sound_class](**args) if sound.featureset not in self.features: sound.generated = True return sound return self.features[sound.featureset]
def _parse(self, string): """Parse a string and return its features. :param string: A one-symbol string in NFD Notes ----- Strategy is rather simple: we determine the base part of a string and then search left and right of this part for the additional features as expressed by the diacritics. Fails if a segment has more than one basic part. """ nstring = self._norm(string) # check whether sound is in self.sounds if nstring in self.sounds: sound = self.sounds[nstring] sound.normalized = nstring != string sound.source = string return sound match = list(self._regex.finditer(nstring)) # if the match has length 2, we assume that we have two sounds, so we split # the sound and pass it on for separate evaluation (recursive function) if len(match) == 2: sound1 = self._parse(nstring[:match[1].start()]) sound2 = self._parse(nstring[match[1].start():]) # if we have ANY unknown sound, we mark the whole sound as unknown, if # we have two known sounds of the same type (vowel or consonant), we # either construct a diphthong or a cluster if 'unknownsound' not in (sound1.type, sound2.type) and \ sound1.type == sound2.type: # diphthong creation if sound1.type == 'vowel': return Diphthong.from_sounds( # noqa: F405 string, sound1, sound2, self) elif sound1.type == 'consonant' and \ sound1.manner in ('stop', 'implosive', 'click', 'nasal') and \ sound2.manner in ('stop', 'implosive', 'affricate', 'fricative'): return Cluster.from_sounds( # noqa: F405 string, sound1, sound2, self) return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 if len(match) != 1: # Either no match or more than one; both is considered an error. return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 pre, mid, post = nstring.partition(nstring[match[0].start():match[0].end()]) base_sound = self.sounds[mid] if isinstance(base_sound, Marker): # noqa: F405 assert pre or post return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 # A base sound with diacritics or a custom symbol. features = attr.asdict(base_sound) features.update( source=string, generated=True, normalized=nstring != string, base=base_sound.grapheme) # we construct two versions: the "normal" version and the version where # we search for aliases and normalize them (as our features system for # diacritics may well define aliases grapheme, sound = '', '' for dia in [p + EMPTY for p in pre]: feature = self.diacritics[base_sound.type].get(dia, {}) if not feature: return UnknownSound( # noqa: F405 grapheme=nstring, source=string, ts=self) features[self._feature_values[feature]] = feature # we add the unaliased version to the grapheme grapheme += dia[0] # we add the corrected version (if this is needed) to the sound sound += self.features[base_sound.type][feature][0] # add the base sound grapheme += base_sound.grapheme sound += base_sound.s for dia in [EMPTY + p for p in post]: feature = self.diacritics[base_sound.type].get(dia, {}) # we are strict: if we don't know the feature, it's an unknown # sound if not feature: return UnknownSound( # noqa: F405 grapheme=nstring, source=string, ts=self) features[self._feature_values[feature]] = feature grapheme += dia[1] sound += self.features[base_sound.type][feature][1] features['grapheme'] = sound new_sound = self.sound_classes[base_sound.type](**features) # check whether grapheme differs from re-generated sound if text_type(new_sound) != sound: new_sound.alias = True if grapheme != sound: new_sound.alias = True new_sound.grapheme = grapheme return new_sound
def resolve_sound(self, sound): """Function tries to identify a sound in the data. Notes ----- The function tries to resolve sounds to take a sound with less complex features in order to yield the next approximate sound class, if the transcription data are sound classes. """ sound = sound if isinstance(sound, Symbol) else self.system[sound] if sound.name in self.data: return self.data[sound.name]['grapheme'] if not sound.type == 'unknownsound': if sound.type in ['diphthong', 'cluster']: return self.resolve_sound(sound.from_sound) name = [ s for s in sound.name.split(' ') if self.system._feature_values.get(s, '') not in ['laminality', 'ejection', 'tone']] while len(name) >= 4: sound = self.system.get(' '.join(name)) if sound and sound.name in self.data: return self.resolve_sound(sound) name.pop(0) raise KeyError(":sc:resolve_sound: No sound could be found.")
def ipfn_np(self, m, aggregates, dimensions, weight_col='total'): """ Runs the ipfn method from a matrix m, aggregates/marginals and the dimension(s) preserved. For example: from ipfn import ipfn import numpy as np m = np.array([[8., 4., 6., 7.], [3., 6., 5., 2.], [9., 11., 3., 1.]], ) xip = np.array([20., 18., 22.]) xpj = np.array([18., 16., 12., 14.]) aggregates = [xip, xpj] dimensions = [[0], [1]] IPF = ipfn(m, aggregates, dimensions) m = IPF.iteration() """ steps = len(aggregates) dim = len(m.shape) product_elem = [] tables = [m] # TODO: do we need to persist all these dataframe? Or maybe we just need to persist the table_update and table_current # and then update the table_current to the table_update to the latest we have. And create an empty zero dataframe for table_update (Evelyn) for inc in range(steps - 1): tables.append(np.array(np.zeros(m.shape))) original = copy.copy(m) # Calculate the new weights for each dimension for inc in range(steps): if inc == (steps - 1): table_update = m table_current = tables[inc] else: table_update = tables[inc + 1] table_current = tables[inc] for dimension in dimensions[inc]: product_elem.append(range(m.shape[dimension])) for item in product(*product_elem): idx = self.index_axis_elem(dim, dimensions[inc], item) table_current_slice = table_current[idx] mijk = table_current_slice.sum() # TODO: Directly put it as xijk = aggregates[inc][item] (Evelyn) xijk = aggregates[inc] xijk = xijk[item] if mijk == 0: # table_current_slice += 1e-5 # TODO: Basically, this part would remain 0 as always right? Cause if the sum of the slice is zero, then we only have zeros in this slice. # TODO: you could put it as table_update[idx] = table_current_slice (since multiplication on zero is still zero) table_update[idx] = table_current_slice else: # TODO: when inc == steps - 1, this part is also directly updating the dataframe m (Evelyn) # If we are not going to persist every table generated, we could still keep this part to directly update dataframe m table_update[idx] = table_current_slice * 1.0 * xijk / mijk # For debug purposes # if np.isnan(table_update).any(): # print(idx) # sys.exit(0) product_elem = [] # Check the convergence rate for each dimension max_conv = 0 for inc in range(steps): # TODO: this part already generated before, we could somehow persist it. But it's not important (Evelyn) for dimension in dimensions[inc]: product_elem.append(range(m.shape[dimension])) for item in product(*product_elem): idx = self.index_axis_elem(dim, dimensions[inc], item) ori_ijk = aggregates[inc][item] m_slice = m[idx] m_ijk = m_slice.sum() # print('Current vs original', abs(m_ijk/ori_ijk - 1)) if abs(m_ijk / ori_ijk - 1) > max_conv: max_conv = abs(m_ijk / ori_ijk - 1) product_elem = [] return m, max_conv
def ipfn_df(self, df, aggregates, dimensions, weight_col='total'): """ Runs the ipfn method from a dataframe df, aggregates/marginals and the dimension(s) preserved. For example: from ipfn import ipfn import pandas as pd age = [30, 30, 30, 30, 40, 40, 40, 40, 50, 50, 50, 50] distance = [10,20,30,40,10,20,30,40,10,20,30,40] m = [8., 4., 6., 7., 3., 6., 5., 2., 9., 11., 3., 1.] df = pd.DataFrame() df['age'] = age df['distance'] = distance df['total'] = m xip = df.groupby('age')['total'].sum() xip.loc[30] = 20 xip.loc[40] = 18 xip.loc[50] = 22 xpj = df.groupby('distance')['total'].sum() xpj.loc[10] = 18 xpj.loc[20] = 16 xpj.loc[30] = 12 xpj.loc[40] = 14 dimensions = [['age'], ['distance']] aggregates = [xip, xpj] IPF = ipfn(df, aggregates, dimensions) df = IPF.iteration() print(df) print(df.groupby('age')['total'].sum(), xip)""" steps = len(aggregates) tables = [df] for inc in range(steps - 1): tables.append(df.copy()) original = df.copy() # Calculate the new weights for each dimension inc = 0 for features in dimensions: if inc == (steps - 1): table_update = df table_current = tables[inc] else: table_update = tables[inc + 1] table_current = tables[inc] tmp = table_current.groupby(features)[weight_col].sum() xijk = aggregates[inc] feat_l = [] for feature in features: feat_l.append(np.unique(table_current[feature])) table_update.set_index(features, inplace=True) table_current.set_index(features, inplace=True) for feature in product(*feat_l): den = tmp.loc[feature] # calculate new weight for this iteration if den == 0: table_update.loc[feature, weight_col] =\ table_current.loc[feature, weight_col] *\ xijk.loc[feature] else: table_update.loc[feature, weight_col] = \ table_current.loc[feature, weight_col].astype(float) * \ xijk.loc[feature] / den table_update.reset_index(inplace=True) table_current.reset_index(inplace=True) inc += 1 feat_l = [] # Calculate the max convergence rate max_conv = 0 inc = 0 for features in dimensions: tmp = df.groupby(features)[weight_col].sum() ori_ijk = aggregates[inc] temp_conv = max(abs(tmp / ori_ijk - 1)) if temp_conv > max_conv: max_conv = temp_conv inc += 1 return df, max_conv
def iteration(self): """ Runs the ipfn algorithm. Automatically detects of working with numpy ndarray or pandas dataframes. """ i = 0 conv = np.inf old_conv = -np.inf conv_list = [] m = self.original # If the original data input is in pandas DataFrame format if isinstance(self.original, pd.DataFrame): ipfn_method = self.ipfn_df elif isinstance(self.original, np.ndarray): ipfn_method = self.ipfn_np self.original = self.original.astype('float64') else: print('Data input instance not recognized') sys.exit(0) while ((i <= self.max_itr and conv > self.conv_rate) and (i <= self.max_itr and abs(conv - old_conv) > self.rate_tolerance)): old_conv = conv m, conv = ipfn_method(m, self.aggregates, self.dimensions, self.weight_col) conv_list.append(conv) i += 1 converged = 1 if i <= self.max_itr: if not conv > self.conv_rate: print('ipfn converged: convergence_rate below threshold') elif not abs(conv - old_conv) > self.rate_tolerance: print('ipfn converged: convergence_rate not updating or below rate_tolerance') else: print('Maximum iterations reached') converged = 0 # Handle the verbose if self.verbose == 0: return m elif self.verbose == 1: return m, converged elif self.verbose == 2: return m, converged, pd.DataFrame({'iteration': range(i), 'conv': conv_list}).set_index('iteration') else: print('wrong verbose input, return None') sys.exit(0)
def render(self, obj):
    """
    Render link as HTML output tag <a>.
    """
    self.obj = obj
    attrs = ' '.join([
        '%s="%s"' % (attr_name, attr.resolve(obj))
        if isinstance(attr, Accessor)
        else '%s="%s"' % (attr_name, attr)
        for attr_name, attr in self.attrs.items()
    ])
    return mark_safe(u'<a %s>%s</a>' % (attrs, self.text))
def resolve(self, context, quiet=True): """ Return an object described by the accessor by traversing the attributes of context. """ try: obj = context for level in self.levels: if isinstance(obj, dict): obj = obj[level] elif isinstance(obj, list) or isinstance(obj, tuple): obj = obj[int(level)] else: if callable(getattr(obj, level)): try: obj = getattr(obj, level)() except KeyError: obj = getattr(obj, level) else: # for model field that has choice set # use get_xxx_display to access display = 'get_%s_display' % level obj = getattr(obj, display)() if hasattr(obj, display) else getattr(obj, level) if not obj: break return obj except Exception as e: if quiet: return '' else: raise e
def header_rows(self): """ [ [header1], [header3, header4] ] """ # TO BE FIX: refactor header_rows = [] headers = [col.header for col in self.columns] for header in headers: if len(header_rows) <= header.row_order: header_rows.append([]) header_rows[header.row_order].append(header) return header_rows
def get_context_data(self, **kwargs): """ Get context data for datatable server-side response. See http://www.datatables.net/usage/server-side """ sEcho = self.query_data["sEcho"] context = super(BaseListView, self).get_context_data(**kwargs) queryset = context["object_list"] if queryset is not None: total_length = self.get_queryset_length(queryset) queryset = self.filter_queryset(queryset) display_length = self.get_queryset_length(queryset) queryset = self.sort_queryset(queryset) queryset = self.paging_queryset(queryset) values_list = self.convert_queryset_to_values_list(queryset) context = { "sEcho": sEcho, "iTotalRecords": total_length, "iTotalDisplayRecords": display_length, "aaData": values_list, } else: context = { "sEcho": sEcho, "iTotalRecords": 0, "iTotalDisplayRecords": 0, "aaData": [], } return context
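# For reference, the legacy DataTables 1.9 server-side protocol expects a
# response shaped like the context built above (values below are made up):
example_context = {
    "sEcho": "3",                # echoed back so the client can order draws
    "iTotalRecords": 57,         # row count before filtering
    "iTotalDisplayRecords": 12,  # row count after filtering
    "aaData": [                  # one entry per row, already sorted and paged
        ["Alice", "alice@example.com"],
        ["Bob", "bob@example.com"],
    ],
}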
def get_days_span(self, month_index): """ Calculate how many days the month spans. """ is_first_month = month_index == 0 is_last_month = month_index == self.__len__() - 1 y = int(self.start_date.year + (self.start_date.month + month_index) / 13) m = int((self.start_date.month + month_index) % 12 or 12) total = calendar.monthrange(y, m)[1] if is_first_month and is_last_month: return (self.end_date - self.start_date).days + 1 else: if is_first_month: return total - self.start_date.day + 1 elif is_last_month: return self.end_date.day else: return total
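# Worked example of the span arithmetic above (dates are illustrative only):
# calendar.monthrange(year, month)[1] gives the number of days in that month.
import calendar
assert calendar.monthrange(2024, 2) == (3, 29)  # leap-year February: 29 days
# If the chart starts on 2024-02-20 and February is the first (but not last)
# month, the span is total - start_day + 1 = 29 - 20 + 1 = 10 days.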
def _calculate_float(self, byte_array): """Returns an IEEE 754 float from an array of 4 bytes :param byte_array: Expects an array of 4 bytes :type byte_array: array :rtype: float """ if len(byte_array) != 4: return None return struct.unpack('f', struct.pack('4B', *byte_array))[0]
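# Quick sanity check of the 4-byte-to-float conversion above. struct uses the
# host's native byte order here, so this assumes a little-endian host (e.g. a
# Raspberry Pi): 0x3F800000 is 1.0 in IEEE 754 single precision.
import struct
assert struct.unpack('f', struct.pack('4B', 0x00, 0x00, 0x80, 0x3F))[0] == 1.0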
def _calculate_period(self, vals): ''' calculate the sampling period in seconds ''' if len(vals) < 4: return None if self.firmware['major'] < 16: return ((vals[3] << 24) | (vals[2] << 16) | (vals[1] << 8) | vals[0]) / 12e6 else: return self._calculate_float(vals)
def wait(self, **kwargs):
    """Wait for the OPC to prepare itself for data transmission. On some devices
    this can take a few seconds.

    :rtype: self

    :Example:

    >>> alpha = opc.OPCN2(spi, debug=True).wait(check=200)
    >>> alpha = opc.OPCN2(spi, debug=True, wait=True, check=200)
    """
    if not callable(self.on):
        raise UserWarning('Your device does not support the self.on function, try without wait')
    if not callable(self.histogram):
        raise UserWarning('Your device does not support the self.histogram function, try without wait')

    self.on()
    while True:
        try:
            if self.histogram() is None:
                raise UserWarning('Could not load histogram, perhaps the device is not yet connected')
        except UserWarning:
            # Not ready yet: wait `check` milliseconds and try again
            sleep(kwargs.get('check', 200) / 1000.)
            continue

        return self
def calculate_bin_boundary(self, bb): """Calculate the adc value that corresponds to a specific bin boundary diameter in microns. :param bb: Bin Boundary in microns :type bb: float :rtype: int """ return min(enumerate(OPC_LOOKUP), key = lambda x: abs(x[1] - bb))[0]
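# Illustration of the nearest-value lookup above with a made-up table; in the
# real module OPC_LOOKUP maps ADC values (indexes) to bin-boundary diameters.
SAMPLE_LOOKUP = [0.38, 0.54, 0.78, 1.05, 1.34, 1.59]
nearest = min(enumerate(SAMPLE_LOOKUP), key=lambda x: abs(x[1] - 1.0))[0]
assert nearest == 3  # 1.05 um is the closest entry to 1.0 um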
def read_info_string(self): """Reads the information string for the OPC :rtype: string :Example: >>> alpha.read_info_string() 'OPC-N2 FirmwareVer=OPC-018.2....................BD' """ infostring = [] # Send the command byte and sleep for 9 ms self.cnxn.xfer([0x3F]) sleep(9e-3) # Read the info string by sending 60 empty bytes for i in range(60): resp = self.cnxn.xfer([0x00])[0] infostring.append(chr(resp)) sleep(0.1) return ''.join(infostring)
def ping(self): """Checks the connection between the Raspberry Pi and the OPC :rtype: Boolean """ b = self.cnxn.xfer([0xCF])[0] # send the command byte sleep(0.1) return True if b == 0xF3 else False
def on(self):
    """Turn ON the OPC (fan and laser)

    :rtype: boolean

    :Example:

    >>> alpha.on()
    True
    """
    b1 = self.cnxn.xfer([0x03])[0]          # send the command byte
    sleep(9e-3)                             # sleep for 9 ms

    b2, b3 = self.cnxn.xfer([0x00, 0x01])   # send the following two bytes

    sleep(0.1)

    return True if b1 == 0xF3 and b2 == 0x03 else False
def off(self):
    """Turn OFF the OPC (fan and laser)

    :rtype: boolean

    :Example:

    >>> alpha.off()
    True
    """
    b1 = self.cnxn.xfer([0x03])[0]   # send the command byte
    sleep(9e-3)                      # sleep for 9 ms

    b2 = self.cnxn.xfer([0x01])[0]   # send the following byte

    sleep(0.1)

    return True if b1 == 0xF3 and b2 == 0x03 else False
def config(self): """Read the configuration variables and returns them as a dictionary :rtype: dictionary :Example: >>> alpha.config() { 'BPD 13': 1.6499, 'BPD 12': 1.6499, 'BPD 11': 1.6499, 'BPD 10': 1.6499, 'BPD 15': 1.6499, 'BPD 14': 1.6499, 'BSVW 15': 1.0, ... } """ config = [] data = {} # Send the command byte and sleep for 10 ms self.cnxn.xfer([0x3C]) sleep(10e-3) # Read the config variables by sending 256 empty bytes for i in range(256): resp = self.cnxn.xfer([0x00])[0] config.append(resp) # Add the bin bounds to the dictionary of data [bytes 0-29] for i in range(0, 15): data["Bin Boundary {0}".format(i)] = self._16bit_unsigned(config[2*i], config[2*i + 1]) # Add the Bin Particle Volumes (BPV) [bytes 32-95] for i in range(0, 16): data["BPV {0}".format(i)] = self._calculate_float(config[4*i + 32:4*i + 36]) # Add the Bin Particle Densities (BPD) [bytes 96-159] for i in range(0, 16): data["BPD {0}".format(i)] = self._calculate_float(config[4*i + 96:4*i + 100]) # Add the Bin Sample Volume Weight (BSVW) [bytes 160-223] for i in range(0, 16): data["BSVW {0}".format(i)] = self._calculate_float(config[4*i + 160: 4*i + 164]) # Add the Gain Scaling Coefficient (GSC) and sample flow rate (SFR) data["GSC"] = self._calculate_float(config[224:228]) data["SFR"] = self._calculate_float(config[228:232]) # Add laser dac (LDAC) and Fan dac (FanDAC) data["LaserDAC"] = config[232] data["FanDAC"] = config[233] # If past firmware 15, add other things if self.firmware['major'] > 15.: data['TOF_SFR'] = config[234] sleep(0.1) return data
def config2(self):
    """Read the second set of configuration variables and return as a dictionary.

    **NOTE: This method is supported by firmware v18+.**

    :rtype: dictionary

    :Example:

    >>> a.config2()
    {
        'AMFanOnIdle': 0,
        'AMIdleIntervalCount': 0,
        'AMMaxDataArraysInFile': 61798,
        'AMSamplingInterval': 1,
        'AMOnlySavePMData': 0,
        'AMLaserOnIdle': 0
    }
    """
    config = []
    data = {}

    # Send the command byte and sleep for 10 ms
    self.cnxn.xfer([0x3D])
    sleep(10e-3)

    # Read the config variables by sending 9 empty bytes
    for i in range(9):
        resp = self.cnxn.xfer([0x00])[0]
        config.append(resp)

    data["AMSamplingInterval"] = self._16bit_unsigned(config[0], config[1])
    data["AMIdleIntervalCount"] = self._16bit_unsigned(config[2], config[3])
    data['AMFanOnIdle'] = config[4]
    data['AMLaserOnIdle'] = config[5]
    data['AMMaxDataArraysInFile'] = self._16bit_unsigned(config[6], config[7])
    data['AMOnlySavePMData'] = config[8]

    sleep(0.1)

    return data
def histogram(self, number_concentration=True): """Read and reset the histogram. As of v1.3.0, histogram values are reported in particle number concentration (#/cc) by default. :param number_concentration: If true, histogram bins are reported in number concentration vs. raw values. :type number_concentration: boolean :rtype: dictionary :Example: >>> alpha.histogram() { 'Temperature': None, 'Pressure': None, 'Bin 0': 0, 'Bin 1': 0, 'Bin 2': 0, ... 'Bin 15': 0, 'SFR': 3.700, 'Bin1MToF': 0, 'Bin3MToF': 0, 'Bin5MToF': 0, 'Bin7MToF': 0, 'PM1': 0.0, 'PM2.5': 0.0, 'PM10': 0.0, 'Sampling Period': 2.345, 'Checksum': 0 } """ resp = [] data = {} # Send the command byte self.cnxn.xfer([0x30]) # Wait 10 ms sleep(10e-3) # read the histogram for i in range(62): r = self.cnxn.xfer([0x00])[0] resp.append(r) # convert to real things and store in dictionary! data['Bin 0'] = self._16bit_unsigned(resp[0], resp[1]) data['Bin 1'] = self._16bit_unsigned(resp[2], resp[3]) data['Bin 2'] = self._16bit_unsigned(resp[4], resp[5]) data['Bin 3'] = self._16bit_unsigned(resp[6], resp[7]) data['Bin 4'] = self._16bit_unsigned(resp[8], resp[9]) data['Bin 5'] = self._16bit_unsigned(resp[10], resp[11]) data['Bin 6'] = self._16bit_unsigned(resp[12], resp[13]) data['Bin 7'] = self._16bit_unsigned(resp[14], resp[15]) data['Bin 8'] = self._16bit_unsigned(resp[16], resp[17]) data['Bin 9'] = self._16bit_unsigned(resp[18], resp[19]) data['Bin 10'] = self._16bit_unsigned(resp[20], resp[21]) data['Bin 11'] = self._16bit_unsigned(resp[22], resp[23]) data['Bin 12'] = self._16bit_unsigned(resp[24], resp[25]) data['Bin 13'] = self._16bit_unsigned(resp[26], resp[27]) data['Bin 14'] = self._16bit_unsigned(resp[28], resp[29]) data['Bin 15'] = self._16bit_unsigned(resp[30], resp[31]) data['Bin1 MToF'] = self._calculate_mtof(resp[32]) data['Bin3 MToF'] = self._calculate_mtof(resp[33]) data['Bin5 MToF'] = self._calculate_mtof(resp[34]) data['Bin7 MToF'] = self._calculate_mtof(resp[35]) # Bins associated with firmware versions 14 and 15(?) if self.firmware['version'] < 16.: data['Temperature'] = self._calculate_temp(resp[36:40]) data['Pressure'] = self._calculate_pressure(resp[40:44]) data['Sampling Period'] = self._calculate_period(resp[44:48]) data['Checksum'] = self._16bit_unsigned(resp[48], resp[49]) data['PM1'] = self._calculate_float(resp[50:54]) data['PM2.5'] = self._calculate_float(resp[54:58]) data['PM10'] = self._calculate_float(resp[58:]) else: data['SFR'] = self._calculate_float(resp[36:40]) # Alright, we don't know whether it is temp or pressure since it switches.. 
tmp = self._calculate_pressure(resp[40:44]) if tmp > 98000: data['Temperature'] = None data['Pressure'] = tmp else: tmp = self._calculate_temp(resp[40:44]) if tmp < 500: data['Temperature'] = tmp data['Pressure'] = None else: data['Temperature'] = None data['Pressure'] = None data['Sampling Period'] = self._calculate_float(resp[44:48]) data['Checksum'] = self._16bit_unsigned(resp[48], resp[49]) data['PM1'] = self._calculate_float(resp[50:54]) data['PM2.5'] = self._calculate_float(resp[54:58]) data['PM10'] = self._calculate_float(resp[58:]) # Calculate the sum of the histogram bins histogram_sum = data['Bin 0'] + data['Bin 1'] + data['Bin 2'] + \ data['Bin 3'] + data['Bin 4'] + data['Bin 5'] + data['Bin 6'] + \ data['Bin 7'] + data['Bin 8'] + data['Bin 9'] + data['Bin 10'] + \ data['Bin 11'] + data['Bin 12'] + data['Bin 13'] + data['Bin 14'] + \ data['Bin 15'] # Check that checksum and the least significant bits of the sum of histogram bins # are equivilant if (histogram_sum & 0x0000FFFF) != data['Checksum']: logger.warning("Data transfer was incomplete") return None # If histogram is true, convert histogram values to number concentration if number_concentration is True: _conv_ = data['SFR'] * data['Sampling Period'] # Divider in units of ml (cc) data['Bin 0'] = data['Bin 0'] / _conv_ data['Bin 1'] = data['Bin 1'] / _conv_ data['Bin 2'] = data['Bin 2'] / _conv_ data['Bin 3'] = data['Bin 3'] / _conv_ data['Bin 4'] = data['Bin 4'] / _conv_ data['Bin 5'] = data['Bin 5'] / _conv_ data['Bin 6'] = data['Bin 6'] / _conv_ data['Bin 7'] = data['Bin 7'] / _conv_ data['Bin 8'] = data['Bin 8'] / _conv_ data['Bin 9'] = data['Bin 9'] / _conv_ data['Bin 10'] = data['Bin 10'] / _conv_ data['Bin 11'] = data['Bin 11'] / _conv_ data['Bin 12'] = data['Bin 12'] / _conv_ data['Bin 13'] = data['Bin 13'] / _conv_ data['Bin 14'] = data['Bin 14'] / _conv_ data['Bin 15'] = data['Bin 15'] / _conv_ sleep(0.1) return data
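# Worked example of the number-concentration conversion above (made-up values):
# raw bin counts are divided by sample flow rate (ml/s) x sampling period (s),
# i.e. the sampled volume in ml (cc), to give particles per cc.
raw_count, sfr, period = 148, 3.7, 2.0
assert round(raw_count / (sfr * period), 6) == 20.0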
def save_config_variables(self):
    """Save the configuration variables in non-volatile memory. This method
    should be used in conjunction with *write_config_variables*.

    :rtype: boolean

    :Example:

    >>> alpha.save_config_variables()
    True
    """
    command = 0x43
    byte_list = [0x3F, 0x3C, 0x3F, 0x3C, 0x43]
    success = [0xF3, 0x43, 0x3F, 0x3C, 0x3F, 0x3C]
    resp = []

    # Send the command byte and then wait for 10 ms
    r = self.cnxn.xfer([command])[0]
    sleep(10e-3)

    # append the response of the command byte to the list
    resp.append(r)

    # Send the rest of the config bytes
    for each in byte_list:
        r = self.cnxn.xfer([each])[0]
        resp.append(r)

    sleep(0.1)

    return True if resp == success else False
def set_fan_power(self, power): """Set only the Fan power. :param power: Fan power value as an integer between 0-255. :type power: int :rtype: boolean :Example: >>> alpha.set_fan_power(255) True """ # Check to make sure the value is a single byte if power > 255: raise ValueError("The fan power should be a single byte (0-255).") # Send the command byte and wait 10 ms a = self.cnxn.xfer([0x42])[0] sleep(10e-3) # Send the next two bytes b = self.cnxn.xfer([0x00])[0] c = self.cnxn.xfer([power])[0] sleep(0.1) return True if a == 0xF3 and b == 0x42 and c == 0x00 else False
def toggle_laser(self, state): """Toggle the power state of the laser. :param state: Boolean state of the laser :type state: boolean :rtype: boolean :Example: >>> alpha.toggle_laser(True) True """ # Send the command byte and wait 10 ms a = self.cnxn.xfer([0x03])[0] sleep(10e-3) # If state is true, turn the laser ON, else OFF if state: b = self.cnxn.xfer([0x02])[0] else: b = self.cnxn.xfer([0x03])[0] sleep(0.1) return True if a == 0xF3 and b == 0x03 else False
def read_pot_status(self): """Read the status of the digital pot. Firmware v18+ only. The return value is a dictionary containing the following as unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal. :rtype: dict :Example: >>> alpha.read_pot_status() { 'LaserDACVal': 230, 'FanDACVal': 255, 'FanON': 0, 'LaserON': 0 } """ # Send the command byte and wait 10 ms a = self.cnxn.xfer([0x13])[0] sleep(10e-3) # Build an array of the results res = [] for i in range(4): res.append(self.cnxn.xfer([0x00])[0]) sleep(0.1) return { 'FanON': res[0], 'LaserON': res[1], 'FanDACVal': res[2], 'LaserDACVal': res[3] }
def sn(self): """Read the Serial Number string. This method is only available on OPC-N2 firmware versions 18+. :rtype: string :Example: >>> alpha.sn() 'OPC-N2 123456789' """ string = [] # Send the command byte and sleep for 9 ms self.cnxn.xfer([0x10]) sleep(9e-3) # Read the info string by sending 60 empty bytes for i in range(60): resp = self.cnxn.xfer([0x00])[0] string.append(chr(resp)) sleep(0.1) return ''.join(string)
def read_firmware(self):
    """Read the firmware version of the OPC-N2. Firmware v18+ only.

    :rtype: dict

    :Example:

    >>> alpha.read_firmware()
    {
        'major': 18,
        'minor': 2,
        'version': 18.2
    }
    """
    # Send the command byte and sleep for 10 ms
    self.cnxn.xfer([0x12])
    sleep(10e-3)

    self.firmware['major'] = self.cnxn.xfer([0x00])[0]
    self.firmware['minor'] = self.cnxn.xfer([0x00])[0]

    # Build the firmware version
    self.firmware['version'] = float('{}.{}'.format(self.firmware['major'], self.firmware['minor']))

    sleep(0.1)

    return self.firmware
def pm(self): """Read the PM data and reset the histogram **NOTE: This method is supported by firmware v18+.** :rtype: dictionary :Example: >>> alpha.pm() { 'PM1': 0.12, 'PM2.5': 0.24, 'PM10': 1.42 } """ resp = [] data = {} # Send the command byte self.cnxn.xfer([0x32]) # Wait 10 ms sleep(10e-3) # read the histogram for i in range(12): r = self.cnxn.xfer([0x00])[0] resp.append(r) # convert to real things and store in dictionary! data['PM1'] = self._calculate_float(resp[0:4]) data['PM2.5'] = self._calculate_float(resp[4:8]) data['PM10'] = self._calculate_float(resp[8:]) sleep(0.1) return data
def on(self): """Turn ON the OPC (fan and laser) :returns: boolean success state """ b1 = self.cnxn.xfer([0x0C])[0] # send the command byte sleep(9e-3) # sleep for 9 ms return True if b1 == 0xF3 else False
def off(self): """Turn OFF the OPC (fan and laser) :returns: boolean success state """ b1 = self.cnxn.xfer([0x03])[0] # send the command byte sleep(9e-3) # sleep for 9 ms return True if b1 == 0xF3 else False
def read_gsc_sfr(self):
    """Read the gain-scaling-coefficient and sample flow rate.

    :returns: dictionary containing GSC and SFR
    """
    config = []
    data = {}

    # Send the command byte and sleep for 10 ms
    self.cnxn.xfer([0x33])
    sleep(10e-3)

    # Read the GSC and SFR by sending 8 empty bytes
    for i in range(8):
        resp = self.cnxn.xfer([0x00])[0]
        config.append(resp)

    data["GSC"] = self._calculate_float(config[0:4])
    data["SFR"] = self._calculate_float(config[4:])

    return data
def read_bin_boundaries(self):
    """Return the bin boundaries.

    :returns: dictionary of bin boundaries keyed by bin number
    """
    config = []
    data = {}

    # Send the command byte and sleep for 10 ms
    self.cnxn.xfer([0x33])
    sleep(10e-3)

    # Read the bin boundaries by sending 30 empty bytes
    for i in range(30):
        resp = self.cnxn.xfer([0x00])[0]
        config.append(resp)

    # Add the bin bounds to the dictionary of data [bytes 0-29]
    for i in range(0, 14):
        data["Bin Boundary {0}".format(i)] = self._16bit_unsigned(config[2*i], config[2*i + 1])

    return data
def read_bin_particle_density(self):
    """Read the bin particle density.

    :returns: float
    """
    config = []

    # Send the command byte and sleep for 10 ms
    self.cnxn.xfer([0x33])
    sleep(10e-3)

    # Read the particle density by sending 4 empty bytes
    for i in range(4):
        resp = self.cnxn.xfer([0x00])[0]
        config.append(resp)

    bpd = self._calculate_float(config)

    return bpd
def read_histogram(self): """Read and reset the histogram. The expected return is a dictionary containing the counts per bin, MToF for bins 1, 3, 5, and 7, temperature, pressure, the sampling period, the checksum, PM1, PM2.5, and PM10. **NOTE:** The sampling period for the OPCN1 seems to be incorrect. :returns: dictionary """ resp = [] data = {} # command byte command = 0x30 # Send the command byte self.cnxn.xfer([command]) # Wait 10 ms sleep(10e-3) # read the histogram for i in range(62): r = self.cnxn.xfer([0x00])[0] resp.append(r) # convert to real things and store in dictionary! data['Bin 0'] = self._16bit_unsigned(resp[0], resp[1]) data['Bin 1'] = self._16bit_unsigned(resp[2], resp[3]) data['Bin 2'] = self._16bit_unsigned(resp[4], resp[5]) data['Bin 3'] = self._16bit_unsigned(resp[6], resp[7]) data['Bin 4'] = self._16bit_unsigned(resp[8], resp[9]) data['Bin 5'] = self._16bit_unsigned(resp[10], resp[11]) data['Bin 6'] = self._16bit_unsigned(resp[12], resp[13]) data['Bin 7'] = self._16bit_unsigned(resp[14], resp[15]) data['Bin 8'] = self._16bit_unsigned(resp[16], resp[17]) data['Bin 9'] = self._16bit_unsigned(resp[18], resp[19]) data['Bin 10'] = self._16bit_unsigned(resp[20], resp[21]) data['Bin 11'] = self._16bit_unsigned(resp[22], resp[23]) data['Bin 12'] = self._16bit_unsigned(resp[24], resp[25]) data['Bin 13'] = self._16bit_unsigned(resp[26], resp[27]) data['Bin 14'] = self._16bit_unsigned(resp[28], resp[29]) data['Bin 15'] = self._16bit_unsigned(resp[30], resp[31]) data['Bin1 MToF'] = self._calculate_mtof(resp[32]) data['Bin3 MToF'] = self._calculate_mtof(resp[33]) data['Bin5 MToF'] = self._calculate_mtof(resp[34]) data['Bin7 MToF'] = self._calculate_mtof(resp[35]) data['Temperature'] = self._calculate_temp(resp[36:40]) data['Pressure'] = self._calculate_pressure(resp[40:44]) data['Sampling Period'] = self._calculate_period(resp[44:48]) data['Checksum'] = self._16bit_unsigned(resp[48], resp[49]) data['PM1'] = self._calculate_float(resp[50:54]) data['PM2.5'] = self._calculate_float(resp[54:58]) data['PM10'] = self._calculate_float(resp[58:]) # Calculate the sum of the histogram bins histogram_sum = data['Bin 0'] + data['Bin 1'] + data['Bin 2'] + \ data['Bin 3'] + data['Bin 4'] + data['Bin 5'] + data['Bin 6'] + \ data['Bin 7'] + data['Bin 8'] + data['Bin 9'] + data['Bin 10'] + \ data['Bin 11'] + data['Bin 12'] + data['Bin 13'] + data['Bin 14'] + \ data['Bin 15'] return data
def start(self): """ Starts HDLC controller's threads. """ self.receiver = self.Receiver( self.read, self.write, self.send_lock, self.senders, self.frames_received, callback=self.receive_callback, fcs_nack=self.fcs_nack, ) self.receiver.start()
def stop(self):
    """
    Stops HDLC controller's threads.
    """
    if self.receiver is not None:
        self.receiver.join()

    for s in self.senders.values():
        s.join()
def send(self, data): """ Sends a new data frame. This method will block until a new room is available for a new sender. This limit is determined by the size of the window. """ while len(self.senders) >= self.window: pass self.senders[self.new_seq_no] = self.Sender( self.write, self.send_lock, data, self.new_seq_no, timeout=self.sending_timeout, callback=self.send_callback, ) self.senders[self.new_seq_no].start() self.new_seq_no = (self.new_seq_no + 1) % HDLController.MAX_SEQ_NO
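# Rough usage sketch for the controller above. The constructor arguments (a
# read callable and a write callable) and the serial-port setup are assumptions
# for illustration, not taken from this source.
import serial

ser = serial.Serial('/dev/ttyACM0', baudrate=9600)
hdlc = HDLController(ser.read, ser.write)
hdlc.start()           # spawns the receiver thread (see start() above)
hdlc.send(b'hello')    # blocks while the sending window is full
hdlc.stop()            # joins the receiver and any in-flight senders
ser.close()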
def cmp(self, other):
    """*Note: checks Range.start() only*

    Key: self = [], other = {}

    * [ {----]----}  => -1
    * {---[---}  ]   => 1
    * [---] {---}    => -1
    * [---] same as {---} => 0
    * [--{-}--]      => -1
    """
    if isinstance(other, Range):
        # other has tz, I dont, so replace the tz
        start = self.start.replace(tzinfo=other.start.tz) if other.start.tz and self.start.tz is None else self.start
        end = self.end.replace(tzinfo=other.end.tz) if other.end.tz and self.end.tz is None else self.end
        if start == other.start and end == other.end:
            return 0
        elif start < other.start:
            return -1
        else:
            return 1

    elif isinstance(other, Date):
        if other.tz and self.start.tz is None:
            return 0 if other == self.start.replace(tzinfo=other.tz) else -1 if other > self.start.replace(tzinfo=other.tz) else 1
        return 0 if other == self.start else -1 if other > self.start else 1

    else:
        return self.cmp(Range(other, tz=self.start.tz))
def cut(self, by, from_start=True):
    """Cuts this range down to the length requested and returns a new instance.

    By default the cut is taken from the start; pass from_start=False to keep
    the end instead.
    """
    s, e = copy(self.start), copy(self.end)
    if from_start:
        e = s + by
    else:
        s = e - by
    return Range(s, e)
def next(self, times=1):
    """Returns a new Range of the same length, starting where this one ends.

    The `times` argument is not supported yet.
    """
    return Range(copy(self.end), self.end + self.elapse, tz=self.start.tz)
def prev(self, times=1):
    """Returns a new Range of the same length, ending where this one starts.

    The `times` argument is not supported yet.
    """
    return Range(self.start - self.elapse, copy(self.start), tz=self.start.tz)
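# Hedged usage sketch for next()/prev() above; it assumes timestring's Range
# accepts natural-language strings, as the findall() helper further down does
# when it builds Range(date[0]).
from timestring import Range

week = Range('this week')
following = week.next()   # starts where `week` ends, same elapsed length
previous = week.prev()    # ends where `week` starts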
def replace(self, **k): """Note returns a new Date obj""" if self.date != 'infinity': return Date(self.date.replace(**k)) else: return Date('infinity')
def adjust(self, to): ''' Adjusts the time from kwargs to timedelta **Will change this object** return new copy of self ''' if self.date == 'infinity': return new = copy(self) if type(to) in (str, unicode): to = to.lower() res = TIMESTRING_RE.search(to) if res: rgroup = res.groupdict() if (rgroup.get('delta') or rgroup.get('delta_2')): i = int(text2num(rgroup.get('num', 'one'))) * (-1 if to.startswith('-') else 1) delta = (rgroup.get('delta') or rgroup.get('delta_2')).lower() if delta.startswith('y'): try: new.date = new.date.replace(year=(new.date.year + i)) except ValueError: # day is out of range for month new.date = new.date + timedelta(days=(365 * i)) elif delta.startswith('month'): if (new.date.month + i) > 12: new.date = new.date.replace(month=(i - (i / 12)), year=(new.date.year + 1 + (i / 12))) elif (new.date.month + i) < 1: new.date = new.date.replace(month=12, year=(new.date.year - 1)) else: new.date = new.date.replace(month=(new.date.month + i)) elif delta.startswith('q'): # NP pass elif delta.startswith('w'): new.date = new.date + timedelta(days=(7 * i)) elif delta.startswith('s'): new.date = new.date + timedelta(seconds=i) else: new.date = new.date + timedelta(**{('days' if delta.startswith('d') else 'hours' if delta.startswith('h') else 'minutes' if delta.startswith('m') else 'seconds'): i}) return new else: new.date = new.date + timedelta(seconds=int(to)) return new raise TimestringInvalid('Invalid addition request')
def findall(text): """Find all the timestrings within a block of text. >>> timestring.findall("once upon a time, about 3 weeks ago, there was a boy whom was born on august 15th at 7:20 am. epic.") [ ('3 weeks ago,', <timestring.Date 2014-02-09 00:00:00 4483019280>), ('august 15th at 7:20 am', <timestring.Date 2014-08-15 07:20:00 4483019344>) ] """ results = TIMESTRING_RE.findall(text) dates = [] for date in results: if re.compile('((next|last)\s(\d+|couple(\sof))\s(weeks|months|quarters|years))|(between|from)', re.I).match(date[0]): dates.append((date[0].strip(), Range(date[0]))) else: dates.append((date[0].strip(), Date(date[0]))) return dates
def authenticate(self, request): """ Returns two-tuple of (user, token) if authentication succeeds, or None otherwise. """ try: oauth_request = oauth_provider.utils.get_oauth_request(request) except oauth.Error as err: raise exceptions.AuthenticationFailed(err.message) if not oauth_request: return None oauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMES found = any(param for param in oauth_params if param in oauth_request) missing = list(param for param in oauth_params if param not in oauth_request) if not found: # OAuth authentication was not attempted. return None if missing: # OAuth was attempted but missing parameters. msg = 'Missing parameters: %s' % (', '.join(missing)) raise exceptions.AuthenticationFailed(msg) if not self.check_nonce(request, oauth_request): msg = 'Nonce check failed' raise exceptions.AuthenticationFailed(msg) try: consumer_key = oauth_request.get_parameter('oauth_consumer_key') consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key) except oauth_provider.store.InvalidConsumerError: msg = 'Invalid consumer token: %s' % oauth_request.get_parameter('oauth_consumer_key') raise exceptions.AuthenticationFailed(msg) if consumer.status != oauth_provider.consts.ACCEPTED: msg = 'Invalid consumer key status: %s' % consumer.get_status_display() raise exceptions.AuthenticationFailed(msg) try: token_param = oauth_request.get_parameter('oauth_token') token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param) except oauth_provider.store.InvalidTokenError: msg = 'Invalid access token: %s' % oauth_request.get_parameter('oauth_token') raise exceptions.AuthenticationFailed(msg) try: self.validate_token(request, consumer, token) except oauth.Error as err: raise exceptions.AuthenticationFailed(err.message) user = token.user if not user.is_active: msg = 'User inactive or deleted: %s' % user.username raise exceptions.AuthenticationFailed(msg) return (token.user, token)
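# Hedged wiring sketch: how an authentication class like the one above is
# typically enabled in Django REST framework settings. The dotted path below
# is a placeholder, not taken from this source.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'myproject.authentication.OAuthAuthentication',  # hypothetical path
    ),
}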