text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def ne(self, other, ranks=None):
    """
    Compares the card against another card, ``other``, and checks whether
    the card is not equal to ``other``, based on the given rank dict.

    :arg Card other:
        The second Card to compare.
    :arg dict ranks:
        The ranks to refer to for comparisons.

    :returns:
        ``True`` or ``False``.
    """
    ranks = ranks or DEFAULT_RANKS
    if isinstance(other, Card):
        if ranks.get("suits"):
            return (
                ranks["values"][self.value] != ranks["values"][other.value] or
                ranks["suits"][self.suit] != ranks["suits"][other.suit]
            )
        else:
            return ranks[self.value] != ranks[other.value]
    else:
        return False
0.002281
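A brief usage sketch for the ``ne`` method above; ``Card(value, suit)`` and a ``DEFAULT_RANKS`` dict holding both "values" and "suits" entries are assumptions based on the snippet, not confirmed API.

# Hypothetical cards from the host library's Card class.
ace_spades = Card("Ace", "Spades")
ace_hearts = Card("Ace", "Hearts")
ace_spades.ne(ace_hearts)  # True: equal values, but suit ranks differ
ace_spades.ne("Ace")       # False: non-Card operands return False by design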
def filename(self):
    """
    :return: the zip file name for this entry, including the current
        revision id when a revision file exists
    :rtype: str
    """
    filename = self.key
    if self.has_revision_file() and self.history.current_revision:
        filename += "-"
        filename += self.history.current_revision.revision_id
    filename += ".zip"
    return filename
0.006116
def create(self, count=1, commit=True, **kwargs):
    '''
    Create and return ``count`` model instances. If *commit* is ``False``
    the instances will not be saved and many to many relations will not be
    processed.

    May raise ``CreateInstanceError`` if constraints are not satisfied.

    The method internally calls :meth:`create_one` to generate instances.
    '''
    object_list = []
    for i in range(count):
        instance = self.create_one(commit=commit, **kwargs)
        object_list.append(instance)
    return object_list
0.003373
async def on_capability_sasl_available(self, value):
    """ Check whether or not SASL is available. """
    if value:
        self._sasl_mechanisms = value.upper().split(',')
    else:
        self._sasl_mechanisms = None

    if self.sasl_mechanism == 'EXTERNAL' or (self.sasl_username and self.sasl_password):
        if self.sasl_mechanism == 'EXTERNAL' or puresasl:
            return True
        self.logger.warning('SASL credentials set but puresasl module not found: '
                            'not initiating SASL authentication.')
    return False
0.007018
def slowDown(self, vehID, speed, duration):
    """slowDown(string, double, int) -> None

    Changes the speed smoothly to the given value over the given amount
    of time in ms (can also be used to increase speed).
    """
    self._connection._beginMessage(
        tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_SLOWDOWN, vehID,
        1 + 4 + 1 + 8 + 1 + 4)
    self._connection._string += struct.pack(
        "!BiBdBi", tc.TYPE_COMPOUND, 2, tc.TYPE_DOUBLE, speed,
        tc.TYPE_INTEGER, duration)
    self._connection._sendExact()
0.007273
def optimize(self):
    """ Tries to detect peep-hole patterns in this basic block
    and remove them.
    """
    changed = OPTIONS.optimization.value > 2  # only with -O3 will enter here

    while changed:
        changed = False
        regs = Registers()

        if len(self) and self[-1].inst in ('jp', 'jr') and \
                self.original_next is LABELS[self[-1].opers[0]].basic_block:
            # { jp Label ; Label: ; ... } => { Label: ; ... }
            LABELS[self[-1].opers[0]].used_by.remove(self)
            self.pop(len(self) - 1)
            changed = True
            continue

        for i in range(len(self)):
            try:
                if self.mem[i].is_label:
                    # ignore labels
                    continue
            except IndexError:
                print(i)
                print('\n'.join(str(x) for x in self.mem))
                raise

            i1 = self.mem[i].inst
            o1 = self.mem[i].opers

            if i > 0:
                i0 = self.mem[i - 1].inst
                o0 = self.mem[i - 1].opers
            else:
                i0 = o0 = None

            if i < len(self) - 1:
                i2 = self.mem[i + 1].inst
                o2 = self.mem[i + 1].opers
            else:
                i2 = o2 = None

            if i < len(self) - 2:
                i3 = self.mem[i + 2].inst
                o3 = self.mem[i + 2].opers
            else:
                i3 = o3 = None

            if i1 == 'ld':
                if OPT00 and o1[0] == o1[1]:
                    # { LD X, X } => {}
                    self.pop(i)
                    changed = True
                    break

                if OPT01 and i0 == 'ld' and o0[0] == o1[1] and o1[0] == o0[1]:
                    # { LD A, X; LD X, A} => {LD A, X}
                    self.pop(i)
                    changed = True
                    break

                if OPT02 and i0 == i1 == 'ld' and o0[1] == o1[1] and \
                        is_register(o0[0]) and is_register(o1[0]) and not is_16bit_idx_register(o1[0]):
                    if is_8bit_register(o1[0]):
                        if not is_8bit_register(o1[1]):
                            # { LD r1, N; LD r2, N} => {LD r1, N; LD r2, r1}
                            changed = True
                            self[i] = 'ld %s, %s' % (o1[0], o0[0])
                            break
                    else:
                        changed = True
                        # {LD r1, NN; LD r2, NN} => { LD r1, NN; LD r2H, r1H; LD r2L, r1L}
                        self[i] = 'ld %s, %s' % (HI16(o1[0]), HI16(o0[0]))
                        self.insert(i + 1, 'ld %s, %s' % (LO16(o1[0]), LO16(o0[0])))
                        break

                if OPT03 and is_register(o1[0]) and o1[0] != 'sp' and \
                        not self.is_used(single_registers(o1[0]), i + 1):
                    # LD X, nnn ; X not used later => Remove instruction
                    tmp = str(self.asm)
                    self.pop(i)
                    changed = True
                    __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                    break

                if OPT04 and o1 == ['h', 'a'] and i2 == 'ld' and o2[0] == 'a' \
                        and i3 == 'sub' and o3[0] == 'h' and not self.is_used('h', i + 3):
                    if is_number(o2[1]):
                        self[i] = 'neg'
                        self[i + 1] = 'add a, %s' % o2[1]
                        self[i + 2] = 'ccf'
                        changed = True
                        break

                if OPT05 and regs._is(o1[0], o1[1]):  # and regs.get(o1[0])[0:3] != '(ix':
                    tmp = str(self.asm)
                    self.pop(i)
                    changed = True
                    __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                    break

                if OPT06 and o1[0] in ('hl', 'de') and \
                        i2 == 'ex' and o2[0] == 'de' and o2[1] == 'hl' and \
                        not self.is_used(single_registers(o1[0]), i + 2):
                    # { LD HL, XX ; EX DE, HL; POP HL } ::= { LD DE, XX ; POP HL }
                    reg = 'de' if o1[0] == 'hl' else 'hl'
                    self.pop(i + 1)
                    self[i] = 'ld %s, %s' % (reg, o1[1])
                    changed = True
                    break

                if OPT07 and i0 == 'ld' and i2 == 'ld' and o2[1] == 'hl' and not self.is_used(['h', 'l'], i + 2) \
                        and (o0[0] == 'h' and o0[1] == 'b' and o1[0] == 'l' and o1[1] == 'c' or
                             o0[0] == 'l' and o0[1] == 'c' and o1[0] == 'h' and o1[1] == 'b' or
                             o0[0] == 'h' and o0[1] == 'd' and o1[0] == 'l' and o1[1] == 'e' or
                             o0[0] == 'l' and o0[1] == 'e' and o1[0] == 'h' and o1[1] == 'd'):
                    # { LD h, rH ; LD l, rl ; LD (XX), HL } ::= { LD (XX), R }
                    tmp = str(self.asm)
                    r2 = 'de' if o0[1] in ('d', 'e') else 'bc'
                    self[i + 1] = 'ld %s, %s' % (o2[0], r2)
                    self.pop(i)
                    self.pop(i - 1)
                    changed = True
                    __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                    break

                if OPT08 and i1 == i2 == 'ld' and i > 0 and \
                        (o1[1] == 'h' and o1[0] == 'b' and o2[1] == 'l' and o2[0] == 'c' or
                         o1[1] == 'l' and o1[0] == 'c' and o2[1] == 'h' and o2[0] == 'b' or
                         o1[1] == 'h' and o1[0] == 'd' and o2[1] == 'l' and o2[0] == 'e' or
                         o1[1] == 'l' and o1[0] == 'e' and o2[1] == 'h' and o2[0] == 'd') and \
                        regs.get('hl') is not None and not self.is_used(['h', 'l'], i + 2) and \
                        not self[i - 1].needs(['h', 'l']) and not self[i - 1].affects(['h', 'l']):
                    # { LD HL, XXX ; <inst> ; LD rH, H; LD rL, L } ::=
                    # { LD HL, XXX ; LD rH, H; LD rL, L; <inst> }
                    changed = True
                    tmp = str(self.asm)
                    self.swap(i - 1, i + 1)
                    __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                    break

                if OPT09 and i > 0 and i0 == i1 == i2 == 'ld' and \
                        o0[0] == 'hl' and \
                        (o1[1] == 'h' and o1[0] == 'b' and o2[1] == 'l' and o2[0] == 'c' or
                         o1[1] == 'l' and o1[0] == 'c' and o2[1] == 'h' and o2[0] == 'b' or
                         o1[1] == 'h' and o1[0] == 'd' and o2[1] == 'l' and o2[0] == 'e' or
                         o1[1] == 'l' and o1[0] == 'e' and o2[1] == 'h' and o2[0] == 'd') and \
                        not self.is_used(['h', 'l'], i + 2):
                    # { LD HL, XXX ; LD rH, H; LD rL, L } ::= { LD rr, XXX }
                    changed = True
                    r1 = 'de' if o1[0] in ('d', 'e') else 'bc'
                    tmp = str(self.asm)
                    self[i - 1] = 'ld %s, %s' % (r1, o0[1])
                    self.pop(i + 1)
                    self.pop(i)
                    __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                    break

            if OPT10 and i1 in ('inc', 'dec') and o1[0] == 'a':
                if i2 == i0 == 'ld' and o2[0] == o0[1] and 'a' == o0[0] == o2[1] and o0[1][0] == '(':
                    if not RE_INDIR.match(o2[0]):
                        if not self.is_used(['a', 'h', 'l'], i + 2):
                            # { LD A, (X); [ DEC A | INC A ]; LD (X), A} ::=
                            # { LD HL, X; [ DEC (HL) | INC (HL) ] }
                            tmp = str(self.asm)
                            self.pop(i + 1)
                            self[i - 1] = 'ld hl, %s' % (o0[1][1:-1])
                            self[i] = '%s (hl)' % i1
                            changed = True
                            __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                            break
                    else:
                        if not self.is_used(['a'], i + 2):
                            # { LD A, (IX + n); [ DEC A | INC A ]; LD (X), A} ::=
                            # { [ DEC (IX + n) | INC (IX + n) ] }
                            tmp = str(self.asm)
                            self.pop(i + 1)
                            self.pop(i)
                            self[i - 1] = '%s %s' % (i1, o0[1])
                            changed = True
                            __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                            break

            if OPT11 and i0 == 'push' and i3 == 'pop' and o0[0] != o3[0] \
                    and o0[0] in ('hl', 'de') and o3[0] in ('hl', 'de') \
                    and i1 == i2 == 'ld' and (
                        o1[0] == HI16(o0[0]) and o2[0] == LO16(o0[0]) and
                        o1[1] == HI16(o3[0]) and o2[1] == LO16(o3[0]) or
                        o2[0] == HI16(o0[0]) and o1[0] == LO16(o0[0]) and
                        o2[1] == HI16(o3[0]) and o1[1] == LO16(o3[0])):
                # { PUSH HL; LD H, D; LD L, E; POP HL } ::= {EX DE, HL}
                self.pop(i + 2)
                self.pop(i + 1)
                self.pop(i)
                self[i - 1] = 'ex de, hl'
                changed = True
                break

            if i0 == 'push' and i1 == 'pop':
                if OPT12 and o0[0] == o1[0]:
                    # { PUSH X ; POP X } ::= { }
                    self.pop(i)
                    self.pop(i - 1)
                    changed = True
                    break

                if OPT13 and o0[0] in ('de', 'hl') and o1[0] in ('de', 'hl') and \
                        not self.is_used(single_registers(o0[0]), i + 1):
                    # { PUSH DE ; POP HL } ::= { EX DE, HL }
                    self.pop(i)
                    self[i - 1] = 'ex de, hl'
                    changed = True
                    break

                if OPT14 and 'af' in (o0[0], o1[0]):
                    # { push Xx ; pop af } => { ld a, X }
                    if not self.is_used(o1[0][1], i + 1):
                        self[i - 1] = 'ld %s, %s' % (HI16(o1[0]), HI16(o0[0]))
                        self.pop(i)
                        changed = True
                        break
                elif OPT15 and not is_16bit_idx_register(o0[0]) and \
                        not is_16bit_idx_register(o1[0]) and 'af' not in (o0[0], o1[0]):
                    # { push Xx ; pop Yy } => { ld Y, X ; ld y, x }
                    self[i - 1] = 'ld %s, %s' % (HI16(o1[0]), HI16(o0[0]))
                    self[i] = 'ld %s, %s' % (LO16(o1[0]), LO16(o0[0]))
                    changed = True
                    break

            if OPT16 and i > 0 and not self.mem[i - 1].is_label and i1 == 'pop' and \
                    (not self.mem[i - 1].affects([o1[0], 'sp']) or self.safe_to_write(o1[0], i + 1)) and \
                    not self.mem[i - 1].needs([o1[0], 'sp']):
                # { <inst>; POP X } => { POP X; <inst> } ; if inst does not use X
                tmp = str(self.asm)
                self.swap(i - 1, i)
                changed = True
                __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                break

            if OPT17 and i1 == 'xor' and o1[0] == 'a' and regs._is('a', 0) and regs.Z and not regs.C:
                tmp = str(self.asm)
                self.pop(i)
                __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                changed = True
                break

            if OPT18 and i3 is not None and \
                    (i0 == i1 == 'ld' and i2 == i3 == 'push') and \
                    (o0[0] == o3[0] == 'de' and o1[0] == o2[0] == 'bc'):
                if not self.is_used(['h', 'l', 'd', 'e', 'b', 'c'], i + 3):
                    # { LD DE, (X2) ; LD BC, (X1); PUSH DE; PUSH BC } ::=
                    # { LD HL, (X2); PUSH HL; LD HL, (X1); PUSH HL }
                    self[i - 1] = 'ld hl, %s' % o1[1]
                    self[i] = 'push hl'
                    self[i + 1] = 'ld hl, %s' % o0[1]
                    self[i + 2] = 'push hl'
                    changed = True
                    break

            if i1 in ('jp', 'jr', 'call') and o1[0] in JUMP_LABELS:
                c = self.mem[i].condition_flag
                if OPT19 and c is not None:
                    if c == 'c' and regs.C == 1 or \
                            c == 'z' and regs.Z == 1 or \
                            c == 'nc' and regs.C == 0 or \
                            c == 'nz' and regs.Z == 0:
                        # If the condition is always satisfied, replace with
                        # a simple jump / call
                        changed = True
                        tmp = str(self.asm)
                        self[i] = '%s %s' % (i1, o1[0])
                        self.update_goes_and_comes()
                        __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                        break

                ii = LABELS[o1[0]].basic_block.get_first_non_label_instruction()
                ii1 = None if ii is None else ii.inst
                cc = None if ii is None else ii.condition_flag
                # Are we calling / jumping into another jump?
                if OPT20 and ii1 in ('jp', 'jr') and (
                        cc is None or cc == c or
                        cc == 'c' and regs.C == 1 or
                        cc == 'z' and regs.Z == 1 or
                        cc == 'nc' and regs.C == 0 or
                        cc == 'nz' and regs.Z == 0):
                    if c is None:
                        c = ''
                    else:
                        c = c + ', '

                    changed = True
                    tmp = str(self.asm)
                    LABELS[o1[0]].used_by.remove(self)  # This block no longer uses this label
                    self[i] = '%s %s%s' % (i1, c, ii.opers[0])
                    self.update_goes_and_comes()
                    __DEBUG__('Changed %s ==> %s' % (tmp, self.asm), 2)
                    break

            if OPT22 and i0 == 'sbc' and o0[0] == o0[1] == 'a' and \
                    i1 == 'or' and o1[0] == 'a' and \
                    i2 == 'jp' and \
                    self[i + 1].condition_flag is not None and \
                    not self.is_used(['a'], i + 2):
                c = self.mem[i + 1].condition_flag
                if c in ('z', 'nz'):
                    c = 'c' if c == 'nz' else 'nc'
                    changed = True
                    self[i + 1] = 'jp %s, %s' % (c, o2[0])
                    self.pop(i)
                    self.pop(i - 1)
                    break

            if OPT23 and i0 == 'ld' and is_16bit_register(o0[0]) and o0[1][0] == '(' and \
                    i1 == 'ld' and o1[0] == 'a' and o1[1] == LO16(o0[0]) and \
                    not self.is_used(single_registers(o0[0]), i + 1):
                # { LD HL, (X) ; LD A, L } ::= { LD A, (X) }
                self.pop(i)
                self[i - 1] = 'ld a, %s' % o0[1]
                changed = True
                break

            if OPT24 and i1 == i2 == 'ccf':
                # { ccf ; ccf } ::= { }
                self.pop(i)
                self.pop(i)
                changed = True
                break

            if OPT25 and i1 == 'ld' and is_register(o1[0]) and o1[0] != 'sp':
                is8 = is_8bit_register(o1[0])
                ss = [x for x, y in regs.regs.items()
                      if x != o1[0] and y is not None and y == regs.get(o1[1]) and
                      not is_8bit_register(o1[1])]
                for r_ in ss:
                    if is8 != is_8bit_register(r_):
                        continue
                    changed = True
                    if is8:
                        # ld A, n; ld B, n => ld A, n; ld B, A
                        self[i] = 'ld %s, %s' % (o1[0], r_)
                    else:
                        # ld HL, n; ld DE, n => ld HL, n; ld d, h; ld e, l
                        # 16 bit register
                        self[i] = 'ld %s, %s' % (HI16(o1[0]), HI16(r_))
                        self.insert(i + 1, 'ld %s, %s' % (LO16(o1[0]), LO16(r_)))
                    break
                if changed:
                    break

            if OPT26 and i1 == i2 == 'ld' and (o1[0], o1[1], o2[0], o2[1]) == ('d', 'h', 'e', 'l') and \
                    not self.is_used(['h', 'l'], i + 2):
                self[i] = 'ex de, hl'
                self.pop(i + 1)
                changed = True
                break

            if OPT27 and i1 in ('cp', 'or', 'and', 'add', 'adc', 'sub', 'sbc') and o1[-1] != 'a' and \
                    not self.is_used(o1[-1], i + 1) and i0 == 'ld' and o0[0] == o1[-1] and \
                    (o0[1] == '(hl)' or RE_IXIND.match(o0[1])):
                template = '{0} %s{1}' % ('a, ' if i1 in ('add', 'adc', 'sbc') else '')
                self[i] = template.format(i1, o0[1])
                self.pop(i - 1)
                changed = True
                break

            regs.op(i1, o1)
0.003194
def update_object_options(self, identifier, options):
    """Update the set of typed attributes (options) that are associated with
    a given image group. Raises a ValueError if any of the given attributes
    violates the attribute definitions associated with image groups.

    Parameters
    ----------
    identifier : string
        Unique object identifier
    options : list(dict('name':...,'value':...))
        List of attribute instances

    Returns
    -------
    ImageGroupHandle
        Handle for updated image group or None if identifier is unknown.
    """
    # Retrieve object from database to ensure that it exists
    img_group = self.get_object(identifier)
    if img_group is None:
        return None
    # Replace existing object in database with object having given options.
    # Raises an exception if attributes with duplicate names appear in the
    # list.
    img_group.options = attribute.to_dict(options, self.attribute_defs)
    self.replace_object(img_group)
    # Return image group handle
    return img_group
0.001735
def do_uninstall(ctx, verbose, fake):
    """Uninstalls legit git aliases, including deprecated legit sub-commands."""
    aliases = cli.list_commands(ctx)
    # Add deprecated aliases
    aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
    for alias in aliases:
        system_command = 'git config --global --unset-all alias.{0}'.format(alias)
        verbose_echo(system_command, verbose, fake)
        if not fake:
            os.system(system_command)

    if not fake:
        click.echo('\nThe following git aliases are uninstalled:\n')
        output_aliases(aliases)
0.006462
def build_chvatal_graph():
    """Makes a new Chvatal graph.
    Ref: http://mathworld.wolfram.com/ChvatalGraph.html"""

    # The easiest way to build the Chvatal graph is to start
    # with C12 and add the additional 12 edges
    graph = build_cycle_graph(12)

    edge_tpls = [
        (1, 7), (1, 9), (2, 5), (2, 11),
        (3, 7), (3, 9), (4, 10), (4, 12),
        (5, 8), (6, 10), (6, 12), (8, 11),
    ]

    for i, j in edge_tpls:
        graph.new_edge(i, j)

    return graph
0.027197
def lookup(self, ip_address):
    """Try to do a reverse dns lookup on the given ip_address"""

    # Is this already in our cache
    if self.ip_lookup_cache.get(ip_address):
        domain = self.ip_lookup_cache.get(ip_address)

    # Is the ip_address local or special
    elif not self.lookup_internal and net_utils.is_internal(ip_address):
        domain = 'internal'
    elif net_utils.is_special(ip_address):
        domain = net_utils.is_special(ip_address)

    # Look it up at this point
    else:
        domain = self._reverse_dns_lookup(ip_address)

    # Cache it
    self.ip_lookup_cache.set(ip_address, domain)

    # Return the domain
    return domain
0.002729
def make_package_tree(matrix=None, labels=None, width=25, height=10, title=None, font_size=None):
    '''make_package_tree will make a dendrogram comparing a matrix of packages

    :param matrix: a pandas df of packages, with names in index and columns
    :param labels: a list of labels corresponding to row names, will be
        pulled from rows if not defined
    :param title: a title for the plot, if not defined, will be left out
    :returns: a plot that can be saved with savefig
    '''
    from matplotlib import pyplot as plt
    from scipy.cluster.hierarchy import (
        dendrogram,
        linkage
    )
    from scipy.cluster.hierarchy import cophenet
    from scipy.spatial.distance import pdist

    if font_size is None:
        font_size = 8.

    if not isinstance(matrix, pandas.DataFrame):
        bot.info("No pandas DataFrame (matrix) of similarities defined, will use default.")
        matrix = compare_packages()['files.txt']
        title = 'Docker Library Similarity to Base OS'

    Z = linkage(matrix, 'ward')
    c, coph_dists = cophenet(Z, pdist(matrix))

    if labels is None:
        labels = matrix.index.tolist()

    plt.figure(figsize=(width, height))
    if title is not None:
        plt.title(title)
    plt.xlabel('image index')
    plt.ylabel('distance')
    dendrogram(Z,
               leaf_rotation=90.,          # rotates the x axis labels
               leaf_font_size=font_size,   # font size for the x axis labels
               labels=labels)
    return plt
0.008021
def finalize_content(self):
    """ Finalize the additions """
    self.write_closed = True
    body = self.raw_body.decode(self.encoding)
    self._init_xml(body)
    self._form_output()
0.009709
def add_view_file_mapping(self, pattern, cls):
    """Adds a mapping between a file and a view class.

    Pattern can be an extension in the form .EXT or a filename.
    """
    if isinstance(pattern, str):
        if not pattern.endswith("*"):
            _, ext = os.path.splitext(pattern)
            self.allowed_extensions.add(ext)
        pattern = re.compile("^" + re.escape(pattern).replace("\\*", ".+") + "$", re.I)
    self.view_class_files_map.append((pattern, cls))
0.005848
def set_formatter(log_formatter):
    """Override the default log formatter with your own."""
    # Add our formatter to all the handlers
    root_logger = logging.getLogger()
    for handler in root_logger.handlers:
        handler.setFormatter(logging.Formatter(log_formatter))
0.006689
def rna_transcript_expression_dict_from_args(args):
    """
    Returns a dictionary mapping Ensembl transcript IDs to FPKM expression
    values, or None if neither a Cufflinks tracking file nor a StringTie GTF
    file was specified.
    """
    if args.rna_transcript_fpkm_tracking_file:
        return load_cufflinks_fpkm_dict(args.rna_transcript_fpkm_tracking_file)
    elif args.rna_transcript_fpkm_gtf_file:
        return load_transcript_fpkm_dict_from_gtf(
            args.rna_transcript_fpkm_gtf_file)
    else:
        return None
0.001859
def handle_request(self, filter_name, path):
    """
    Handle image request

    :param filter_name: filter_name
    :param path: image_path
    :return:
    """
    if filter_name in self._filter_sets:
        if self._filter_sets[filter_name]['cached']:
            cached_item_path = self._adapter.check_cached_item('%s/%s' % (filter_name, path))
            if cached_item_path:
                return redirect(cached_item_path, self._redirect_code)

        resource = self._adapter.get_item(path)

        if resource:
            for filter_item in self._filter_sets[filter_name]['filters']:
                resource = filter_item.apply(resource)

            if self._filter_sets[filter_name]['cached']:
                return redirect(
                    self._adapter.create_cached_item('%s/%s' % (filter_name, path), resource),
                    self._redirect_code
                )
            else:
                output = BytesIO()
                resource.save(output, format=str(resource.format))
                return output.getvalue()
        else:
            LOGGER.warning('File "%s" not found.' % path)
            abort(404)
    else:
        LOGGER.warning('Filter "%s" not found.' % filter_name)
        abort(404)
0.002928
def loggray(x, a, b):
    """Auxiliary function that specifies the logarithmic gray scale.
    a and b are the cutoffs."""
    linval = 10.0 + 990.0 * (x - float(a)) / (b - a)
    return (np.log10(linval) - 1.0) * 0.5 * 255.0
0.004651
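A quick numeric check of the loggray cutoffs above: the lower cutoff a maps to gray level 0 and the upper cutoff b to 255, since linval runs from 10 to 1000 and log10 of that spans 1 to 3.

import numpy as np  # loggray from the snippet above is assumed in scope
assert abs(loggray(0.0, 0, 1) - 0.0) < 1e-9    # x == a: (log10(10) - 1) * 0.5 * 255 = 0
assert abs(loggray(1.0, 0, 1) - 255.0) < 1e-9  # x == b: (log10(1000) - 1) * 0.5 * 255 = 255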
def toggle_view(self, *args, **kwargs):
    """ Toggles the view between different groups """
    group_to_display = self.views.popleft()
    self.cur_view.value = group_to_display
    for repo in self.tools_tc:
        for tool in self.tools_tc[repo]:
            t_groups = self.manifest.option(tool, 'groups')[1]
            if group_to_display not in t_groups and \
                    group_to_display != 'all groups':
                self.tools_tc[repo][tool].value = False
                self.tools_tc[repo][tool].hidden = True
            else:
                self.tools_tc[repo][tool].value = True
                self.tools_tc[repo][tool].hidden = False

    # redraw elements
    self.display()

    # add view back to queue
    self.views.append(group_to_display)
0.002372
def by_login(cls, session, login, local=True):
    """
    Get a user from a given login.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`

    :param login: the user login
    :type login: unicode

    :return: the associated user
    :rtype: :class:`pyshop.models.User`
    """
    user = cls.first(session,
                     where=((cls.login == login),
                            (cls.local == local),)
                     )
    # XXX it appears that the query is not case sensitive!
    return user if user and user.login == login else None
0.003101
def ip2network(ip):
    """Convert a dotted-quad ip to base network number.

    This differs from :func:`ip2long` in that partial addresses are treated
    as all network instead of network plus host (eg. '127.1' expands to
    '127.1.0.0').

    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or `None` if ip is invalid.
    """
    if not validate_ip(ip):
        return None

    quads = ip.split('.')
    netw = 0
    for i in range(4):
        netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
    return netw
0.001686
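A usage sketch for ip2network above; it assumes validate_ip from the same module accepts partial dotted quads, which the docstring implies.

ip2network('127.0.0.1')  # 2130706433 (0x7F000001)
ip2network('127.1')      # 2130771968: '127.1' is padded to '127.1.0.0'
ip2network('300.1')      # None (assuming validate_ip rejects out-of-range quads)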
def fetchText(cls, url, data, textSearch, optional):
    """Search text entry for given text pattern in a HTML page."""
    if textSearch:
        match = textSearch.search(data[0])
        if match:
            text = match.group(1)
            out.debug(u'matched text %r with pattern %s' % (text, textSearch.pattern))
            return unescape(text).strip()
        if optional:
            return None
        else:
            raise ValueError("Pattern %s not found at URL %s." % (textSearch.pattern, url))
    else:
        return None
0.006723
def _date_from_match(match_object):
    """
    Create a date object from a regular expression match.

    The regular expression match is expected to be from _RE_DATE or
    _RE_DATETIME.

    @param match_object: The regular expression match.
    @type match_object: B{re}.I{MatchObject}

    @return: A date object.
    @rtype: B{datetime}.I{date}
    """
    year = int(match_object.group("year"))
    month = int(match_object.group("month"))
    day = int(match_object.group("day"))
    return datetime.date(year, month, day)
0.001887
def loads(self, value):
    '''Returns deserialized `value`.'''
    for serializer in reversed(self):
        value = serializer.loads(value)
    return value
0.012821
def continuousGenerator(self, request):
    """
    Returns a generator over the (continuous, nextPageToken) pairs
    defined by the (JSON string) request.
    """
    compoundId = None
    if request.continuous_set_id != "":
        compoundId = datamodel.ContinuousSetCompoundId.parse(
            request.continuous_set_id)
    if compoundId is None:
        raise exceptions.ContinuousSetNotSpecifiedException()
    dataset = self.getDataRepository().getDataset(
        compoundId.dataset_id)
    continuousSet = dataset.getContinuousSet(request.continuous_set_id)
    iterator = paging.ContinuousIterator(request, continuousSet)
    return iterator
0.002793
def _import_from_importer(network, importer, basename, skip_time=False):
    """
    Import network data from importer.

    Parameters
    ----------
    skip_time : bool
        Skip importing time
    """
    attrs = importer.get_attributes()

    current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")]
    pypsa_version = None

    if attrs is not None:
        network.name = attrs.pop('name')

        try:
            pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")]
        except KeyError:
            pypsa_version = None

        for attr, val in iteritems(attrs):
            setattr(network, attr, val)

    # https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types
    if pypsa_version is None or pypsa_version < current_pypsa_version:
        logger.warning(dedent("""
                Importing PyPSA from older version of PyPSA than current version {}.
                Please read the release notes at https://pypsa.org/doc/release_notes.html
                carefully to prepare your network for import.
        """).format(network.pypsa_version))

    importer.pypsa_version = pypsa_version
    importer.current_pypsa_version = current_pypsa_version

    # if there is snapshots.csv, read in snapshot data
    df = importer.get_snapshots()
    if df is not None:
        network.set_snapshots(df.index)
        if "weightings" in df.columns:
            network.snapshot_weightings = df["weightings"].reindex(network.snapshots)

    imported_components = []

    # now read in other components; make sure buses and carriers come first
    for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}):
        list_name = network.components[component]["list_name"]

        df = importer.get_static(list_name)
        if df is None:
            if component == "Bus":
                logger.error("Error, no buses found")
                return
            else:
                continue

        import_components_from_dataframe(network, df, component)

        if not skip_time:
            for attr, df in importer.get_series(list_name):
                import_series_from_dataframe(network, df, component, attr)

        logger.debug(getattr(network, list_name))

        imported_components.append(list_name)

    logger.info("Imported network{} has {}".format(" " + basename, ", ".join(imported_components)))
0.004078
def add_tracks(self, track):
    """
    Add a track or iterable of tracks.

    Parameters
    ----------
    track : iterable or Track
        Iterable of :class:`Track` objects, or a single :class:`Track`
        object.
    """
    from trackhub import BaseTrack
    if isinstance(track, BaseTrack):
        self.add_child(track)
        self._tracks.append(track)
    else:
        for t in track:
            self.add_child(t)
            self._tracks.append(t)
0.003759
def tables(self):
    """Print the existing tables in a database

    :example: ``ds.tables()``
    """
    if self._check_db() == False:
        return
    try:
        pmodels = self._tables()
        if pmodels is None:
            return
        num = len(pmodels)
        s = "s"
        if num == 1:
            s = ""
        msg = "Found " + colors.bold(str(num)) + " table" + s + ":\n"
        msg += "\n".join(pmodels)
        self.info(msg)
    except Exception as e:
        self.err(e, "Can not print tables")
0.005051
def get_join_cols(by_entry):
    """ helper function used for joins
    builds left and right join list for join function
    """
    left_cols = []
    right_cols = []
    for col in by_entry:
        if isinstance(col, str):
            left_cols.append(col)
            right_cols.append(col)
        else:
            left_cols.append(col[0])
            right_cols.append(col[1])
    return left_cols, right_cols
0.026954
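A short example of the two shapes get_join_cols accepts: a bare string joins a same-named column, while a pair joins differently named columns.

left, right = get_join_cols(['id', ('order_id', 'oid')])
# left  == ['id', 'order_id']
# right == ['id', 'oid']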
def filter_spent_outputs(self, outputs):
    """Remove outputs that have been spent

    Args:
        outputs: list of TransactionLink
    """
    links = [o.to_dict() for o in outputs]
    txs = list(query.get_spending_transactions(self.connection, links))
    spends = {TransactionLink.from_dict(input_['fulfills'])
              for tx in txs
              for input_ in tx['inputs']}
    return [ff for ff in outputs if ff not in spends]
0.004149
def daterange(self):
    """
    This ``PeriodRange`` represented as a naive
    :class:`~spans.types.daterange`.
    """
    return daterange(
        lower=self.lower,
        upper=self.upper,
        lower_inc=self.lower_inc,
        upper_inc=self.upper_inc)
0.006667
def to_json(self):
    """Return the individual info in a dictionary for json."""
    self.logger.debug("Returning json info")
    individual_info = {
        'family_id': self.family,
        'id': self.individual_id,
        'sex': str(self.sex),
        'phenotype': str(self.phenotype),
        'mother': self.mother,
        'father': self.father,
        'extra_info': self.extra_info
    }
    return individual_info
0.016293
def register_periodic_tasks(self, tasks: Iterable[Task]):
    """Register tasks that need to be scheduled periodically."""
    for task in tasks:
        self._scheduler.enter(
            int(task.periodicity.total_seconds()),
            0,
            self._schedule_periodic_task,
            argument=(task,)
        )
0.005634
def qualify(self):
    """
    Convert attribute values, that are references to other objects, into
    I{qref}. Qualified using the default document namespace. Since many
    wsdls are written improperly: when the document does not define a
    default namespace, the schema target namespace is used to qualify
    references.
    """
    defns = self.root.defaultNamespace()
    if Namespace.none(defns):
        defns = self.schema.tns
    for a in self.autoqualified():
        ref = getattr(self, a)
        if ref is None:
            continue
        if isqref(ref):
            continue
        qref = qualify(ref, self.root, defns)
        log.debug('%s, convert %s="%s" to %s', self.id, a, ref, qref)
        setattr(self, a, qref)
0.00246
def lpushx(self, key, *values):
    """
    Insert values at the head of an existing list.

    :param key: The list's key
    :type key: :class:`str`, :class:`bytes`
    :param values: One or more positional arguments to insert at
        the beginning of the list. Each value is inserted at the
        beginning of the list individually (see discussion below).
    :returns: the length of the list after push operations, zero if
        `key` does not refer to a list
    :rtype: int
    :raises: :exc:`~tredis.exceptions.TRedisException`

    This method inserts `values` at the head of the list stored at `key`,
    only if `key` already exists and holds a list. In contrast to
    :meth:`.lpush`, no operation will be performed when key does not yet
    exist.

    .. note:: **Time complexity**: ``O(1)``
    """
    return self._execute([b'LPUSHX', key] + list(values))
0.002077
def on_okButton(self, event):
    """
    grab user input values, format them, and run huji_magic.py with the
    appropriate flags
    """
    os.chdir(self.WD)
    options = {}
    HUJI_file = self.bSizer0.return_value()
    if not HUJI_file:
        pw.simple_warning("You must select a HUJI format file")
        return False
    options['magfile'] = HUJI_file

    dat_file = self.bSizer0A.return_value()
    if os.path.isfile(dat_file):
        options['datafile'] = dat_file
    else:
        dat_file = ""

    magicoutfile = os.path.split(HUJI_file)[1] + ".magic"
    outfile = os.path.join(self.WD, magicoutfile)
    options['meas_file'] = outfile
    magicoutfile = os.path.split(HUJI_file)[1] + "_specimens.txt"
    spec_outfile = os.path.join(self.WD, magicoutfile)
    options['spec_file'] = spec_outfile
    magicoutfile = os.path.split(HUJI_file)[1] + "_samples.txt"
    samp_outfile = os.path.join(self.WD, magicoutfile)
    options['samp_file'] = samp_outfile
    magicoutfile = os.path.split(HUJI_file)[1] + "_sites.txt"
    site_outfile = os.path.join(self.WD, magicoutfile)
    options['site_file'] = site_outfile
    magicoutfile = os.path.split(HUJI_file)[1] + "_locations.txt"
    loc_outfile = os.path.join(self.WD, magicoutfile)
    options['loc_file'] = loc_outfile

    user = self.bSizer1.return_value()
    options['user'] = user
    if user:
        user = '-usr ' + user

    experiment_type = self.bSizer2.return_value()
    options['codelist'] = experiment_type
    if not experiment_type:
        pw.simple_warning("You must select an experiment type")
        return False

    cooling_rate = self.cooling_rate.GetValue() or 0
    if cooling_rate:
        experiment_type = experiment_type + " " + cooling_rate

    lab_field = self.bSizer3.return_value()
    if not lab_field:
        lab_field = "0 0 0"
    lab_field_list = lab_field.split()
    options['labfield'] = lab_field_list[0]
    options['phi'] = lab_field_list[1]
    options['theta'] = lab_field_list[2]
    lab_field = '-dc ' + lab_field

    spc = self.bSizer4.return_value()
    options['specnum'] = spc or 0
    if not spc:
        spc = '-spc 0'
    else:
        spc = '-spc ' + spc

    ncn = self.bSizer5.return_value()
    options['samp_con'] = ncn

    loc_name = self.bSizer6.return_value()
    options['location'] = loc_name
    if loc_name:
        loc_name = '-loc ' + loc_name

    #peak_AF = self.bSizer7.return_value()
    #options['peakfield'] = peak_AF

    replicate = self.bSizer8.return_value()
    if replicate:
        options['noave'] = 0
        replicate = ''
    else:
        options['noave'] = 1
        replicate = '-A'

    COMMAND = "huji_magic_new.py -f {} -fd {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} -LP {} {} -ncn {} {} {} {}".format(
        HUJI_file, dat_file, outfile, spec_outfile, samp_outfile,
        site_outfile, loc_outfile, user, experiment_type, loc_name,
        ncn, lab_field, spc, replicate)
    program_ran, error_message = convert.huji(**options)
    if program_ran:
        pw.close_window(self, COMMAND, outfile)
    else:
        pw.simple_warning(error_message)
0.005692
def getdevice_by_uuid(uuid):
    """
    Get a HDD device by uuid

    Example::

        from burlap.disk import getdevice_by_uuid

        device = getdevice_by_uuid("356fafdc-21d5-408e-a3e9-2b3f32cb2a8c")
        if device:
            mount(device, '/mountpoint')

    """
    with settings(hide('running', 'warnings', 'stdout'), warn_only=True):
        res = run_as_root('blkid -U %s' % uuid)
        if not res.succeeded:
            return None
        return res
0.002123
def is_mac_address(value, **kwargs):
    """Indicate whether ``value`` is a valid MAC address.

    :param value: The value to evaluate.

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
      or duplicates keyword parameters passed to the underlying validator
    """
    try:
        value = validators.mac_address(value, **kwargs)
    except SyntaxError as error:
        raise error
    except Exception:
        return False

    return True
0.003419
def update_handles(self, key, axis, element, ranges, style):
    """
    Update the elements of the plot.
    """
    self.teardown_handles()
    plot_data, plot_kwargs, axis_kwargs = self.get_data(element, ranges, style)

    with abbreviated_exception():
        handles = self.init_artists(axis, plot_data, plot_kwargs)
    self.handles.update(handles)
    return axis_kwargs
0.007246
def encodeEntitiesReentrant(self, input):
    """Do a global encoding of a string, replacing the predefined
    entities and non ASCII values with their entities and CharRef
    counterparts. Contrary to xmlEncodeEntities, this routine is
    reentrant, and result must be deallocated. """
    ret = libxml2mod.xmlEncodeEntitiesReentrant(self._o, input)
    return ret
0.005013
def get_broadcast_message(self, rawtx):
    """TODO add docstring"""
    tx = deserialize.tx(rawtx)
    result = control.get_broadcast_message(self.testnet, tx)
    result["signature"] = serialize.signature(result["signature"])
    return result
0.007547
def _set_data(self, action):
    """ capture Wikidata API response data """
    if action == 'labels':
        self._set_labels()
    if action == 'wikidata':
        self._set_wikidata()
        self.get_labels(show=False)
0.007576
def content_val(self, ymldata=None, messages=None):
    """Validates the Command Dictionary to ensure the contents for each of
    the fields meets specific criteria regarding the expected types, byte
    ranges, etc."""
    self._ymlproc = YAMLProcessor(self._ymlfile, False)  # Turn off the YAML Processor

    log.debug("BEGIN: Content-based validation of Command dictionary")
    if ymldata is not None:
        cmddict = ymldata
    elif ymldata is None and self._ymlproc.loaded:
        cmddict = self._ymlproc.data
    elif not self._ymlproc.loaded:
        raise util.YAMLError("YAML failed to load.")

    try:
        # instantiate the document number. this will increment in order to
        # track the line numbers and section where validation fails
        docnum = 0

        # boolean to hold argument validity
        argsvalid = True

        # list of rules to validate against
        rules = []

        ### set the command rules
        #
        # set uniqueness rule for command names
        rules.append(UniquenessRule('name', "Duplicate command name: %s", messages))

        # set uniqueness rule for opcodes
        rules.append(UniquenessRule('opcode', "Duplicate opcode: %s", messages))
        #
        ###

        for cmdcnt, cmddefn in enumerate(cmddict[0]):
            # check the command rules
            for rule in rules:
                rule.check(cmddefn)

            # list of argument rules to validate against
            argrules = []

            ### set rules for command arguments
            #
            # set uniqueness rule for opcodes
            argrules.append(UniquenessRule('name', "Duplicate argument name: " + cmddefn.name + ".%s", messages))

            # set type rule for arg.type
            argrules.append(TypeRule('type', "Invalid argument type for argument: " + cmddefn.name + ".%s", messages))

            # set argument size rule for arg.type.nbytes
            argrules.append(TypeSizeRule('nbytes', "Invalid argument size for argument: " + cmddefn.name + ".%s", messages))

            # set argument enumerations rule to check no enumerations contain
            # un-quoted YAML special variables
            argrules.append(EnumRule('enum', "Invalid enum value for argument: " + cmddefn.name + ".%s", messages))

            # set byte order rule to ensure proper ordering of arguments
            argrules.append(ByteOrderRule('bytes', "Invalid byte order for argument: " + cmddefn.name + ".%s", messages))
            #
            ###

            argdefns = cmddefn.argdefns
            for arg in argdefns:
                # check argument rules
                for rule in argrules:
                    rule.check(arg)

            # check if argument rule failed, if so set the validity to False
            if not all(r.valid is True for r in argrules):
                argsvalid = False

        log.debug("END: Content-based validation complete for '%s'", self._ymlfile)

        # check validity of all command rules and argument validity
        return all(rule.valid is True for rule in rules) and argsvalid

    except util.YAMLValidationError as e:
        # Display the error message
        if messages is not None:
            if len(e.message) < 128:
                msg = "Validation Failed for YAML file '" + self._ymlfile + "': '" + str(e.message) + "'"
            else:
                msg = "Validation Failed for YAML file '" + self._ymlfile + "'"

            log.error(msg)
            self.ehandler.process(docnum, self.ehandler.doclines, e, messages)
        return False
0.004945
def add_entry(self, timestamp, data):
    """Adds a new data entry to the TimeSeries.

    :param timestamp: Time stamp of the data.
        This has either to be a float representing the UNIX epochs
        or a string containing a timestamp in the given format.
    :param numeric data: Actual data value.
    """
    self._normalized = self._predefinedNormalized
    self._sorted = self._predefinedSorted

    tsformat = self._timestampFormat
    if tsformat is not None:
        timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat)

    self._timeseriesData.append([float(timestamp), float(data)])
0.005908
def _match_real(filename, include, exclude, follow, symlinks):
    """Match real filename includes and excludes."""

    sep = '\\' if util.platform() == "windows" else '/'
    if isinstance(filename, bytes):
        sep = os.fsencode(sep)

    if not filename.endswith(sep) and os.path.isdir(filename):
        filename += sep

    matched = False
    for pattern in include:
        if _fs_match(pattern, filename, sep, follow, symlinks):
            matched = True
            break

    if matched:
        matched = True
        if exclude:
            for pattern in exclude:
                if _fs_match(pattern, filename, sep, follow, symlinks):
                    matched = False
                    break

    return matched
0.001364
def message(self, message, source, point, ln):
    """Creates a SyntaxError-like message."""
    if message is None:
        message = "parsing failed"
    if ln is not None:
        message += " (line " + str(ln) + ")"
    if source:
        if point is None:
            message += "\n" + " " * taberrfmt + clean(source)
        else:
            part = clean(source.splitlines()[lineno(point, source) - 1], False).lstrip()
            point -= len(source) - len(part)  # adjust all points based on lstrip
            part = part.rstrip()  # adjust only points that are too large based on rstrip
            message += "\n" + " " * taberrfmt + part
            if point > 0:
                if point >= len(part):
                    point = len(part) - 1
                message += "\n" + " " * (taberrfmt + point) + "^"
    return message
0.00547
def is_simpletable(table):
    """test if the table has only strings in the cells"""
    tds = table('td')
    for td in tds:
        if td.contents != []:
            td = tdbr2EOL(td)
            if len(td.contents) == 1:
                thecontents = td.contents[0]
                if not isinstance(thecontents, NavigableString):
                    return False
            else:
                return False
    return True
0.002331
def addSuffixToExtensions(toc):
    """
    Returns a new TOC with proper library suffix for EXTENSION items.
    """
    new_toc = TOC()
    for inm, fnm, typ in toc:
        if typ in ('EXTENSION', 'DEPENDENCY'):
            binext = os.path.splitext(fnm)[1]
            if not os.path.splitext(inm)[1] == binext:
                inm = inm + binext
        new_toc.append((inm, fnm, typ))
    return new_toc
0.002445
def get_session_indexes(request):
    """
    Gets the SessionIndexes from the Logout Request

    :param request: Logout Request Message
    :type request: string|DOMDocument

    :return: The SessionIndex value
    :rtype: list
    """
    elem = OneLogin_Saml2_XML.to_etree(request)
    session_indexes = []
    session_index_nodes = OneLogin_Saml2_XML.query(elem, '/samlp:LogoutRequest/samlp:SessionIndex')
    for session_index_node in session_index_nodes:
        session_indexes.append(OneLogin_Saml2_XML.element_text(session_index_node))
    return session_indexes
0.006421
def get_profile_model():
    """
    Returns configured user profile model or None if not found
    """
    auth_profile_module = getattr(settings, 'AUTH_PROFILE_MODULE', None)
    profile_model = None

    if auth_profile_module:
        # get the profile model. TODO: super flaky, refactor
        app_label, model = auth_profile_module.split('.')
        profile_model = getattr(__import__("%s.models" % app_label,
                                           globals(), locals(), [model, ], -1),
                                model, None)
    return profile_model
0.005871
def parse_ggKbase_tables(tables, id_type):
    """
    convert ggKbase genome info tables to dictionary
    """
    g2info = {}
    for table in tables:
        for line in open(table):
            line = line.strip().split('\t')
            if line[0].startswith('name'):
                header = line
                header[4] = 'genome size (bp)'
                header[12] = '#SCGs'
                header[13] = '#SCG duplicates'
                continue
            name, code, info = line[0], line[1], line
            info = [to_int(i) for i in info]
            if id_type is False:
                # try to use name and code ID
                if 'UNK' in code or 'unknown' in code:
                    code = name
                if (name != code) and (name and code in g2info):
                    print('# duplicate name or code in table(s)', file=sys.stderr)
                    print('# %s and/or %s' % (name, code), file=sys.stderr)
                    exit()
                if name not in g2info:
                    g2info[name] = {item: stat for item, stat in zip(header, info)}
                if code not in g2info:
                    g2info[code] = {item: stat for item, stat in zip(header, info)}
            else:
                if id_type == 'name':
                    ID = name
                elif id_type == 'code':
                    ID = code
                else:
                    print('# specify name or code column using -id', file=sys.stderr)
                    exit()
                ID = ID.replace(' ', '')
                g2info[ID] = {item: stat for item, stat in zip(header, info)}
                if g2info[ID]['genome size (bp)'] == '':
                    g2info[ID]['genome size (bp)'] = 0
    return g2info
0.005169
def _run_vagrant_command(self, args):
    '''
    Run a vagrant command and return its stdout.

    args: A sequence of arguments to a vagrant command line.
        e.g. ['up', 'my_vm_name', '--no-provision'] or
        ['up', None, '--no-provision'] for a non-Multi-VM environment.
    '''
    # Make subprocess command
    command = self._make_vagrant_command(args)
    with self.err_cm() as err_fh:
        return compat.decode(subprocess.check_output(command, cwd=self.root,
                                                     env=self.env, stderr=err_fh))
0.006826
def parameters(self, sequence, value_means, value_ranges, arrangement):
    """Relates the individual to be evolved to the full parameter string.

    Parameters
    ----------
    sequence: str
        Full amino acid sequence for specification object to be optimized.
        Must be equal to the number of residues in the model.
    value_means: list
        List containing mean values for parameters to be optimized.
    value_ranges: list
        List containing ranges for parameters to be optimized.
        Values must be positive.
    arrangement: list
        Full list of fixed and variable parameters for model building.
        Fixed values are the appropriate value. Values to be varied
        should be listed as 'var0', 'var1' etc, and must be in ascending
        numerical order. Variables can be repeated if required.
    """
    self._params['sequence'] = sequence
    self._params['value_means'] = value_means
    self._params['value_ranges'] = value_ranges
    self._params['arrangement'] = arrangement
    if any(x <= 0 for x in self._params['value_ranges']):
        raise ValueError("range values must be greater than zero")
    self._params['variable_parameters'] = []
    for i in range(len(self._params['value_means'])):
        self._params['variable_parameters'].append(
            "".join(['var', str(i)]))
    if len(set(arrangement).intersection(
            self._params['variable_parameters'])) != len(
                self._params['value_means']):
        raise ValueError("argument mismatch!")
    if len(self._params['value_ranges']) != len(
            self._params['value_means']):
        raise ValueError("argument mismatch!")
0.001092
def tuplewrap(value):
    """
    INTENDED TO TURN lists INTO tuples FOR USE AS KEYS
    """
    if is_many(value):
        return tuple(tuplewrap(v) if is_sequence(v) else v for v in value)
    return unwrap(value),
0.00463
def _create_remote_dict_method(dict_method_name: str):
    """
    Generates a method for the State class, that will call the
    "method_name" on the state (a ``dict``) stored on the server,
    and return the result.

    Glorified RPC.
    """

    def remote_method(self, *args, **kwargs):
        return self._s_request_reply(
            {
                Msgs.cmd: Cmds.run_dict_method,
                Msgs.info: dict_method_name,
                Msgs.args: args,
                Msgs.kwargs: kwargs,
            }
        )

    remote_method.__name__ = dict_method_name
    return remote_method
0.003311
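A sketch of how such generated proxies might be wired onto the owning class; the State name and the _s_request_reply transport come from the snippet, everything else here is hypothetical.

class State:
    def _s_request_reply(self, request):
        ...  # serialize `request`, send it to the server process, return the reply

# One proxy per dict method we want to forward to the server-side dict.
for name in ('get', 'update', 'setdefault', 'keys'):
    setattr(State, name, _create_remote_dict_method(name))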
def set_value(request):
    """Set the value for *key* and return the stored value."""
    key = request.matchdict['key']
    _VALUES[key] = request.json_body
    return _VALUES.get(key)
0.010256
def _get_services(self, version='v1'):
    '''get version 1 of the google compute and storage service

    Parameters
    ==========
    version: version to use (default is v1)
    '''
    self._bucket_service = storage.Client()
    creds = GoogleCredentials.get_application_default()
    self._storage_service = discovery_build('storage', version, credentials=creds)
    self._compute_service = discovery_build('compute', version, credentials=creds)
0.00823
def sequence_number(self):
    """
    The sequence number of this command.

    This is the sequence number assigned by the issuing client.
    """
    entry = self._proto.commandQueueEntry
    if entry.cmdId.HasField('sequenceNumber'):
        return entry.cmdId.sequenceNumber
    return None
0.006135
def get_player_summaries(self, steamids=None, **kwargs):
    """Returns a dictionary containing player summaries

    :param steamids: (list) list of ``32-bit`` or ``64-bit`` steam ids,
        notice that the api will convert if ``32-bit`` are given
    :return: dictionary of player summaries, see :doc:`responses </responses>`
    """
    if not isinstance(steamids, collections.Iterable):
        steamids = [steamids]
    base64_ids = list(map(convert_to_64_bit, filter(lambda x: x is not None, steamids)))
    if 'steamids' not in kwargs:
        kwargs['steamids'] = base64_ids
    url = self.__build_url(urls.GET_PLAYER_SUMMARIES, **kwargs)
    req = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    if not self.__check_http_err(req.status_code):
        return response.build(req, url, self.raw_mode)
0.005342
def http_methods(self, urls=None, **route_data):
    """Creates routes from a class, where the class method names should
    line up to HTTP METHOD types"""
    def decorator(class_definition):
        instance = class_definition
        if isinstance(class_definition, type):
            instance = class_definition()
        router = self.urls(urls if urls else "/{0}".format(instance.__class__.__name__.lower()),
                           **route_data)
        for method in HTTP_METHODS:
            handler = getattr(instance, method.lower(), None)
            if handler:
                http_routes = getattr(handler, '_hug_http_routes', ())
                if http_routes:
                    for route in http_routes:
                        http(**router.accept(method).where(**route).route)(handler)
                else:
                    http(**router.accept(method).route)(handler)

                cli_routes = getattr(handler, '_hug_cli_routes', ())
                if cli_routes:
                    for route in cli_routes:
                        cli(**self.where(**route).route)(handler)
        return class_definition
    return decorator
0.004108
def Vc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[SURF]):
    r'''This function handles the retrieval of a chemical's critical volume.
    Lookup is based on CASRNs. Will automatically select a data source to use
    if no Method is provided; returns None if the data is not available.

    Preferred sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for
    inorganic chemicals. Function has data for approximately 1000 chemicals.

    Examples
    --------
    >>> Vc(CASRN='64-17-5')
    0.000168

    Parameters
    ----------
    CASRN : string
        CASRN [-]

    Returns
    -------
    Vc : float
        Critical volume, [m^3/mol]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to obtain Vc with the given inputs

    Other Parameters
    ----------------
    Method : string, optional
        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS',
        'CRC', 'PSRK', 'YAWS', and 'SURF'. All valid values are also held
        in the list `Vc_methods`.
    AvailableMethods : bool, optional
        If True, function will determine which methods can be used to obtain
        Vc for the desired chemical, and will return methods instead of Vc
    IgnoreMethods : list, optional
        A list of methods to ignore in obtaining the full list of methods,
        useful for performance reasons and for ignoring inaccurate methods

    Notes
    -----
    A total of six sources are available for this function. They are:

        * 'IUPAC', a series of critically evaluated experimental datum for
          organic compounds in [1]_, [2]_, [3]_, [4]_, [5]_, [6]_, [7]_,
          [8]_, [9]_, [10]_, [11]_, and [12]_.
        * 'MATTHEWS', a series of critically evaluated data for inorganic
          compounds in [13]_.
        * 'CRC', a compilation of critically evaluated data by the TRC as
          published in [14]_.
        * 'PSRK', a compilation of experimental and estimated data published
          in [15]_.
        * 'YAWS', a large compilation of data from a variety of sources; no
          data points are sourced in the work of [16]_.
        * 'SURF', an estimation method using a simple quadratic method for
          estimating Vc from Tc and Pc. This is ignored and not returned as
          a method by default.

    References
    ----------
    .. [1] Ambrose, Douglas, and Colin L. Young. "Vapor-Liquid Critical
       Properties of Elements and Compounds. 1. An Introductory Survey."
       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):
       154-154. doi:10.1021/je950378q.
    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 2. Normal Alkanes."
       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995):
       531-46. doi:10.1021/je00019a001.
    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 3. Aromatic
       Hydrocarbons." Journal of Chemical & Engineering Data 40, no. 3
       (May 1, 1995): 547-58. doi:10.1021/je00019a002.
    .. [4] Gude, Michael, and Amyn S. Teja. "Vapor-Liquid Critical Properties
       of Elements and Compounds. 4. Aliphatic Alkanols." Journal of Chemical
       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.
       doi:10.1021/je00021a001.
    .. [5] Daubert, Thomas E. "Vapor-Liquid Critical Properties of Elements
       and Compounds. 5. Branched Alkanes and Cycloalkanes." Journal of
       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.
       doi:10.1021/je9501548.
    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 6. Unsaturated
       Aliphatic Hydrocarbons." Journal of Chemical & Engineering Data 41,
       no. 4 (January 1, 1996): 645-56. doi:10.1021/je9501999.
    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine
       Tsonopoulos. "Vapor-Liquid Critical Properties of Elements and
       Compounds. 7. Oxygen Compounds Other Than Alkanols and Cycloalkanols."
       Journal of Chemical & Engineering Data 46, no. 3 (May 1, 2001):
       457-79. doi:10.1021/je0001680.
    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 8. Organic Sulfur,
       Silicon, and Tin Compounds (C + H + S, Si, and Sn)." Journal of
       Chemical & Engineering Data 46, no. 3 (May 1, 2001): 480-85.
       doi:10.1021/je000210r.
    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas
       Ambrose, and Constantine Tsonopoulos. "Vapor-Liquid Critical
       Properties of Elements and Compounds. 9. Organic Compounds Containing
       Nitrogen." Journal of Chemical & Engineering Data 51, no. 2
       (March 1, 2006): 305-14. doi:10.1021/je050221q.
    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W.
       Morton, Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 10.
       Organic Compounds Containing Halogens." Journal of Chemical &
       Engineering Data 52, no. 5 (September 1, 2007): 1509-38.
       doi:10.1021/je700336g.
    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 11.
       Organic Compounds Containing B + O; Halogens + N, + O, + O + S, + S,
       + Si; N + O; and O + S, + Si." Journal of Chemical & Engineering Data
       54, no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.
    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin,
       David W. Morton, and Kenneth N. Marsh. "Vapor-Liquid Critical
       Properties of Elements and Compounds. 12. Review of Recent Data for
       Hydrocarbons and Non-Hydrocarbons." Journal of Chemical & Engineering
       Data, October 5, 2015, 151005081500002. doi:10.1021/acs.jced.5b00571.
    .. [13] Mathews, Joseph F. "Critical Constants of Inorganic Substances."
       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.
       doi:10.1021/cr60275a004.
    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
    .. [15] Horstmann, Sven, Anna Jabłoniec, Jörg Krafczyk, Kai Fischer, and
       Jürgen Gmehling. "PSRK Group Contribution Equation of State:
       Comprehensive Revision and Extension IV, Including Critical Constants
       and Α-Function Parameters for 1000 Components." Fluid Phase Equilibria
       227, no. 2 (January 25, 2005): 157-64.
       doi:10.1016/j.fluid.2004.11.002.
    .. [16] Yaws, Carl L. Thermophysical Properties of Chemicals and
       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
       Publishing, 2014.
    '''
    def list_methods():
        methods = []
        if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, 'Vc']):
            methods.append(IUPAC)
        if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, 'Vc']):
            methods.append(MATTHEWS)
        if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, 'Vc']):
            methods.append(CRC)
        if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, 'Vc']):
            methods.append(PSRK)
        if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, 'Vc']):
            methods.append(YAWS)
        if CASRN:
            methods.append(SURF)
        if IgnoreMethods:
            for Method in IgnoreMethods:
                if Method in methods:
                    methods.remove(Method)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]

    if Method == IUPAC:
        _Vc = float(_crit_IUPAC.at[CASRN, 'Vc'])
    elif Method == PSRK:
        _Vc = float(_crit_PSRKR4.at[CASRN, 'Vc'])
    elif Method == MATTHEWS:
        _Vc = float(_crit_Matthews.at[CASRN, 'Vc'])
    elif Method == CRC:
        _Vc = float(_crit_CRC.at[CASRN, 'Vc'])
    elif Method == YAWS:
        _Vc = float(_crit_Yaws.at[CASRN, 'Vc'])
    elif Method == SURF:
        _Vc = third_property(CASRN=CASRN, V=True)
    elif Method == NONE:
        return None
    else:
        raise Exception('Failure in function')
    return _Vc
0.001062
def to_xml(self):
    '''
    Returns a DOM representation of the payment.

    @return: Element
    '''
    for n, v in {"amount": self.amount,
                 "date": self.date,
                 "method": self.method}.items():
        if is_empty_or_none(v):
            raise PaymentError("'%s' attribute cannot be empty or None." % n)

    doc = Document()
    root = doc.createElement("payment")
    super(Payment, self).to_xml(root)
    self._create_text_node(root, "amount", self.amount)
    self._create_text_node(root, "method", self.method)
    self._create_text_node(root, "reference", self.ref, True)
    self._create_text_node(root, "date", self.date)
    return root
0.009079
def excluded(filename):
    """
    Check if options.exclude contains a pattern that matches filename.
    """
    basename = os.path.basename(filename)
    for pattern in options.exclude:
        if fnmatch(basename, pattern):
            # print basename, 'excluded because it matches', pattern
            return True
0.003125
def create_table(self, cursor, target, options):
    "Creates the target table."
    cursor.execute(
        self.create_sql[target].format(self.qualified_names[target]))
0.010989
def row(self, content='', align='left'):
    """
    A row of the menu, which comprises the left and right verticals plus
    the given content.

    Returns:
        str: A row of this menu component with the specified content.
    """
    return u"{lm}{vert}{cont}{vert}".format(lm=' ' * self.margins.left,
                                            vert=self.border_style.outer_vertical,
                                            cont=self._format_content(content, align))
0.009881
def _norm_perm_list_from_perm_dict(self, perm_dict):
    """Return a minimal, ordered, hashable list of subjects and permissions."""
    high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict)
    return [
        [k, list(sorted(high_perm_dict[k]))]
        for k in ORDERED_PERM_LIST
        if high_perm_dict.get(k, False)
    ]
0.008108
def running_jobs(self, exit_on_error=True):
    """Initialize multiprocessing."""
    with self.handling_exceptions():
        if self.using_jobs:
            from concurrent.futures import ProcessPoolExecutor
            try:
                with ProcessPoolExecutor(self.jobs) as self.executor:
                    yield
            finally:
                self.executor = None
        else:
            yield
    if exit_on_error:
        self.exit_on_error()
0.003883
def description_record(self):
    """Return dict describing :class:`condoor.Connection` object.

    Example::

        {'connections': [{'chain': [{'driver_name': 'eXR',
                                     'family': 'ASR9K',
                                     'hostname': 'vkg3',
                                     'is_console': True,
                                     'is_target': True,
                                     'mode': 'global',
                                     'os_type': 'eXR',
                                     'os_version': '6.1.2.06I',
                                     'platform': 'ASR-9904',
                                     'prompt': 'RP/0/RSP0/CPU0:vkg3#',
                                     'udi': {'description': 'ASR-9904 AC Chassis',
                                             'name': 'Rack 0',
                                             'pid': 'ASR-9904-AC',
                                             'sn': 'FOX2024GKDE ',
                                             'vid': 'V01'}}]},
                         {'chain': [{'driver_name': 'generic',
                                     'family': None,
                                     'hostname': '172.27.41.52:2045',
                                     'is_console': None,
                                     'is_target': True,
                                     'mode': None,
                                     'os_type': None,
                                     'os_version': None,
                                     'platform': None,
                                     'prompt': None,
                                     'udi': None}]}],
         'last_chain': 0}

    """
    if self.connection_chains:
        return {
            'connections': [{'chain': [device.device_info for device in chain.devices]}
                            for chain in self.connection_chains],
            'last_chain': self._last_chain_index,
        }
    else:
        raise ConnectionError("Device not connected")
0.001596
def parse_timedelta(deltastr):
    """
    Parse a string describing a period of time.
    """
    matches = TIMEDELTA_REGEX.match(deltastr)
    if not matches:
        return None
    components = {}
    for name, value in matches.groupdict().items():
        if value:
            components[name] = int(value)
    for period, hours in (('days', 24), ('years', 8766)):
        if period in components:
            components['hours'] = components.get('hours', 0) + \
                components[period] * hours
            del components[period]
    return int(timedelta(**components).total_seconds())
0.001613
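A hedged usage sketch for parse_timedelta above. The exact tokens accepted depend on TIMEDELTA_REGEX, which is not shown; assuming it captures named groups such as 'days' and 'hours':

parse_timedelta('2 days 3 hours')  # days fold into hours: 2*24 + 3 = 51 h -> 183600 seconds
parse_timedelta('gibberish')       # None: the regex does not match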
def airpwn(iffrom, ifto, replace, pattern="", ignorepattern=""):
    """Before using this, initialize "iffrom" and "ifto" interfaces:
    iwconfig iffrom mode monitor
    iwpriv orig_ifto hostapd 1
    ifconfig ifto up
    note: if ifto=wlan0ap then orig_ifto=wlan0
    note: ifto and iffrom must be set on the same channel
    ex:
    ifconfig eth1 up
    iwconfig eth1 mode monitor
    iwconfig eth1 channel 11
    iwpriv wlan0 hostapd 1
    ifconfig wlan0ap up
    iwconfig wlan0 channel 11
    iwconfig wlan0 essid dontexist
    iwconfig wlan0 mode managed
    """
    ptrn = re.compile(pattern)
    iptrn = re.compile(ignorepattern)

    def do_airpwn(p, ifto=ifto, replace=replace, ptrn=ptrn, iptrn=iptrn):
        if not isinstance(p, Dot11):
            return
        if not p.FCfield & 1:
            return
        if not p.haslayer(TCP):
            return
        ip = p.getlayer(IP)
        tcp = p.getlayer(TCP)
        pay = str(tcp.payload)
        # print "got tcp"
        if not ptrn.match(pay):
            return
        # print "match 1"
        if iptrn.match(pay):
            return
        # print "match 2"
        del(p.payload.payload.payload)
        p.FCfield = "from-DS"
        p.addr1, p.addr2 = p.addr2, p.addr1
        q = p.copy()
        p /= IP(src=ip.dst, dst=ip.src)
        p /= TCP(sport=tcp.dport, dport=tcp.sport,
                 seq=tcp.ack, ack=tcp.seq + len(pay),
                 flags="PA")
        q = p.copy()
        p /= replace
        q.ID += 1
        q.getlayer(TCP).flags = "RA"
        q.getlayer(TCP).seq += len(replace)
        sendp([p, q], iface=ifto, verbose=0)
        # print "send", repr(p)
        # print "send", repr(q)
        print(p.sprintf("Sent %IP.src%:%IP.sport% > %IP.dst%:%TCP.dport%"))

    sniff(iface=iffrom, prn=do_airpwn)
0.008009
def invokeCompletionIfAvailable(self, requestedByUser=False): """Invoke completion, if available. Called after text has been typed in qpart Returns True, if invoked """ if self._qpart.completionEnabled and self._wordSet is not None: wordBeforeCursor = self._wordBeforeCursor() wholeWord = wordBeforeCursor + self._wordAfterCursor() forceShow = requestedByUser or self._completionOpenedManually if wordBeforeCursor: if len(wordBeforeCursor) >= self._qpart.completionThreshold or forceShow: if self._widget is None: model = _CompletionModel(self._wordSet) model.setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(model, forceShow): self._createWidget(model) return True else: self._widget.model().setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(self._widget.model(), forceShow): self._widget.updateGeometry() return True self._closeCompletion() return False
0.004736
def read_value(ftype, prompt, default=None, minval=None, maxval=None,
               allowed_single_chars=None, question_mark=True):
    """Return value read from keyboard

    Parameters
    ----------
    ftype : int() or float()
        Function defining the expected type.
    prompt : str
        Prompt string.
    default : int, float, str or None
        Default value.
    minval : int or None
        Minimum allowed value.
    maxval : int or None
        Maximum allowed value.
    allowed_single_chars : str
        String containing allowed valid characters.
    question_mark : bool
        If True, display question mark after prompt.

    Returns
    -------
    result : integer, float or str
        Integer, float or single character.
    """

    # avoid PyCharm warning 'might be referenced before assignment'
    result = None

    # question mark
    if question_mark:
        cquestion_mark = ' ? '
    else:
        cquestion_mark = ''

    # check minimum value
    if minval is not None:
        try:
            iminval = ftype(minval)
        except ValueError:
            raise ValueError("'" + str(minval) + "' cannot " +
                             "be used as minval in read_value()")
    else:
        iminval = None

    # check maximum value
    if maxval is not None:
        try:
            imaxval = ftype(maxval)
        except ValueError:
            raise ValueError("'" + str(maxval) + "' cannot " +
                             "be used as maxval in read_value()")
    else:
        imaxval = None

    # minimum and maximum values
    if minval is None and maxval is None:
        cminmax = ''
    elif minval is None:
        cminmax = ' (number <= ' + str(imaxval) + ')'
    elif maxval is None:
        cminmax = ' (number >= ' + str(iminval) + ')'
    else:
        cminmax = ' (' + str(minval) + ' <= number <= ' + str(maxval) + ')'

    # main loop
    loop = True
    while loop:
        # display prompt
        if default is None:
            print(prompt + cminmax + cquestion_mark, end='')
            sys.stdout.flush()
        else:
            print(prompt + cminmax + ' [' + str(default) + ']' +
                  cquestion_mark, end='')
            sys.stdout.flush()

        # read user's input
        cresult = sys.stdin.readline().strip()
        if cresult == '' and default is not None:
            cresult = str(default)

        # if valid allowed single character, return character
        if len(cresult) == 1:
            if allowed_single_chars is not None:
                if cresult in allowed_single_chars:
                    return cresult

        # convert to ftype value
        try:
            result = ftype(cresult)
        except ValueError:
            print("*** Error: invalid " + str(ftype) + " value. Try again!")
        else:
            # check number is within expected range
            if minval is None and maxval is None:
                loop = False
            elif minval is None:
                if result <= imaxval:
                    loop = False
                else:
                    print("*** Error: number out of range. Try again!")
            elif maxval is None:
                if result >= iminval:
                    loop = False
                else:
                    print("*** Error: number out of range. Try again!")
            else:
                if iminval <= result <= imaxval:
                    loop = False
                else:
                    print("*** Error: number out of range. Try again!")

    return result
0.000282
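Typical calls to read_value, assuming the function above (and its sys import) are in scope. It is interactive, so the values shown are only illustrative:

# prompts "Image width (1 <= number <= 4096) [512] ? " and loops
# until the input parses as int and lies inside the range
width = read_value(int, "Image width", default=512, minval=1, maxval=4096)

# a bare 'q' bypasses the numeric conversion and is returned as-is,
# which is handy for "quit" escapes in interactive loops
thresh = read_value(float, "Threshold", default=0.5, allowed_single_chars='q')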
def label(self, nid, id_if_null=False): """ Fetches label for a node Arguments --------- nid : str Node identifier for entity to be queried id_if_null : bool If True and node has no label return id as label Return ------ str """ g = self.get_graph() if nid in g: n = g.node[nid] if 'label' in n: return n['label'] else: if id_if_null: return nid else: return None else: if id_if_null: return nid else: return None
0.002766
def vt2esofspy(vesseltree, outputfilename="tracer.txt", axisorder=[0, 1, 2]):
    """
    exports vesseltree to esofspy format

    :param vesseltree: filename or vesseltree dictionary structure
    :param outputfilename: output file name
    :param axisorder: order of axis can be specified with this option
    :return:
    """
    if isinstance(vesseltree, str) and os.path.isfile(vesseltree):
        import io3d
        vt = io3d.misc.obj_from_file(vesseltree)
    else:
        vt = vesseltree
    vtgm = vt['graph']['microstructure']
    lines = []
    vs = vt['general']['voxel_size_mm']
    sh = vt['general']['shape_px']

    # switch axis
    ax = axisorder

    lines.append("#Tracer+\n")
    lines.append("#voxelsize mm %f %f %f\n" % (vs[ax[0]], vs[ax[1]], vs[ax[2]]))
    lines.append("#shape %i %i %i\n" % (sh[ax[0]], sh[ax[1]], sh[ax[2]]))
    # placeholder for the count; overwritten after the loop below
    lines.append(str(len(vtgm) * 2) + "\n")
    i = 1
    for id in vtgm:
        try:
            nda = vtgm[id]['nodeA_ZYX']
            ndb = vtgm[id]['nodeB_ZYX']
            lines.append("%i\t%i\t%i\t%i\n" % (nda[ax[0]], nda[ax[1]], nda[ax[2]], i))
            lines.append("%i\t%i\t%i\t%i\n" % (ndb[ax[0]], ndb[ax[1]], ndb[ax[2]], i))
            i += 1
        except KeyError:
            # skip edges that are missing endpoint coordinates
            pass

    lines.append("%i\t%i\t%i\t%i" % (0, 0, 0, 0))
    lines[3] = str(i - 1) + "\n"
    with open(outputfilename, 'wt') as f:
        f.writelines(lines)
0.004093
def add_extension_if_needed(filepath, ext, check_if_exists=False):
    """Add the extension ext to filepath if it doesn't have it.

    Parameters
    ----------
    filepath: str
        File name or path

    ext: str
        File extension

    check_if_exists: bool
        If True, raise IOError when the resulting file path does not exist.

    Returns
    -------
    File name or path with extension added, if needed.
    """
    if not filepath.endswith(ext):
        filepath += ext

    if check_if_exists:
        if not op.exists(filepath):
            raise IOError('File not found: ' + filepath)

    return filepath
0.001838
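Assuming the function above is importable (it relies on os.path imported as op), usage is straightforward; note that check_if_exists=True touches the filesystem:

print(add_extension_if_needed('subject01_T1', '.nii.gz'))   # 'subject01_T1.nii.gz'
print(add_extension_if_needed('mask.nii.gz', '.nii.gz'))    # unchanged: 'mask.nii.gz'
# add_extension_if_needed('model', '.pkl', check_if_exists=True)
# would raise IOError unless 'model.pkl' actually exists on disk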
def start(component, exact): # type: (str, str) -> None """ Create a new release branch. Args: component (str): Version component to bump when creating the release. Can be *major*, *minor* or *patch*. exact (str): The exact version to set for the release. Overrides the component argument. This allows to re-release a version if something went wrong with the release upload. """ version_file = conf.get_path('version_file', 'VERSION') develop = conf.get('git.devel_branch', 'develop') common.assert_on_branch(develop) with conf.within_proj_dir(): out = shell.run('git status --porcelain', capture=True).stdout lines = out.split(os.linesep) has_changes = any( not l.startswith('??') for l in lines if l.strip() ) if has_changes: log.info("Cannot release: there are uncommitted changes") exit(1) old_ver, new_ver = versioning.bump(component, exact) log.info("Bumping package version") log.info(" old version: <35>{}".format(old_ver)) log.info(" new version: <35>{}".format(new_ver)) with conf.within_proj_dir(): branch = 'release/' + new_ver common.git_checkout(branch, create=True) log.info("Creating commit for the release") shell.run('git add {ver_file} && git commit -m "{msg}"'.format( ver_file=version_file, msg="Releasing v{}".format(new_ver) ))
0.001978
def batch_retrieve_overrides_in_course(self, course_id, assignment_overrides_id, assignment_overrides_assignment_id): """ Batch retrieve overrides in a course. Returns a list of specified overrides in this course, providing they target sections/groups/students visible to the current user. Returns null elements in the list for requests that were not found. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - assignment_overrides[id] """Ids of overrides to retrieve""" params["assignment_overrides[id]"] = assignment_overrides_id # REQUIRED - assignment_overrides[assignment_id] """Ids of assignments for each override""" params["assignment_overrides[assignment_id]"] = assignment_overrides_assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/assignments/overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/overrides".format(**path), data=data, params=params, all_pages=True)
0.004717
def transaction(self, transaction_id, expand_merchant=False): """ Returns an individual transaction, fetched by its id. Official docs: https://monzo.com/docs/#retrieve-transaction :param transaction_id: Monzo transaction ID :type transaction_id: str :param expand_merchant: whether merchant data should be included :type expand_merchant: bool :returns: Monzo transaction details :rtype: MonzoTransaction """ endpoint = '/transactions/{}'.format(transaction_id) data = dict() if expand_merchant: data['expand[]'] = 'merchant' response = self._get_response( method='get', endpoint=endpoint, params=data, ) return MonzoTransaction(data=response.json()['transaction'])
0.002401
def text(length, choices=string.ascii_letters):
    """Return a random fixed-length string.

    :param length: string length
    :param choices: string containing all the chars that can be used to build the string

    .. seealso:: :py:func:`rtext`
    """
    return ''.join(choice(choices) for _ in range(length))
0.006231
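The function is a single expression, so a fully self-contained restatement with its implied imports doubles as the usage example:

import string
from random import choice

def text(length, choices=string.ascii_letters):
    # draw `length` independent characters from `choices`
    return ''.join(choice(choices) for _ in range(length))

print(text(8))         # e.g. 'kQzPwAbn'
print(text(6, '01'))   # e.g. '010110' -- a random bit string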
def load_training_roidbs(self, names): """ Args: names (list[str]): name of the training datasets, e.g. ['train2014', 'valminusminival2014'] Returns: roidbs (list[dict]): Produce "roidbs" as a list of dict, each dict corresponds to one image with k>=0 instances. and the following keys are expected for training: file_name: str, full path to the image boxes: numpy array of kx4 floats, each row is [x1, y1, x2, y2] class: numpy array of k integers, in the range of [1, #categories], NOT [0, #categories) is_crowd: k booleans. Use k False if you don't know what it means. segmentation: k lists of numpy arrays (one for each instance). Each list of numpy arrays corresponds to the mask for one instance. Each numpy array in the list is a polygon of shape Nx2, because one mask can be represented by N polygons. If your segmentation annotations are originally masks rather than polygons, either convert it, or the augmentation will need to be changed or skipped accordingly. Include this field only if training Mask R-CNN. """ return COCODetection.load_many( cfg.DATA.BASEDIR, names, add_gt=True, add_mask=cfg.MODE_MASK)
0.005275
def should_generate_summaries(): """Is this an appropriate context to generate summaries. Returns: a boolean """ name_scope = tf.contrib.framework.get_name_scope() if name_scope and "while/" in name_scope: # Summaries don't work well within tf.while_loop() return False if tf.get_variable_scope().reuse: # Avoid generating separate summaries for different data shards return False return True
0.014052
def closed_sets(C, mincount_connectivity=0): """ Computes the strongly connected closed sets of C """ n = np.shape(C)[0] S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=True) closed = [] for s in S: mask = np.zeros(n, dtype=bool) mask[s] = True if C[np.ix_(mask, ~mask)].sum() == 0: # closed set, take it closed.append(s) return closed
0.004739
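closed_sets relies on a connected_sets helper from the same package; SciPy's strongly connected components can stand in for it, which makes the idea runnable on its own. A set of states is closed when no transition counts leave it, which is exactly the C[np.ix_(mask, ~mask)].sum() == 0 test above:

import numpy as np
from scipy.sparse.csgraph import connected_components

def closed_sets_sketch(C):
    # strong connectivity on the graph induced by nonzero counts
    n_comp, labels = connected_components(C > 0, directed=True,
                                          connection='strong')
    closed = []
    for c in range(n_comp):
        mask = labels == c
        if C[np.ix_(mask, ~mask)].sum() == 0:  # no counts leave the set
            closed.append(np.where(mask)[0])
    return closed

C = np.array([[10., 1., 0.],    # state 0 leaks into state 1 ...
              [0.,  5., 0.],    # ... but nothing ever leaves state 1
              [0.,  0., 3.]])   # state 2 is isolated, hence closed
print(closed_sets_sketch(C))    # [array([1]), array([2])] (order may vary)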
def insert_query(connection, publicId, aead, keyhandle, aeadobj):
    """Read the response fields, build the SQL insert query and run it
    against the database."""
    # turn the keyhandle into an integer
    keyhandle = key_handle_to_int(keyhandle)

    if not keyhandle == aead.key_handle:
        print("WARNING: keyhandle does not match aead.key_handle")
        return None

    # create the query object
    try:
        sql = aeadobj.insert().values(public_id=publicId,
                                      keyhandle=aead.key_handle,
                                      nonce=aead.nonce,
                                      aead=aead.data)
        # insert the query
        result = connection.execute(sql)
        return result
    except sqlalchemy.exc.IntegrityError:
        # duplicate public_id -- leave the existing row untouched
        pass
    return None
0.00277
# the yield/returnValue style below requires twisted's inlineCallbacks
# decorator, without which this would return a bare generator
@inlineCallbacks
def send_produce_request(self, payloads=None, acks=1,
                         timeout=DEFAULT_REPLICAS_ACK_MSECS,
                         fail_on_error=True, callback=None):
    """
    Encode and send some ProduceRequests

    ProduceRequests will be grouped by (topic, partition) and then
    sent to a specific broker. Output is a list of responses in the
    same order as the list of payloads specified

    Parameters
    ----------
    payloads:
        list of ProduceRequest
    acks:
        How many Kafka broker replicas need to write before
        the leader replies with a response
    timeout:
        How long the server has to receive the acks from the
        replicas before returning an error.
    fail_on_error:
        boolean, should we raise an Exception if we encounter an API error?
    callback:
        function, instead of returning the ProduceResponse,
        first pass it through this function

    Return
    ------
    a deferred which callbacks with a list of ProduceResponse

    Raises
    ------
    FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
    """
    encoder = partial(
        KafkaCodec.encode_produce_request,
        acks=acks,
        timeout=timeout)

    if acks == 0:
        decoder = None
    else:
        decoder = KafkaCodec.decode_produce_response

    resps = yield self._send_broker_aware_request(
        payloads, encoder, decoder)

    returnValue(self._handle_responses(resps, fail_on_error, callback))
0.002414
def smkdirs(dpath, mode=0o777): """Safely make a full directory path if it doesn't exist. Parameters ---------- dpath : str Path of directory/directories to create mode : int [default=0777] Permissions for the new directories See also -------- os.makedirs """ if not os.path.exists(dpath): os.makedirs(dpath, mode=mode)
0.002591
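The exists-check in smkdirs leaves a small time-of-check/time-of-use window: another process can create dpath between os.path.exists and os.makedirs and trigger an error. On Python 3.2+ the same intent is expressed race-free (a sketch, not a change to the function above):

import os

# exist_ok=True makes makedirs a no-op when the tree already exists,
# removing the need for a separate existence check
os.makedirs('out/nested/dir', mode=0o777, exist_ok=True)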
async def send_rpc_message(self, message, context): """Handle a send_rpc message. See :meth:`AbstractDeviceAdapter.send_rpc`. """ conn_string = message.get('connection_string') rpc_id = message.get('rpc_id') address = message.get('address') timeout = message.get('timeout') payload = message.get('payload') client_id = context.user_data self._logger.debug("Calling RPC %d:0x%04X with payload %s on %s", address, rpc_id, payload, conn_string) response = bytes() err = None try: response = await self.send_rpc(client_id, conn_string, address, rpc_id, payload, timeout=timeout) except VALID_RPC_EXCEPTIONS as internal_err: err = internal_err except (DeviceAdapterError, DeviceServerError): raise except Exception as internal_err: self._logger.warning("Unexpected exception calling RPC %d:0x%04x", address, rpc_id, exc_info=True) raise ServerCommandError('send_rpc', str(internal_err)) from internal_err status, response = pack_rpc_response(response, err) return { 'status': status, 'payload': base64.b64encode(response) }
0.003888
def _construct_select_query(**filter_definition): """Return SELECT statement that will be used as a filter. :param filter_definition: definition of a filter that should be used for SELECT construction :return: """ table_name = filter_definition.pop('table') distinct = filter_definition.pop('distinct', False) select_count = filter_definition.pop('count', False) if distinct and select_count: raise UnsupportedDefinitionError('SELECT (DISTINCT ...) is not supported') if select_count and 'select' in filter_definition: raise UnsupportedDefinitionError('SELECT COUNT(columns) is not supported') if 'joins' in filter_definition: join_definitions = filter_definition.pop('joins') if not isinstance(join_definitions, (tuple, list)): join_definitions = (join_definitions,) filter_definition['joins'] = [] for join_def in join_definitions: filter_definition['joins'].append(_expand_join(join_def)) if 'where' in filter_definition: for key, value in filter_definition['where'].items(): if is_filter_query(value): # We can do it recursively here sub_query = value.pop(DEFAULT_FILTER_KEY) if value: raise ParsingInputError("Unknown keys for sub-query provided: %s" % value) filter_definition['where'][key] = mosql_raw('( {} )'.format(_construct_select_query(**sub_query))) elif isinstance(value, str) and value.startswith('$') and QUERY_REFERENCE.fullmatch(value[1:]): # Make sure we construct correct query with escaped table name and escaped column for sub-queries filter_definition['where'][key] = mosql_raw('"{}"'.format('"."'.join(value[1:].split('.')))) raw_select = select(table_name, **filter_definition) if distinct: # Note that we want to limit replace to the current SELECT, not affect nested ones raw_select = raw_select.replace('SELECT', 'SELECT DISTINCT', 1) if select_count: # Note that we want to limit replace to the current SELECT, not affect nested ones raw_select = raw_select.replace('SELECT *', 'SELECT COUNT(*)', 1) return raw_select
0.004848
def can_delete_post(self, post, user): """ Given a forum post, checks whether the user can delete the latter. """ checker = self._get_checker(user) # A user can delete a post if... # they are a superuser # they are the original poster of the forum post # they belong to the forum moderators is_author = self._is_post_author(post, user) can_delete = ( user.is_superuser or (is_author and checker.has_perm('can_delete_own_posts', post.topic.forum)) or checker.has_perm('can_delete_posts', post.topic.forum) ) return can_delete
0.004484
def filenumber_handle(self):
    """Get the number of files in the folder."""
    self.__results = []
    self.__dirs = []
    self.__files = []
    self.__ftp = self.connect()
    self.__ftp.dir(self.args.path, self.__results.append)
    self.logger.debug("dir results: {}".format(self.__results))
    self.quit()

    status = self.ok

    for data in self.__results:
        if "<DIR>" in data:
            self.__dirs.append(str(data.split()[3]))
        else:
            self.__files.append(str(data.split()[2]))

    self.__result = len(self.__files)
    self.logger.debug("result: {}".format(self.__result))

    # Compare the value.
    if self.__result > self.args.warning:
        status = self.warning
    if self.__result > self.args.critical:
        status = self.critical

    # Output
    self.shortoutput = "Found {0} files in {1}.".format(self.__result,
                                                        self.args.path)
    self.longoutput.extend(self.__results)
    self.perfdata.append("{path}={result};{warn};{crit};0;".format(
        crit=self.args.critical,
        warn=self.args.warning,
        result=self.__result,
        path=self.args.path))

    self.logger.debug("Return status and output.")
    status(self.output())
0.001397
def attribute(self): """Set or reset attributes""" paint = { "bold": self.ESC + "1" + self.END, 1: self.ESC + "1" + self.END, "dim": self.ESC + "2" + self.END, 2: self.ESC + "2" + self.END, "underlined": self.ESC + "4" + self.END, 4: self.ESC + "4" + self.END, "blink": self.ESC + "5" + self.END, 5: self.ESC + "5" + self.END, "reverse": self.ESC + "7" + self.END, 7: self.ESC + "7" + self.END, "hidden": self.ESC + "8" + self.END, 8: self.ESC + "8" + self.END, "reset": self.ESC + "0" + self.END, 0: self.ESC + "0" + self.END, "res_bold": self.ESC + "21" + self.END, 21: self.ESC + "21" + self.END, "res_dim": self.ESC + "22" + self.END, 22: self.ESC + "22" + self.END, "res_underlined": self.ESC + "24" + self.END, 24: self.ESC + "24" + self.END, "res_blink": self.ESC + "25" + self.END, 25: self.ESC + "25" + self.END, "res_reverse": self.ESC + "27" + self.END, 27: self.ESC + "27" + self.END, "res_hidden": self.ESC + "28" + self.END, 28: self.ESC + "28" + self.END, } return paint[self.color]
0.00149
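The dict in attribute maps names to ANSI SGR (Select Graphic Rendition) codes; assuming the class's ESC and END hold the usual '\x1b[' prefix and 'm' suffix, each entry renders as a terminal escape sequence. A standalone check of the encoding:

ESC, END = '\x1b[', 'm'      # assumed values of self.ESC / self.END
bold = ESC + '1' + END       # same string the dict builds for "bold"
reset = ESC + '0' + END      # "reset" restores default attributes
print(bold + 'bold text' + reset + ' normal text')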
def _is_valid_language(self): """ Return True if the value of component in attribute "language" is valid, and otherwise False. :returns: True if value is valid, False otherwise :rtype: boolean """ comp_str = self._encoded_value.lower() lang_rxc = re.compile(CPEComponentSimple._LANGTAG_PATTERN) return lang_rxc.match(comp_str) is not None
0.004854
def _find_matching_algo(self, region):
    """! @brief Search for a flash algo covering the region's address range."""
    for algo in self._info.algos:
        # Both start and size are required attributes.
        algoStart = int(algo.attrib['start'], base=0)
        algoSize = int(algo.attrib['size'], base=0)
        algoEnd = algoStart + algoSize - 1

        # Check if the region indicated by start..size fits within the algo.
        if (algoStart <= region.start <= algoEnd) and (algoStart <= region.end <= algoEnd):
            return algo
    return None
0.009709
def delete(self): """Delete the resource. Returns: True if the delete is successful. Will throw an error if other errors occur """ session = self._session url = session._build_url(self._resource_path(), self.id) return session.delete(url, CB.boolean(204))
0.006135
def id(self, value): """The id property. Args: value (string). the property value. """ if value == self._defaults['ai.device.id'] and 'ai.device.id' in self._values: del self._values['ai.device.id'] else: self._values['ai.device.id'] = value
0.01227
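The setter stores a value only when it differs from the default, keeping the serialized envelope sparse. The same pattern in a minimal self-contained class (names illustrative, not the library's API):

class Device:
    _defaults = {'ai.device.id': 'unknown'}

    def __init__(self):
        self._values = {}

    @property
    def id(self):
        return self._values.get('ai.device.id',
                                self._defaults['ai.device.id'])

    @id.setter
    def id(self, value):
        # keep only keys that differ from the default -> smaller payloads
        if value == self._defaults['ai.device.id']:
            self._values.pop('ai.device.id', None)
        else:
            self._values['ai.device.id'] = value

d = Device()
d.id = 'abc123'
print(d._values)   # {'ai.device.id': 'abc123'}
d.id = 'unknown'
print(d._values)   # {} -- defaults are never stored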
def circle_touching_line(center, radius, start, end): """ Return true if the given circle intersects the given segment. Note that this checks for intersection with a line segment, and not an actual line. :param center: Center of the circle. :type center: Vector :param radius: Radius of the circle. :type radius: float :param start: The first end of the line segment. :type start: Vector :param end: The second end of the line segment. :type end: Vector """ C, R = center, radius A, B = start, end a = (B.x - A.x)**2 + (B.y - A.y)**2 b = 2 * (B.x - A.x) * (A.x - C.x) \ + 2 * (B.y - A.y) * (A.y - C.y) c = C.x**2 + C.y**2 + A.x**2 + A.y**2 \ - 2 * (C.x * A.x + C.y * A.y) - R**2 discriminant = b**2 - 4 * a * c if discriminant < 0: return False elif discriminant == 0: u = v = -b / float(2 * a) else: u = (-b + math.sqrt(discriminant)) / float(2 * a) v = (-b - math.sqrt(discriminant)) / float(2 * a) if u < 0 and v < 0: return False if u > 1 and v > 1: return False return True
0.006098
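The function substitutes the segment's parametric form P(t) = A + t*(B - A) into the circle equation and solves the resulting quadratic a*t^2 + b*t + c = 0; u and v are the two roots, and the final checks discard intersections outside t in [0, 1]. A quick check, assuming the function above (and math) is in scope:

from collections import namedtuple

Vector = namedtuple('Vector', 'x y')   # anything with .x/.y attributes works

# unit circle at the origin; the segment crosses it:
# a=16, b=-16, c=3 -> discriminant 64, roots u=0.75, v=0.25, both in [0, 1]
print(circle_touching_line(Vector(0, 0), 1.0,
                           Vector(-2, 0), Vector(2, 0)))   # True

# same circle, segment far away: discriminant -96 < 0 -> no intersection
print(circle_touching_line(Vector(0, 0), 1.0,
                           Vector(5, 5), Vector(6, 5)))    # False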
def warp(source_file, destination_file, dst_crs=None, resolution=None, dimensions=None, src_bounds=None, dst_bounds=None, src_nodata=None, dst_nodata=None, target_aligned_pixels=False, check_invert_proj=True, creation_options=None, resampling=Resampling.cubic, **kwargs): """Warp a raster dataset. Parameters ------------ source_file: str, file object or pathlib.Path object Source file. destination_file: str, file object or pathlib.Path object Destination file. dst_crs: rasterio.crs.CRS, optional Target coordinate reference system. resolution: tuple (x resolution, y resolution) or float, optional Target resolution, in units of target coordinate reference system. dimensions: tuple (width, height), optional Output file size in pixels and lines. src_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from source bounds (in source georeferenced units). dst_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from destination bounds (in destination georeferenced units). src_nodata: int, float, or nan, optional Manually overridden source nodata. dst_nodata: int, float, or nan, optional Manually overridden destination nodata. target_aligned_pixels: bool, optional Align the output bounds based on the resolution. Default is `False`. check_invert_proj: bool, optional Constrain output to valid coordinate region in dst_crs. Default is `True`. creation_options: dict, optional Custom creation options. resampling: rasterio.enums.Resampling Reprojection resampling method. Default is `cubic`. kwargs: optional Additional arguments passed to transformation function. Returns --------- out: None Output is written to destination. """ with rasterio.Env(CHECK_WITH_INVERT_PROJ=check_invert_proj): with rasterio.open(source_file) as src: out_kwargs = src.profile.copy() dst_crs, dst_transform, dst_width, dst_height = calc_transform( src, dst_crs, resolution, dimensions, src_bounds, dst_bounds, target_aligned_pixels) # If src_nodata is not None, update the dst metadata NODATA # value to src_nodata (will be overridden by dst_nodata if it is not None. if src_nodata is not None: # Update the destination NODATA value out_kwargs.update({ 'nodata': src_nodata }) # Validate a manually set destination NODATA value. if dst_nodata is not None: if src_nodata is None and src.meta['nodata'] is None: raise ValueError('src_nodata must be provided because dst_nodata is not None') else: out_kwargs.update({'nodata': dst_nodata}) out_kwargs.update({ 'crs': dst_crs, 'transform': dst_transform, 'width': dst_width, 'height': dst_height }) # Adjust block size if necessary. if ('blockxsize' in out_kwargs and dst_width < out_kwargs['blockxsize']): del out_kwargs['blockxsize'] if ('blockysize' in out_kwargs and dst_height < out_kwargs['blockysize']): del out_kwargs['blockysize'] if creation_options is not None: out_kwargs.update(**creation_options) with rasterio.open(destination_file, 'w', **out_kwargs) as dst: reproject( source=rasterio.band(src, src.indexes), destination=rasterio.band(dst, dst.indexes), src_transform=src.transform, src_crs=src.crs, src_nodata=src_nodata, dst_transform=out_kwargs['transform'], dst_crs=out_kwargs['crs'], dst_nodata=dst_nodata, resampling=resampling, **kwargs)
0.000939
def _get_acq_evaluator(self, acq): """ Imports the evaluator """ from ..core.evaluators import select_evaluator from copy import deepcopy eval_args = deepcopy(self.config['acquisition']['evaluator']) del eval_args['type'] return select_evaluator(self.config['acquisition']['evaluator']['type'])(acq, **eval_args)
0.007958
def stop(self, actor, exc=None, exit_code=None): """Gracefully stop the ``actor``. """ if actor.state <= ACTOR_STATES.RUN: # The actor has not started the stopping process. Starts it now. actor.state = ACTOR_STATES.STOPPING actor.event('start').clear() if exc: if not exit_code: exit_code = getattr(exc, 'exit_code', 1) if exit_code == 1: exc_info = sys.exc_info() if exc_info[0] is not None: actor.logger.critical('Stopping', exc_info=exc_info) else: actor.logger.critical('Stopping: %s', exc) elif exit_code == 2: actor.logger.error(str(exc)) elif exit_code: actor.stream.writeln(str(exc)) else: if not exit_code: exit_code = getattr(actor._loop, 'exit_code', 0) # # Fire stopping event actor.exit_code = exit_code actor.stopping_waiters = [] actor.event('stopping').fire() if actor.stopping_waiters and actor._loop.is_running(): actor.logger.info('asynchronous stopping') # make sure to return the future (used by arbiter for waiting) return actor._loop.create_task(self._async_stopping(actor)) else: if actor.logger: actor.logger.info('stopping') self._stop_actor(actor) elif actor.stopped(): return self._stop_actor(actor, True)
0.001183
def sort(ol,**kwargs): ''' from elist.elist import * ol = [1,3,4,2] id(ol) new = sort(ol) ol new id(ol) id(new) #### ol = [1,3,4,2] id(ol) rslt = sort(ol,mode="original") ol rslt id(ol) id(rslt) ''' if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" if(mode == "new"): new = copy.deepcopy(ol) new.sort() return(new) else: ol.sort() return(ol)
0.005329
def get_converted(self, symbol, units='CAD', system=None, tag=None): """ Uses a Symbol's Dataframe, to build a new Dataframe, with the data converted to the new units Parameters ---------- symbol : str or tuple of the form (Dataframe, str) String representing a symbol's name, or a dataframe with the data required to be converted. If supplying a dataframe, units must be passed. units : str, optional Specify the units to convert the symbol to, default to CAD system : str, optional If None, the default system specified at instantiation is used. System defines which conversion approach to take. tag : str, optional Tags define which set of conversion data is used. If None, the default tag specified at instantiation is used. """ if isinstance(symbol, (str, unicode)): sym = self.get(symbol) df = sym.df curu = sym.units requ = units elif isinstance(symbol, tuple): df = symbol[0] curu = symbol[1] requ = units else: raise TypeError("Expected str or (DataFrame, str), found {}".format(type(symbol))) system = system or self.default_system tag = tag or self.default_tag conv = self.converters[system][tag] newdf = conv.convert(df, curu, requ) newdf = pd.merge(df, newdf, left_index=True, right_index=True) newdf = newdf[df.columns[0] + "_y"].to_frame() newdf.columns = df.columns return newdf
0.005202
def transform(self, data):
    """
    Transform the SFrame `data` using a fitted model.

    Parameters
    ----------
    data : SFrame
        The data to be transformed.

    Returns
    -------
    out: SFrame
        A transformed SFrame.

    See Also
    --------
    fit, fit_transform
    """
    if self.transform_chain is None:
        raise RuntimeError("`transform()` method called before `fit` or `fit_transform`.")

    return self.transform_chain.transform(data)
0.004902