text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def _save_cookies(requests_cookiejar, filename):
    """Save cookies to a file."""
    with open(filename, 'wb') as handle:
        pickle.dump(requests_cookiejar, handle)
0.005848
def instance_from_physical_vector(self, physical_vector):
    """
    Creates a ModelInstance, which has an attribute and class instance
    corresponding to every PriorModel attributed to this instance.

    This method takes as input a physical vector of parameter values, thus
    omitting the use of priors.

    Parameters
    ----------
    physical_vector: [float]
        A unit hypercube vector

    Returns
    -------
    model_instance : autofit.mapper.model.ModelInstance
        An object containing reconstructed model_mapper instances
    """
    arguments = dict(
        map(lambda prior_tuple, physical_unit: (prior_tuple.prior, physical_unit),
            self.prior_tuples_ordered_by_id, physical_vector))
    return self.instance_for_arguments(arguments)
0.005924
def cut_across_axis(self, dim, minval=None, maxval=None):
    '''
    Cut the mesh by a plane, discarding vertices that lie behind that
    plane. Or cut the mesh by two parallel planes, discarding vertices
    that lie outside them.

    The region to keep is defined by an axis of perpendicularity,
    specified by `dim`: 0 means x, 1 means y, 2 means z. `minval` and
    `maxval` indicate the portion of that axis to keep.

    Return the original indices of the kept vertices.
    '''
    # vertex_mask keeps track of the vertices we want to keep.
    vertex_mask = np.ones((len(self.v),), dtype=bool)
    if minval is not None:
        predicate = self.v[:, dim] >= minval
        vertex_mask = np.logical_and(vertex_mask, predicate)
    if maxval is not None:
        predicate = self.v[:, dim] <= maxval
        vertex_mask = np.logical_and(vertex_mask, predicate)
    vertex_indices = np.flatnonzero(vertex_mask)
    self.keep_vertices(vertex_indices)
    return vertex_indices
0.001867
async def start(self, remoteCaps, remotePort):
    """
    Start the transport.
    """
    if not self.__started:
        self.__started = True
        self.__state = 'connecting'
        self._remote_port = remotePort

        # configure logging
        if logger.isEnabledFor(logging.DEBUG):
            prefix = self.is_server and 'server ' or 'client '
            self.__log_debug = lambda msg, *args: logger.debug(prefix + msg, *args)

        # initialise local channel ID counter
        # one side should be using even IDs, the other odd IDs
        if self.is_server:
            self._data_channel_id = 0
        else:
            self._data_channel_id = 1

        self.__transport._register_data_receiver(self)
        if not self.is_server:
            await self._init()
0.00346
def _check_year(year, month, error, error_msg):
    """Checks that the year is within 50 years from now."""
    if year not in xrange((now.year - 50), (now.year + 51)):
        year = now.year
        month = now.month
        error = error_msg
    return year, month, error
0.00365
def __perform_rest_call(self, requestURL, params=None, headers=None, restType='GET', body=None):
    """Returns the JSON representation of the response if the response
    status was ok, returns ``None`` otherwise.
    """
    auth, headers = self.__prepare_gprest_call(requestURL, params=params,
                                               headers=headers, restType=restType, body=body)
    if restType == 'GET':
        r = requests.get(requestURL, auth=auth, headers=headers, params=params)
    elif restType == 'PUT':
        r = requests.put(requestURL, data=body, auth=auth, headers=headers, params=params)
    elif restType == 'POST':
        r = requests.post(requestURL, data=body, auth=auth, headers=headers, params=params)
    elif restType == 'DELETE':
        r = requests.delete(requestURL, auth=auth, headers=headers, params=params)
    resp = self.__process_gprest_response(r, restType=restType)
    return resp
0.008395
def readrows(self):
    """Using the BroLogReader this method yields each row of the log file,
    replacing timestamps, looping and emitting rows based on EPS rate.
    """
    # Loop forever or until max_rows is reached
    num_rows = 0
    while True:
        # Yield the rows from the internal reader
        for row in self.log_reader.readrows():
            yield self.replace_timestamp(row)

            # Sleep and count rows
            time.sleep(next(self.eps_timer))
            num_rows += 1

            # Check for max_rows
            if self.max_rows and (num_rows >= self.max_rows):
                return
0.002915
def generate_boto3_response(operation):
    """The decorator to convert an XML response to JSON, if the request is
    determined to be from boto3. Pass the API action as a parameter.
    """
    def _boto3_request(method):
        @wraps(method)
        def f(self, *args, **kwargs):
            rendered = method(self, *args, **kwargs)
            if 'json' in self.headers.get('Content-Type', []):
                self.response_headers.update(
                    {'x-amzn-requestid': '2690d7eb-ed86-11dd-9877-6fad448a8419',
                     'date': datetime.now(pytz.utc).strftime('%a, %d %b %Y %H:%M:%S %Z'),
                     'content-type': 'application/x-amz-json-1.1'})
                resp = xml_to_json_response(self.aws_service_spec, operation, rendered)
                return '' if resp is None else json.dumps(resp)
            return rendered
        return f
    return _boto3_request
0.003226
def save(self, fname):
    """Save the report"""
    with open(fname, 'wb') as f:
        f.write(encode(self.text))
0.015625
def scale(self, factor, inplace=True):
    """ Multiplies all branch lengths by factor. """
    if not inplace:
        t = self.copy()
    else:
        t = self
    t._tree.scale_edges(factor)
    t._dirty = True
    return t
0.007722
def write_unchecked_data(self, offsets, data):
    # type: (Descriptor, Offsets, bytes) -> None
    """Write unchecked data to disk
    :param Descriptor self: this
    :param Offsets offsets: download offsets
    :param bytes data: data
    """
    self.write_data(offsets, data)
    unchecked = UncheckedChunk(
        data_len=len(data),
        fd_start=self.view.fd_start + offsets.fd_start,
        file_path=self.final_path,
        temp=False,
    )
    with self._meta_lock:
        self._unchecked_chunks[offsets.chunk_num] = {
            'ucc': unchecked,
            'decrypted': True,
        }
0.004412
def schedule_contact_downtime(self, contact, start_time, end_time, author, comment):
    """Schedule contact downtime

    Format of the line that triggers function call::

        SCHEDULE_CONTACT_DOWNTIME;<contact_name>;<start_time>;<end_time>;<author>;<comment>

    :param contact: contact to put in downtime
    :type contact: alignak.objects.contact.Contact
    :param start_time: downtime start time
    :type start_time: int
    :param end_time: downtime end time
    :type end_time: int
    :param author: downtime author
    :type author: str
    :param comment: text comment
    :type comment: str
    :return: None
    """
    data = {'ref': contact.uuid, 'start_time': start_time,
            'end_time': end_time, 'author': author, 'comment': comment}
    cdt = ContactDowntime(data)
    contact.add_downtime(cdt)
    self.send_an_element(contact.get_update_status_brok())
0.003122
def _get_type(self, value):
    """Get the data type for *value*."""
    if value is None:
        return type(None)
    elif type(value) in int_types:
        return int
    elif type(value) in float_types:
        return float
    elif isinstance(value, binary_type):
        return binary_type
    else:
        return text_type
0.005333
async def section_name(self, sec_name=None):
    """
    Section name
    :param sec_name:
    :return:
    """
    if self.writing:
        fvalue = sec_name.encode('ascii')
        await x.dump_uint(self.iobj, len(fvalue), 1)
        await self.iobj.awrite(bytearray(fvalue))
    else:
        ivalue = await x.load_uint(self.iobj, 1)
        fvalue = bytearray(ivalue)
        await self.iobj.areadinto(fvalue)
        return bytes(fvalue).decode('ascii')
0.003883
def upload(self, env, cart, callback=None):
    """
    Nothing special happens here. This method receives a destination
    repo, and a payload of `cart` which will be uploaded into the
    target repo.

    Preparation: To use this method you must pre-process your cart:
    Remotes must be fetched and saved locally. Directories must be
    recursed and replaced with their contents. Items should be signed
    if necessary.

    Warning: this method trusts you to Do The Right Thing (TM), ahead
    of time and check file types before feeding them to it.

    `env` - name of the environment with the cart destination
    `cart` - cart to upload
    `callback` - Optional callback to call if juicer.utils.upload_rpm succeeds
    """
    for repo in cart.repos():
        if not juicer.utils.repo_exists_p(repo, self.connectors[env], env):
            juicer.utils.Log.log_info("repo '%s' doesn't exist in %s environment... skipping!",
                                      (repo, env))
            continue

        repoid = "%s-%s" % (repo, env)

        for item in cart[repo]:
            # if item is remote
            if juicer.utils.is_remote_rpm(item.path):
                # if item is in our pulp server
                ours = False
                for env, con in self.connectors.iteritems():
                    if item.path.startswith('/'.join(con.base_url.split('/')[0:2])):
                        ours = True
                        break
                if ours:
                    # check to see if it's in the right repo
                    if item.path.startswith(juicer.utils.pulp_repo_path(con, repoid)):
                        juicer.utils.Log.log_info("%s is already in repo %s in %s. Nothing to do."
                                                  % (item, repo, env))
                        continue
                    else:
                        juicer.utils.Log.log_info("Initiating upload of '%s' into '%s'"
                                                  % (item.path, repoid))
                        item.sync_to(cart.remotes_storage)
                        rpm_id = juicer.utils.upload_rpm(item.path, repoid,
                                                         self.connectors[env], callback)
                        filename = os.path.basename(item.path)
                        item.update('%s/%s' % (juicer.utils.pulp_repo_path(con, repoid), filename))
                        juicer.utils.Log.log_debug('%s uploaded with an id of %s'
                                                   % (os.path.basename(item.path), rpm_id))
                else:
                    juicer.utils.Log.log_info("Initiating upload of '%s' into '%s'"
                                              % (item.path, repoid))
                    rpm_id = juicer.utils.upload_rpm(item.path, repoid,
                                                     self.connectors[env], callback)
                    juicer.utils.Log.log_debug('%s uploaded with an id of %s'
                                               % (os.path.basename(item.path), rpm_id))
            # else item is local
            elif juicer.utils.is_rpm(item.path):
                juicer.utils.Log.log_info("Initiating upload of '%s' into '%s'"
                                          % (item.path, repoid))
                rpm_id = juicer.utils.upload_rpm(item.path, repoid,
                                                 self.connectors[env], callback)
                juicer.utils.Log.log_debug('%s uploaded with an id of %s'
                                           % (os.path.basename(item.path), rpm_id))

            # Upload carts aren't special, don't update their paths
            if cart.cart_name == 'upload-cart':
                continue

            # Set the path to items in this cart to their location on
            # the pulp server.
            path = juicer.utils.remote_url(self.connectors[env], env, repo,
                                           os.path.basename(item.path))
            item.update(path)

        self.connectors[env].post('/repositories/%s/actions/publish/' % repoid,
                                  {'id': 'yum_distributor'})

    # Upload carts don't persist
    if not cart.cart_name == 'upload-cart':
        cart.current_env = env
        cart.save()
        self.publish(cart)

    return True
0.00457
def seek_previous_line(self):
    """
    Seek previous line relative to the current file position.

    :return: Position of the line or -1 if previous line was not found.
    """
    where = self.file.tell()
    offset = 0
    while True:
        if offset == where:
            break
        read_size = self.read_size if self.read_size <= where else where
        self.file.seek(where - offset - read_size, SEEK_SET)
        data_len, data = self.read(read_size)
        # Consider the following example: Foo\r | \nBar where " | " denotes current position,
        # '\nBar' is the read part and 'Foo\r' is the remaining part.
        # We should completely consume terminator "\r\n" by reading one extra byte.
        if b'\r\n' in self.LINE_TERMINATORS and data[0] == b'\n'[0]:
            terminator_where = self.file.tell()
            if terminator_where > data_len + 1:
                self.file.seek(where - offset - data_len - 1, SEEK_SET)
                terminator_len, terminator_data = self.read(1)
                if terminator_data[0] == b'\r'[0]:
                    data_len += 1
                    data = b'\r' + data
                self.file.seek(terminator_where)
        data_where = data_len
        while data_where > 0:
            terminator = self.suffix_line_terminator(data[:data_where])
            if terminator and offset == 0 and data_where == data_len:
                # The last character is a line terminator that finishes current line. Ignore it.
                data_where -= len(terminator)
            elif terminator:
                self.file.seek(where - offset - (data_len - data_where))
                return self.file.tell()
            else:
                data_where -= 1
        offset += data_len
    if where == 0:
        # Nothing more to read.
        return -1
    else:
        # Very first line.
        self.file.seek(0)
        return 0
0.002402
def base64url_decode(input):
    """Helper method to base64url_decode a string.

    Args:
        input (str): A base64url_encoded string to decode.
    """
    rem = len(input) % 4
    if rem > 0:
        input += b'=' * (4 - rem)
    return base64.urlsafe_b64decode(input)
0.003597
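A quick illustration of the padding arithmetic in the row above (a minimal sketch, not part of the dataset; it assumes the function receives bytes, as the b'=' concatenation implies): a 7-character base64url string needs 4 - (7 % 4) = 1 padding byte before standard decoding succeeds.

    import base64

    def base64url_decode(input):
        rem = len(input) % 4
        if rem > 0:
            input += b'=' * (4 - rem)
        return base64.urlsafe_b64decode(input)

    # 'aGVsbG8' is 7 chars, so one '=' is appended before decoding.
    assert base64url_decode(b'aGVsbG8') == b'hello'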
def is_never_accessible(self):
    """ Returns true if the course/task is never accessible """
    return self._val[0] == datetime.max and self._val[1] == datetime.max
0.011429
def _one_diagonal_capture_square(self, capture_square, position):
    """ Adds specified diagonal as a capture move if it is one """
    if self.contains_opposite_color_piece(capture_square, position):
        if self.would_move_be_promotion():
            for move in self.create_promotion_moves(status=notation_const.CAPTURE_AND_PROMOTE,
                                                    location=capture_square):
                yield move
        else:
            yield self.create_move(end_loc=capture_square,
                                   status=notation_const.CAPTURE)
0.006279
def _set_route(self, ip_dest, next_hop, **kwargs):
    """Configure a static route

    Args:
        ip_dest (string): The ip address of the destination in the
            form of A.B.C.D/E
        next_hop (string): The next hop interface or ip address
        **kwargs['next_hop_ip'] (string): The next hop address on
            destination interface
        **kwargs['distance'] (string): Administrative distance for this
            route
        **kwargs['tag'] (string): Route tag
        **kwargs['route_name'] (string): Route name
        **kwargs['delete'] (boolean): If true, deletes the specified route
            instead of creating or setting values for the route
        **kwargs['default'] (boolean): If true, defaults the specified route
            instead of creating or setting values for the route

    Returns:
        True if the operation succeeds, otherwise False.
    """
    commands = self._build_commands(ip_dest, next_hop, **kwargs)

    delete = kwargs.get('delete', False)
    default = kwargs.get('default', False)

    # Prefix with 'no' if delete is set
    if delete:
        commands = "no " + commands
    # Or with 'default' if default is set
    else:
        if default:
            commands = "default " + commands

    return self.configure(commands)
0.001412
def credit_note_pdf(self, credit_note_it):
    """
    Opens a pdf of a credit note

    :param credit_note_it: the credit note id
    :return: dict
    """
    return self._create_get_request(resource=CREDIT_NOTES, billomat_id=credit_note_it,
                                    command=PDF)
0.010714
def java_timestamp(timestamp=True):
    """
    .. versionadded:: 0.2.0

    Returns a timestamp in the format produced by |date_tostring|_, e.g.::

        Mon Sep 02 14:00:54 EDT 2016

    If ``timestamp`` is `True` (the default), the current date & time is
    returned.

    If ``timestamp`` is `None` or `False`, an empty string is returned.

    If ``timestamp`` is a number, it is converted from seconds since the
    epoch to local time.

    If ``timestamp`` is a `datetime.datetime` object, its value is used
    directly, with naïve objects assumed to be in the local timezone.

    The timestamp is always constructed using the C locale.

    :param timestamp: the date & time to display
    :type timestamp: `None`, `bool`, number, or `datetime.datetime`
    :rtype: text string

    .. |date_tostring| replace:: Java 8's ``Date.toString()``
    .. _date_tostring: https://docs.oracle.com/javase/8/docs/api/java/util/Date.html#toString--
    """
    if timestamp is None or timestamp is False:
        return ''
    if isinstance(timestamp, datetime) and timestamp.tzinfo is not None:
        timebits = timestamp.timetuple()
        # Assumes `timestamp.tzinfo.tzname()` is meaningful/useful
        tzname = timestamp.tzname()
    else:
        if timestamp is True:
            timestamp = None
        elif isinstance(timestamp, datetime):
            try:
                # Use `datetime.timestamp()` if it's available, as it (unlike
                # `datetime.timetuple()`) takes `fold` into account for naïve
                # datetimes
                timestamp = timestamp.timestamp()
            except AttributeError:
                # Pre-Python 3.3
                # Mapping `timetuple` through `mktime` and `localtime` is
                # necessary for determining whether DST is in effect (which, in
                # turn, is necessary for determining which timezone name to
                # use). The only downside to using standard functions instead
                # of `python-dateutil` is that `mktime`, apparently, handles
                # times duplicated by DST non-deterministically (cf.
                # <https://git.io/vixsE>), but there's no right way to deal
                # with those anyway, so...
                timestamp = time.mktime(timestamp.timetuple())
        elif not isinstance(timestamp, numbers.Number):
            raise TypeError('Timestamp must be number or datetime.datetime')
        timebits = time.localtime(timestamp)
        try:
            tzname = timebits.tm_zone
        except AttributeError:
            # This assumes that `time.tzname` is meaningful/useful.
            tzname = time.tzname[timebits.tm_isdst > 0]
    assert 1 <= timebits.tm_mon <= 12, 'invalid month'
    assert 0 <= timebits.tm_wday <= 6, 'invalid day of week'
    return '{wday} {mon} {t.tm_mday:02d}' \
           ' {t.tm_hour:02d}:{t.tm_min:02d}:{t.tm_sec:02d}' \
           ' {tz} {t.tm_year:04d}'.format(
               t=timebits,
               tz=tzname,
               mon=MONTHS[timebits.tm_mon - 1],
               wday=DAYS_OF_WEEK[timebits.tm_wday]
           )
0.000642
def get_filter_item(name: str, operation: bytes, value: bytes) -> bytes:
    """
    A field could be found for this term, try to get filter string for it.
    """
    assert isinstance(name, str)
    assert isinstance(value, bytes)
    if operation is None:
        return filter_format(b"(%s=%s)", [name, value])
    elif operation == "contains":
        assert value != ""
        return filter_format(b"(%s=*%s*)", [name, value])
    else:
        raise ValueError("Unknown search operation %s" % operation)
0.001957
def unbounded(self):
    """
    Get whether this node is unbounded I{(a collection)}
    @return: True if unbounded, else False.
    @rtype: boolean
    """
    max = self.max
    if max is None:
        max = '1'
    if max.isdigit():
        return (int(max) > 1)
    else:
        return max == 'unbounded'
0.005587
def process_url(url, key):
    """
    Yields DOE CODE records from a DOE CODE .json URL response

    Converts a DOE CODE API .json URL response into DOE CODE projects
    """
    logger.debug('Fetching DOE CODE JSON: %s', url)
    if key is None:
        raise ValueError('DOE CODE API Key value is missing!')
    response = requests.get(url, headers={"Authorization": "Basic " + key})
    doecode_json = response.json()
    for record in doecode_json['records']:
        yield record
0.002045
def _get_src_file_path(self, markdown_file_path: Path) -> Path:
    '''Translate the path of a Markdown file that is located inside the temporary
    working directory into the path of the corresponding Markdown file that is
    located inside the source directory of the Foliant project.

    :param markdown_file_path: Path to Markdown file that is located
        inside the temporary working directory

    :returns: Mapping of Markdown file path to the source directory
    '''
    path_relative_to_working_dir = markdown_file_path.relative_to(self.working_dir.resolve())

    self.logger.debug(
        'Currently processed Markdown file path relative to working dir: ' +
        f'{path_relative_to_working_dir}'
    )

    path_mapped_to_src_dir = (
        self.project_path.resolve() /
        self.config['src_dir'] /
        path_relative_to_working_dir
    )

    self.logger.debug(
        'Currently processed Markdown file path mapped to source dir: ' +
        f'{path_mapped_to_src_dir}'
    )

    return path_mapped_to_src_dir
0.006267
def get_tc_device(self):
    """
    Return the device name associated with the network communication direction.
    """
    if self.direction == TrafficDirection.OUTGOING:
        return self.device
    if self.direction == TrafficDirection.INCOMING:
        return self.ifb_device
    raise ParameterError(
        "unknown direction", expected=TrafficDirection.LIST, value=self.direction)
0.006928
def sample_cluster(sources, srcfilter, num_ses, param):
    """
    Yields ruptures generated by a cluster of sources.

    :param sources:
        A sequence of sources of the same group
    :param num_ses:
        Number of stochastic event sets
    :param param:
        a dictionary of additional parameters including
        ses_per_logic_tree_path
    :yields:
        dictionaries with keys rup_array, calc_times, eff_ruptures
    """
    eb_ruptures = []
    numpy.random.seed(sources[0].serial)
    [grp_id] = set(src.src_group_id for src in sources)
    # AccumDict of arrays with 3 elements weight, nsites, calc_time
    calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
    # Set the parameters required to compute the number of occurrences
    # of the group of sources
    # assert param['oqparam'].number_of_logic_tree_samples > 0
    samples = getattr(sources[0], 'samples', 1)
    tom = getattr(sources, 'temporal_occurrence_model')
    rate = tom.occurrence_rate
    time_span = tom.time_span
    # Note that using a single time interval corresponding to the product
    # of the investigation time and the number of realisations as we do
    # here is admitted only in the case of a time-independent model
    grp_num_occ = numpy.random.poisson(rate * time_span * samples * num_ses)
    # Now we process the sources included in the group. Possible cases:
    # * The group is a cluster. In this case we choose one rupture per each
    #   source; uncertainty in the ruptures can be handled in this case
    #   using mutually exclusive ruptures (note that this is admitted
    #   only for non-parametric sources).
    # * The group contains mutually exclusive sources. In this case we
    #   choose one source and then one rupture from this source.
    rup_counter = {}
    rup_data = {}
    eff_ruptures = 0
    for rlz_num in range(grp_num_occ):
        if sources.cluster:
            for src, _sites in srcfilter(sources):
                # Sum ruptures
                if rlz_num == 0:
                    eff_ruptures += src.num_ruptures
                # Track calculation time
                t0 = time.time()
                rup = src.get_one_rupture()
                # The problem here is that we do not know a-priori the
                # number of occurrences of a given rupture.
                if src.id not in rup_counter:
                    rup_counter[src.id] = {}
                    rup_data[src.id] = {}
                if rup.idx not in rup_counter[src.id]:
                    rup_counter[src.id][rup.idx] = 1
                    rup_data[src.id][rup.idx] = [rup, src.id, grp_id]
                else:
                    rup_counter[src.id][rup.idx] += 1
                # Store info
                dt = time.time() - t0
                calc_times[src.id] += numpy.array([len(rup_data[src.id]), src.nsites, dt])
        elif param['src_interdep'] == 'mutex':
            print('Not yet implemented')
            exit(0)
    # Create event based ruptures
    for src_key in rup_data:
        for rup_key in rup_data[src_key]:
            dat = rup_data[src_key][rup_key]
            cnt = rup_counter[src_key][rup_key]
            ebr = EBRupture(dat[0], dat[1], dat[2], cnt, samples)
            eb_ruptures.append(ebr)

    return eb_ruptures, calc_times, eff_ruptures, grp_id
0.000292
def ajax_login_required(view_func):
    """Handle non-authenticated users differently if it is an AJAX request."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        if request.is_ajax():
            if request.user.is_authenticated():
                return view_func(request, *args, **kwargs)
            else:
                response = http.HttpResponse()
                response['X-Django-Requires-Auth'] = True
                response['X-Django-Login-Url'] = settings.LOGIN_URL
                return response
        else:
            return login_required(view_func)(request, *args, **kwargs)
    return _wrapped_view
0.001443
def _make_sql_params(self, kw):
    """Make a list of strings to pass to an SQL statement
    from the dictionary kw with Python types"""
    vals = []
    for k, v in kw.iteritems():
        vals.append('%s=%s' % (k, self._conv(v)))
    return vals
0.021818
def _collect_layer_output_min_max(mod, data, include_layer=None,
                                  max_num_examples=None, logger=None):
    """Collect min and max values from layer outputs and save them in
    a dictionary mapped by layer names.
    """
    collector = _LayerOutputMinMaxCollector(include_layer=include_layer, logger=logger)
    num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
    return collector.min_max_dict, num_examples
0.006224
def add_validation_message(self, message):
    """
    Adds a message to the messages dict

    :param message:
    """
    if message.file not in self.messages:
        self.messages[message.file] = []
    self.messages[message.file].append(message)
0.007194
def rexponweib(alpha, k, loc=0, scale=1, size=None):
    """
    Random exponentiated Weibull variates.
    """
    q = np.random.uniform(size=size)
    r = flib.exponweib_ppf(q, alpha, k)
    return loc + r * scale
0.00463
def replace(self, new_node):
    """Replace a node after first checking integrity of node stack."""
    cur_node = self.cur_node
    nodestack = self.nodestack
    cur = nodestack.pop()
    prev = nodestack[-1]
    index = prev[-1] - 1
    oldnode, name = prev[-2][index]
    assert cur[0] is cur_node is oldnode, (cur[0], cur_node, prev[-2], index)
    parent = prev[0]
    if isinstance(parent, list):
        parent[index] = new_node
    else:
        setattr(parent, name, new_node)
0.003419
def read_block_data(self, cmd, length):
    """
    Read a block of bytes from the bus from the specified command register.
    Amount of bytes read in is defined by length.
    """
    results = self.bus.read_i2c_block_data(self.address, cmd, length)
    self.log.debug(
        "read_block_data: Read [%s] from command register 0x%02X" % (
            ', '.join(['0x%02X' % x for x in results]),
            cmd
        )
    )
    return results
0.004049
def get_pager_spec(self):
    """
    Find the best pager settings for this command. If the user has
    specified overrides in the INI config file we prefer those.
    """
    self_config = self.get_config()
    pagercmd = self_config.get('pager')
    istty = self_config.getboolean('pager_istty')
    core_config = self.get_config('core')
    if pagercmd is None:
        pagercmd = core_config.get('pager')
    if istty is None:
        istty = core_config.get('pager_istty')
    return {
        "pagercmd": pagercmd,
        "istty": istty
    }
0.003339
def _mid(pt1, pt2):
    """
    (Point, Point) -> Point
    Return the point that lies in between the two input points.
    """
    (x0, y0), (x1, y1) = pt1, pt2
    return 0.5 * (x0 + x1), 0.5 * (y0 + y1)
0.004878
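As a worked check of the midpoint formula above: for pt1 = (0, 0) and pt2 = (2, 4) the result is (0.5 * 2, 0.5 * 4) = (1.0, 2.0).

    def _mid(pt1, pt2):
        (x0, y0), (x1, y1) = pt1, pt2
        return 0.5 * (x0 + x1), 0.5 * (y0 + y1)

    assert _mid((0.0, 0.0), (2.0, 4.0)) == (1.0, 2.0)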
def wait_lock(path, lock_fn=None, timeout=5, sleep=0.1, time_start=None):
    '''
    Obtain a write lock. If one exists, wait for it to release first
    '''
    if not isinstance(path, six.string_types):
        raise FileLockError('path must be a string')

    if lock_fn is None:
        lock_fn = path + '.w'
    if time_start is None:
        time_start = time.time()

    obtained_lock = False

    def _raise_error(msg, race=False):
        '''
        Raise a FileLockError
        '''
        raise FileLockError(msg, time_start=time_start)

    try:
        if os.path.exists(lock_fn) and not os.path.isfile(lock_fn):
            _raise_error(
                'lock_fn {0} exists and is not a file'.format(lock_fn)
            )

        open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
        while time.time() - time_start < timeout:
            try:
                # Use os.open() to obtain filehandle so that we can force an
                # exception if the file already exists. Concept found here:
                # http://stackoverflow.com/a/10979569
                fh_ = os.open(lock_fn, open_flags)
            except (IOError, OSError) as exc:
                if exc.errno != errno.EEXIST:
                    _raise_error(
                        'Error {0} encountered obtaining file lock {1}: {2}'
                        .format(exc.errno, lock_fn, exc.strerror)
                    )
                log.trace(
                    'Lock file %s exists, sleeping %f seconds', lock_fn, sleep
                )
                time.sleep(sleep)
            else:
                # Write the lock file
                with os.fdopen(fh_, 'w'):
                    pass
                # Lock successfully acquired
                log.trace('Write lock %s obtained', lock_fn)
                obtained_lock = True
                # Transfer control back to the code inside the with block
                yield
                # Exit the loop
                break
        else:
            _raise_error(
                'Timeout of {0} seconds exceeded waiting for lock_fn {1} '
                'to be released'.format(timeout, lock_fn)
            )
    except FileLockError:
        raise
    except Exception as exc:
        _raise_error(
            'Error encountered obtaining file lock {0}: {1}'.format(
                lock_fn, exc
            )
        )
    finally:
        if obtained_lock:
            os.remove(lock_fn)
            log.trace('Write lock for %s (%s) released', path, lock_fn)
0.000392
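wait_lock above is written as a generator (it yields once while holding the lock), which suggests it is consumed as a context manager. A minimal usage sketch, assuming it is wrapped with contextlib.contextmanager as generator-based locks typically are; the path is a hypothetical example:

    import contextlib

    locked = contextlib.contextmanager(wait_lock)

    with locked('/var/cache/app/data', timeout=10):
        # critical section: the '/var/cache/app/data.w' lock file
        # exists until this block exits, then is removed
        pass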
def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]:
    """
    Given a tag sequence encoded with IOB1 labels, recode to BIOUL.

    In the IOB1 scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of span immediately following another
    span of the same type.

    In the BIO scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span.

    Parameters
    ----------
    tag_sequence : ``List[str]``, required.
        The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
    encoding : `str`, optional, (default = ``IOB1``).
        The encoding type to convert from. Must be either "IOB1" or "BIO".

    Returns
    -------
    bioul_sequence: ``List[str]``
        The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
    """
    if not encoding in {"IOB1", "BIO"}:
        raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")

    # pylint: disable=len-as-condition

    def replace_label(full_label, new_label):
        # example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
        parts = list(full_label.partition('-'))
        parts[0] = new_label
        return ''.join(parts)

    def pop_replace_append(in_stack, out_stack, new_label):
        # pop the last element from in_stack, replace the label, append
        # to out_stack
        tag = in_stack.pop()
        new_tag = replace_label(tag, new_label)
        out_stack.append(new_tag)

    def process_stack(stack, out_stack):
        # process a stack of labels, add them to out_stack
        if len(stack) == 1:
            # just a U token
            pop_replace_append(stack, out_stack, 'U')
        else:
            # need to code as BIL
            recoded_stack = []
            pop_replace_append(stack, recoded_stack, 'L')
            while len(stack) >= 2:
                pop_replace_append(stack, recoded_stack, 'I')
            pop_replace_append(stack, recoded_stack, 'B')
            recoded_stack.reverse()
            out_stack.extend(recoded_stack)

    # Process the tag_sequence one tag at a time, adding spans to a stack,
    # then recode them.
    bioul_sequence = []
    stack: List[str] = []

    for label in tag_sequence:
        # need to make a dict like
        # token = {'token': 'Matt', "labels": {'conll2003': "B-PER"}
        #          'gold': 'I-PER'}
        # where 'gold' is the raw value from the CoNLL data set
        if label == 'O' and len(stack) == 0:
            bioul_sequence.append(label)
        elif label == 'O' and len(stack) > 0:
            # need to process the entries on the stack plus this one
            process_stack(stack, bioul_sequence)
            bioul_sequence.append(label)
        elif label[0] == 'I':
            # check if the previous type is the same as this one;
            # if it is then append to stack,
            # otherwise this starts a new entity if the type is different
            if len(stack) == 0:
                if encoding == "BIO":
                    raise InvalidTagSequence(tag_sequence)
                stack.append(label)
            else:
                # check if the previous type is the same as this one
                this_type = label.partition('-')[2]
                prev_type = stack[-1].partition('-')[2]
                if this_type == prev_type:
                    stack.append(label)
                else:
                    if encoding == "BIO":
                        raise InvalidTagSequence(tag_sequence)
                    # a new entity
                    process_stack(stack, bioul_sequence)
                    stack.append(label)
        elif label[0] == 'B':
            if len(stack) > 0:
                process_stack(stack, bioul_sequence)
            stack.append(label)
        else:
            raise InvalidTagSequence(tag_sequence)

    # process the stack
    if len(stack) > 0:
        process_stack(stack, bioul_sequence)

    return bioul_sequence
0.000988
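A small example of the recoding performed above: under IOB1, a bare run of I- tags becomes B-...L-, and a single-token span becomes U-.

    tags = ['I-PER', 'I-PER', 'O', 'I-ORG']
    print(to_bioul(tags))
    # ['B-PER', 'L-PER', 'O', 'U-ORG']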
def _scheduling_block_ids(num_blocks, start_id, project):
    """Generate Scheduling Block instance ID"""
    for i in range(num_blocks):
        _root = '{}-{}'.format(strftime("%Y%m%d", gmtime()), project)
        yield '{}-sb{:03d}'.format(_root, i + start_id), \
              '{}-sbi{:03d}'.format(_root, i + start_id)
0.003096
def run(*args, **kwargs):
    """Execute a command.

    Command can be passed as several arguments, each being a string
    or a list of strings; lists are flattened.
    If opts.verbose is True, output of the command is shown.
    If the command exits with non-zero, print an error message and exit.
    If keyword argument get_output is True, output is returned.
    Additionally, non-zero exit code with empty output is ignored.
    """
    capture = kwargs.get("get_output", False)
    args = [arg for arglist in args
            for arg in (arglist if isinstance(arglist, list) else [arglist])]
    if opts.verbose:
        print("Running {}".format(" ".join(args)))
    live_output = opts.verbose and not capture
    runner = subprocess.check_call if live_output else subprocess.check_output
    try:
        output = runner(args, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exception:
        if capture and not exception.output.strip():
            # Ignore errors if output is empty.
            return ""
        if not live_output:
            sys.stdout.write(exception.output.decode(default_encoding, "ignore"))
        sys.exit("Error: got exitcode {} from command {}".format(
            exception.returncode, " ".join(args)))
    except OSError:
        sys.exit("Error: couldn't run {}: is {} in PATH?".format(" ".join(args), args[0]))
    if opts.verbose and capture:
        sys.stdout.write(output.decode(default_encoding, "ignore"))
    return capture and output.decode(default_encoding, "ignore").strip()
0.002582
def sql_fingerprint(query, hide_columns=True):
    """
    Simplify a query, taking away exact values and fields selected.

    Imperfect but better than super explicit, value-dependent queries.
    """
    parsed_query = parse(query)[0]
    sql_recursively_simplify(parsed_query, hide_columns=hide_columns)
    return str(parsed_query)
0.002976
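Illustrative call for the fingerprinting above (the exact masked output depends on sql_recursively_simplify, so the result shown is indicative only):

    fp = sql_fingerprint("SELECT id, name FROM users WHERE id = 1")
    # e.g. something like 'SELECT ... FROM users WHERE id = #':
    # selected columns and literal values are masked, keeping the query shape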
def _dispatcher(self, connection, event):
    """
    Dispatch events to on_<event.type> method, if present.
    """
    log.debug("_dispatcher: %s", event.type)

    def do_nothing(connection, event):
        return None

    method = getattr(self, "on_" + event.type, do_nothing)
    method(connection, event)
0.005848
def get_instance(self, payload):
    """
    Build an instance of DependentPhoneNumberInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
    :rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberInstance
    """
    return DependentPhoneNumberInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        address_sid=self._solution['address_sid'],
    )
0.006645
def expectation(self, diag_hermitian: bk.TensorLike,
                trials: int = None) -> bk.BKTensor:
    """Return the expectation of a measurement. Since we can only measure
    our computer in the computational basis, we only require the diagonal
    of the Hermitian in that basis.

    If the number of trials is specified, we sample the given number of
    times. Else we return the exact expectation (as if we'd performed an
    infinite number of trials.)
    """
    if trials is None:
        probs = self.probabilities()
    else:
        probs = bk.real(bk.astensorproduct(self.sample(trials) / trials))

    diag_hermitian = bk.astensorproduct(diag_hermitian)
    return bk.sum(bk.real(diag_hermitian) * probs)
0.003831
def concretize(self):
    """
    Transforms the SFA into a DFA

    Args:
        None
    Returns:
        DFA: The generated DFA
    """
    dfa = DFA(self.alphabet)
    for state in self.states:
        for arc in state.arcs:
            for char in arc.guard:
                dfa.add_arc(arc.src_state, arc.dst_state, char)
    for i in xrange(len(self.states)):
        if self.states[i].final:
            dfa[i].final = True
    return dfa
0.003914
def openParametersDialog(params, title=None):
    '''
    Opens a dialog to enter parameters.
    Parameters are passed as a list of Parameter objects.
    Returns a dict with param names as keys and param values as values.
    Returns None if the dialog was cancelled.
    '''
    QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
    dlg = ParametersDialog(params, title)
    dlg.exec_()
    QApplication.restoreOverrideCursor()
    return dlg.values
0.002198
def _put_attributes_using_post(self, domain_or_name, item_name, attributes,
                               replace=True, expected_value=None):
    """
    Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET

    The GET version is subject to the URL length limit which kicks in before the
    256 x 1024 limit for attribute values. Using POST prevents that.

    https://github.com/BD2KGenomics/toil/issues/502
    """
    domain, domain_name = self.get_domain_and_name(domain_or_name)
    params = {'DomainName': domain_name,
              'ItemName': item_name}
    self._build_name_value_list(params, attributes, replace)
    if expected_value:
        self._build_expected_value(params, expected_value)
    # The addition of the verb keyword argument is the only difference to put_attributes (Hannes)
    return self.get_status('PutAttributes', params, verb='POST')
0.004435
def rotate_slaves(self):
    "Round-robin slave balancer"
    slaves = self.sentinel_manager.discover_slaves(self.service_name)
    if slaves:
        if self.slave_rr_counter is None:
            self.slave_rr_counter = random.randint(0, len(slaves) - 1)
        for _ in xrange(len(slaves)):
            self.slave_rr_counter = (self.slave_rr_counter + 1) % len(slaves)
            slave = slaves[self.slave_rr_counter]
            yield slave
    # Fallback to the master connection
    try:
        yield self.get_master_address()
    except MasterNotFoundError:
        pass
    raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
0.002717
def vcpkg_dir():
    """
    Figure out where vcpkg is installed.

    vcpkg-exported is populated in some flavors of FB internal builds.
    C:/tools/vcpkg is the appveyor location.
    C:/open/vcpkg is my local location.
    """
    for p in ["vcpkg-exported", "C:/tools/vcpkg", "C:/open/vcpkg"]:
        if os.path.isdir(p):
            return os.path.realpath(p)
    raise Exception("cannot find vcpkg")
0.002488
def start(self, test_connection=True):
    """Starts connection to server if not existent.

    NO-OP if connection is already established.
    Makes ping-pong test as well if desired.
    """
    if self._context is None:
        self._logger.debug('Starting Client')
        self._context = zmq.Context()
        self._poll = zmq.Poller()
        self._start_socket()
        if test_connection:
            self.test_ping()
0.004246
def gen_dist_diff(networkA, networkB, techs=None, snapshot=0, n_cols=3,
                  gen_size=0.2, filename=None, buscmap=plt.cm.jet):
    """
    Difference in generation distribution

    Green/Yellow/Red colors mean that the generation at a location
    is bigger with switches than without.
    Blue colors mean that the generation at a location is smaller
    with switches than without.

    Parameters
    ----------
    networkA : PyPSA network container
        Holds topology of grid with switches,
        including results from powerflow analysis
    networkB : PyPSA network container
        Holds topology of grid without switches,
        including results from powerflow analysis
    techs : dict
        type of technologies which shall be plotted
    snapshot : int
        snapshot
    n_cols : int
        number of columns of the plot
    gen_size : num
        size of generation bubbles at the buses
    filename : str
        Specify filename. If not given, figure will be shown directly.
    """
    if techs is None:
        techs = networkA.generators.carrier.unique()
    else:
        techs = techs

    n_graphs = len(techs)
    n_cols = n_cols

    if n_graphs % n_cols == 0:
        n_rows = n_graphs // n_cols
    else:
        n_rows = n_graphs // n_cols + 1

    fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols)
    size = 4
    fig.set_size_inches(size * n_cols, size * n_rows)

    for i, tech in enumerate(techs):
        i_row = i // n_cols
        i_col = i % n_cols
        ax = axes[i_row, i_col]

        gensA = networkA.generators[networkA.generators.carrier == tech]
        gensB = networkB.generators[networkB.generators.carrier == tech]

        gen_distribution = \
            networkA.generators_t.p.mul(networkA.snapshot_weightings, axis=0)\
            [gensA.index].loc[networkA.snapshots[snapshot]].groupby(
                networkA.generators.bus).sum().reindex(
                networkA.buses.index, fill_value=0.) - \
            networkB.generators_t.p.mul(networkB.snapshot_weightings, axis=0)\
            [gensB.index].loc[networkB.snapshots[snapshot]].groupby(
                networkB.generators.bus).sum().reindex(
                networkB.buses.index, fill_value=0.)

        networkA.plot(
            ax=ax,
            bus_sizes=gen_size * abs(gen_distribution),
            bus_colors=gen_distribution,
            line_widths=0.1,
            bus_cmap=buscmap)

        ax.set_title(tech)

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        plt.close()
0.003064
def add_user_actions(self, actions=(), version='v1.0'):
    """
    Report user action (conversion) data.
    https://wximg.qq.com/wxp/pdftool/get.html?id=rkalQXDBM&pa=39

    :param actions: user action source types
    :param version: version number, e.g. v1.0
    """
    return self._post(
        'user_actions/add',
        params={'version': version},
        json={'actions': actions}
    )
0.005319
def tagAttributes_while(fdef_master_list, root):
    '''Tag each node under root with the appropriate depth.'''
    depth = 0
    current = root
    untagged_nodes = [root]

    while untagged_nodes:
        current = untagged_nodes.pop()

        for x in fdef_master_list:
            if jsName(x.path, x.name) == current['name']:
                current['path'] = x.path

        # Note: the original used bare names `children`/`depth` here, which
        # are undefined; the dict-key membership tests below are the intent.
        if 'children' in current:
            for child in current['children']:
                child["depth"] = depth
                untagged_nodes.append(child)

        if 'depth' not in current:
            current["depth"] = depth

        depth += 1

    return root
0.004762
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""
    self.Y = sp.prox_l1l2(self.AX + self.U,
                          (self.lmbda / self.rho) * self.wl1,
                          (self.mu / self.rho), axis=self.cri.axisC)
    cbpdn.GenericConvBPDN.ystep(self)
0.009772
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions
    '''
    serv = _get_serv(ret=None)
    minions = _get_list(serv, 'minions')
    returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun))
    # returns = {minion: return, minion: return, ...}
    ret = {}
    for minion, data in six.iteritems(returns):
        ret[minion] = salt.utils.json.loads(data)
    return ret
0.002404
def _pyfftw_empty_aligned(shape, dtype, order='C', n=None):
    """Patched version of :func:`sporco.linalg.pyfftw_empty_aligned`."""
    return cp.empty(shape, dtype, order)
0.006494
def itin(self):
    """Generate a random United States Individual Taxpayer Identification Number (ITIN).

    An United States Individual Taxpayer Identification Number
    (ITIN) is a tax processing number issued by the Internal
    Revenue Service. It is a nine-digit number that always begins
    with the number 9 and has a range of 70-88 in the fourth and
    fifth digit. Effective April 12, 2011, the range was extended
    to include 900-70-0000 through 999-88-9999, 900-90-0000 through
    999-92-9999 and 900-94-0000 through 999-99-9999.
    https://www.irs.gov/individuals/international-taxpayers/general-itin-information
    """
    area = self.random_int(min=900, max=999)
    serial = self.random_int(min=0, max=9999)

    # The group number must be between 70 and 99 inclusively but not 89 or 93
    group = random.choice([x for x in range(70, 100) if x not in [89, 93]])

    itin = "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial)
    return itin
0.003872
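The group-number constraint above can be checked directly; a generated value always matches the pattern 9XX-GG-SSSS with GG drawn from 70-99 excluding 89 and 93 (the example ITIN below is an illustrative value, not IRS data):

    valid_groups = [x for x in range(70, 100) if x not in [89, 93]]
    assert 89 not in valid_groups and 93 not in valid_groups
    assert len(valid_groups) == 28
    # e.g. a generated ITIN: '912-75-0341'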
def filter_data(d, x, model="lms", **kwargs):
    """
    Function that filters data with the selected adaptive filter.

    **Args:**

    * `d` : desired value (1 dimensional array)

    * `x` : input matrix (2-dimensional array). Rows are samples,
      columns are input arrays.

    **Kwargs:**

    * Any key argument that can be accepted with selected filter model.
      For more information see documentation of desired adaptive filter.

    **Returns:**

    * `y` : output value (1 dimensional array).
      The size corresponds with the desired value.

    * `e` : filter error for every sample (1 dimensional array).
      The size corresponds with the desired value.

    * `w` : history of all weights (2 dimensional array).
      Every row is set of the weights for given sample.
    """
    # overwrite n with correct size
    kwargs["n"] = x.shape[1]
    # create filter according to model
    if model in ["LMS", "lms"]:
        f = FilterLMS(**kwargs)
    elif model in ["NLMS", "nlms"]:
        f = FilterNLMS(**kwargs)
    elif model in ["RLS", "rls"]:
        f = FilterRLS(**kwargs)
    elif model in ["GNGD", "gngd"]:
        f = FilterGNGD(**kwargs)
    elif model in ["AP", "ap"]:
        f = FilterAP(**kwargs)
    elif model in ["LMF", "lmf"]:
        f = FilterLMF(**kwargs)
    elif model in ["NLMF", "nlmf"]:
        f = FilterNLMF(**kwargs)
    else:
        raise ValueError('Unknown model of filter {}'.format(model))
    # calculate and return the values
    y, e, w = f.run(d, x)
    return y, e, w
0.004516
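A minimal usage sketch for the dispatcher above, a system-identification toy problem (the mu step-size kwarg is an assumption about the FilterLMS constructor's signature, in the style of the padasip library this appears to come from):

    import numpy as np

    np.random.seed(0)
    x = np.random.normal(size=(500, 4))        # 500 samples, 4 input taps
    d = x @ np.array([0.5, -0.3, 0.2, 0.1])    # desired output of an unknown FIR system
    y, e, w = filter_data(d, x, model="lms", mu=0.1)
    print(np.mean(e[-50:] ** 2))               # squared error shrinks as weights converge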
def iodp_samples(samp_file, output_samp_file=None, output_dir_path='.',
                 input_dir_path='', data_model_num=3):
    """
    Convert IODP samples data file into MagIC samples file.
    Default is to overwrite samples.txt in your working directory.

    Parameters
    ----------
    samp_file : str
        path to IODP sample file to convert
    output_samp_file : str
        MagIC-format samples file to append to, default None
    output_dir_path : str
        output file directory, default "."
    input_dir_path : str
        input file directory IF different from output_dir_path, default ""
    data_model_num : int
        MagIC data model [2, 3], default 3

    Returns
    --------
    type - Tuple : (True or False indicating if conversion was successful, samp_file name written)
    """
    samp_file_name = "samples.txt"
    sample_alternatives = "sample_alternatives"
    method_codes = "method_codes"
    sample_name = "sample"
    site_name = "site"
    expedition_name = "expedition_name"
    location_name = "location"
    citation_name = "citation"
    dip = "dip"
    azimuth = "azimuth"
    core_depth = "core_depth"
    composite_depth = "composite_depth"
    timestamp = "timestamp"
    file_type = "samples"
    data_model_num = int(float(data_model_num))
    if data_model_num == 2:
        samp_file_name = "er_samples.txt"
        sample_alternatives = "er_sample_alternatives"
        method_codes = "magic_method_codes"
        sample_name = "er_sample_name"
        site_name = "er_site_name"
        expedition_name = "er_expedition_name"
        location_name = "er_location_name"
        citation_name = "er_citation_names"
        dip = "sample_dip"
        azimuth = "sample_azimuth"
        core_depth = "sample_core_depth"
        composite_depth = "sample_composite_depth"
        timestamp = "sample_date"
        file_type = "er_samples"

    text_key = None
    comp_depth_key = ""
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
    samp_file = pmag.resolve_file_name(samp_file, input_dir_path)
    Samps = []
    samp_out = os.path.join(output_dir_path, samp_file_name)
    if output_samp_file:
        if os.path.exists(output_samp_file):
            samp_out = os.path.join(output_dir_path, output_samp_file)
            Samps, file_type = pmag.magic_read(samp_out)
            print(len(Samps), ' read in from: ', samp_out)
    fin = open(samp_file, "r")
    file_input = fin.readlines()
    fin.close()
    keys = file_input[0].replace('\n', '').split(',')
    if "CSF-B Top (m)" in keys:
        comp_depth_key = "CSF-B Top (m)"
    elif "Top depth CSF-B (m)" in keys:
        comp_depth_key = "Top depth CSF-B (m)"
    # incorporate changes to LIMS data model, while maintaining backward
    # compatibility
    keys = [key.strip('"').strip("'") for key in keys]
    if "Top Depth (m)" in keys:
        depth_key = "Top Depth (m)"
    elif "CSF-A Top (m)" in keys:
        depth_key = "CSF-A Top (m)"
    elif "Top depth CSF-A (m)" in keys:
        depth_key = "Top depth CSF-A (m)"
    if "Text Id" in keys:
        text_key = "Text Id"
    elif "Text identifier" in keys:
        text_key = "Text identifier"
    elif "Text ID" in keys:
        text_key = "Text ID"
    if "Sample Date Logged" in keys:
        date_key = "Sample Date Logged"
    elif "Sample date logged" in keys:
        date_key = "Sample date logged"
    elif "Date sample logged" in keys:
        date_key = "Date sample logged"
    elif "Timestamp (UTC)" in keys:
        date_key = "Timestamp (UTC)"
    if 'Volume (cc)' in keys:
        volume_key = 'Volume (cc)'
    if 'Volume (cm^3)' in keys:
        volume_key = 'Volume (cm^3)'
    if 'Volume (cm3)' in keys:
        volume_key = 'Volume (cm3)'
    if not text_key:
        return False, "Could not extract the necessary data from your input file.\nPlease make sure you are providing a correctly formatted IODP samples csv file."
    ErSamples, samples, file_format = [], [], 'old'
    for line in file_input[1:]:
        if line[0] != '0':
            ODPRec, SampRec = {}, {}
            interval, core = "", ""
            rec = line.replace('\n', '').split(',')
            if len(rec) < 2:
                print("Error in csv file, blank columns")
                break
            for k in range(len(keys)):
                ODPRec[keys[k]] = rec[k].strip('"')
            SampRec[sample_alternatives] = ODPRec[text_key]
            if "Label Id" in keys:  # old format
                label = ODPRec['Label Id'].split()
                if len(label) > 1:
                    interval = label[1].split('/')[0]
                    pieces = label[0].split('-')
                    core = pieces[2]
                while len(core) < 4:
                    core = '0' + core  # my way
            else:  # new format
                file_format = 'new'
                pieces = [ODPRec['Exp'], ODPRec['Site'] + ODPRec['Hole'],
                          ODPRec['Core'] + ODPRec['Type'], ODPRec['Sect'], ODPRec['A/W']]
                # only integers allowed!
                interval = ODPRec['Top offset (cm)'].split('.')[0].strip()
                core = ODPRec['Core'] + ODPRec['Type']
            if core != "" and interval != "":
                SampRec[method_codes] = 'FS-C-DRILL-IODP:SP-SS-C:SO-V'
                if file_format == 'old':
                    SampRec[sample_name] = pieces[0] + '-' + pieces[1] + \
                        '-' + core + '-' + pieces[3] + '-' + pieces[4] + '-' + interval
                else:
                    # change in sample name convention
                    SampRec[sample_name] = pieces[0] + '-' + pieces[1] + '-' + core + '-' + \
                        pieces[3] + '_' + pieces[4] + '_' + interval
                SampRec[site_name] = SampRec[sample_name]
                # pieces=SampRec['er_sample_name'].split('-')
                SampRec[expedition_name] = pieces[0]
                SampRec[location_name] = pieces[1]
                SampRec[citation_name] = "This study"
                SampRec[dip] = "0"
                SampRec[azimuth] = "0"
                SampRec[core_depth] = ODPRec[depth_key]
                if ODPRec[volume_key] != "":
                    SampRec['sample_volume'] = str(float(ODPRec[volume_key]) * 1e-6)
                else:
                    SampRec['sample_volume'] = '1'
                if comp_depth_key != "":
                    SampRec[composite_depth] = ODPRec[comp_depth_key]
                dates = ODPRec[date_key].split()
                if '/' in dates[0]:  # have a date
                    mmddyy = dates[0].split('/')
                    yyyy = '20' + mmddyy[2]
                    mm = mmddyy[0]
                    if len(mm) == 1:
                        mm = '0' + mm
                    dd = mmddyy[1]
                    if len(dd) == 1:
                        dd = '0' + dd
                    date = yyyy + ':' + mm + ':' + dd + ':' + dates[1] + ":00.00"
                else:
                    date = ""
                SampRec[timestamp] = date
                ErSamples.append(SampRec)
                samples.append(SampRec[sample_name])
    if len(Samps) > 0:
        for samp in Samps:
            if samp[sample_name] not in samples:
                ErSamples.append(samp)
    Recs, keys = pmag.fillkeys(ErSamples)
    pmag.magic_write(samp_out, Recs, file_type)
    print('sample information written to: ', samp_out)
    return True, samp_out
0.000922
def scrape(self, request, response, link_type=None):
    '''Iterate the scrapers, returning the first of the results.'''
    for scraper in self._document_scrapers:
        scrape_result = scraper.scrape(request, response, link_type)

        if scrape_result is None:
            continue

        if scrape_result.link_contexts:
            return scrape_result
0.005115
def filter(self, **params):
    """Stream statuses/filter

    :param \*\*params: Parameters to send with your stream request

    Accepted params found at:
    https://developer.twitter.com/en/docs/tweets/filter-realtime/api-reference/post-statuses-filter
    """
    url = 'https://stream.twitter.com/%s/statuses/filter.json' \
          % self.streamer.api_version
    self.streamer._request(url, 'POST', params=params)
0.008811
def GetFileEntryByPathSpec(self, path_spec):
    """Retrieves a file entry for a path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      BDEFileEntry: file entry or None.
    """
    return bde_file_entry.BDEFileEntry(
        self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
0.002933
def handle(self, **kwargs):
    """
    Simply re-saves all objects from models listed in settings.TIMELINE_MODELS.
    Since the timeline app is now following these models, it will register
    each item as it is re-saved. The purpose of this script is to register
    content in your database that existed prior to installing the timeline app.
    """
    for item in settings.ACTIVITY_MONITOR_MODELS:
        app_label, model = item['model'].split('.', 1)
        content_type = ContentType.objects.get(app_label=app_label, model=model)
        model = content_type.model_class()
        objects = model.objects.all()
        for object in objects:
            try:
                object.save()
            except Exception as e:
                print("Error saving: {}".format(e))
0.012771
def stat_container(self, container):
    """Stat container metadata

    :param container: container name (Container is equivalent to
                      Bucket term in Amazon).
    """
    LOG.debug('stat_container() with %s is success.', self.driver)
    return self.driver.stat_container(container)
0.006116
def _init_random_centroids(self):
    """Initialize the centroids as k random samples of X (k = n_clusters)"""
    self.centroids = self._X[np.random.choice(list(range(self._X.shape[0])),
                                              size=self.n_clusters), :]
0.013043
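The same initialization, standalone (note that np.random.choice samples with replacement by default, so duplicate centroids are possible; passing replace=False would guarantee k distinct rows):

    import numpy as np

    X = np.random.rand(100, 2)     # 100 points in 2-D
    n_clusters = 3
    idx = np.random.choice(range(X.shape[0]), size=n_clusters)
    centroids = X[idx, :]          # shape (3, 2), one starting centroid per cluster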
def feature_burstness(corpus, featureset_name, feature, k=5, normalize=True,
                      s=1.1, gamma=1., **slice_kwargs):
    """
    Estimate burstness profile for a feature over the ``'date'`` axis.

    Parameters
    ----------
    corpus : :class:`.Corpus`
    feature : str
        Name of featureset in ``corpus``. E.g. ``'citations'``.
    findex : int
        Index of ``feature`` in ``corpus``.
    k : int
        (default: 5) Number of burst states.
    normalize : bool
        (default: True) If True, burstness is expressed relative to the
        highest possible state (``k-1``). Otherwise, states themselves
        are returned.
    kwargs : kwargs
        Parameters for burstness automaton HMM.
    """
    if featureset_name not in corpus.features:
        corpus.index_feature(featureset_name)

    if 'date' not in corpus.indices:
        corpus.index('date')

    # Get time-intervals between occurrences.
    dates = [min(corpus.indices['date'].keys()) - 1]    # Pad start.
    X_ = [1.]

    years, values = corpus.feature_distribution(featureset_name, feature)
    for year, N in izip(years, values):
        if N == 0:
            continue
        if N > 1:
            if year == dates[-1] + 1:
                for n in xrange(int(N)):
                    X_.append(1. / N)
                    dates.append(year)
            else:
                X_.append(float(year - dates[-1]))
                dates.append(year)
                for n in xrange(int(N) - 1):
                    X_.append(1. / (N - 1))
                    dates.append(year)
        else:
            X_.append(float(year - dates[-1]))
            dates.append(year)

    # Get optimum state sequence.
    st = _forward(map(lambda x: x * 100, X_), s=s, gamma=gamma, k=k)

    # Bin by date.
    A = defaultdict(list)
    for i in xrange(len(X_)):
        A[dates[i]].append(st[i])

    # Normalize.
    if normalize:
        A = {key: mean(values) / k for key, values in A.items()}
    else:
        A = {key: mean(values) for key, values in A.items()}

    D = sorted(A.keys())
    return D[1:], [A[d] for d in D[1:]]
0.001417
def remove_child(self, child):
    """
    Remove a child from this node.
    """
    assert child in self.children
    self.children.remove(child)
    self.index.pop(child.tax_id)
    if child.parent is self:
        child.parent = None
    if child.index is self.index:
        child.index = None
    # Remove child subtree from index
    for n in child:
        if n is child:
            continue
        self.index.pop(n.tax_id)
        if n.index is self.index:
            n.index = None
0.003552
def create(cls, cash_register_id, description, status, amount_total,
           monetary_account_id=None, allow_amount_higher=None,
           allow_amount_lower=None, want_tip=None, minimum_age=None,
           require_address=None, redirect_url=None, visibility=None,
           expiration=None, tab_attachment=None, custom_headers=None):
    """
    Create a TabUsageMultiple. On creation the status must be set to OPEN

    :type user_id: int
    :type monetary_account_id: int
    :type cash_register_id: int
    :param description: The description of the TabUsageMultiple. Maximum
        9000 characters. Field is required but can be an empty string.
    :type description: str
    :param status: The status of the TabUsageMultiple. On creation the
        status must be set to OPEN. You can change the status from OPEN to
        PAYABLE. If the TabUsageMultiple gets paid the status will remain
        PAYABLE.
    :type status: str
    :param amount_total: The total amount of the Tab. Must be a positive
        amount. As long as the tab has the status OPEN you can change the
        total amount. This amount is not affected by the amounts of the
        TabItems. However, if you've created any TabItems for a Tab the sum
        of the amounts of these items must be equal to the total_amount of
        the Tab when you change its status to PAYABLE
    :type amount_total: object_.Amount
    :param allow_amount_higher: [DEPRECATED] Whether or not a higher amount
        can be paid.
    :type allow_amount_higher: bool
    :param allow_amount_lower: [DEPRECATED] Whether or not a lower amount
        can be paid.
    :type allow_amount_lower: bool
    :param want_tip: [DEPRECATED] Whether or not the user paying the Tab
        should be asked if he wants to give a tip. When want_tip is set to
        true, allow_amount_higher must also be set to true and
        allow_amount_lower must be false.
    :type want_tip: bool
    :param minimum_age: The minimum age of the user paying the Tab.
    :type minimum_age: int
    :param require_address: Whether a billing and shipping address must be
        provided when paying the Tab. Possible values are: BILLING,
        SHIPPING, BILLING_SHIPPING, NONE, OPTIONAL. Default is NONE.
    :type require_address: str
    :param redirect_url: The URL which the user is sent to after paying the
        Tab.
    :type redirect_url: str
    :param visibility: The visibility of a Tab. A Tab can be visible through
        NearPay, the QR code of the CashRegister and its own QR code.
    :type visibility: object_.TabVisibility
    :param expiration: The moment when this Tab expires. Can be at most 365
        days into the future.
    :type expiration: str
    :param tab_attachment: An array of attachments that describe the tab.
        Uploaded through the POST /user/{userid}/attachment-tab endpoint.
    :type tab_attachment: list[object_.BunqId]
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseStr
    """
    if custom_headers is None:
        custom_headers = {}

    request_map = {
        cls.FIELD_DESCRIPTION: description,
        cls.FIELD_STATUS: status,
        cls.FIELD_AMOUNT_TOTAL: amount_total,
        cls.FIELD_ALLOW_AMOUNT_HIGHER: allow_amount_higher,
        cls.FIELD_ALLOW_AMOUNT_LOWER: allow_amount_lower,
        cls.FIELD_WANT_TIP: want_tip,
        cls.FIELD_MINIMUM_AGE: minimum_age,
        cls.FIELD_REQUIRE_ADDRESS: require_address,
        cls.FIELD_REDIRECT_URL: redirect_url,
        cls.FIELD_VISIBILITY: visibility,
        cls.FIELD_EXPIRATION: expiration,
        cls.FIELD_TAB_ATTACHMENT: tab_attachment
    }
    request_map_string = converter.class_to_json(request_map)
    request_map_string = cls._remove_field_for_request(request_map_string)

    api_client = client.ApiClient(cls._get_api_context())
    request_bytes = request_map_string.encode()
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(
        cls._determine_user_id(),
        cls._determine_monetary_account_id(monetary_account_id),
        cash_register_id)
    response_raw = api_client.post(endpoint_url, request_bytes, custom_headers)

    return BunqResponseStr.cast_from_bunq_response(
        cls._process_for_uuid(response_raw)
    )
0.002566
def get_print_rect(self, grid_rect):
    """Returns wx.Rect that is correctly positioned on the print canvas"""
    grid = self.grid
    rect_x = grid_rect.x - \
        grid.GetScrollPos(wx.HORIZONTAL) * grid.GetScrollLineX()
    rect_y = grid_rect.y - \
        grid.GetScrollPos(wx.VERTICAL) * grid.GetScrollLineY()
    return wx.Rect(rect_x, rect_y, grid_rect.width, grid_rect.height)
0.004773
def peek_pointers_in_data(self, data, peekSize=16, peekStep=1):
    """
    Tries to guess which values in the given data are valid pointers,
    and reads some data from them.

    @type  data: str
    @param data: Binary data to find pointers in.

    @type  peekSize: int
    @param peekSize: Number of bytes to read from each pointer found.

    @type  peekStep: int
    @param peekStep: Expected data alignment.
        Typically you specify 1 when data alignment is unknown,
        or 4 when you expect data to be DWORD aligned.
        Any other value may be specified.

    @rtype:  dict( str S{->} str )
    @return: Dictionary mapping stack offsets to the data they point to.
    """
    aProcess = self.get_process()
    return aProcess.peek_pointers_in_data(data, peekSize, peekStep)
0.006912
def _make_tmp_path(self, remote_user=None):
    """
    Create a temporary subdirectory as a child of the temporary directory
    managed by the remote interpreter.
    """
    LOG.debug('_make_tmp_path(remote_user=%r)', remote_user)
    path = self._generate_tmp_path()
    LOG.debug('Temporary directory: %r', path)
    self._connection.get_chain().call_no_reply(os.mkdir, path)
    self._connection._shell.tmpdir = path
    return path
0.004184
def lastCall(self):  # pylint: disable=invalid-name
    """
    Return:
        SpyCall object for this spy's most recent call
    """
    last_index = len(super(SinonSpy, self)._get_wrapper().call_list) - 1
    return self.getCall(last_index)
0.01581
def gene_variants(self, query=None, category='snv',
                  variant_type=['clinical'], nr_of_variants=50, skip=0):
    """Return all variants seen in a given gene.

    If skip is not equal to 0, skip the first n variants.

    Arguments:
        query(dict): A dictionary with queries for the database, including
            variant_type: 'clinical', 'research'
        category(str): 'sv', 'str', 'snv' or 'cancer'
        nr_of_variants(int): if -1 return all variants
        skip(int): How many variants to skip
    """
    mongo_variant_query = self.build_variant_query(
        query=query, category=category, variant_type=variant_type)

    sorting = [('rank_score', pymongo.DESCENDING)]

    if nr_of_variants == -1:
        nr_of_variants = 0  # This will return all variants
    else:
        nr_of_variants = skip + nr_of_variants

    result = self.variant_collection.find(
        mongo_variant_query
    ).sort(sorting).skip(skip).limit(nr_of_variants)

    return result
0.006289
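# Hedged sketch: fetching the ten highest-ranked clinical SNVs for a gene.
# The adapter instance and the 'hgnc_symbols' query key are assumptions
# about the surrounding store API, not confirmed by the snippet above.
variants = adapter.gene_variants(
    query={'hgnc_symbols': ['POT1']},   # hypothetical query structure
    category='snv',
    nr_of_variants=10,
)
for variant in variants:
    print(variant['_id'], variant.get('rank_score'))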
def replace_grist (features, new_grist):
    """ Replaces the grist of a string by a new one.
        Returns the string with the new grist.
    """
    assert is_iterable_typed(features, basestring) or isinstance(features, basestring)
    assert isinstance(new_grist, basestring)
    # this function is used a lot in the build phase and the original implementation
    # was extremely slow; thus some of the weird-looking optimizations for this function.
    single_item = False
    if isinstance(features, str):
        features = [features]
        single_item = True

    result = []
    for feature in features:
        # '<feature>value' -> ('<feature', '>', 'value')
        # 'something' -> ('something', '', '')
        # '<toolset>msvc/<feature>value' -> ('<toolset', '>', 'msvc/<feature>value')
        grist, split, value = feature.partition('>')
        # if a partition didn't occur, then grist is just 'something'
        # set the value to be the grist
        if not value and not split:
            value = grist
        result.append(new_grist + value)

    if single_item:
        return result[0]
    return result
0.005286
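# Worked examples for replace_grist(), matching the partition() comments in
# the function body; each can be checked by hand.
assert replace_grist('<feature>value', '<toolset>') == '<toolset>value'
assert replace_grist('something', '<toolset>') == '<toolset>something'
assert replace_grist(['<a>x', '<b>y'], '<c>') == ['<c>x', '<c>y']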
def debug(self, nest_level=1):
    """
    Show the binary data and parsed data in a tree structure
    """

    prefix = ' ' * nest_level

    # This interacts with Any and moves the tag, implicit, explicit, _header,
    # contents, _footer to the parsed value so duplicate data isn't present
    has_parsed = hasattr(self, 'parsed')

    _basic_debug(prefix, self)
    if has_parsed:
        self.parsed.debug(nest_level + 2)
    elif hasattr(self, 'chosen'):
        self.chosen.debug(nest_level + 2)
    else:
        if _PY2 and isinstance(self.native, byte_cls):
            print('%s Native: b%s' % (prefix, repr(self.native)))
        else:
            print('%s Native: %s' % (prefix, self.native))
0.003836
def load_commands(self, obj):
    """
    Load commands defined on an arbitrary object.

    All functions decorated with the :func:`subparse.command` decorator
    attached to the specified object will be loaded. The object may be a
    dictionary, an arbitrary python object, or a dotted path. The dotted
    path may be absolute, or relative to the current package by
    specifying a leading '.' (e.g. ``'.commands'``).
    """
    if isinstance(obj, str):
        if obj.startswith('.') or obj.startswith(':'):
            package = caller_package()
            if obj in ['.', ':']:
                obj = package.__name__
            else:
                obj = package.__name__ + obj
        obj = pkg_resources.EntryPoint.parse('x=%s' % obj).resolve()
    command.discover_and_call(obj, self.command)
0.002286
def p_const_expression_floatnum(self, p):
    'const_expression : floatnumber'
    p[0] = FloatConst(p[1], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
0.011696
def update(self, request, *args, **kwargs):
    """Update an entity.

    The original queryset produces a temporary database table whose rows
    cannot be selected for an update. As a workaround, we patch the
    get_queryset function to return only Entity objects, without the
    additional data that is not needed for the update.
    """
    orig_get_queryset = self.get_queryset

    def patched_get_queryset():
        """Patched get_queryset method."""
        entity_ids = orig_get_queryset().values_list('id', flat=True)
        return Entity.objects.filter(id__in=entity_ids)

    self.get_queryset = patched_get_queryset

    resp = super().update(request, *args, **kwargs)

    self.get_queryset = orig_get_queryset
    return resp
0.002535
def _decoder(self):
    """Transliterate a string from English to the target language."""
    if self.target_lang == 'en':
        return Transliterator._dummy_coder
    else:
        weights = load_transliteration_table(self.target_lang)
        decoder_weights = weights["decoder"]
        return Transliterator._transliterate_string(decoder_weights)
0.014535
def ensure_future(fut, *, loop=None):
    """
    Wraps asyncio.async()/asyncio.ensure_future() depending on the python version

    :param fut: The awaitable, future, or coroutine to wrap
    :param loop: The loop to run in
    :return: The wrapped future
    """
    if sys.version_info < (3, 4, 4):
        # This is to avoid a SyntaxError on 3.7.0a2+
        func = getattr(asyncio, "async")
    else:
        func = asyncio.ensure_future

    return func(fut, loop=loop)
0.004219
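# Minimal usage sketch for the version-agnostic wrapper above; runnable on
# any Python 3 interpreter with asyncio available.
import asyncio

async def work():
    return 42

loop = asyncio.get_event_loop()
task = ensure_future(work(), loop=loop)
print(loop.run_until_complete(task))  # 42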
def get_runconfig(path=None, root=None, db=None):
    """Load the main configuration files and accounts file.

    Deprecated. Use load()
    """
    return load(path, root=root, db=db)
0.005348
def parse_attrlist(str_, avs_sep=":", vs_sep=",", as_sep=";"):
    """
    Simple parser to parse expressions in the form of
    [ATTR1:VAL0,VAL1,...;ATTR2:VAL0,VAL2,..].

    :param str_: input string
    :param avs_sep: char to separate attribute and values
    :param vs_sep: char to separate values
    :param as_sep: char to separate attributes

    >>> parse_attrlist("requires:bash,zsh")
    {'requires': ['bash', 'zsh']}
    """
    return dict(parse_attrlist_0(str_, avs_sep, vs_sep, as_sep))
0.001984
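# A further example with several attributes, assuming parse_attrlist_0
# splits on the documented separators in the same way as the doctest above:
#
# >>> parse_attrlist("requires:bash,zsh;obsoletes:sysdata")
# {'requires': ['bash', 'zsh'], 'obsoletes': ['sysdata']}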
def get_file(self, file_path, mode="r"):
    """
    Provide a File object for the file specified via 'file_path'.

    :param file_path: str, path to the file
    :param mode: str, mode used when opening the file
    :return: File instance
    """
    return open(self.cont_path(file_path), mode=mode)
0.00639
def alpha1_carbonate(pH):
    """Calculate the fraction of total carbonates in bicarbonate form (HCO3-)

    :param pH: pH of the system
    :type pH: float

    :return: Fraction of carbonates in bicarbonate form (HCO3-)
    :rtype: float

    :Examples:

    >>> from aguaclara.research.environmental_processes_analysis import alpha1_carbonate
    >>> round(alpha1_carbonate(10), 7)
    <Quantity(0.639969, 'dimensionless')>
    """
    alpha1_carbonate = 1/((invpH(pH)/K1_carbonate) + 1 +
                          (K2_carbonate/invpH(pH)))
    return alpha1_carbonate
0.003503
def from_file(cls, file_path, compressed=False, encoded=False):
    """Create a content object from a file path."""
    file_id = '.'.join(path.basename(file_path).split('.')[:-1])
    file_format = file_path.split('.')[-1]
    content = cls(file_id, file_format, compressed, encoded)
    content.file_exists = True
    content._location = path.dirname(file_path)
    return content
0.004878
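# Hedged usage sketch for from_file(); 'Content' stands in for whatever
# class this classmethod is defined on, and the path is a placeholder.
content = Content.from_file('/data/sample.json')
print(content.file_exists)   # True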
def build_event_out(self, event_out):
    """
    Build event out code.

    @param event_out: event out object
    @type event_out: lems.model.dynamics.EventOut

    @return: Generated event out code
    @rtype: list(string)
    """
    event_out_code = [
        'if "{0}" in self.event_out_callbacks:'.format(event_out.port),
        "    for c in self.event_out_callbacks['{0}']:".format(event_out.port),
        '        c()']
    return event_out_code
0.007722
def clicked(self, event):
    """Called when an element of this plot type is clicked.

    Implement in a subclass.
    """
    group = event.artist._mt_group
    indices = event.ind

    # double click only supported on 1.2 or later
    major, minor, _ = mpl_version.split('.')
    if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick:
        for i in indices:
            print(self.groups[group][i].line_str)

    else:
        # toggle durline
        first = indices[0]
        logevent = self.groups[group][first]

        try:
            # remove triangle for this event
            idx = list(map(itemgetter(0), self.durlines)).index(logevent)
            _, poly = self.durlines[idx]
            poly.remove()
            plt.gcf().canvas.draw()
            del self.durlines[idx]

        except ValueError:
            # construct triangle and add to list of durlines
            if self.args['optime_start']:
                pts = [[date2num(logevent.datetime), 0],
                       [date2num(logevent.datetime), logevent.duration],
                       [date2num(logevent.datetime +
                                 timedelta(milliseconds=logevent.duration)
                                 ), 0]]
            else:
                pts = [[date2num(logevent.datetime), 0],
                       [date2num(logevent.datetime), logevent.duration],
                       [date2num(logevent.datetime -
                                 timedelta(milliseconds=logevent.duration)
                                 ), 0]]

            poly = Polygon(pts, closed=True, alpha=0.2, linewidth=0,
                           facecolor=event.artist.get_markerfacecolor(),
                           edgecolor=None, zorder=-10000)

            ax = plt.gca()
            ax.add_patch(poly)
            plt.gcf().canvas.draw()

            self.durlines.append((logevent, poly))
0.000968
def varYSizeGaussianFilter(arr, stdyrange, stdx=0,
                           modex='wrap', modey='reflect'):
    '''
    Applies a Gaussian filter to the input array, allowing a variable
    kernel size in y:

    stdyrange(int) -> maximum ksize; ksizes will increase from 0 to the
                      given value
    stdyrange(tuple,list) -> minimum and maximum size as (mn,mx)
    stdyrange(np.array) -> all different ksizes in y
    '''
    assert arr.ndim == 2, 'only works on 2d arrays at the moment'

    s0 = arr.shape[0]

    # create stdys:
    if isinstance(stdyrange, np.ndarray):
        assert len(stdyrange) == s0, '[stdyrange] needs to have same length as [arr]'
        stdys = stdyrange
        mx = stdys.max()  # maximum std in y, needed for the kernel size below
    else:
        if type(stdyrange) not in (list, tuple):
            stdyrange = (0, stdyrange)
        mn, mx = stdyrange
        stdys = np.linspace(mn, mx, s0)

    # prepare array for convolution:
    kx = int(stdx * 2.5)
    kx += 1 - kx % 2
    ky = int(mx * 2.5)
    ky += 1 - ky % 2

    arr2 = extendArrayForConvolution(arr, (kx, ky), modex, modey)

    # create convolution kernels:
    inp = np.zeros((ky, kx))
    inp[ky // 2, kx // 2] = 1
    kernels = np.empty((s0, ky, kx))
    for i in range(s0):
        stdy = stdys[i]
        kernels[i] = gaussian_filter(inp, (stdy, stdx))

    out = np.empty_like(arr)
    _2dConvolutionYdependentKernel(arr2, out, kernels)
    return out
0.016476
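# Hedged usage sketch: smoothing that ramps from no blur at the first row
# to std=5 at the last row, with a fixed std of 2 in x. Assumes the
# module-internal helpers (extendArrayForConvolution,
# _2dConvolutionYdependentKernel) are available alongside the function.
import numpy as np

arr = np.random.rand(100, 80)
smoothed = varYSizeGaussianFilter(arr, stdyrange=(0, 5), stdx=2)
print(smoothed.shape)   # (100, 80)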
def package_assets(example_path):
    """
    Generates pseudo-packages for the examples directory.
    """
    examples(example_path, force=True, root=__file__)

    for root, dirs, files in os.walk(example_path):
        walker(root, dirs + files)

    setup_args['packages'] += packages
    for p, exts in extensions.items():
        if exts:
            setup_args['package_data'][p] = exts
0.002564
def query_image_metadata(self, image, metadata_type=""):
    '''**Description**
        Find the image with the tag <image> and return its metadata.

    **Arguments**
        - image: Input image can be in the following formats: registry/repo:tag
        - metadata_type: The metadata type can be one of the types
          returned by running without a type specified

    **Success Return Value**
        A JSON object representing the image metadata.
    '''
    return self._query_image(image, query_group='metadata',
                             query_type=metadata_type)
0.008711
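# Hedged sketch for query_image_metadata(); the client construction below
# is a placeholder for however the surrounding scanning client is normally
# instantiated, and the image tag is arbitrary.
client = ScanningClient(token='<api-token>')   # hypothetical constructor
print(client.query_image_metadata('docker.io/library/nginx:latest'))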
def validate(table, constraints=None, header=None):
    """
    Validate a `table` against a set of `constraints` and/or an expected
    `header`, e.g.::

        >>> import petl as etl
        >>> # define some validation constraints
        ... header = ('foo', 'bar', 'baz')
        >>> constraints = [
        ...     dict(name='foo_int', field='foo', test=int),
        ...     dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
        ...     dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
        ...     dict(name='not_none', assertion=lambda row: None not in row),
        ...     dict(name='qux_int', field='qux', test=int, optional=True),
        ... ]
        >>> # now validate a table
        ... table = (('foo', 'bar', 'bazzz'),
        ...          (1, '2000-01-01', 'Y'),
        ...          ('x', '2010-10-10', 'N'),
        ...          (2, '2000/01/01', 'Y'),
        ...          (3, '2015-12-12', 'x'),
        ...          (4, None, 'N'),
        ...          ('y', '1999-99-99', 'z'),
        ...          (6, '2000-01-01'),
        ...          (7, '2001-02-02', 'N', True))
        >>> problems = etl.validate(table, constraints=constraints, header=header)
        >>> problems.lookall()
        +--------------+-----+-------+--------------+------------------+
        | name         | row | field | value        | error            |
        +==============+=====+=======+==============+==================+
        | '__header__' |   0 | None  | None         | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'foo_int'    |   2 | 'foo' | 'x'          | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'bar_date'   |   3 | 'bar' | '2000/01/01' | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'baz_enum'   |   4 | 'baz' | 'x'          | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'bar_date'   |   5 | 'bar' | None         | 'AttributeError' |
        +--------------+-----+-------+--------------+------------------+
        | 'not_none'   |   5 | None  | None         | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'foo_int'    |   6 | 'foo' | 'y'          | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'bar_date'   |   6 | 'bar' | '1999-99-99' | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'baz_enum'   |   6 | 'baz' | 'z'          | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | '__len__'    |   7 | None  | 2            | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'baz_enum'   |   7 | 'baz' | None         | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | '__len__'    |   8 | None  | 4            | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+

    Returns a table of validation problems.

    """  # noqa

    return ProblemsView(table, constraints=constraints, header=header)
0.000297
def _read_routine_metadata(self):
    """
    Returns the metadata of stored routines.

    :rtype: dict
    """
    metadata = {}
    if os.path.isfile(self._metadata_filename):
        with open(self._metadata_filename, 'r') as file:
            metadata = json.load(file)

    return metadata
0.006042
def create_random_ind_full(self, depth=0):
    "Random individual using full method"
    lst = []
    self._create_random_ind_full(depth=depth, output=lst)
    return lst
0.010753
def set_splash_message(self, text):
    """Sets the text at the bottom of the Splash screen."""
    self.splash_text = text
    self._show_message(text)
    self.timer_ellipsis.start(500)
0.009662
def find_slots(cls):
    """Return a set of all slots for a given class and its parents"""
    slots = set()
    for c in cls.__mro__:
        cslots = getattr(c, "__slots__", tuple())
        if not cslots:
            continue
        elif isinstance(cslots, (bstr, ustr)):
            cslots = (cslots,)
        slots.update(cslots)
    return slots
0.002809
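# Runnable example for find_slots(): slots are collected across the MRO,
# and a bare-string __slots__ is normalised to a one-element tuple by the
# isinstance branch above.
class Base(object):
    __slots__ = 'a'          # single string

class Child(Base):
    __slots__ = ('b', 'c')

print(find_slots(Child))     # {'a', 'b', 'c'}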
def update_group_color(self, lights: list) -> None:
    """Update group colors based on light states.

    deCONZ group updates don't contain any information about the current
    state of the lights in the group. This method updates the color
    properties of the group to the current color of the lights in the
    group.

    For groups where the lights have different colors the group color
    will only reflect the color of the latest changed light in the group.
    """
    for group in self.groups.values():
        # Skip group if there are no common light ids.
        if not any({*lights} & {*group.lights}):
            continue

        # More than one light means load_parameters called this method.
        # Then we take the first reachable light in the group.
        light_ids = lights
        if len(light_ids) > 1:
            light_ids = group.lights

        for light_id in light_ids:
            if self.lights[light_id].reachable:
                group.update_color_state(self.lights[light_id])
                break
0.001786
def sinogram_as_radon(uSin, align=True):
    r"""Compute the phase from a complex wave field sinogram

    This step is essential when using the ray approximation before
    computation of the refractive index with the inverse Radon
    transform.

    Parameters
    ----------
    uSin: 2d or 3d complex ndarray
        The background-corrected sinogram of the complex scattered wave
        :math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates
        through the angles :math:`\phi_0`.
    align: bool
        Tries to correct for a phase offset in the phase sinogram.

    Returns
    -------
    phase: 2d or 3d real ndarray
        The unwrapped phase array corresponding to `uSin`.

    See Also
    --------
    skimage.restoration.unwrap_phase: phase unwrapping
    radontea.backproject_3d: e.g. reconstruction via backprojection
    """
    ndims = len(uSin.shape)

    if ndims == 2:
        # unwrapping is very important
        phiR = np.unwrap(np.angle(uSin), axis=-1)
    else:
        # Unwrap gets the dimension of the problem from the input
        # data. Since we have a sinogram, we need to pass it the
        # slices one by one.
        phiR = np.angle(uSin)
        for ii in range(len(phiR)):
            phiR[ii] = unwrap_phase(phiR[ii], seed=47)

    if align:
        align_unwrapped(phiR)

    return phiR
0.000742
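# Hedged usage sketch for sinogram_as_radon(): a synthetic complex sinogram
# with 120 projection angles and 256 detector positions. The random field
# only demonstrates shapes; real input would be a background-corrected
# scattered wave.
import numpy as np

u_sin = np.exp(1j * 0.1 * np.random.rand(120, 256))
phase = sinogram_as_radon(u_sin, align=True)
print(phase.shape)   # (120, 256)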
def values(self, multi=False):
    # type: (bool) -> Iterator[Any]
    """
    Yield the last value on every key list.

    :param multi:
        If set to `True` the iterator returned will have a pair for each
        value of each key. Otherwise it will only contain pairs for the
        last added value of each key.
    """
    for values in itervalues(self):
        if multi:
            for value in values:
                yield value
        else:
            yield values[-1]
0.005474
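# Hedged sketch of the multi flag: assuming `md` is an instance of this
# multi-dict class holding 'a' -> [1, 2] and 'b' -> [3].
print(list(md.values()))             # [2, 3]      (last value per key)
print(list(md.values(multi=True)))   # [1, 2, 3]   (every stored value)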
def read(self):
    '''Read some number of messages'''
    found = Client.read(self)

    # Redistribute our ready state if necessary
    if self.needs_distribute_ready():
        self.distribute_ready()

    # Finally, return all the results we've read
    return found
0.006711