Columns: text (string, lengths 78–104k) · score (float64, range 0–0.18)
def request(self, url, post=None, method="GET"): """ Make the request""" dsid = self.get_dsid() baseurl = "https://auth.api.swedbank.se/TDE_DAP_Portal_REST_WEB/api/v1/%s?dsid=%s" % ( url, dsid) if self.pch is None: self.pch = build_opener(HTTPCookieProcessor(self.cj)) if post: post = bytearray(post, "utf-8") request = Request(baseurl, data=post) request.add_header("Content-Type", "application/json") else: request = Request(baseurl) request.add_header("User-Agent", self.useragent) request.add_header("Authorization", self.get_authkey()) request.add_header("Accept", "*/*") request.add_header("Accept-Language", "sv-se") request.add_header("Connection", "keep-alive") request.add_header("Proxy-Connection", "keep-alive") self.cj.set_cookie( Cookie(version=0, name='dsid', value=dsid, port=None, port_specified=False, domain='.api.swedbank.se', domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpsOnly': None}, rfc2109=False)) request.get_method = lambda: method tmp = self.pch.open(request) self.data = tmp.read().decode("utf8")
0.001996
def find_one(self, cls, id): """Required functionality.""" one = self._find(cls, {"_id": id}) if not one: return None return one[0]
0.011429
def uniprot(self, uniprotid=None, hgnc_symbol=None, hgnc_identifier=None, limit=None, as_df=False): """Method to query :class:`.models.UniProt` objects in database :param uniprotid: UniProt identifier(s) :type uniprotid: str or tuple(str) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifiers(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.UniProt`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.UniProt`) or :class:`pandas.DataFrame` """ q = self.session.query(models.UniProt) model_queries_config = ( (uniprotid, models.UniProt.uniprotid), ) q = self.get_model_queries(q, model_queries_config) many_to_many_queries_config = ( (hgnc_symbol, models.UniProt.hgncs, models.HGNC.symbol), (hgnc_identifier, models.UniProt.hgncs, models.HGNC.identifier), ) q = self.get_many_to_many_queries(q, many_to_many_queries_config) return self._limit_and_df(q, limit, as_df)
0.003088
def fit(self, X, y=None, **kwargs): """ The fit method is the primary drawing input for the visualization since it has both the X and y data required for the viz and the transform method does not. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer """ if is_dataframe(X): self.X = X.values if self.features_ is None: self.features_ = X.columns else: self.X = X self.y = y super(MissingDataVisualizer, self).fit(X, y, **kwargs)
0.002128
def logger_delete(self, project, logger_name): """API call: delete all entries in a logger via a DELETE request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete :type project: str :param project: ID of project containing the log entries to delete :type logger_name: str :param logger_name: name of logger containing the log entries to delete """ path = "/projects/%s/logs/%s" % (project, logger_name) self.api_request(method="DELETE", path=path)
0.003546
def get_stroke_features(recording, strokeid1, strokeid2): """Get the features used to decide if two strokes belong to the same symbol or not. Parameters ---------- recording : list A list of strokes strokeid1 : int strokeid2 : int Returns ------- list : A list of features which could be useful to decide if stroke1 and stroke2 belong to the same symbol. """ stroke1 = recording[strokeid1] stroke2 = recording[strokeid2] assert isinstance(stroke1, list), "stroke1 is a %s" % type(stroke1) X_i = [] for s in [stroke1, stroke2]: hw = HandwrittenData(json.dumps([s])) feat1 = features.ConstantPointCoordinates(strokes=1, points_per_stroke=20, fill_empty_with=0) feat2 = features.ReCurvature(strokes=1) feat3 = features.Ink() X_i += hw.feature_extraction([feat1, feat2, feat3]) X_i += [get_strokes_distance(stroke1, stroke2)] # Distance of strokes X_i += [get_time_distance(stroke1, stroke2)] # Time in between X_i += [abs(strokeid2-strokeid1)] # Strokes in between # X_i += [get_black_percentage()] return X_i
0.000794
def _search(self, query, fields=None, limit=50000, sampling=None): """ Perform the search and return raw rows :type query object :type fields list[str] or None :type limit int :type sampling int or None :arg sampling: Percentage of results to be returned (0,100) :rtype: list """ body = { "query": { "bool": { "must": [ query ] } } } # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-source-filtering.html if fields: body['_source'] = { "includes": fields } # add @timestamp range # @see http://stackoverflow.com/questions/40996266/elasticsearch-5-1-unknown-key-for-a-start-object-in-filters # @see https://discuss.elastic.co/t/elasticsearch-watcher-error-for-range-query/70347/2 body['query']['bool']['must'].append(self._get_timestamp_filer()) # sample the results if needed if sampling is not None: body['query']['bool']['must'].append({ 'script': { 'script': { 'lang': 'painless', 'source': "Math.abs(doc['_id'].value.hashCode()) % 100 < params.sampling", 'params': { 'sampling': sampling } } } }) self._logger.debug("Running {} query (limit set to {:d})".format(json.dumps(body), body.get('size', 0))) # use Scroll API to be able to fetch more than 10k results and prevent "search_phase_execution_exception": # "Result window is too large, from + size must be less than or equal to: [10000] but was [500000]. # See the scroll api for a more efficient way to request large data sets." # # @see http://elasticsearch-py.readthedocs.io/en/master/helpers.html#scan rows = scan( client=self._es, clear_scroll=False, # True causes "403 Forbidden: You don't have access to this resource" index=self._index, query=body, sort=["_doc"], # return the next batch of results from every shard that still has results to return. size=self._batch_size, # batch size ) # get only requested amount of entries and cast them to a list rows = islice(rows, 0, limit) rows = [entry['_source'] for entry in rows] # get data self._logger.info("{:d} rows returned".format(len(rows))) return rows
0.004762
def expand_tpm(tpm): """Broadcast a state-by-node TPM so that singleton dimensions are expanded over the full network. """ unconstrained = np.ones([2] * (tpm.ndim - 1) + [tpm.shape[-1]]) return tpm * unconstrained
0.004292
def stream_info(self) -> None: """Check and report source names, dtypes and shapes of all the streams available.""" stream_names = [stream_name for stream_name in dir(self) if 'stream' in stream_name and stream_name != 'stream_info'] logging.info('Found %s stream candidates: %s', len(stream_names), stream_names) for stream_name in stream_names: try: stream_fn = getattr(self, stream_name) logging.info(stream_name) batch = next(iter(stream_fn())) rows = [] for key, value in batch.items(): try: value_arr = np.array(value) row = [key, value_arr.dtype, value_arr.shape] if value_arr.dtype.kind in 'bui': # boolean, unsigned, integer row.append('{} - {}'.format(value_arr.min(), value_arr.max())) elif value_arr.dtype.kind is 'f': row.append('{0:.2f} - {1:.2f}'.format(value_arr.min(), value_arr.max())) except ValueError: # np broadcasting failed (ragged array) value_arr = None row = [key, '{}'.format(type(value[0]).__name__), '({},)'.format(len(list(value)))] if value_arr is None or \ (value_arr.ndim > 0 and value_arr.shape[1:] != np.array(value_arr[0]).shape): logging.warning('*** stream source `%s` appears to be ragged (non-rectangular) ***', key) rows.append(row) for line in tabulate.tabulate(rows, headers=['name', 'dtype', 'shape', 'range'], tablefmt='grid').split('\n'): logging.info(line) except Exception: logging.warning('Exception was raised during checking stream `%s`, ' '(stack trace is displayed only with --verbose flag)', stream_name) logging.debug(traceback.format_exc())
0.007289
def get_bundle_registered_services(self, bundle): # type: (Any) -> List[ServiceReference] """ Retrieves the services registered by the given bundle. Returns None if the bundle didn't register any service. :param bundle: The bundle to look into :return: The references to the services registered by the bundle """ with self.__svc_lock: return sorted(self.__bundle_svc.get(bundle, []))
0.006522
def generate_protocol(self): """ Recreate the command stimulus (protocol) for the current sweep. It's not stored point by point (that's a waste of time and memory!) Instead it's stored as a few (x,y) points which can be easily graphed. TODO: THIS for segment in abf.ABFreader.read_protocol(): for analogsignal in segment.analogsignals: print(analogsignal) plt.plot(analogsignal) plt.show() plt.close('all') """ # TODO: elegantly read the protocol like this: #abf.ABFreader.read_protocol()[0].analogsignals()[sigNum] # TODO: right now this works only for the first channel # correct for weird recording/protocol misalignment #what is magic here? 64-bit data points? #1,000,000/64 = 15625 btw self.offsetX = int(self.sweepSize/64) # if there's not a header, get out of here! if not len(self.header['dictEpochInfoPerDAC']): self.log.debug("no protocol defined, so I'll make one") self.protoX,self.protoY=[0,self.sweepX[-1]],[self.holding,self.holding] self.protoSeqX,self.protoSeqY=[0],[self.holding] return # load our protocol from the header proto=self.header['dictEpochInfoPerDAC'][self.channel] # prepare our (x,y) pair arrays self.protoX,self.protoY=[] ,[] # assume our zero time point is the "holding" value self.protoX.append(0) self.protoY.append(self.holding) #TODO: what is this??? # now add x,y points for each change in the protocol for step in proto: dX = proto[step]['lEpochInitDuration'] Y = proto[step]['fEpochInitLevel']+proto[step]['fEpochLevelInc']*self.sweep # we have a new Y value, so add it to the last time point self.protoX.append(self.protoX[-1]) self.protoY.append(Y) # now add the same Y point after "dX" amount of time self.protoX.append(self.protoX[-1]+dX) self.protoY.append(Y) # TODO: detect ramps and warn what's up # The last point is probably holding current finalVal=self.holding #regular holding # although if it's set to "use last value", maybe that should be the last one if self.header['listDACInfo'][0]['nInterEpisodeLevel']: finalVal=self.protoY[-1] # add the shift to the final value to the list self.protoX.append(self.protoX[-1]) self.protoY.append(finalVal) # and again as the very last time point self.protoX.append(self.sweepSize) self.protoY.append(finalVal) # update the sequence of protocols now (eliminate duplicate entries) for i in range(1,len(self.protoX)-1): #correct for weird ABF offset issue. self.protoX[i]=self.protoX[i]+self.offsetX self.protoSeqY=[self.protoY[0]] self.protoSeqX=[self.protoX[0]] for i in range(1,len(self.protoY)): if not self.protoY[i]==self.protoY[i-1]: self.protoSeqY.append(self.protoY[i]) self.protoSeqX.append(self.protoX[i]) if self.protoY[0]!=self.protoY[1]: self.protoY.insert(1,self.protoY[0]) self.protoX.insert(1,self.protoX[1]) self.protoY.insert(1,self.protoY[0]) self.protoX.insert(1,self.protoX[0]+self.offsetX/2) self.protoSeqY.append(finalVal) self.protoSeqX.append(self.sweepSize) # convert lists to numpy arrays and do any final conversions self.protoX=np.array(self.protoX)/self.pointsPerSec self.protoY=np.array(self.protoY)
0.011733
def _send(self, msg, buffers=None): """Sends a message to the model in the front-end.""" if self.comm is not None and self.comm.kernel is not None: self.comm.send(data=msg, buffers=buffers)
0.009217
def _import_data(sections, crumbs): """ Import the section metadata and change it to index-by-name. :param list sections: Metadata :param str pc: paleo or chron :return dict _sections: Metadata """ logger_jsons.info("enter import_data: {}".format(crumbs)) _sections = OrderedDict() try: for _idx, section in enumerate(sections): _tmp = OrderedDict() # Process the paleo measurement table if "measurementTable" in section: _tmp["measurementTable"] = _idx_table_by_name(section["measurementTable"], "{}{}{}".format(crumbs, _idx, "measurement")) # Process the paleo model if "model" in section: _tmp["model"] = _import_model(section["model"], "{}{}{}".format(crumbs, _idx, "model")) # Get the table name from the first measurement table, and use that as the index name for this table _table_name = "{}{}".format(crumbs, _idx) # If we only have generic table names, and one exists already, don't overwrite. Create dynamic name if _table_name in _sections: _table_name = "{}_{}".format(_table_name, _idx) # Put the final product into the output dictionary. Indexed by name _sections[_table_name] = _tmp except Exception as e: logger_jsons.error("import_data: Exception: {}".format(e)) print("Error: import_data: {}".format(e)) logger_jsons.info("exit import_data: {}".format(crumbs)) return _sections
0.003215
def get(self, key, default=None): """Returns the value of `key` if it exists, else `default`.""" if key in self._hparam_types: # Ensure that default is compatible with the parameter type. if default is not None: param_type, is_param_list = self._hparam_types[key] type_str = 'list<%s>' % param_type if is_param_list else str(param_type) fail_msg = ("Hparam '%s' of type '%s' is incompatible with " 'default=%s' % (key, type_str, default)) is_default_list = isinstance(default, list) if is_param_list != is_default_list: raise ValueError(fail_msg) try: if is_default_list: for value in default: _cast_to_type_if_compatible(key, param_type, value) else: _cast_to_type_if_compatible(key, param_type, default) except ValueError as e: raise ValueError('%s. %s' % (fail_msg, e)) return getattr(self, key) return default
0.00999
def get_object_list_json(request): """gmn.listObjects(session[, fromDate][, toDate][, formatId] [, identifier][, replicaStatus][, start=0][, count=1000] [, f=sysmetaField ...]) → ObjectListJson GMN specific API for fast retrieval of object sysmeta elements. """ # TODO: Add to documentation if "f" in request.GET: field_list = request.GET.getlist("f") else: field_list = None result_dict = d1_gmn.app.views.util.query_object_list(request, "object_list_json") result_dict["fields"] = field_list result_dict["objects"] = d1_gmn.app.sysmeta_extract.extract_values_query( result_dict["query"], field_list ) del result_dict["query"] return django.http.HttpResponse( d1_common.util.serialize_to_normalized_pretty_json(result_dict), d1_common.const.CONTENT_TYPE_JSON, )
0.002307
def _inclusiveNamespacePrefixes(node, context, unsuppressedPrefixes): '''http://www.w3.org/TR/xml-exc-c14n/ InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that are handled in the manner described by the Canonical XML Recommendation''' inclusive = [] if node.prefix: usedPrefixes = ['xmlns:%s' %node.prefix] else: usedPrefixes = ['xmlns'] for a in _attrs(node): if a.nodeName.startswith('xmlns') or not a.prefix: continue usedPrefixes.append('xmlns:%s' %a.prefix) unused_namespace_dict = {} for attr in context: n = attr.nodeName if n in unsuppressedPrefixes: inclusive.append(attr) elif n.startswith('xmlns:') and n[6:] in unsuppressedPrefixes: inclusive.append(attr) elif n.startswith('xmlns') and n[5:] in unsuppressedPrefixes: inclusive.append(attr) elif attr.nodeName in usedPrefixes: inclusive.append(attr) elif n.startswith('xmlns:'): unused_namespace_dict[n] = attr.value return inclusive, unused_namespace_dict
0.0062
def set_display_mode(self, zoom,layout='continuous'): """Set display mode in viewer The "zoom" argument may be 'fullpage', 'fullwidth', 'real', 'default', or a number, interpreted as a percentage.""" if(zoom=='fullpage' or zoom=='fullwidth' or zoom=='real' or zoom=='default' or not isinstance(zoom,basestring)): self.zoom_mode=zoom else: self.error('Incorrect zoom display mode: '+zoom) if(layout=='single' or layout=='continuous' or layout=='two' or layout=='default'): self.layout_mode=layout else: self.error('Incorrect layout display mode: '+layout)
0.026627
def next_frame_epva(): """EPVA hparams.""" hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.video_num_input_frames = 4 hparams.video_num_target_frames = 4 hparams.bottom = { "inputs": modalities.video_raw_bottom, "targets": modalities.video_raw_targets_bottom, } hparams.loss = { "targets": modalities.video_l2_raw_loss, } hparams.top = { "targets": modalities.video_raw_top, } hparams.learning_rate_schedule = "constant" hparams.learning_rate_constant = 1e-05 hparams.batch_size = 2 hparams.clip_grad_norm = 0.01 # TODO(msaffar): disentangle EPVA from SV2P hparams.add_hparam("reward_prediction", False) hparams.add_hparam("clip_pixel_values", True) hparams.add_hparam("context_frames", 5) hparams.add_hparam("enc_learning_rate", 1e-5) hparams.add_hparam("enc_pred_loss_scale", 0.1) hparams.add_hparam("enc_pred_loss_scale_delay", 6e5) hparams.add_hparam("enc_size", 64) hparams.add_hparam("enc_keep_prob", .65) hparams.add_hparam("enc_pred_use_l1_loss", False) hparams.add_hparam("enc_pred_use_l2norm", False) hparams.add_hparam("van_learning_rate", 3e-5) hparams.add_hparam("van_keep_prob", .9) hparams.add_hparam("sequence_length", 64) hparams.add_hparam("skip_num", 2) hparams.add_hparam("pred_noise_std", 0) hparams.add_hparam("lstm_state_noise_stddev", 0) return hparams
0.021505
def get(cls, **kwargs): """Get a copy of the type from the cache and reconstruct it.""" data = cls._get(**kwargs) if data is None: new = cls() new.from_miss(**kwargs) return new return cls.deserialize(data)
0.007273
def fix_lamdaline(source): """Remove the last redundant token from lambda expression lambda x: return x) ^ Return a string without the irrelevant tokens returned from inspect.getsource on a lambda expr """ # Using undocumented generate_tokens due to a tokenize.tokenize bug # See https://bugs.python.org/issue23297 strio = io.StringIO(source) gen = tokenize.generate_tokens(strio.readline) tkns = [] try: for t in gen: tkns.append(t) except tokenize.TokenError: pass # Find the position of 'lambda' lambda_pos = [(t.type, t.string) for t in tkns].index( (tokenize.NAME, "lambda") ) # Ignore tokens before 'lambda' tkns = tkns[lambda_pos:] # Find the position of the last OP lastop_pos = ( len(tkns) - 1 - [t.type for t in tkns[::-1]].index(tokenize.OP) ) lastop = tkns[lastop_pos] # Remove OP from the line fixedlineno = lastop.start[0] fixedline = lastop.line[: lastop.start[1]] + lastop.line[lastop.end[1] :] tkns = tkns[:lastop_pos] fixedlines = "" last_lineno = 0 for t in tkns: if last_lineno == t.start[0]: continue elif t.start[0] == fixedlineno: fixedlines += fixedline last_lineno = t.start[0] else: fixedlines += t.line last_lineno = t.start[0] return fixedlines
0.001387
def base_dtype(dtype): """Returns a non-reference `dtype` based on this `dtype`.""" dtype = tf.as_dtype(dtype) if hasattr(dtype, 'base_dtype'): return dtype.base_dtype return dtype
0.026042
def buscar_idtrafficreturn_opcvip(self, nome_opcao_txt): """Search id of Option VIP when tipo_opcao = 'Retorno de trafego' :return: Dictionary with the following structure: :: {'trafficreturn_opt': 'trafficreturn_opt': <'id'>} :raise InvalidParameterError: Environment VIP identifier is null and invalid. :raise EnvironmentVipNotFoundError: Environment VIP not registered. :raise InvalidParameterError: finalidade_txt and cliente_txt is null and invalid. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ trafficreturn_map = dict() trafficreturn_map['trafficreturn'] = nome_opcao_txt url = 'optionvip/trafficreturn/search/' code, xml = self.submit({'trafficreturn_opt':trafficreturn_map }, 'POST', url) return self.response(code, xml)
0.007353
def check_origin(self, origin: str) -> bool: """Override to enable support for allowing alternate origins. The ``origin`` argument is the value of the ``Origin`` HTTP header, the url responsible for initiating this request. This method is not called for clients that do not send this header; such requests are always allowed (because all browsers that implement WebSockets support this header, and non-browser clients do not have the same cross-site security concerns). Should return ``True`` to accept the request or ``False`` to reject it. By default, rejects all requests with an origin on a host other than this one. This is a security protection against cross site scripting attacks on browsers, since WebSockets are allowed to bypass the usual same-origin policies and don't use CORS headers. .. warning:: This is an important security measure; don't disable it without understanding the security implications. In particular, if your authentication is cookie-based, you must either restrict the origins allowed by ``check_origin()`` or implement your own XSRF-like protection for websocket connections. See `these <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_ `articles <https://devcenter.heroku.com/articles/websocket-security>`_ for more. To accept all cross-origin traffic (which was the default prior to Tornado 4.0), simply override this method to always return ``True``:: def check_origin(self, origin): return True To allow connections from any subdomain of your site, you might do something like:: def check_origin(self, origin): parsed_origin = urllib.parse.urlparse(origin) return parsed_origin.netloc.endswith(".mydomain.com") .. versionadded:: 4.0 """ parsed_origin = urlparse(origin) origin = parsed_origin.netloc origin = origin.lower() host = self.request.headers.get("Host") # Check to see that origin matches host directly, including ports return origin == host
0.000864
def set_fan_mode(self, mode): """ :param mode: a string one of ["on", "auto"] :return: nothing """ desired_state = {"fan_mode": mode} response = self.api_interface.set_device_state(self, { "desired_state": desired_state }) self._update_state_from_response(response)
0.005831
def load_csv(self, filename, start_frame=10, max_frames=int(1e300)): '''Load marker data from a CSV file. The file will be imported using Pandas, which must be installed to use this method. (``pip install pandas``) The first line of the CSV file will be used for header information. The "time" column will be used as the index for the data frame. There must be columns named 'markerAB-foo-x','markerAB-foo-y','markerAB-foo-z', and 'markerAB-foo-c' for marker 'foo' to be included in the model. Parameters ---------- filename : str Name of the CSV file to load. ''' import pandas as pd compression = None if filename.endswith('.gz'): compression = 'gzip' df = pd.read_csv(filename, compression=compression).set_index('time').fillna(-1) # make sure the data frame's time index matches our world. assert self.world.dt == pd.Series(df.index).diff().mean() markers = [] for c in df.columns: m = re.match(r'^marker\d\d-(.*)-c$', c) if m: markers.append(m.group(1)) self.channels = self._map_labels_to_channels(markers) cols = [c for c in df.columns if re.match(r'^marker\d\d-.*-[xyzc]$', c)] self.data = df[cols].values.reshape((len(df), len(markers), 4))[start_frame:] self.data[:, :, [1, 2]] = self.data[:, :, [2, 1]] logging.info('%s: loaded marker data %s', filename, self.data.shape) self.process_data() self.create_bodies()
0.003743
def do_rating_by_user(parser, token): """ Retrieves the ``Vote`` cast by a user on a particular object and stores it in a context variable. If the user has not voted, the context variable will be 0. Example usage:: {% rating_by_user user on instance as vote %} """ bits = token.contents.split() if len(bits) != 6: raise template.TemplateSyntaxError("'%s' tag takes exactly five arguments" % bits[0]) if bits[2] != 'on': raise template.TemplateSyntaxError("second argument to '%s' tag must be 'on'" % bits[0]) if bits[4] != 'as': raise template.TemplateSyntaxError("fourth argument to '%s' tag must be 'as'" % bits[0]) return RatingByUserNode(bits[1], bits[3], bits[5])
0.009223
def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = None): """Calculate the Multiscale Entropy of the given time series considering different time-scales of the time series. Args: time_series: Time series for analysis sample_length: Bandwidth or group of points tolerance: Tolerance (default = 0.1*std(time_series)) Returns: Vector containing Multiscale Entropy Reference: [1] http://en.pudn.com/downloads149/sourcecode/math/detail646216_en.html """ if tolerance is None: #we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy() tolerance = 0.1*np.std(time_series) if maxscale is None: maxscale = len(time_series) mse = np.zeros(maxscale) for i in range(maxscale): temp = util_granulate_time_series(time_series, i+1) mse[i] = sample_entropy(temp, sample_length, tolerance)[-1] return mse
0.008991
def _init_metadata(self): """stub""" super(BaseMultiChoiceFileQuestionFormRecord, self)._init_metadata() self._choice_file_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'choice-file'), 'element_label': 'Choice File', 'instructions': 'accepts an Asset Id', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [] }
0.003067
def get_main_activity(self): """ Return the name of the main activity :rtype: string """ x = set() y = set() for i in self.xml: activities_and_aliases = self.xml[i].getElementsByTagName("activity") + \ self.xml[i].getElementsByTagName("activity-alias") for item in activities_and_aliases: # Some applications have more than one MAIN activity. # For example: paid and free content activityEnabled = item.getAttributeNS(NS_ANDROID_URI, "enabled") if activityEnabled is not None and activityEnabled != "" and activityEnabled == "false": continue for sitem in item.getElementsByTagName("action"): val = sitem.getAttributeNS(NS_ANDROID_URI, "name") if val == "android.intent.action.MAIN": x.add(item.getAttributeNS(NS_ANDROID_URI, "name")) for sitem in item.getElementsByTagName("category"): val = sitem.getAttributeNS(NS_ANDROID_URI, "name") if val == "android.intent.category.LAUNCHER": y.add(item.getAttributeNS(NS_ANDROID_URI, "name")) z = x.intersection(y) if len(z) > 0: return self.format_value(z.pop()) return None
0.004202
def server_side(func): """ Decorator to designate an API method applicable only to server-side instances. This allows us to use the same APIRequest and APIResponse subclasses on the client and server sides without too much confusion. """ def inner(*args, **kwargs): if args and hasattr(args[0], 'is_server') and not voltron.debugger: raise ServerSideOnlyException("This method can only be called on a server-side instance") return func(*args, **kwargs) return inner
0.003802
def run_command(cmd): """ Open a child process, and return its exit status and stdout. """ child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE, stdout=subprocess.PIPE) out = [s.decode("utf-8").strip() for s in child.stdout] err = [s.decode("utf-8").strip() for s in child.stderr] w = child.wait() return os.WEXITSTATUS(w), out, err
0.002331
def convert_to_ReSpecTh(self, filename): """Convert ChemKED record to ReSpecTh XML file. This converter uses common information in a ChemKED file to generate a ReSpecTh XML file. Note that some information may be lost, as ChemKED stores some additional attributes. Arguments: filename (`str`): Filename for output ReSpecTh XML file. Example: >>> dataset = ChemKED(yaml_file) >>> dataset.convert_to_ReSpecTh(xml_file) """ root = etree.Element('experiment') file_author = etree.SubElement(root, 'fileAuthor') file_author.text = self.file_authors[0]['name'] # right now ChemKED just uses an integer file version file_version = etree.SubElement(root, 'fileVersion') major_version = etree.SubElement(file_version, 'major') major_version.text = str(self.file_version) minor_version = etree.SubElement(file_version, 'minor') minor_version.text = '0' respecth_version = etree.SubElement(root, 'ReSpecThVersion') major_version = etree.SubElement(respecth_version, 'major') major_version.text = '1' minor_version = etree.SubElement(respecth_version, 'minor') minor_version.text = '0' # Only ignition delay currently supported exp = etree.SubElement(root, 'experimentType') if self.experiment_type == 'ignition delay': exp.text = 'Ignition delay measurement' else: raise NotImplementedError('Only ignition delay type supported for conversion.') reference = etree.SubElement(root, 'bibliographyLink') citation = '' for author in self.reference.authors: citation += author['name'] + ', ' citation += (self.reference.journal + ' (' + str(self.reference.year) + ') ' + str(self.reference.volume) + ':' + self.reference.pages + '. ' + self.reference.detail ) reference.set('preferredKey', citation) reference.set('doi', self.reference.doi) apparatus = etree.SubElement(root, 'apparatus') kind = etree.SubElement(apparatus, 'kind') kind.text = self.apparatus.kind common_properties = etree.SubElement(root, 'commonProperties') # ChemKED objects have no common properties once loaded. Check for properties # among datapoints that tend to be common common = [] composition = self.datapoints[0].composition # Composition type *has* to be the same composition_type = self.datapoints[0].composition_type if not all(dp.composition_type == composition_type for dp in self.datapoints): raise NotImplementedError('Error: ReSpecTh does not support varying composition ' 'type among datapoints.' ) if all([composition == dp.composition for dp in self.datapoints]): # initial composition is common common.append('composition') prop = etree.SubElement(common_properties, 'property') prop.set('name', 'initial composition') for species_name, species in composition.items(): component = etree.SubElement(prop, 'component') species_link = etree.SubElement(component, 'speciesLink') species_link.set('preferredKey', species_name) if species.InChI is not None: species_link.set('InChI', species.InChI) amount = etree.SubElement(component, 'amount') amount.set('units', composition_type) amount.text = str(species.amount.magnitude) # If multiple datapoints present, then find any common properties. If only # one datapoint, then composition should be the only "common" property. 
if len(self.datapoints) > 1: for prop_name in datagroup_properties: attribute = prop_name.replace(' ', '_') quantities = [getattr(dp, attribute, False) for dp in self.datapoints] # All quantities must have the property in question and all the # values must be equal if all(quantities) and quantities.count(quantities[0]) == len(quantities): common.append(prop_name) prop = etree.SubElement(common_properties, 'property') prop.set('description', '') prop.set('name', prop_name) prop.set('units', str(quantities[0].units)) value = etree.SubElement(prop, 'value') value.text = str(quantities[0].magnitude) # Ignition delay can't be common, unless only a single datapoint. datagroup = etree.SubElement(root, 'dataGroup') datagroup.set('id', 'dg1') datagroup_link = etree.SubElement(datagroup, 'dataGroupLink') datagroup_link.set('dataGroupID', '') datagroup_link.set('dataPointID', '') property_idx = {} labels = {'temperature': 'T', 'pressure': 'P', 'ignition delay': 'tau', 'pressure rise': 'dP/dt', } for prop_name in datagroup_properties: attribute = prop_name.replace(' ', '_') # This can't be hasattr because properties are set to the value None # if no value is specified in the file, so the attribute always exists prop_indices = [i for i, dp in enumerate(self.datapoints) if getattr(dp, attribute) is not None ] if prop_name in common or not prop_indices: continue prop = etree.SubElement(datagroup, 'property') prop.set('description', '') prop.set('name', prop_name) units = str(getattr(self.datapoints[prop_indices[0]], attribute).units) prop.set('units', units) idx = 'x{}'.format(len(property_idx) + 1) property_idx[idx] = {'name': prop_name, 'units': units} prop.set('id', idx) prop.set('label', labels[prop_name]) # Need to handle datapoints with possibly different species in the initial composition if 'composition' not in common: for dp in self.datapoints: for species in dp.composition.values(): # Only add new property for species not already considered has_spec = any([species.species_name in d.values() for d in property_idx.values() ]) if not has_spec: prop = etree.SubElement(datagroup, 'property') prop.set('description', '') idx = 'x{}'.format(len(property_idx) + 1) property_idx[idx] = {'name': species.species_name} prop.set('id', idx) prop.set('label', '[' + species.species_name + ']') prop.set('name', 'composition') prop.set('units', self.datapoints[0].composition_type) species_link = etree.SubElement(prop, 'speciesLink') species_link.set('preferredKey', species.species_name) if species.InChI is not None: species_link.set('InChI', species.InChI) for dp in self.datapoints: datapoint = etree.SubElement(datagroup, 'dataPoint') for idx, val in property_idx.items(): # handle regular properties a bit differently than composition if val['name'] in datagroup_properties: value = etree.SubElement(datapoint, idx) quantity = getattr(dp, val['name'].replace(' ', '_')).to(val['units']) value.text = str(quantity.magnitude) else: # composition for item in dp.composition.values(): if item.species_name == val['name']: value = etree.SubElement(datapoint, idx) value.text = str(item.amount.magnitude) # See https://stackoverflow.com/a/16097112 for the None.__ne__ history_types = ['volume_history', 'temperature_history', 'pressure_history', 'piston_position_history', 'light_emission_history', 'OH_emission_history', 'absorption_history'] time_histories = [getattr(dp, p) for dp in self.datapoints for p in history_types] time_histories = list(filter(None.__ne__, time_histories)) if len(self.datapoints) > 1 and 
len(time_histories) > 1: raise NotImplementedError('Error: ReSpecTh files do not support multiple datapoints ' 'with a time history.') elif len(time_histories) > 0: for dg_idx, hist in enumerate(time_histories): if hist.type not in ['volume', 'temperature', 'pressure']: warn('The time-history type {} is not supported by ReSpecTh for ' 'ignition delay experiments'.format(hist.type)) continue datagroup = etree.SubElement(root, 'dataGroup') datagroup.set('id', 'dg{}'.format(dg_idx)) datagroup_link = etree.SubElement(datagroup, 'dataGroupLink') datagroup_link.set('dataGroupID', '') datagroup_link.set('dataPointID', '') # Time history has two properties: time and quantity. prop = etree.SubElement(datagroup, 'property') prop.set('description', '') prop.set('name', 'time') prop.set('units', str(hist.time.units)) time_idx = 'x{}'.format(len(property_idx) + 1) property_idx[time_idx] = {'name': 'time'} prop.set('id', time_idx) prop.set('label', 't') prop = etree.SubElement(datagroup, 'property') prop.set('description', '') prop.set('name', hist.type) prop.set('units', str(hist.quantity.units)) quant_idx = 'x{}'.format(len(property_idx) + 1) property_idx[quant_idx] = {'name': hist.type} prop.set('id', quant_idx) prop.set('label', 'V') for time, quantity in zip(hist.time, hist.quantity): datapoint = etree.SubElement(datagroup, 'dataPoint') value = etree.SubElement(datapoint, time_idx) value.text = str(time.magnitude) value = etree.SubElement(datapoint, quant_idx) value.text = str(quantity.magnitude) ign_types = [getattr(dp, 'ignition_type', False) for dp in self.datapoints] # All datapoints must have the same ignition target and type if all(ign_types) and ign_types.count(ign_types[0]) == len(ign_types): # In ReSpecTh files all datapoints must share ignition type ignition = etree.SubElement(root, 'ignitionType') if ign_types[0]['target'] in ['pressure', 'temperature']: ignition.set('target', ign_types[0]['target'][0].upper()) else: # options left are species ignition.set('target', self.datapoints[0].ignition_type['target']) if ign_types[0]['type'] == 'd/dt max extrapolated': ignition.set('type', 'baseline max intercept from d/dt') else: ignition.set('type', self.datapoints[0].ignition_type['type']) else: raise NotImplementedError('Different ignition targets or types for multiple datapoints ' 'are not supported in ReSpecTh.') et = etree.ElementTree(root) et.write(filename, encoding='utf-8', xml_declaration=True) # now do a "pretty" rewrite xml = minidom.parse(filename) xml_string = xml.toprettyxml(indent=' ') with open(filename, 'w') as f: f.write(xml_string) print('Converted to ' + filename)
0.001928
def get_position(self): """ 获取持仓 :return: """ xq_positions = self._get_position() balance = self.get_balance()[0] position_list = [] for pos in xq_positions: volume = pos["weight"] * balance["asset_balance"] / 100 position_list.append( { "cost_price": volume / 100, "current_amount": 100, "enable_amount": 100, "income_balance": 0, "keep_cost_price": volume / 100, "last_price": volume / 100, "market_value": volume, "position_str": "random", "stock_code": pos["stock_symbol"], "stock_name": pos["stock_name"], } ) return position_list
0.002286
def distance_diagonal_law(matrix, positions=None): """Compute a distance law trend using the contact averages of equal distances. Specific positions can be supplied if needed. """ n = min(matrix.shape) if positions is None: return np.array([np.average(np.diagonal(matrix, j)) for j in range(n)]) else: contigs = positions_to_contigs(positions) def is_intra(i, j): return contigs[i] == contigs[j] max_intra_distance = max((len(contigs == u) for u in set(contigs))) intra_contacts = [] inter_contacts = [np.average(np.diagonal(matrix, j)) for j in range(max_intra_distance, n)] for j in range(max_intra_distance): D = np.diagonal(matrix, j) for i in range(len(D)): diagonal_intra = [] if is_intra(i, j): diagonal_intra.append(D[i]) # else: # diagonal_inter.append(D[i]) # inter_contacts.append(np.average(np.array(diagonal_inter))) intra_contacts.append(np.average(np.array(diagonal_intra))) intra_contacts.extend(inter_contacts) return [positions, np.array(intra_contacts)]
0.001705
def add_data_flow_view_for_model(self, data_flow_m, parent_state_m): """Creates a `DataFlowView` and adds it to the canvas The method creates a `DataFlowView` from the given `DataFlowModel` `data_flow_m` and adds it to the canvas. :param DataFlowModel data_flow_m: The data flow for which a view is to be created :param ContainerStateModel parent_state_m: The parent `StateModel` of the data flow """ parent_state_v = self.canvas.get_view_for_model(parent_state_m) hierarchy_level = parent_state_v.hierarchy_level data_flow_v = DataFlowView(data_flow_m, hierarchy_level) # Draw data flow above NameView but beneath all other state elements self.canvas.add(data_flow_v, parent_state_v, index=1) self._connect_data_flow_to_ports(data_flow_m, data_flow_v, parent_state_m)
0.00813
def parse_case_snake_to_camel(snake, upper_first=True): """ Convert a string from snake_case to CamelCase. :param str snake: The snake_case string to convert. :param bool upper_first: Whether or not to capitalize the first character of the string. :return: The CamelCase version of string. :rtype: str """ snake = snake.split('_') first_part = snake[0] if upper_first: first_part = first_part.title() return first_part + ''.join(word.title() for word in snake[1:])
0.029167
def mirror_folder(source, target, delete_orphans=True, recurse=True, ignore_list=None, _level=0, logger=None): """Mirrors a folder *source* into a target folder *target*.""" logger = logger or logging.getLogger(__name__) def expand_tree(p): tree = [] for node in Path(p).walk(): tree.append(node) return tree report = { 'deleted': set([]), 'overwritten': set([]), 'new': set([]) } d1 = source d2 = target logger.debug("Mirroring %s ==> %s" % (d1, d2)) if not d2.exists(): d2.makedirs() compare = filecmp.dircmp(d1, d2) # Expand the ignore list to be full paths if ignore_list is None: ignore_list = [] else: ignore_list = [Path(d2 / i).normpath() for i in ignore_list] ignore_files = [f for f in ignore_list if f.isfile()] ignore_list.extend(expand_path(ignore_files, root_path=d2)) # Delete orphan files/folders in the target folder if delete_orphans: for item in compare.right_only: fullpath = Path(d2 / item).normpath() if fullpath in ignore_list: logger.debug( "%s ==> Ignored - path is in ignore list" % fullpath) continue if fullpath.isdir() and recurse: logger.debug( "%s ==> Deleted - doesn't exist in source" % fullpath) report['deleted'].add(fullpath) if len(fullpath.listdir()) > 0: report['deleted'].update(expand_tree(fullpath)) # noinspection PyArgumentList fullpath.rmtree() elif fullpath.isfile(): logger.debug( "%s ==> Deleted - doesn't exist in source" % fullpath) report['deleted'].add(fullpath) fullpath.remove() # Copy new files and folders from the source to the target for item in compare.left_only: fullpath = d1 / item if fullpath.isdir() and recurse: logger.debug( "Copying new directory %s ==> %s" % (fullpath, (d2 / item))) fullpath.copytree(d2 / item) report['new'].add(d2 / item) report['new'].update(expand_tree(d2 / item)) elif fullpath.isfile(): logger.debug("Copying new file %s ==> %s" % (fullpath, (d2 / item))) fullpath.copy2(d2) report['new'].add(d2 / item) # Copy modified files in the source to the target, overwriting the target file for item in compare.diff_files: logger.debug( "Overwriting existing file %s ==> %s" % ((d1 / item), (d2 / item))) (d1 / item).copy2(d2) report['overwritten'].add(d2 / item) # Recurse into subfolders that exist in both the source and target if recurse: for item in compare.common_dirs: rpt = mirror_folder(d1 / item, d2 / item, delete_orphans, _level=_level + 1) report['new'].update(rpt['new']) report['overwritten'].update(rpt['overwritten']) report['deleted'].update(rpt['deleted']) return report
0.001571
def partitionSum(M,I,T,step=None): """ INPUT PARAMETERS: M: HITRAN molecule number (required) I: HITRAN isotopologue number (required) T: temperature conditions (required) step: step to calculate temperatures (optional) OUTPUT PARAMETERS: TT: list of temperatures (present only if T is a list) PartSum: partition sums calculated on a list of temperatures --- DESCRIPTION: Calculate range of partition sums at different temperatures. This function uses a python implementation of TIPS-2011 code: Reference: A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman. Total internal partition sums to support planetary remote sensing. Icarus, Volume 215, Issue 1, September 2011, Pages 391–400 http://dx.doi.org/10.1016/j.icarus.2011.06.004 Output depends on a structure of input parameter T so that: 1) If T is a scalar/list and step IS NOT provided, then calculate partition sums over each value of T. 2) If T is a list and step parameter IS provided, then calculate partition sums between T[0] and T[1] with a given step. --- EXAMPLE OF USAGE: PartSum = partitionSum(1,1,[296,1000]) TT,PartSum = partitionSum(1,1,[296,1000],step=0.1) --- """ # partitionSum if not step: if type(T) not in set([list,tuple]): return BD_TIPS_2011_PYTHON(M,I,T)[1] else: return [BD_TIPS_2011_PYTHON(M,I,temp)[1] for temp in T] else: #n = (T[1]-T[0])/step #TT = linspace(T[0],T[1],n) TT = arange(T[0],T[1],step) return TT,array([BD_TIPS_2011_PYTHON(M,I,temp)[1] for temp in TT])
0.014714
def separateKeywords(kwArgsDict): """ Look through the keywords passed and separate the special ones we have added from the legal/standard ones. Return both sets as two dicts (in a tuple), as (standardKws, ourKws) """ standardKws = {} ourKws = {} for k in kwArgsDict: if k in STANDARD_KEYS: standardKws[k]=kwArgsDict[k] else: ourKws[k]=kwArgsDict[k] return (standardKws, ourKws)
0.006593
def guess_lexer(_text, **options): """Guess a lexer by strong distinctions in the text (eg, shebang).""" # try to get a vim modeline first ft = get_filetype_from_buffer(_text) if ft is not None: try: return get_lexer_by_name(ft, **options) except ClassNotFound: pass best_lexer = [0.0, None] for lexer in _iter_lexerclasses(): rv = lexer.analyse_text(_text) if rv == 1.0: return lexer(**options) if rv > best_lexer[0]: best_lexer[:] = (rv, lexer) if not best_lexer[0] or best_lexer[1] is None: raise ClassNotFound('no lexer matching the text found') return best_lexer[1](**options)
0.001404
def get_attributes(self, domain_or_name, item_name, attribute_names=None, consistent_read=False, item=None): """ Retrieve attributes for a given item in a domain. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type item_name: string :param item_name: The name of the item whose attributes are being retrieved. :type attribute_names: string or list of strings :param attribute_names: An attribute name or list of attribute names. This parameter is optional. If not supplied, all attributes will be retrieved for the item. :type consistent_read: bool :param consistent_read: When set to true, ensures that the most recent data is returned. :type item: :class:`boto.sdb.item.Item` :keyword item: Instead of instantiating a new Item object, you may specify one to update. :rtype: :class:`boto.sdb.item.Item` :return: An Item with the requested attribute name/values set on it """ domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName' : domain_name, 'ItemName' : item_name} if consistent_read: params['ConsistentRead'] = 'true' if attribute_names: if not isinstance(attribute_names, list): attribute_names = [attribute_names] self.build_list_params(params, attribute_names, 'AttributeName') response = self.make_request('GetAttributes', params) body = response.read() if response.status == 200: if item == None: item = self.item_cls(domain, item_name) h = handler.XmlHandler(item, self) xml.sax.parseString(body, h) return item else: raise SDBResponseError(response.status, response.reason, body)
0.004342
def converter(f): """Decorator to convert value from dbus type to python type.""" @wraps(f) def wrapper(*args, **kwds): return convert(f(*args, **kwds)) return wrapper
0.005236
def mnn_correct(*datas, var_index=None, var_subset=None, batch_key='batch', index_unique='-', batch_categories=None, k=20, sigma=1., cos_norm_in=True, cos_norm_out=True, svd_dim=None, var_adj=True, compute_angle=False, mnn_order=None, svd_mode='rsvd', do_concatenate=True, save_raw=False, n_jobs=None, **kwargs): """Correct batch effects by matching mutual nearest neighbors [Haghverdi18]_ [Kang18]_. This uses the implementation of `mnnpy <https://github.com/chriscainx/mnnpy>`__ [Kang18]_. Depending on `do_concatenate`, returns matrices or `AnnData` objects in the original order containing corrected expression values or a concatenated matrix or AnnData object. Be reminded that it is not advised to use the corrected data matrices for differential expression testing. More information and bug reports `here <https://github.com/chriscainx/mnnpy>`__. Parameters ---------- datas : `numpy.ndarray` or :class:`~anndata.AnnData` Expression matrices or AnnData objects. Matrices should be shaped like n_obs * n_vars (n_cell * n_gene) and have consistent number of columns. AnnData objects should have same number of variables. var_index : `list` or `None`, optional (default: None) The index (list of str) of vars (genes). Necessary when using only a subset of vars to perform MNN correction, and should be supplied with var_subset. When datas are AnnData objects, var_index is ignored. var_subset : `list` or `None`, optional (default: None) The subset of vars (list of str) to be used when performing MNN correction. Typically, a list of highly variable genes (HVGs). When set to None, uses all vars. batch_key : `str`, optional (default: 'batch') The batch_key for AnnData.concatenate. Only valid when do_concatenate and supplying AnnData objects. index_unique : `str`, optional (default: '-') The index_unique for AnnData.concatenate. Only valid when do_concatenate and supplying AnnData objects. batch_categories : `list` or `None`, optional (default: None) The batch_categories for AnnData.concatenate. Only valid when do_concatenate and supplying AnnData objects. k : `int`, optional (default: 20) Number of mutual nearest neighbors. sigma : `float`, optional (default: 1) The bandwidth of the Gaussian smoothing kernel used to compute the correction vectors. Default is 1. cos_norm_in : `bool`, optional (default: True) Whether cosine normalization should be performed on the input data prior to calculating distances between cells. cos_norm_out : `bool`, optional (default: True) Whether cosine normalization should be performed prior to computing corrected expression values. svd_dim : `int` or `None`, optional (default: None) The number of dimensions to use for summarizing biological substructure within each batch. If None, biological components will not be removed from the correction vectors. var_adj : `bool`, optional (default: True) Whether to adjust variance of the correction vectors. Note this step takes most computing time. compute_angle : `bool`, optional (default: False) Whether to compute the angle between each cell’s correction vector and the biological subspace of the reference batch. mnn_order : `list` or `None`, optional (default: None) The order in which batches are to be corrected. When set to None, datas are corrected sequentially. svd_mode : `str`, optional (default: 'rsvd') One of 'svd', 'rsvd', and 'irlb'. 'svd' computes SVD using a non-randomized SVD-via-ID algorithm, while 'rsvd' uses a randomized version. 
'irlb' performes truncated SVD by implicitly restarted Lanczos bidiagonalization (forked from https://github.com/airysen/irlbpy). do_concatenate : `bool`, optional (default: True) Whether to concatenate the corrected matrices or AnnData objects. Default is True. save_raw : `bool`, optional (default: False) Whether to save the original expression data in the .raw attribute of AnnData objects. n_jobs : `int` or `None`, optional (default: None) The number of jobs. When set to None, automatically uses the number of cores. kwargs : `dict` or `None`, optional (default: None) optional keyword arguments for irlb. Returns ------- **datas** : :class:`~numpy.ndarray` or :class:`~anndata.AnnData` Corrected matrix/matrices or AnnData object/objects, depending on the input type and `do_concatenate`. **mnn_list** : ``List[pandas.DataFrame]`` A list containing MNN pairing information as DataFrames in each iteration step. **angle_list** : ``List[Tuple[Optional[float], int]]`` or ``None`` A list containing angles of each batch. """ try: from mnnpy import mnn_correct as mnn_cor n_jobs = settings.n_jobs if n_jobs is None else n_jobs datas, mnn_list, angle_list = mnn_cor( *datas, var_index=var_index, var_subset=var_subset, batch_key=batch_key, index_unique=index_unique, batch_categories=batch_categories, k=k, sigma=sigma, cos_norm_in=cos_norm_in, cos_norm_out=cos_norm_out, svd_dim=svd_dim, var_adj=var_adj, compute_angle=compute_angle, mnn_order=mnn_order, svd_mode=svd_mode, do_concatenate=do_concatenate, save_raw=save_raw, n_jobs=n_jobs, **kwargs) return datas, mnn_list, angle_list except ImportError: raise ImportError( 'Please install the package mnnpy ' '(https://github.com/chriscainx/mnnpy). ')
0.002924
def _build_parser(): """ Return a command-line arguments parser. """ parser = argparse.ArgumentParser(description='Search you Azure AD contacts from mutt or the command-line.') parser.add_argument('-c', '--config', help='Specify alternative configuration file.', metavar="FILE") parser.add_argument('-v', '--verbose', dest="log_level", action='store_const', const=logging.INFO, help='Be verbose about what is going on (stderr).') parser.add_argument('-V', '--version', action='version', version='%%(prog)s %s' % pkg_resources.get_distribution("aadbook").version, help="Print version and exit") parser.add_argument('-d', '--debug', dest="log_level", action='store_const', const=logging.DEBUG, help='Output debug info (stderr).') parser.set_defaults(config=CONFIG_FILE, log_level=logging.ERROR) subparsers = parser.add_subparsers() parser_config_template = subparsers.add_parser('config-template', description='Prints a template for .aadbookrc to stdout') parser_config_template.set_defaults(func=do_config_template) parser_reload = subparsers.add_parser('authenticate', description='Azure AD authentication.') parser_reload.set_defaults(func=do_authenticate) parser_reload = subparsers.add_parser('reload', description='Force reload of the cache.') parser_reload.set_defaults(func=do_reload) parser_query = subparsers.add_parser('query', description='Search contacts using query (regex).') parser_query.add_argument('query', help='regex to search for.', metavar='QUERY') parser_query.set_defaults(func=do_query) return parser
0.006821
def watchdogctl(ctx, kill=False, verbose=True): """Control / check a running Sphinx autobuild process.""" tries = 40 if kill else 0 cmd = 'lsof -i TCP:{} -s TCP:LISTEN -S -Fp 2>/dev/null'.format(ctx.rituals.docs.watchdog.port) pidno = 0 pidinfo = capture(cmd, ignore_failures=True) while pidinfo: pidline = next(filter(None, [re.match(r'^p(\d+)$', x) for x in pidinfo.splitlines()])) if not pidline: raise ValueError("Standard lsof output expected (got {!r})".format(pidinfo)) pidno = int(pidline.group(1), 10) if verbose: ctx.run("ps uw {}".format(pidno), echo=False) verbose = False tries -= 1 if tries <= 0: break else: try: os.kill(pidno, 0) #except ProcessLookupError: # XXX Python3 only # break except OSError as exc: # Python2 has no ProcessLookupError if exc.errno == 3: break raise else: notify.info("Killing PID {}".format(pidno)) ctx.run("kill {}".format(pidno), echo=False) time.sleep(.25) pidinfo = capture(cmd, ignore_failures=True) return pidno
0.003906
def is_bool(tg_type, inc_array=False): """Tells if the given tango type is boolean :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :param inc_array: (optional, default is False) determines if include array in the list of checked types :type inc_array: :py:obj:`bool` :return: True if the given tango type is boolean or False otherwise :rtype: :py:obj:`bool` """ global _scalar_bool_types, _array_bool_types if tg_type in _scalar_bool_types: return True if not inc_array: return False return tg_type in _array_bool_types
0.001595
def random_permutation(iterable, r=None): """Return a random *r* length permutation of the elements in *iterable*. If *r* is not specified or is ``None``, then *r* defaults to the length of *iterable*. >>> random_permutation(range(5)) # doctest:+SKIP (3, 4, 0, 1, 2) This is equivalent to taking a random selection from ``itertools.permutations(iterable, r)``. """ pool = tuple(iterable) r = len(pool) if r is None else r return tuple(sample(pool, r))
0.00198
def verify_edx_resources(): """ Ensure that all necessary resources to render the view are present. """ required_methods = { 'ProgramDataExtender': ProgramDataExtender, } for method in required_methods: if required_methods[method] is None: raise NotConnectedToOpenEdX( _("The following method from the Open edX platform is necessary for this view but isn't available.") + "\nUnavailable: {method}".format(method=method) )
0.003861
def apply_filter(objs, selector, mode): '''Apply selector to transform each object in objs. This operates in-place on objs. Empty objects are removed from the list. Args: mode: either KEEP (to keep selected items & their ancestors) or DELETE (to delete selected items and their children). ''' indices_to_delete = [] presumption = DELETE if mode == KEEP else KEEP for i, obj in enumerate(objs): timer.log('Applying selector: %s' % selector) marks = {k: mode for k in selector_to_ids(selector, obj, mode)} timer.log('done applying selector') timer.log('filtering object...') filter_object(obj, marks, presumption=presumption) timer.log('done filtering') if obj is None: indices_to_delete.append(i) for index in reversed(indices_to_delete): del objs[index]
0.001129
def generic(magfile="", dir_path=".", meas_file="measurements.txt", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", user="", labfield=0, labfield_phi=0, labfield_theta=0, experiment="", cooling_times_list=[], sample_nc=[1, 0], site_nc=[1, 0], location="unknown", lat="", lon="", noave=False, input_dir_path=""): """ Convert generic file to MagIC file(s) Parameters ---------- mag_file : str input file name dir_path : str output directory, default "." meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" user : str user name, default "" labfield : float dc lab field (in micro tesla) labfield_phi : float declination 0-360 labfield_theta : float inclination -90 - 90 experiment : str experiment type, see info below cooling_times_list : list cooling times in [K/minutes] seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70 sample_nc : list sample naming convention, default [1, 0], see info below site_nc : list site naming convention, default [1, 0], see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) input_dir_path : str input file directory IF different from dir_path, default "" Info -------- Experiment type: Demag: AF and/or Thermal PI: paleointenisty thermal experiment (ZI/IZ/IZZI) ATRM n: ATRM in n positions (n=6) AARM n: AARM in n positions CR: cooling rate experiment The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional) where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps. XXX.00 is optional zerofield baseline. XXX.70 is alteration check. if using this type, you must also provide cooling rates in [K/minutes] in cooling_times_list seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70 No need to specify the cooling rate for the zerofield But users need to make sure that there are no duplicate meaurements in the file NLT: non-linear-TRM experiment Specimen-sample naming convention: X determines which kind of convention (initial characters, terminal characters, or delimiter Y determines how many characters to remove to go from specimen --> sample OR which delimiter to use X=0 Y=n: specimen is distinguished from sample by n initial characters. (example: generic(samp_nc=[0, 4], ...) if n=4 then and specimen = mgf13a then sample = mgf13) X=1 Y=n: specimen is distiguished from sample by n terminate characters. (example: generic(samp_nc=[1, 1], ...)) if n=1 then and specimen = mgf13a then sample = mgf13) X=2 Y=c: specimen is distinguishing from sample by a delimiter. (example: generic([2, "-"])) if c=- then and specimen = mgf13-a then sample = mgf13) default: sample is the same as specimen name Sample-site naming convention: X determines which kind of convention (initial characters, terminal characters, or delimiter Y determines how many characters to remove to go from sample --> site OR which delimiter to use X=0 Y=n: sample is distiguished from site by n initial characters. 
(example: generic(site_nc=[0, 3])) if n=3 then and sample = mgf13 then sample = mgf) X=1 Y=n: sample is distiguished from site by n terminate characters. (example: generic(site_nc=[1, 2])) if n=2 and sample = mgf13 then site = mgf) X=2 Y=c: specimen is distiguishing from sample by a delimiter. (example: generic(site_nc=[2, "-"])) if c='-' and sample = 'mgf-13' then site = mgf) default: site name is the same as sample name """ # -------------------------------------- # functions # -------------------------------------- def sort_magic_file(path, ignore_lines_n, sort_by_this_name): ''' reads a file with headers. Each line is stored as a dictionary following the headers. Lines are sorted in DATA by the sort_by_this_name header DATA[sort_by_this_name]=[dictionary1,dictionary2,...] ''' DATA = {} fin = open(path, 'r') # ignore first lines for i in range(ignore_lines_n): fin.readline() # header line = fin.readline() header = line.strip('\n').split('\t') # print header for line in fin.readlines(): if line[0] == "#": continue tmp_data = {} tmp_line = line.strip('\n').split('\t') # print tmp_line for i in range(len(tmp_line)): if i >= len(header): continue tmp_data[header[i]] = tmp_line[i] DATA[tmp_data[sort_by_this_name]] = tmp_data fin.close() return(DATA) def read_generic_file(path, average_replicates): ''' reads a generic file format. If average_replicates==True average replicate measurements. Rrturns a Data dictionary with measurements line sorted by specimen Data[specimen_name][dict1,dict2,...] ''' Data = {} Fin = open(path, 'r') header = Fin.readline().strip('\n').split('\t') duplicates = [] for line in Fin.readlines(): tmp_data = {} # found_duplicate=False l = line.strip('\n').split('\t') for i in range(min(len(header), len(l))): tmp_data[header[i]] = l[i] specimen = tmp_data['specimen'] if specimen not in list(Data.keys()): Data[specimen] = [] Data[specimen].append(tmp_data) Fin.close() # search from duplicates for specimen in list(Data.keys()): x = len(Data[specimen])-1 new_data = [] duplicates = [] for i in range(1, x): while i < len(Data[specimen]) and Data[specimen][i]['treatment'] == Data[specimen][i-1]['treatment'] and Data[specimen][i]['treatment_type'] == Data[specimen][i-1]['treatment_type']: duplicates.append(Data[specimen][i]) del(Data[specimen][i]) if len(duplicates) > 0: if average_replicates: duplicates.append(Data[specimen][i-1]) Data[specimen][i-1] = average_duplicates(duplicates) print("-W- WARNING: averaging %i duplicates for specimen %s treatmant %s" % (len(duplicates), specimen, duplicates[-1]['treatment'])) duplicates = [] else: Data[specimen][i-1] = duplicates[-1] print("-W- WARNING: found %i duplicates for specimen %s treatmant %s. Taking the last measurement only" % (len(duplicates), specimen, duplicates[-1]['treatment'])) duplicates = [] if i == len(Data[specimen])-1: break return(Data) def average_duplicates(duplicates): ''' avarage replicate measurements. 
''' carts_s, carts_g, carts_t = [], [], [] for rec in duplicates: moment = float(rec['moment']) if 'dec_s' in list(rec.keys()) and 'inc_s' in list(rec.keys()): if rec['dec_s'] != "" and rec['inc_s'] != "": dec_s = float(rec['dec_s']) inc_s = float(rec['inc_s']) cart_s = pmag.dir2cart([dec_s, inc_s, moment]) carts_s.append(cart_s) if 'dec_g' in list(rec.keys()) and 'inc_g' in list(rec.keys()): if rec['dec_g'] != "" and rec['inc_g'] != "": dec_g = float(rec['dec_g']) inc_g = float(rec['inc_g']) cart_g = pmag.dir2cart([dec_g, inc_g, moment]) carts_g.append(cart_g) if 'dec_t' in list(rec.keys()) and 'inc_t' in list(rec.keys()): if rec['dec_t'] != "" and rec['inc_t'] != "": dec_t = float(rec['dec_t']) inc_t = float(rec['inc_t']) cart_t = pmag.dir2cart([dec_t, inc_t, moment]) carts_t.append(cart_t) if len(carts_s) > 0: carts = scipy.array(carts_s) x_mean = scipy.mean(carts[:, 0]) y_mean = scipy.mean(carts[:, 1]) z_mean = scipy.mean(carts[:, 2]) mean_dir = pmag.cart2dir([x_mean, y_mean, z_mean]) mean_dec_s = "%.2f" % mean_dir[0] mean_inc_s = "%.2f" % mean_dir[1] mean_moment = "%10.3e" % mean_dir[2] else: mean_dec_s, mean_inc_s = "", "" if len(carts_g) > 0: carts = scipy.array(carts_g) x_mean = scipy.mean(carts[:, 0]) y_mean = scipy.mean(carts[:, 1]) z_mean = scipy.mean(carts[:, 2]) mean_dir = pmag.cart2dir([x_mean, y_mean, z_mean]) mean_dec_g = "%.2f" % mean_dir[0] mean_inc_g = "%.2f" % mean_dir[1] mean_moment = "%10.3e" % mean_dir[2] else: mean_dec_g, mean_inc_g = "", "" if len(carts_t) > 0: carts = scipy.array(carts_t) x_mean = scipy.mean(carts[:, 0]) y_mean = scipy.mean(carts[:, 1]) z_mean = scipy.mean(carts[:, 2]) mean_dir = pmag.cart2dir([x_mean, y_mean, z_mean]) mean_dec_t = "%.2f" % mean_dir[0] mean_inc_t = "%.2f" % mean_dir[1] mean_moment = "%10.3e" % mean_dir[2] else: mean_dec_t, mean_inc_t = "", "" meanrec = {} for key in list(duplicates[0].keys()): if key in ['dec_s', 'inc_s', 'dec_g', 'inc_g', 'dec_t', 'inc_t', 'moment']: continue else: meanrec[key] = duplicates[0][key] meanrec['dec_s'] = mean_dec_s meanrec['dec_g'] = mean_dec_g meanrec['dec_t'] = mean_dec_t meanrec['inc_s'] = mean_inc_s meanrec['inc_g'] = mean_inc_g meanrec['inc_t'] = mean_inc_t meanrec['moment'] = mean_moment return meanrec def get_upper_level_name(name, nc): ''' get sample/site name from specimen/sample using naming convention ''' if float(nc[0]) == 0: if float(nc[1]) != 0: number_of_char = int(nc[1]) high_name = name[:number_of_char] else: high_name = name elif float(nc[0]) == 1: if float(nc[1]) != 0: number_of_char = int(nc[1])*-1 high_name = name[:number_of_char] else: high_name = name elif float(nc[0]) == 2: d = str(nc[1]) name_splitted = name.split(d) if len(name_splitted) == 1: high_name = name_splitted[0] else: high_name = d.join(name_splitted[:-1]) else: high_name = name return high_name def merge_pmag_recs(old_recs): recs = {} recs = copy.deepcopy(old_recs) headers = [] for rec in recs: for key in list(rec.keys()): if key not in headers: headers.append(key) for rec in recs: for header in headers: if header not in list(rec.keys()): rec[header] = "" return recs # -------------------------------------- # start conversion from generic # -------------------------------------- # format and validate variables input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path) labfield = float(labfield) labfield_phi = float(labfield_phi) labfield_theta = float(labfield_theta) if magfile: magfile = pmag.resolve_file_name(magfile, input_dir_path) try: input = open(magfile, 'r') except: print("bad mag file:", 
magfile) return False, "bad mag file" else: print("mag_file field is required option") return False, "mag_file field is required option" if not experiment: print("-E- Must provide experiment. Please provide experiment type of: Demag, PI, ATRM n (n of positions), CR (see below for format), NLT") return False, "Must provide experiment. Please provide experiment type of: Demag, PI, ATRM n (n of positions), CR (see help for format), NLT" if 'ATRM' in experiment: try: experiment, atrm_n_pos = experiment.split() atrm_n_pos = int(atrm_n_pos) except: experiment = 'ATRM' atrm_n_pos = 6 if 'AARM' in experiment: try: experiment, aarm_n_pos = experiment.split() aarm_n_pos = int(aarm_n_pos) except: experiment = 'AARM' aarm_n_pos = 6 if experiment == 'CR': if command_line: ind = sys.argv.index("CR") cooling_times = sys.argv[ind+1] cooling_times_list = cooling_times.split(',') noave = True # if not command line, cooling_times_list is already set # -------------------------------------- # read data from generic file # -------------------------------------- mag_data = read_generic_file(magfile, not noave) # -------------------------------------- # for each specimen get the data, and translate it to MagIC format # -------------------------------------- MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], [] specimens_list = sorted(mag_data.keys()) for specimen in specimens_list: measurement_running_number = 0 this_specimen_treatments = [] # a list of all treatments MeasRecs_this_specimen = [] LP_this_specimen = [] # a list of all lab protocols IZ, ZI = 0, 0 # counter for IZ and ZI steps for meas_line in mag_data[specimen]: # ------------------ # trivial MeasRec data # ------------------ MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {} specimen = meas_line['specimen'] sample = get_upper_level_name(specimen, sample_nc) site = get_upper_level_name(sample, site_nc) sample_method_codes = "" azimuth, dip, DipDir, Dip = "", "", "", "" MeasRec['citations'] = "This study" MeasRec["specimen"] = specimen MeasRec['analysts'] = user MeasRec["instrument_codes"] = "" MeasRec["quality"] = 'g' MeasRec["treat_step_num"] = "%i" % measurement_running_number MeasRec["magn_moment"] = '%10.3e' % ( float(meas_line["moment"])*1e-3) # in Am^2 MeasRec["meas_temp"] = '273.' 
# room temp in kelvin # ------------------ # decode treatments from treatment column in the generic file # ------------------ treatment = [] treatment_code = str(meas_line['treatment']).split(".") treatment.append(float(treatment_code[0])) if len(treatment_code) == 1: treatment.append(0) else: treatment.append(float(treatment_code[1])) # ------------------ # lab field direction # ------------------ if experiment in ['PI', 'NLT', 'CR']: if float(treatment[1]) in [0., 3.]: # zerofield step or tail check MeasRec["treat_dc_field"] = "0" MeasRec["treat_dc_field_phi"] = "0" MeasRec["treat_dc_field_theta"] = "0" elif not labfield: print( "-W- WARNING: labfield (-dc) is a required argument for this experiment type") return False, "labfield (-dc) is a required argument for this experiment type" else: MeasRec["treat_dc_field"] = '%8.3e' % (float(labfield)) MeasRec["treat_dc_field_phi"] = "%.2f" % ( float(labfield_phi)) MeasRec["treat_dc_field_theta"] = "%.2f" % ( float(labfield_theta)) else: MeasRec["treat_dc_field"] = "" MeasRec["treat_dc_field_phi"] = "" MeasRec["treat_dc_field_theta"] = "" # ------------------ # treatment temperature/peak field # ------------------ if experiment == 'Demag': if meas_line['treatment_type'] == 'A': MeasRec['treat_temp'] = "273." MeasRec["treat_ac_field"] = "%.3e" % (treatment[0]*1e-3) elif meas_line['treatment_type'] == 'N': MeasRec['treat_temp'] = "273." MeasRec["treat_ac_field"] = "" else: MeasRec['treat_temp'] = "%.2f" % (treatment[0]+273.) MeasRec["treat_ac_field"] = "" else: MeasRec['treat_temp'] = "%.2f" % (treatment[0]+273.) MeasRec["treat_ac_field"] = "" # --------------------- # Lab treatment # Lab protocol # --------------------- # --------------------- # Lab treatment and lab protocoal for NRM: # --------------------- if float(meas_line['treatment']) == 0: LT = "LT-NO" LP = "" # will be filled later after finishing reading all measurements line # --------------------- # Lab treatment and lab protocoal for paleointensity experiment # --------------------- elif experiment == 'PI': LP = "LP-PI-TRM" if treatment[1] == 0: LT = "LT-T-Z" elif treatment[1] == 1 or treatment[1] == 10: # infield LT = "LT-T-I" elif treatment[1] == 2 or treatment[1] == 20: # pTRM check LT = "LT-PTRM-I" LP = LP+":"+"LP-PI-ALT-PTRM" elif treatment[1] == 3 or treatment[1] == 30: # Tail check LT = "LT-PTRM-MD" LP = LP+":"+"LP-PI-BT-MD" elif treatment[1] == 4 or treatment[1] == 40: # Additivity check LT = "LT-PTRM-AC" LP = LP+":"+"LP-PI-BT-MD" else: print("-E- unknown measurement code specimen %s treatmemt %s" % (meas_line['specimen'], meas_line['treatment'])) MeasRec = {} continue # save all treatment in a list # we will use this later to distinguidh between ZI / IZ / and IZZI this_specimen_treatments.append(float(meas_line['treatment'])) if LT == "LT-T-Z": if float(treatment[0]+0.1) in this_specimen_treatments: LP = LP+":"+"LP-PI-IZ" if LT == "LT-T-I": if float(treatment[0]+0.0) in this_specimen_treatments: LP = LP+":"+"LP-PI-ZI" # --------------------- # Lab treatment and lab protocoal for demag experiment # --------------------- elif "Demag" in experiment: if meas_line['treatment_type'] == 'A': LT = "LT-AF-Z" LP = "LP-DIR-AF" else: LT = "LT-T-Z" LP = "LP-DIR-T" # --------------------- # Lab treatment and lab protocoal for ATRM experiment # --------------------- elif experiment in ['ATRM', 'AARM']: if experiment == 'ATRM': LP = "LP-AN-TRM" n_pos = atrm_n_pos if n_pos != 6: print( "the program does not support ATRM in %i position." 
% n_pos) continue if experiment == 'AARM': LP = "LP-AN-ARM" n_pos = aarm_n_pos if n_pos != 6: print( "the program does not support AARM in %i position." % n_pos) continue if treatment[1] == 0: if experiment == 'ATRM': LT = "LT-T-Z" MeasRec['treat_temp'] = "%.2f" % (treatment[0]+273.) MeasRec["treat_ac_field"] = "" else: LT = "LT-AF-Z" MeasRec['treat_temp'] = "273." MeasRec["treat_ac_field"] = "%.3e" % ( treatment[0]*1e-3) MeasRec["treat_dc_field"] = '0' MeasRec["treat_dc_field_phi"] = '0' MeasRec["treat_dc_field_theta"] = '0' else: if experiment == 'ATRM': # alteration check as final measurement if float(treatment[1]) == 70 or float(treatment[1]) == 7: LT = "LT-PTRM-I" else: LT = "LT-T-I" else: LT = "LT-AF-I" MeasRec["treat_dc_field"] = '%8.3e' % (float(labfield)) # find the direction of the lab field in two ways: # (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z) tdec = [0, 90, 0, 180, 270, 0, 0, 90, 0] tinc = [0, 0, 90, 0, 0, -90, 0, 0, 90] if treatment[1] < 10: ipos_code = int(treatment[1]) - 1 else: ipos_code = int(treatment[1] / 10) - 1 # (2) using the magnetization if meas_line["dec_s"] != "": DEC = float(meas_line["dec_s"]) INC = float(meas_line["inc_s"]) elif meas_line["dec_g"] != "": DEC = float(meas_line["dec_g"]) INC = float(meas_line["inc_g"]) elif meas_line["dec_t"] != "": DEC = float(meas_line["dec_t"]) INC = float(meas_line["inc_t"]) if DEC < 0 and DEC > -359: DEC = 360.+DEC if INC < 45 and INC > -45: if DEC > 315 or DEC < 45: ipos_guess = 0 if DEC > 45 and DEC < 135: ipos_guess = 1 if DEC > 135 and DEC < 225: ipos_guess = 3 if DEC > 225 and DEC < 315: ipos_guess = 4 else: if INC > 45: ipos_guess = 2 if INC < -45: ipos_guess = 5 # prefer the guess over the code ipos = ipos_guess # check it if treatment[1] != 7 and treatment[1] != 70: if ipos_guess != ipos_code: print("-W- WARNING: check specimen %s step %s, anistropy measurements, coding does not match the direction of the lab field" % ( specimen, meas_line['treatment'])) MeasRec["treat_dc_field_phi"] = '%7.1f' % (tdec[ipos]) MeasRec["treat_dc_field_theta"] = '%7.1f' % (tinc[ipos]) # --------------------- # Lab treatment and lab protocoal for cooling rate experiment # --------------------- elif experiment == "CR": cooling_times_list LP = "LP-CR-TRM" MeasRec["treat_temp"] = '%8.3e' % ( float(treatment[0])+273.) # temp in kelvin if treatment[1] == 0: LT = "LT-T-Z" MeasRec["treat_dc_field"] = "0" MeasRec["treat_dc_field_phi"] = '0' MeasRec["treat_dc_field_theta"] = '0' else: if treatment[1] == 7: # alteration check as final measurement LT = "LT-PTRM-I" else: LT = "LT-T-I" MeasRec["treat_dc_field"] = '%8.3e' % (labfield) MeasRec["treat_dc_field_phi"] = '%7.1f' % ( labfield_phi) # labfield phi MeasRec["treat_dc_field_theta"] = '%7.1f' % ( labfield_theta) # labfield theta indx = int(treatment[1])-1 # alteration check matjed as 0.7 in the measurement file if indx == 6: cooling_time = cooling_times_list[-1] else: cooling_time = cooling_times_list[indx] MeasRec["measurement_description"] = "cooling_rate" + \ ":"+cooling_time+":"+"K/min" # --------------------- # Lab treatment and lab protocoal for NLT experiment # --------------------- elif 'NLT' in experiment: print( "Dont support yet NLT rate experiment file. 
Contact [email protected]") # --------------------- # method_codes for this measurement only # LP will be fixed after all measurement lines are read # --------------------- MeasRec["method_codes"] = LT+":"+LP # -------------------- # deal with specimen orientation and different coordinate system # -------------------- found_s, found_geo, found_tilt = False, False, False if "dec_s" in list(meas_line.keys()) and "inc_s" in list(meas_line.keys()): if meas_line["dec_s"] != "" and meas_line["inc_s"] != "": found_s = True MeasRec["dir_dec"] = meas_line["dec_s"] MeasRec["dir_inc"] = meas_line["inc_s"] if "dec_g" in list(meas_line.keys()) and "inc_g" in list(meas_line.keys()): if meas_line["dec_g"] != "" and meas_line["inc_g"] != "": found_geo = True if "dec_t" in list(meas_line.keys()) and "inc_t" in list(meas_line.keys()): if meas_line["dec_t"] != "" and meas_line["inc_t"] != "": found_tilt = True # ----------------------------- # specimen coordinates: no # geographic coordinates: yes # ----------------------------- if found_geo and not found_s: MeasRec["dir_dec"] = meas_line["dec_g"] MeasRec["dir_inc"] = meas_line["inc_g"] azimuth = "0" dip = "0" # ----------------------------- # specimen coordinates: no # geographic coordinates: no # ----------------------------- if not found_geo and not found_s: print("-E- ERROR: sample %s does not have dec_s/inc_s or dec_g/inc_g. Ignore specimen %s " % (sample, specimen)) break # ----------------------------- # specimen coordinates: yes # geographic coordinates: yes # # commant: Ron, this need to be tested !! # ----------------------------- if found_geo and found_s: cdec, cinc = float(meas_line["dec_s"]), float( meas_line["inc_s"]) gdec, ginc = float(meas_line["dec_g"]), float( meas_line["inc_g"]) az, pl = pmag.get_azpl(cdec, cinc, gdec, ginc) azimuth = "%.1f" % az dip = "%.1f" % pl # ----------------------------- # specimen coordinates: yes # geographic coordinates: no # ----------------------------- if not found_geo and found_s and "Demag" in experiment: print("-W- WARNING: missing dip or azimuth for sample %s" % sample) # ----------------------------- # tilt-corrected coordinates: yes # geographic coordinates: no # ----------------------------- if found_tilt and not found_geo: print( "-E- ERROR: missing geographic data for sample %s. Ignoring tilt-corrected data " % sample) # ----------------------------- # tilt-corrected coordinates: yes # geographic coordinates: yes # ----------------------------- if found_tilt and found_geo: dec_geo, inc_geo = float( meas_line["dec_g"]), float(meas_line["inc_g"]) dec_tilt, inc_tilt = float( meas_line["dec_t"]), float(meas_line["inc_t"]) if dec_geo == dec_tilt and inc_geo == inc_tilt: DipDir, Dip = 0., 0. 
else: DipDir, Dip = pmag.get_tilt( dec_geo, inc_geo, dec_tilt, inc_tilt) # ----------------------------- # samples method codes # geographic coordinates: no # ----------------------------- if found_tilt or found_geo: sample_method_codes = "SO-NO" if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]: SpecRec['specimen'] = specimen SpecRec['sample'] = sample SpecRec['citations'] = "This study" SpecRecs.append(SpecRec) if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]: SampRec['sample'] = sample SampRec['site'] = site SampRec['citations'] = "This study" SampRec['azimuth'] = azimuth SampRec['dip'] = dip SampRec['bed_dip_direction'] = DipDir SampRec['bed_dip'] = Dip SampRec['method_codes'] = sample_method_codes SampRecs.append(SampRec) if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]: SiteRec['site'] = site SiteRec['location'] = location SiteRec['citations'] = "This study" SiteRec['lat'] = lat SiteRec['lon'] = lon SiteRecs.append(SiteRec) if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]: LocRec['location'] = location LocRec['citations'] = "This study" LocRec['lat_n'] = lat LocRec['lon_e'] = lon LocRec['lat_s'] = lat LocRec['lon_w'] = lon LocRecs.append(LocRec) MeasRecs_this_specimen.append(MeasRec) measurement_running_number += 1 # ------- # ------- # after reading all the measurements lines for this specimen # 1) add experiments # 2) fix method_codes with the correct lab protocol # ------- LP_this_specimen = [] for MeasRec in MeasRecs_this_specimen: method_codes = MeasRec["method_codes"].split(":") for code in method_codes: if "LP" in code and code not in LP_this_specimen: LP_this_specimen.append(code) # check IZ/ZI/IZZI if "LP-PI-ZI" in LP_this_specimen and "LP-PI-IZ" in LP_this_specimen: LP_this_specimen.remove("LP-PI-ZI") LP_this_specimen.remove("LP-PI-IZ") LP_this_specimen.append("LP-PI-BT-IZZI") # add the right LP codes and fix experiment name for MeasRec in MeasRecs_this_specimen: # MeasRec["experiment"]=MeasRec["specimen"]+":"+":".join(LP_this_specimen) method_codes = MeasRec["method_codes"].split(":") LT = "" for code in method_codes: if code[:3] == "LT-": LT = code break MeasRec["method_codes"] = LT+":"+":".join(LP_this_specimen) MeasRec["method_codes"] = MeasRec["method_codes"].strip(":") MeasRecs.append(MeasRec) # -- # write tables to file # -- con = cb.Contribution(dir_path, read_tables=[]) con.add_magic_table_from_data(dtype='specimens', data=SpecRecs) con.add_magic_table_from_data(dtype='samples', data=SampRecs) con.add_magic_table_from_data(dtype='sites', data=SiteRecs) con.add_magic_table_from_data(dtype='locations', data=LocRecs) MeasOuts = pmag.measurements_methods3(MeasRecs, noave) con.add_magic_table_from_data(dtype='measurements', data=MeasOuts) con.tables['specimens'].write_magic_file(custom_name=spec_file,dir_path=dir_path) con.tables['samples'].write_magic_file(custom_name=samp_file,dir_path=dir_path) con.tables['sites'].write_magic_file(custom_name=site_file,dir_path=dir_path) con.tables['locations'].write_magic_file(custom_name=loc_file,dir_path=dir_path) con.tables['measurements'].write_magic_file(custom_name=meas_file,dir_path=dir_path) return True, meas_file
0.001817
def google_news_search(self, query, category_label, num=50):
    '''
    Searches Google News.
    NOTE: Official Google News API is deprecated https://developers.google.com/news-search/?hl=en
    NOTE: Google limits the maximum number of documents per query to 100.
          Use multiple related queries to get a bigger corpus.

    Args:
        query (str): The search term.
        category_label (str): The category to assign to the articles.
            These categories are the labels in the generated corpus.
        num (Optional[int]): The number of results to return.

    Returns:
        articles: Array of tuples that contains article link & category
            ex. [('IPO', 'www.cs.columbia.edu')]
    '''
    url = 'https://news.google.com/news?hl=en&q=' + self._encode_query(query) \
          + '&num=' + str(num) + '&output=rss'
    rss = feedparser.parse(url)
    entries = rss['entries']
    articles = []
    for entry in entries:
        link = entry['link']
        articles.append((category_label, link))
    return articles
0.010318
def write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir):
  """Write text to files."""

  def write_to_file(all_files, urls_path, tmp_dir, filename):
    """Write text to files."""
    with io.open(
        os.path.join(tmp_dir, filename + ".source"), "w",
        encoding="utf-8") as fstory:
      with io.open(
          os.path.join(tmp_dir, filename + ".target"), "w",
          encoding="utf-8") as fsummary:
        for example in example_generator(all_files, urls_path, sum_token=True):
          story, summary = _story_summary_split(example)
          fstory.write(story + "\n")
          fsummary.write(summary + "\n")

  if dataset_split == problem.DatasetSplit.TRAIN:
    filename = "cnndm.train"
  elif dataset_split == problem.DatasetSplit.EVAL:
    filename = "cnndm.dev"
  else:
    filename = "cnndm.test"

  tf.logging.info("Writing %s" % filename)
  write_to_file(all_files, urls_path, tmp_dir, filename)
0.014878
def delete_perf_task(task_name, auth, url):
    """
    Function takes a str of the target task_name to be deleted and retrieves task_id using the
    get_perf_task function. Once the task_id has been successfully retrieved it is populated into
    the task_id variable and a DELETE call is made against the HPE IMC REST interface to delete
    the target task.
    :param task_name: str of task name
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: int of 204 if successful, str of "Perf Task doesn't exist" if the task doesn't exist
    :rtype: int
    """
    task_id = get_perf_task(task_name, auth, url)
    if isinstance(task_id, str):
        print("Perf task doesn't exist")
        return 403
    task_id = task_id['taskId']
    get_perf_task_url = "/imcrs/perf/task/delete/" + str(task_id)
    f_url = url + get_perf_task_url
    response = requests.delete(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 204:
            print("Perf Task successfully deleted")
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' delete_perf_task: An Error has occurred'
0.005356
def send_to_address(recipient_address, amount, private_key,
                    blockchain_client=BlockchainInfoClient(),
                    fee=STANDARD_FEE, change_address=None):
    """ Builds, signs, and dispatches a "send to address" transaction. """
    # build and sign the tx
    signed_tx = make_send_to_address_tx(recipient_address, amount,
                                        private_key, blockchain_client,
                                        fee=fee, change_address=change_address)
    # dispatch the signed transaction to the network
    response = broadcast_transaction(signed_tx, blockchain_client)
    # return the response
    return response
0.008562
def handle_special_journals(citation_elements, kbs):
    """format special journals (like JHEP) volume number

    JHEP needs the volume number prefixed with the year
    e.g. JHEP 0301 instead of JHEP 01
    """
    for el in citation_elements:
        if el['type'] == 'JOURNAL' and el['title'] in kbs['special_journals']:
            if re.match(r'\d{1,2}$', el['volume']):
                # Sometimes the page is omitted and the year is written in its place
                # We can never be sure but it's very likely that page > 1900 is
                # actually a year, so we skip this reference
                if el['year'] == '' and re.match(r'(19|20)\d{2}$', el['page']):
                    el['type'] = 'MISC'
                    el['misc_txt'] = "%s,%s,%s" \
                        % (el['title'], el['volume'], el['page'])
                el['volume'] = el['year'][-2:] + '%02d' % int(el['volume'])
            if el['page'].isdigit():
                # JHEP and JCAP always have pages 3 digits long
                el['page'] = '%03d' % int(el['page'])

    return citation_elements
0.00182
def mean(self, axis=None, dtype=None, out=None, keepdims=False):
    """Compute the arithmetic mean along the specified axis.
    See np.mean() for details."""
    if axis == -1:
        axis = self.ndim
    if axis is None:
        results = vectorize(mean)(self, axis, dtype, keepdims=False)
        weights = self._sublengths
        res = np.average(results, axis=None, weights=weights)
        if keepdims:
            for i in range(self.ndim):
                res = expand_dims(res, res.ndim)
    elif axis == self._distaxis:
        results = vectorize(mean)(self, axis, dtype, keepdims=True)
        results = gather(results)
        # Average manually (np.average doesn't preserve ndarray subclasses)
        weights = (np.array(self._sublengths, dtype=np.float64) /
                   sum(self._sublengths))
        ix = [slice(None)] * self.ndim
        ix[axis] = 0
        res = results[ix] * weights[0]
        for i in range(1, self._n):
            ix[axis] = i
            res = res + results[ix] * weights[i]
        if keepdims:
            res = expand_dims(res, axis)
    else:
        res = vectorize(mean)(self, axis, dtype, keepdims=False)
        if keepdims:
            res = expand_dims(res, axis)
    if out is not None:
        out[:] = res
    return res
0.001414
def _parse_iso8601(text):
    """
    Maybe parse an ISO8601 datetime string into a datetime.

    :param text: Either a ``unicode`` string to parse or any other object
        (ideally a ``datetime`` instance) to pass through.

    :return: A ``datetime.datetime`` representing ``text``.  Or ``text`` if it
        was anything but a ``unicode`` string.
    """
    if isinstance(text, unicode):
        try:
            return parse_iso8601(text)
        except ValueError:
            raise CheckedValueTypeError(
                None, (datetime,), unicode, text,
            )
    # Let pyrsistent reject it down the line.
    return text
0.001558
def do_opt(self, *args, **kwargs):
    """ Get and set options """
    args = list(args)
    if not args:
        largest = 0
        keys = [key for key in self.conf if not key.startswith("_")]
        for key in keys:
            largest = max(largest, len(key))
        for key in keys:
            print("%s : %s" % (key.rjust(largest), self.conf[key]))
        return
    option = args.pop(0)
    if not args and not kwargs:
        method = getattr(self, "getopt_" + option, None)
        if method is None:
            self.getopt_default(option)
        else:
            method()
    else:
        method = getattr(self, "opt_" + option, None)
        if method is None:
            print("Unrecognized option %r" % option)
        else:
            method(*args, **kwargs)
        self.save_config()
0.0022
def skip_prepare(func):
    """
    A convenience decorator for indicating the raw data should not be prepared.
    """
    @wraps(func)
    def _wrapper(self, *args, **kwargs):
        value = func(self, *args, **kwargs)
        return Data(value, should_prepare=False)
    return _wrapper
0.003448
def _normalize_array(array, domain=(0, 1)):
  """Given an arbitrary rank-3 NumPy array, produce one representing an image.

  This ensures the resulting array has a dtype of uint8 and a domain of 0-255.

  Args:
    array: NumPy array representing the image
    domain: expected range of values in array,
      defaults to (0, 1), if explicitly set to None will use the array's
      own range of values and normalize them.

  Returns:
    normalized PIL.Image
  """
  # first copy the input so we're never mutating the user's data
  array = np.array(array)
  # squeeze helps both with batch=1 and B/W and PIL's mode inference
  array = np.squeeze(array)
  assert len(array.shape) <= 3
  assert np.issubdtype(array.dtype, np.number)
  assert not np.isnan(array).any()

  low, high = np.min(array), np.max(array)
  if domain is None:
    message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)"
    log.debug(message, low, high)
    domain = (low, high)

  # clip values if domain was specified and array contains values outside of it
  if low < domain[0] or high > domain[1]:
    message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})."
    log.info(message.format(low, high, domain[0], domain[1]))
    array = array.clip(*domain)

  min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max  # 0, 255
  # convert signed to unsigned if needed
  if np.issubdtype(array.dtype, np.inexact):
    offset = domain[0]
    if offset != 0:
      array -= offset
      log.debug("Converting inexact array by subtracting -%.2f.", offset)
    scalar = max_value / (domain[1] - domain[0])
    if scalar != 1:
      array *= scalar
      log.debug("Converting inexact array by scaling by %.2f.", scalar)

  return array.clip(min_value, max_value).astype(np.uint8)
0.012249
def histogram(title, title_x, title_y, x, bins_x):
    """
    Plot a basic histogram.
    """
    plt.figure()
    plt.hist(x, bins_x)
    plt.xlabel(title_x)
    plt.ylabel(title_y)
    plt.title(title)
0.004587
def normalize_underscore_case(name):
    """Normalize an underscore-separated descriptor to something more readable.

    i.e. 'NAGIOS_SERVER' becomes 'Nagios Server', and 'host_components'
    becomes 'Host Components'
    """
    normalized = name.lower()
    normalized = re.sub(r'_(\w)',
                        lambda match: ' ' + match.group(1).upper(),
                        normalized)
    return normalized[0].upper() + normalized[1:]
0.002247
def create_entity_dict(self):
    '''
    Creates a dict-based entity with fixed values, using all
    of the supported data types.
    '''
    entity = {}

    # Partition key and row key must be strings and are required
    entity['PartitionKey'] = 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))
    entity['RowKey'] = 'rk{}'.format(str(uuid.uuid4()).replace('-', ''))

    # Some basic types are inferred
    entity['age'] = 39  # EdmType.INT64
    entity['large'] = 933311100  # EdmType.INT64
    entity['sex'] = 'male'  # EdmType.STRING
    entity['married'] = True  # EdmType.BOOLEAN
    entity['ratio'] = 3.1  # EdmType.DOUBLE
    entity['birthday'] = datetime(1970, 10, 4)  # EdmType.DATETIME

    # Binary, Int32 and GUID must be explicitly typed
    entity['binary'] = EntityProperty(EdmType.BINARY, b'xyz')
    entity['other'] = EntityProperty(EdmType.INT32, 20)
    entity['clsid'] = EntityProperty(EdmType.GUID,
                                     'c9da6455-213d-42c9-9a79-3e9149a57833')
    return entity
0.00473
def get_names_owned_by_address( self, address ):
    """
    Get the set of names owned by a particular address.
    NOTE: only works for cases where we could extract an address.
    """
    cur = self.db.cursor()
    names = namedb_get_names_owned_by_address( cur, address, self.lastblock )
    return names
0.020833
def connect(
        self, login, password, authz_id=b"", starttls=False, authmech=None):
    """Establish a connection with the server.

    This function must be used. It reads the server capabilities
    and wraps calls to STARTTLS and AUTHENTICATE commands.

    :param login: username
    :param password: clear password
    :param starttls: use a TLS connection or not
    :param authmech: preferred authentication mechanism
    :rtype: boolean
    """
    try:
        self.sock = socket.create_connection((self.srvaddr, self.srvport))
        self.sock.settimeout(Client.read_timeout)
    except socket.error as msg:
        raise Error("Connection to server failed: %s" % str(msg))

    if not self.__get_capabilities():
        raise Error("Failed to read capabilities from server")
    if starttls and not self.__starttls():
        return False
    if self.__authenticate(login, password, authz_id, authmech):
        return True
    return False
0.001898
def request_bytesize(self):
    """The size in bytes of the bundled field elements."""
    return sum(len(str(e)) for elts in self._in_deque for e in elts)
0.012048
def robust_single_linkage(X, cut, k=5, alpha=1.4142135623730951, gamma=5, metric='euclidean', algorithm='best', memory=Memory(cachedir=None, verbose=0), leaf_size=40, core_dist_n_jobs=4, **kwargs): """Perform robust single linkage clustering from a vector array or distance matrix. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. cut : float The reachability distance value to cut the cluster heirarchy at to derive a flat cluster labelling. k : int, optional (default=5) Reachability distances will be computed with regard to the `k` nearest neighbors. alpha : float, optional (default=np.sqrt(2)) Distance scaling for reachability distance computation. Reachability distance is computed as $max \{ core_k(a), core_k(b), 1/\alpha d(a,b) \}$. gamma : int, optional (default=5) Ignore any clusters in the flat clustering with size less than gamma, and declare points in such clusters as noise points. metric : string, or callable, optional (default='euclidean') The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. algorithm : string, optional (default='best') Exactly which algorithm to use; hdbscan has variants specialised for different characteristics of the data. By default this is set to ``best`` which chooses the "best" algorithm given the nature of the data. You can force other options if you believe you know better. Options are: * ``generic`` * ``best`` * ``prims_kdtree`` * ``prims_balltree`` * ``boruvka_kdtree`` * ``boruvka_balltree`` memory : Instance of joblib.Memory or string (optional) Used to cache the output of the computation of the tree. By default, no caching is done. If a string is given, it is the path to the caching directory. leaf_size : int, optional (default=40) Leaf size for trees responsible for fast nearest neighbour queries. core_dist_n_jobs : int, optional Number of parallel jobs to run in core distance computations (if supported by the specific algorithm). For ``core_dist_n_jobs`` below -1, (n_cpus + 1 + core_dist_n_jobs) are used. (default 4) Returns ------- labels : ndarray, shape (n_samples, ) Cluster labels for each point. Noisy samples are given the label -1. single_linkage_tree : ndarray, shape (n_samples - 1, 4) The single linkage tree produced during clustering in scipy hierarchical clustering format (see http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html). References ---------- .. [1] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for the cluster tree. In Advances in Neural Information Processing Systems (pp. 343-351). 
""" if not isinstance(k, int) or k < 1: raise ValueError('k must be an integer greater than zero!') if not isinstance(alpha, float) or alpha < 1.0: raise ValueError('alpha must be a float greater than or equal to 1.0!') if not isinstance(gamma, int) or gamma < 1: raise ValueError('gamma must be an integer greater than zero!') if not isinstance(leaf_size, int) or leaf_size < 1: raise ValueError('Leaf size must be at least one!') if metric == 'minkowski': if 'p' not in kwargs or kwargs['p'] is None: raise TypeError('Minkowski metric given but no p value supplied!') if kwargs['p'] < 0: raise ValueError('Minkowski metric with negative p value is not' ' defined!') X = check_array(X, accept_sparse='csr') if isinstance(memory, six.string_types): memory = Memory(cachedir=memory, verbose=0) if algorithm != 'best': if algorithm == 'generic': single_linkage_tree = memory.cache(_rsl_generic)( X, k, alpha, metric, **kwargs) elif algorithm == 'prims_kdtree': single_linkage_tree = memory.cache(_rsl_prims_kdtree)( X, k, alpha, metric, **kwargs) elif algorithm == 'prims_balltree': single_linkage_tree = memory.cache(_rsl_prims_balltree)( X, k, alpha, metric, **kwargs) elif algorithm == 'boruvka_kdtree': single_linkage_tree = \ memory.cache(_rsl_boruvka_kdtree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) elif algorithm == 'boruvka_balltree': single_linkage_tree = \ memory.cache(_rsl_boruvka_balltree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) else: raise TypeError('Unknown algorithm type %s specified' % algorithm) else: if issparse(X) or metric not in FAST_METRICS: # We can't do much with sparse matrices ... single_linkage_tree = memory.cache(_rsl_generic)( X, k, alpha, metric, **kwargs) elif metric in KDTree.valid_metrics: # Need heuristic to decide when to go to boruvka; # still debugging for now if X.shape[1] > 128: single_linkage_tree = memory.cache(_rsl_prims_kdtree)( X, k, alpha, metric, **kwargs) else: single_linkage_tree = \ memory.cache(_rsl_boruvka_kdtree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) else: # Metric is a valid BallTree metric # Need heuristic to decide when to go to boruvka; # still debugging for now if X.shape[1] > 128: single_linkage_tree = memory.cache(_rsl_prims_kdtree)( X, k, alpha, metric, **kwargs) else: single_linkage_tree = \ memory.cache(_rsl_boruvka_balltree)(X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs) labels = single_linkage_tree.get_clusters(cut, gamma) return labels, single_linkage_tree.to_numpy()
0.001264
def replace_with_text_stream(stream_name):
    """Given a stream name, replace the target stream with a text-converted equivalent

    :param str stream_name: The name of a target stream, such as **stdout** or **stderr**
    :return: None
    """
    new_stream = TEXT_STREAMS.get(stream_name)
    if new_stream is not None:
        new_stream = new_stream()
        setattr(sys, stream_name, new_stream)
    return None
0.007143
def get_lmv2_response(domain, username, password, server_challenge, client_challenge):
    """
    Computes an appropriate LMv2 response based on the supplied arguments.
    The algorithm is based on jCIFS. The response is 24 bytes, with the 16 bytes
    of hash concatenated with the 8-byte client challenge.
    """
    ntlmv2_hash = PasswordAuthentication.ntowfv2(domain, username,
                                                 password.encode('utf-16le'))
    hmac_context = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend())
    hmac_context.update(server_challenge)
    hmac_context.update(client_challenge)
    lmv2_hash = hmac_context.finalize()

    # The LMv2 master user session key is a HMAC MD5 of the NTLMv2 and LMv2 hash
    session_key = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend())
    session_key.update(lmv2_hash)

    return lmv2_hash + client_challenge, session_key.finalize()
0.008475
def set_verbosity(verbosity):
    """Banana banana
    """
    Logger._verbosity = min(max(0, WARNING - verbosity), 2)
    debug("Verbosity set to %d" % (WARNING - Logger._verbosity), 'logging')
0.009524
def inverse_unit_transform(self, x, **kwargs):
    """Go from the parameter value to the unit coordinate using the cdf.
    """
    if len(kwargs) > 0:
        self.update(**kwargs)
    return self.distribution.cdf(x, *self.args,
                                 loc=self.loc, scale=self.scale)
0.006289
def h2i(self, pkt, seconds):
    """Convert the number of seconds since 1-Jan-70 UTC to the packed
    representation."""
    if seconds is None:
        seconds = 0
    tmp_short = (seconds >> 32) & 0xFFFF
    tmp_int = seconds & 0xFFFFFFFF
    return struct.pack("!HI", tmp_short, tmp_int)
0.006154
def apply(self, function):
    """
    Applies a function on the value, the actual stored value will not change.

    :param function: (Function), A stateful serializable object which represents the Function defined on
        server side. This object must have a serializable Function counter part registered on server side with the
        actual ``org.hazelcast.core.IFunction`` implementation.
    :return: (object), the result of the function application.
    """
    check_not_none(function, "function can't be None")
    return self._encode_invoke(atomic_long_apply_codec,
                               function=self._to_data(function))
0.009077
def _saline(cls, T, P, S): """Eq 4""" # Check input in range of validity if T <= 261 or T > 353 or P <= 0 or P > 100 or S < 0 or S > 0.12: warnings.warn("Incoming out of bound") S_ = 0.03516504*40/35 X = (S/S_)**0.5 tau = (T-273.15)/40 pi = (P-0.101325)/100 I = [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 4, 2, 2, 3, 4, 5, 2, 3, 4, 2, 3, 2, 3, 2, 3, 2, 3, 4, 2, 3, 2, 3, 2, 2, 2, 3, 4, 2, 3, 2, 3, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2] J = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 0, 0, 0, 1, 1, 2, 2, 3, 4, 0, 0, 0, 1, 1, 2, 2, 3, 4, 0, 0, 1, 2, 3, 0, 1, 2] K = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5] G = [0.581281456626732e4, 0.141627648484197e4, -0.243214662381794e4, 0.202580115603697e4, -0.109166841042967e4, 0.374601237877840e3, -0.485891069025409e2, 0.851226734946706e3, 0.168072408311545e3, -0.493407510141682e3, 0.543835333000098e3, -0.196028306689776e3, 0.367571622995805e2, 0.880031352997204e3, -0.430664675978042e2, -0.685572509204491e2, -0.225267649263401e3, -0.100227370861875e2, 0.493667694856254e2, 0.914260447751259e2, 0.875600661808945, -0.171397577419788e2, -0.216603240875311e2, 0.249697009569508e1, 0.213016970847183e1, -0.331049154044839e4, 0.199459603073901e3, -0.547919133532887e2, 0.360284195611086e2, 0.729116529735046e3, -0.175292041186547e3, -0.226683558512829e2, -0.860764303783977e3, 0.383058066002476e3, 0.694244814133268e3, -0.460319931801257e3, -0.297728741987187e3, 0.234565187611355e3, 0.384794152978599e3, -0.522940909281335e2, -0.408193978912261e1, -0.343956902961561e3, 0.831923927801819e2, 0.337409530269367e3, -0.541917262517112e2, -0.204889641964903e3, 0.747261411387560e2, -0.965324320107458e2, 0.680444942726459e2, -0.301755111971161e2, 0.124687671116248e3, -0.294830643494290e2, -0.178314556207638e3, 0.256398487389914e2, 0.113561697840594e3, -0.364872919001588e2, 0.158408172766824e2, -0.341251932441282e1, -0.316569643860730e2, 0.442040358308000e2, -0.111282734326413e2, -0.262480156590992e1, 0.704658803315449e1, -0.792001547211682e1] g, gt, gp, gtt, gtp, gpp, gs, gsp = 0, 0, 0, 0, 0, 0, 0, 0 # Calculate only for some salinity if S != 0: for i, j, k, gi in zip(I, J, K, G): if i == 1: g += gi*X**2*log(X)*tau**j*pi**k gs += gi*(2*log(X)+1)*tau**j*pi**k else: g += gi*X**i*tau**j*pi**k gs += i*gi*X**(i-2)*tau**j*pi**k if j >= 1: if i == 1: gt += gi*X**2*log(X)*j*tau**(j-1)*pi**k else: gt += gi*X**i*j*tau**(j-1)*pi**k if k >= 1: gp += k*gi*X**i*tau**j*pi**(k-1) gsp += i*k*gi*X**(i-2)*tau**j*pi**(k-1) if j >= 2: gtt += j*(j-1)*gi*X**i*tau**(j-2)*pi**k if j >= 1 and k >= 1: gtp += j*k*gi*X**i*tau**(j-1)*pi**(k-1) if k >= 2: gpp += k*(k-1)*gi*X**i*tau**j*pi**(k-2) prop = {} prop["g"] = g*1e-3 prop["gt"] = gt/40*1e-3 prop["gp"] = gp/100*1e-6 prop["gtt"] = gtt/40**2*1e-3 prop["gtp"] = gtp/40/100*1e-6 prop["gpp"] = gpp/100**2*1e-6 prop["gs"] = gs/S_/2*1e-3 prop["gsp"] = gsp/S_/2/100*1e-6 return prop
0.00074
def get(self, id, seq, intf):  # pylint: disable=invalid-name,redefined-builtin
    """Get a capture.

    :param id: Result ID as an int.
    :param seq: TestResult sequence ID as an int.
    :param intf: Interface name as string.

    :return: :class:`captures.Capture <captures.Capture>` object
    :rtype: captures.Capture
    """
    schema = CaptureSchema()
    resp = self.service.get_id(self._base(id, seq), intf)
    return self.service.decode(schema, resp)
0.005952
def is_ordered_mapping(obj):
    """Checks whether given object is an ordered mapping,
    e.g. a :class:`OrderedDict`.

    :return: ``True`` if argument is an ordered mapping, ``False`` otherwise
    """
    if not (is_mapping(obj) and hasattr(obj, '__reversed__')):
        return False

    # PyPy has a bug where the standard :class:`dict` has the ``__reversed__``
    # method but it's unusable and throws an exception when called
    try:
        obj.__reversed__()
    except TypeError:
        return False
    else:
        return True
0.001838
def text(self, text, x, y, width, height, color, font): """ See the Processing function text(): https://processing.org/reference/text_.html Consider using Pango in addition to Cairo here. """ # Helper function. def chop(word): """ Take a word longer than the bounding box's width and chop off as many letters in the beginning as fit, followed by an ellipsis. """ total_str = "" for c in word: _, _, total_width, _, _, _ = self.context.text_extents(total_str + c + "…") if total_width >= width: return total_str + "…" total_str += c assert not "Should not be here, else 'word' fit into the bounding box" # Prepare the context for text rendering. self.context.set_source_rgb(*color) font_name, (font_size, font_slant, font_weight) = (font) self.context.select_font_face(font_name, font_slant, font_weight) self.context.set_font_size(font_size) self.context.set_antialias(cairo.ANTIALIAS_DEFAULT) # Get some font metrics. font_asc, _, font_height, _, _ = self.context.font_extents() # Initialize text cursor to the baseline of the font. width, height = self.tx(width), self.ty(height) w_x, w_y = self.tx(x), font_asc + self.ty(y) # Draw the text one line at a time and ensure the bounding box. line = "" nlines = 1 for word in text.split(" "): _, _, line_width, _, _, _ = self.context.text_extents(_join(line, word)) if line_width < width: line = _join(line, word) else: if not line: # First word of the line extends beyond the line: chop and done. self.context.move_to(w_x, w_y) self.context.show_text(chop(word)) return nlines, font_height else: # Filled a line, render it, and move on to the next line. self.context.move_to(w_x, w_y) self.context.show_text(line) line = word w_y += font_height if w_y > height: return nlines, font_height nlines += 1 self.context.move_to(w_x, w_y) self.context.show_text(line) return nlines, font_height
0.00278
def scan_file(self, filename, filename_key=None):
    """Scans a specified file, and adds information to self.data

    :type filename: str
    :param filename: full path to file to scan.

    :type filename_key: str
    :param filename_key: key to store in self.data

    :returns: boolean; though this value is only used for testing
    """
    if not filename_key:
        filename_key = filename

    if os.path.islink(filename):
        return False

    try:
        with codecs.open(filename, encoding='utf-8') as f:
            self._extract_secrets_from_file(f, filename_key)
        return True
    except IOError:
        log.warning("Unable to open file: %s", filename)
        return False
0.002577
def holiday_description(self):
    """
    Return the holiday description.

    If none exists, return None.
    """
    entry = self._holiday_entry()
    desc = entry.description
    return desc.hebrew.long if self.hebrew else desc.english
0.00722
def move_to_device(obj, cuda_device: int):
    """
    Given a structure (possibly) containing Tensors on the CPU,
    move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
    """
    if cuda_device < 0 or not has_tensor(obj):
        return obj
    elif isinstance(obj, torch.Tensor):
        return obj.cuda(cuda_device)
    elif isinstance(obj, dict):
        return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
    elif isinstance(obj, list):
        return [move_to_device(item, cuda_device) for item in obj]
    elif isinstance(obj, tuple):
        return tuple([move_to_device(item, cuda_device) for item in obj])
    else:
        return obj
0.004213
def p_mp_createClass(p): """mp_createClass : classDeclaration """ # pylint: disable=too-many-branches,too-many-statements,too-many-locals ns = p.parser.handle.default_namespace cc = p[1] try: fixedNS = fixedRefs = fixedSuper = False while not fixedNS or not fixedRefs or not fixedSuper: try: if p.parser.verbose: p.parser.log( _format("Creating class {0!A}:{1!A}", ns, cc.classname)) p.parser.handle.CreateClass(cc) if p.parser.verbose: p.parser.log( _format("Created class {0!A}:{1!A}", ns, cc.classname)) p.parser.classnames[ns].append(cc.classname.lower()) break except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) errcode = ce.status_code if errcode == CIM_ERR_INVALID_NAMESPACE: if fixedNS: raise if p.parser.verbose: p.parser.log( _format("Creating namespace {0!A}", ns)) p.parser.server.create_namespace(ns) fixedNS = True continue if not p.parser.search_paths: raise if errcode == CIM_ERR_INVALID_SUPERCLASS: if fixedSuper: raise moffile = p.parser.mofcomp.find_mof(cc.superclass) if not moffile: raise p.parser.mofcomp.compile_file(moffile, ns) fixedSuper = True elif errcode in [CIM_ERR_INVALID_PARAMETER, CIM_ERR_NOT_FOUND, CIM_ERR_FAILED]: if fixedRefs: raise if not p.parser.qualcache[ns]: for fname in ['qualifiers', 'qualifiers_optional']: qualfile = p.parser.mofcomp.find_mof(fname) if qualfile: p.parser.mofcomp.compile_file(qualfile, ns) if not p.parser.qualcache[ns]: # can't find qualifiers raise objects = list(cc.properties.values()) for meth in cc.methods.values(): objects += list(meth.parameters.values()) dep_classes = NocaseDict() # dict dep_class, ce for obj in objects: if obj.type not in ['reference', 'string']: continue if obj.type == 'reference': if obj.reference_class not in dep_classes: dep_classes[obj.reference_class] = ce elif obj.type == 'string': try: embedded_inst = \ obj.qualifiers['embeddedinstance'] except KeyError: continue if embedded_inst.value not in dep_classes: dep_classes[embedded_inst.value] = ce continue for cln, err in dep_classes.items(): if cln in p.parser.classnames[ns]: continue try: # don't limit it with LocalOnly=True, # PropertyList, IncludeQualifiers=False, ... # because of caching in case we're using the # special WBEMConnection subclass used for # removing schema elements p.parser.handle.GetClass(cln, LocalOnly=False, IncludeQualifiers=True) p.parser.classnames[ns].append(cln) except CIMError: moffile = p.parser.mofcomp.find_mof(cln) if not moffile: raise err try: if p.parser.verbose: p.parser.log( _format("Class {0!A} namespace {1!A} " "depends on class {2!A} which " "is not in repository.", cc.classname, ns, cln)) p.parser.mofcomp.compile_file(moffile, ns) except CIMError as ce: if ce.status_code == CIM_ERR_NOT_FOUND: raise err raise p.parser.classnames[ns].append(cln) fixedRefs = True else: raise except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) if ce.status_code != CIM_ERR_ALREADY_EXISTS: raise if p.parser.verbose: p.parser.log( _format("Class {0!A} already exist. Modifying...", cc.classname)) try: p.parser.handle.ModifyClass(cc, ns) except CIMError as ce: p.parser.log( _format("Error modifying class {0!A}: {1}, {2}", cc.classname, ce.status_code, ce.status_description))
0.000338
def GET_namespaces( self, path_info ):
    """
    Get the list of all namespaces
    Reply all existing namespaces
    Reply 502 if we can't reach the server for whatever reason
    """
    qs_values = path_info['qs_values']
    offset = qs_values.get('offset', None)
    count = qs_values.get('count', None)

    blockstackd_url = get_blockstackd_url()
    namespaces = blockstackd_client.get_all_namespaces(offset=offset, count=count, hostport=blockstackd_url)
    if json_is_error(namespaces):
        # error
        status_code = namespaces.get('http_status', 502)
        return self._reply_json({'error': namespaces['error']}, status_code=status_code)

    self._reply_json(namespaces)
    return
0.007813
def get_epoll_events(self):
    """
    Create a bit mask using ``EPOLL*`` family of constants.
    """
    epoll_events = 0
    if self & EVENT_READ:
        epoll_events |= select.EPOLLIN
    if self & EVENT_WRITE:
        epoll_events |= select.EPOLLOUT
    return epoll_events
0.006329
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    stats: Dict of dict with servers stats
    """
    # Flush display
    logger.debug('Servers list: {}'.format(stats))
    self.flush(stats)

    # Wait
    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(stats)
        # Is it an exit or select server key ?
        exitkey = (
            pressedkey == ord('\x1b') or pressedkey == ord('q') or
            pressedkey == 10)
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(stats)
        # Wait 100ms...
        self.wait()

    return self.active_server
0.007085
def p_exception(self, p):
    '''exception : EXCEPTION IDENTIFIER '{' field_seq '}' annotations'''
    p[0] = ast.Exc(
        name=p[2],
        fields=p[4],
        annotations=p[6],
        lineno=p.lineno(2)
    )
0.009569
def add_users_to_user_group(self, id, **kwargs):  # noqa: E501
    """Add multiple users to a specific user group  # noqa: E501

      # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.add_users_to_user_group(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param list[str] body: List of users that should be added to user group
    :return: ResponseContainerUserGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.add_users_to_user_group_with_http_info(id, **kwargs)  # noqa: E501
    else:
        (data) = self.add_users_to_user_group_with_http_info(id, **kwargs)  # noqa: E501
        return data
0.00199
def _convert_nodelist(self, impl_nodelist):
    """
    Convert a list of underlying implementation nodes into a list of
    *xml4h* wrapper nodes.
    """
    nodelist = [
        self.adapter.wrap_node(n, self.adapter.impl_document, self.adapter)
        for n in impl_nodelist]
    return NodeList(nodelist)
0.005848
def exclude_package(cls, package_name=None, recursive=False):
    """Excludes the given fully qualified package name from shading.

    :param unicode package_name: A fully qualified package_name; eg: `org.pantsbuild`; `None` for
                                 the java default (root) package.
    :param bool recursive: `True` to exclude any package with `package_name` as a proper prefix;
                           `False` by default.
    :returns: A `Shader.Rule` describing the shading exclusion.
    """
    if not package_name:
        return Shading.create_exclude('**' if recursive else '*')
    return Shading.create_exclude_package(package_name, recursive=recursive)
0.005917
def patch_io_httplib(config):
    """ Patch the base httplib.HTTPConnection class, which is used in most HTTP
        libraries like urllib2 or urllib3/requests. """

    # pylint: disable=import-error

    def start(method, url):
        job = get_current_job()
        if job:
            job.set_current_io({
                "type": "http.%s" % method.lower(),
                "data": {
                    "url": url
                }
            })

    def stop():
        job = get_current_job()
        if job:
            job.set_current_io(None)

    class mrq_wrapped_socket(object):
        """ Socket-like object that keeps track of 'trace_args' and wraps our
            monitoring code around blocking I/O calls. """

        def __init__(self, obj, parent_connection):
            self._obj = obj
            self._parent_connection = parent_connection

            def _make_patched_method(method):
                def _patched_method(*args, **kwargs):
                    # In the case of HTTPS, we may connect() before having called conn.request()
                    # For requests/urllib3, we may need to plug ourselves at the
                    # connectionpool.urlopen level
                    if not hasattr(self._parent_connection, "_traced_args"):
                        return getattr(self._obj, method)(*args, **kwargs)

                    start(*self._parent_connection._traced_args)  # pylint: disable=protected-access
                    try:
                        data = getattr(self._obj, method)(*args, **kwargs)
                    finally:
                        stop()
                    return data
                return _patched_method

            # Replace socket methods with instrumented ones
            for method in [
                # socket
                "send", "sendall", "sendto", "recv", "recvfrom", "recvfrom_into", "recv_into",
                "connect", "connect_ex", "close",
                # fileobject
                "read", "readline", "write", "writelines", "seek"
            ]:
                setattr(self, method, _make_patched_method(method))

        # Forward all other calls/attributes to the base socket
        def __getattr__(self, attr):
            # cprint(attr, "green")
            return getattr(self._obj, attr)

        def makefile(self, *args, **kwargs):
            newsock = self._obj.makefile(*args, **kwargs)
            return mrq_wrapped_socket(newsock, self._parent_connection)

    def request(old_method, self, method, url, body=None, headers=None, *args, **kwargs):
        if headers is None:
            headers = {}

        # This is for proxy support - TODO show that in dashboard?
        if re.search(r"^http(s?)\:\/\/", url):
            report_url = url
        else:
            protocol = "http"
            if hasattr(self, "key_file"):
                protocol = "https"
            report_url = "%s://%s%s%s" % (
                protocol,
                self.host,
                (":%s" % self.port) if self.port != 80 else "",
                url
            )

        self._traced_args = (method, report_url)  # pylint: disable=protected-access
        res = old_method(self, method, url, body=body, headers=headers)
        return res

    def connect(old_method, self, *args, **kwargs):
        # In the case of HTTPS, we may connect() before having called conn.request()
        # For requests/urllib3, we may need to plug ourselves at the connectionpool.urlopen level
        if not hasattr(self, "_traced_args"):
            ret = old_method(self, *args, **kwargs)
        else:
            start(*self._traced_args)  # pylint: disable=protected-access
            try:
                ret = old_method(self, *args, **kwargs)
            finally:
                stop()

        self.sock = mrq_wrapped_socket(self.sock, self)

        return ret

    from http.client import HTTPConnection, HTTPSConnection

    patch_method(HTTPConnection, "request", request)
    patch_method(HTTPConnection, "connect", connect)
    patch_method(HTTPSConnection, "connect", connect)

    # Try to patch requests & urllib3 as they are very popular Python modules.
    try:
        from requests.packages.urllib3.connection import (
            HTTPConnection,
            UnverifiedHTTPSConnection,
            VerifiedHTTPSConnection
        )
        patch_method(HTTPConnection, "connect", connect)
        patch_method(UnverifiedHTTPSConnection, "connect", connect)
        patch_method(VerifiedHTTPSConnection, "connect", connect)
    except ImportError:
        pass

    try:
        from urllib3.connection import (
            HTTPConnection,
            UnverifiedHTTPSConnection,
            VerifiedHTTPSConnection
        )
        patch_method(HTTPConnection, "connect", connect)
        patch_method(UnverifiedHTTPSConnection, "connect", connect)
        patch_method(VerifiedHTTPSConnection, "connect", connect)
    except ImportError:
        pass
0.002211
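The patch_method helper used above is defined elsewhere in the codebase; a minimal sketch that matches the call pattern seen here (class, attribute name, wrapper receiving the original method as its first argument) could look like the following. This is an illustrative assumption, not the library's actual implementation.

def patch_method(klass, method_name, new_method):
    # Hypothetical helper: replace klass.method_name with a wrapper that is
    # handed the original method so it can delegate to it.
    old_method = getattr(klass, method_name)

    def _wrapped(self, *args, **kwargs):
        return new_method(old_method, self, *args, **kwargs)

    setattr(klass, method_name, _wrapped)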
def format_prompt(prompt=None, default=None, enable_quit=False, quit_string='q',
                  quit_message='(enter q to Quit)'):
    """
    Format the prompt.

    :param prompt: the prompt message.
    :param default: the default answer if user does not provide a response.
    :param enable_quit: specifies whether the user can cancel out of the input prompt.
    :param quit_string: the string which the user must input in order to quit.
    :param quit_message: the message to explain how to quit.
    :return: the formatted prompt string.
    """
    if prompt is None:
        return None

    prompt = prompt.rstrip()
    prompt = prompt.rstrip(':')

    if enable_quit:
        prompt = "{0} {1}".format(prompt, quit_message)

    if default:
        prompt = "{0} [{1}]".format(prompt, default)

    return "{0}: ".format(prompt)
0.007642
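A quick usage sketch (assuming format_prompt is in scope; the expected output follows directly from the stripping and formatting steps above):

print(repr(format_prompt("Enter your name:", default="guest", enable_quit=True)))
# 'Enter your name (enter q to Quit) [guest]: '
print(format_prompt(None))  # None is returned when no prompt message is given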
def maxTreeDepthDivide(rootValue, currentDepth=0, parallelLevel=2):
    """Finds a tree node that represents rootValue and computes the max depth
    of this tree branch.

    This function will emit new futures until currentDepth=parallelLevel"""
    thisRoot = shared.getConst('myTree').search(rootValue)
    if currentDepth >= parallelLevel:
        return thisRoot.maxDepth(currentDepth)
    else:
        # Base case
        if not any([thisRoot.left, thisRoot.right]):
            return currentDepth
        if not all([thisRoot.left, thisRoot.right]):
            return thisRoot.maxDepth(currentDepth)
        # Parallel recursion
        return max(
            futures.map(
                maxTreeDepthDivide,
                [
                    thisRoot.left.payload,
                    thisRoot.right.payload,
                ],
                cycle([currentDepth + 1]),
                cycle([parallelLevel]),
            )
        )
0.001045
def string(self, string):
    """Load an object from a string and return the processed JSON content

    :param str string: the string to load the JSON from
    :return: the result of the processing step
    """
    object_ = json.loads(string)
    return self.object(object_)
0.006667
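To illustrate the contract, a small self-contained stand-in (DemoLoader is hypothetical; the real class name and whatever its object() hook does with the parsed data are not shown here):

import json

class DemoLoader:
    # Stand-in for the real loader class; object() here simply echoes the parsed data.
    def object(self, object_):
        return object_

    def string(self, string):
        # Same flow as above: parse the JSON, then hand it to the object() hook.
        return self.object(json.loads(string))

print(DemoLoader().string('{"name": "demo", "count": 3}'))  # {'name': 'demo', 'count': 3}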
def on(self, event):
    """
    Returns a wrapper for the given event.

    Usage:

        @dispatch.on("my_event")
        def handle_my_event(foo, bar, baz):
            ...

    """
    handler = self._handlers.get(event, None)
    if not handler:
        raise ValueError("Unknown event '{}'".format(event))
    return handler.register
0.005195
def _request(self, method, identifier, key=None, value=None):
    """Perform request with identifier."""
    params = {'id': identifier}
    if key is not None and value is not None:
        params[key] = value
    result = yield from self._transact(method, params)
    return result.get(key)
0.006329
def get_editor_buffer_for_location(self, location):
    """
    Return the `EditorBuffer` for this location.
    Return None when this file has not been loaded yet.
    """
    for eb in self.editor_buffers:
        if eb.location == location:
            return eb
0.006944
def train(target, dataset, cluster_spec, ctx):
    """Train Inception on a dataset for a number of steps."""
    # Number of workers and parameter servers are inferred from the workers and ps
    # hosts string.
    num_workers = len(cluster_spec.as_dict()['worker'])
    num_parameter_servers = len(cluster_spec.as_dict()['ps'])
    # If no value is given, num_replicas_to_aggregate defaults to be the number of
    # workers.
    if FLAGS.num_replicas_to_aggregate == -1:
        num_replicas_to_aggregate = num_workers
    else:
        num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate

    # Both should be greater than 0 in a distributed training.
    assert num_workers > 0 and num_parameter_servers > 0, (
        'num_workers and num_parameter_servers must be > 0.')

    # Choose worker 0 as the chief. Note that any worker could be the chief
    # but there should be only one chief.
    is_chief = (FLAGS.task_id == 0)

    # Ops are assigned to worker by default.
    with tf.device('/job:worker/task:%d' % FLAGS.task_id):
        # Variables and their related init/assign ops are assigned to ps.
        with slim.scopes.arg_scope(
                [slim.variables.variable, slim.variables.global_step],
                device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
            # Create a variable to count the number of train() calls. This equals the
            # number of updates applied to the variables.
            global_step = slim.variables.global_step()

            # Calculate the learning rate schedule.
            num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                                     FLAGS.batch_size)
            # Decay steps need to be divided by the number of replicas to aggregate.
            decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
                              num_replicas_to_aggregate)

            # Decay the learning rate exponentially based on the number of steps.
            lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                            global_step,
                                            decay_steps,
                                            FLAGS.learning_rate_decay_factor,
                                            staircase=True)
            # Add a summary to track the learning rate.
            tf.summary.scalar('learning_rate', lr)

            # Create an optimizer that performs gradient descent.
            opt = tf.train.RMSPropOptimizer(lr,
                                            RMSPROP_DECAY,
                                            momentum=RMSPROP_MOMENTUM,
                                            epsilon=RMSPROP_EPSILON)

            if FLAGS.input_mode == 'spark':
                def feed_dict(feed_batch):
                    # extract TFRecords, since feed_batch is [(TFRecord, None)]
                    tfrecords = []
                    for elem in feed_batch:
                        tfrecords.append(str(elem[0]))
                    return tfrecords

                batch = tf.placeholder(tf.string, [FLAGS.batch_size / FLAGS.num_preprocess_threads])

                # The following is adapted from image_processing.py to remove Readers/QueueRunners.
                # Note: this removes the RandomShuffledQueue, so the incoming data is not shuffled.
                # Presumably, this could be done on the Spark side or done in additional TF code.
                examples = tf.unstack(batch)
                images, labels = [], []
                for example_serialized in examples:
                    for thread_id in range(FLAGS.num_preprocess_threads):
                        # Parse a serialized Example proto to extract the image and metadata.
                        image_buffer, label_index, bbox, _ = image_processing.parse_example_proto(example_serialized)
                        image = image_processing.image_preprocessing(image_buffer, bbox, train, thread_id)
                        images.append(image)
                        labels.append(label_index)

                height = FLAGS.image_size
                width = FLAGS.image_size
                depth = 3

                images = tf.cast(images, tf.float32)
                images = tf.reshape(images, shape=[FLAGS.batch_size, height, width, depth])
                tf.summary.image('images', images)
                labels = tf.reshape(labels, [FLAGS.batch_size])
            else:
                images, labels = image_processing.distorted_inputs(
                    dataset,
                    batch_size=FLAGS.batch_size,
                    num_preprocess_threads=FLAGS.num_preprocess_threads)

            # Number of classes in the Dataset label set plus 1.
            # Label 0 is reserved for an (unused) background class.
            num_classes = dataset.num_classes() + 1

            logits = inception.inference(images, num_classes, for_training=True)
            # Add classification loss.
            inception.loss(logits, labels)

            # Gather all of the losses including regularization losses.
            losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
            losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            total_loss = tf.add_n(losses, name='total_loss')

            if is_chief:
                # Compute the moving average of all individual losses and the
                # total loss.
                loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
                loss_averages_op = loss_averages.apply(losses + [total_loss])

                # Attach a scalar summary to all individual losses and the total loss;
                # do the same for the averaged version of the losses.
                for l in losses + [total_loss]:
                    loss_name = l.op.name
                    # Name each loss as '(raw)' and name the moving average version of the
                    # loss as the original loss name.
                    tf.summary.scalar(loss_name + ' (raw)', l)
                    tf.summary.scalar(loss_name, loss_averages.average(l))

                # Add dependency to compute loss_averages.
                with tf.control_dependencies([loss_averages_op]):
                    total_loss = tf.identity(total_loss)

            # Track the moving averages of all trainable variables.
            # Note that we maintain a 'double-average' of the BatchNormalization
            # global statistics.
            # This is not needed when the number of replicas are small but important
            # for synchronous distributed training with tens of workers/replicas.
            exp_moving_averager = tf.train.ExponentialMovingAverage(
                inception.MOVING_AVERAGE_DECAY, global_step)

            variables_to_average = (
                tf.trainable_variables() + tf.moving_average_variables())

            # Add histograms for model variables.
            for var in variables_to_average:
                tf.summary.histogram(var.op.name, var)

            # Create synchronous replica optimizer.
            opt = tf.train.SyncReplicasOptimizer(
                opt,
                replicas_to_aggregate=num_replicas_to_aggregate,
                total_num_replicas=num_workers,
                variable_averages=exp_moving_averager,
                variables_to_average=variables_to_average)

            batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
            assert batchnorm_updates, 'Batchnorm updates are missing'
            batchnorm_updates_op = tf.group(*batchnorm_updates)
            # Add dependency to compute batchnorm_updates.
            with tf.control_dependencies([batchnorm_updates_op]):
                total_loss = tf.identity(total_loss)

            # Compute gradients with respect to the loss.
            grads = opt.compute_gradients(total_loss)

            # Add histograms for gradients.
            for grad, var in grads:
                if grad is not None:
                    tf.summary.histogram(var.op.name + '/gradients', grad)

            apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)

            with tf.control_dependencies([apply_gradients_op]):
                train_op = tf.identity(total_loss, name='train_op')

            # Get chief queue_runners, init_tokens and clean_up_op, which is used to
            # synchronize replicas.
            # More details can be found in sync_replicas_optimizer.
            chief_queue_runners = [opt.get_chief_queue_runner()]
            init_tokens_op = opt.get_init_tokens_op()

            # Create a saver.
            saver = tf.train.Saver()

            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.summary.merge_all()

            # Build an initialization operation to run below.
            init_op = tf.global_variables_initializer()

            # We run the summaries in the same thread as the training operations by
            # passing in None for summary_op to avoid a summary_thread being started.
            # Running summaries and training operations in parallel could run out of
            # GPU memory.
            summary_writer = tf.summary.FileWriter("tensorboard_%d" % ctx.worker_num,
                                                   graph=tf.get_default_graph())
            sv = tf.train.Supervisor(is_chief=is_chief,
                                     logdir=FLAGS.train_dir,
                                     init_op=init_op,
                                     summary_op=None,
                                     global_step=global_step,
                                     summary_writer=summary_writer,
                                     saver=saver,
                                     save_model_secs=FLAGS.save_interval_secs)

            tf.logging.info('%s Supervisor' % datetime.now())

            sess_config = tf.ConfigProto(
                allow_soft_placement=True,
                log_device_placement=FLAGS.log_device_placement)

            # Get a session.
            sess = sv.prepare_or_wait_for_session(target, config=sess_config)

            # Start the queue runners.
            queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
            sv.start_queue_runners(sess, queue_runners)
            tf.logging.info('Started %d queues for processing input data.',
                            len(queue_runners))

            if is_chief:
                sv.start_queue_runners(sess, chief_queue_runners)
                sess.run(init_tokens_op)

            # Train, checking for NaNs. Concurrently run the summary operation at a
            # specified interval. Note that the summary_op and train_op never run
            # simultaneously in order to prevent running out of GPU memory.
            next_summary_time = time.time() + FLAGS.save_summaries_secs

            tf_feed = TFNode.DataFeed(ctx.mgr)
            while not sv.should_stop():
                try:
                    start_time = time.time()

                    if FLAGS.input_mode == 'spark':
                        tmp = feed_dict(tf_feed.next_batch(FLAGS.batch_size / FLAGS.num_preprocess_threads))
                        feed = {batch: tmp}
                        loss_value, step = sess.run([train_op, global_step], feed_dict=feed)
                    else:
                        loss_value, step = sess.run([train_op, global_step])

                    assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
                    if step > FLAGS.max_steps:
                        break
                    duration = time.time() - start_time

                    if step % 30 == 0:
                        examples_per_sec = FLAGS.batch_size / float(duration)
                        format_str = ('Worker %d: %s: step %d, loss = %.2f '
                                      '(%.1f examples/sec; %.3f sec/batch)')
                        tf.logging.info(format_str %
                                        (FLAGS.task_id, datetime.now(), step, loss_value,
                                         examples_per_sec, duration))

                    # Determine if the summary_op should be run on the chief worker.
                    if FLAGS.input_mode == 'tf' and is_chief and next_summary_time < time.time():
                        tf.logging.info('Running Summary operation on the chief.')
                        summary_str = sess.run(summary_op)
                        sv.summary_computed(sess, summary_str)
                        tf.logging.info('Finished running Summary operation.')

                        # Determine the next time for running the summary.
                        next_summary_time += FLAGS.save_summaries_secs
                except:
                    if is_chief:
                        tf.logging.info('About to execute sync_clean_up_op!')
                    raise

            # Stop the TFNode data feed
            if FLAGS.input_mode == 'spark':
                tf_feed.terminate()

            # Stop the supervisor. This also waits for service threads to finish.
            sv.stop()

            # Save after the training ends.
            if is_chief:
                saver.save(sess,
                           os.path.join(FLAGS.train_dir, 'model.ckpt'),
                           global_step=global_step)
0.011946
def _find_ancillary_vars(self, ds, refresh=False):
    '''
    Returns a list of variable names that are defined as ancillary
    variables in the dataset ds.

    An ancillary variable generally is a metadata container and referenced
    from other variables via a string reference in an attribute.

    - via ancillary_variables (3.4)
    - "grid mapping var" (5.6)
    - TODO: more?

    The result is cached by the passed in dataset object inside of this
    checker. Pass refresh=True to redo the cached value.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param bool refresh: if refresh is set to True, the cache is invalidated.
    :rtype: list
    :return: List of variable names (str) that are defined as ancillary
             variables in the dataset ds.
    '''

    # Use the cached version if it exists and is not empty
    if self._ancillary_vars.get(ds, None) and refresh is False:
        return self._ancillary_vars[ds]

    # Invalidate the cache at all costs
    self._ancillary_vars[ds] = []

    for name, var in ds.variables.items():
        if hasattr(var, 'ancillary_variables'):
            for anc_name in var.ancillary_variables.split(" "):
                if anc_name in ds.variables:
                    self._ancillary_vars[ds].append(anc_name)

        if hasattr(var, 'grid_mapping'):
            gm_name = var.grid_mapping
            if gm_name in ds.variables:
                self._ancillary_vars[ds].append(gm_name)

    return self._ancillary_vars[ds]
0.001206
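A small illustration of the attribute conventions this method looks for (the checker class itself is not shown, so only the dataset setup is sketched; a diskless in-memory netCDF file is assumed to avoid touching disk):

import netCDF4

ds = netCDF4.Dataset("example.nc", "w", diskless=True)
ds.createDimension("time", 4)
temp = ds.createVariable("temp", "f4", ("time",))
ds.createVariable("temp_qc", "i1", ("time",))
temp.ancillary_variables = "temp_qc"  # CF 3.4: temp_qc is referenced as an ancillary variable
# A checker exposing _find_ancillary_vars(ds) would now report ['temp_qc'].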
def cmd_split(self, line):
    """ Get the command associated with this input line. """
    cmd, *args = line.lstrip().split(' ', 1)
    return self.root_command.subcommands[cmd], ' '.join(args)
0.009709
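The split itself is easy to verify in isolation (the surrounding command object is omitted; only the line handling is shown):

line = "  status --verbose --json"
cmd, *args = line.lstrip().split(' ', 1)
print(cmd)             # 'status'
print(' '.join(args))  # '--verbose --json'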
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
    r'''
    Returns the rate of events with M > mag_value

    :param float slip: Slip rate in mm/yr
    :param float mmax: Maximum magnitude
    :param float mag_value: Magnitude value
    :param float bbar: \bar{b} parameter (effectively = b * log(10.))
    :param float dbar: \bar{d} parameter
    :param float beta: Beta value of formula defined in Eq. 20 of
                       Anderson & Luco (1983)
    '''
    delta_m = mmax - mag_value
    a_3 = self._get_a3_value(bbar, dbar, slip / 10., beta, mmax)
    central_term = np.exp(bbar * delta_m) - 1.0 - (bbar * delta_m)
    return a_3 * central_term * (delta_m > 0.0)
0.002509
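A worked sketch of the magnitude-dependent part of the expression (the a_3 scale factor depends on _get_a3_value, which lives elsewhere in the class, so only the central term and the M >= mmax cutoff are illustrated; the numbers are arbitrary):

import numpy as np

bbar = 1.0 * np.log(10.)   # \bar{b} for b = 1.0
mmax = 7.5
for mag_value in (6.0, 7.0, 8.0):
    delta_m = mmax - mag_value
    central_term = np.exp(bbar * delta_m) - 1.0 - (bbar * delta_m)
    # (delta_m > 0.0) zeroes the rate for magnitudes at or above mmax
    print(mag_value, central_term * (delta_m > 0.0))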
def update_thread(cls, session, conversation, thread):
    """Update a thread.

    Args:
        session (requests.sessions.Session): Authenticated session.
        conversation (helpscout.models.Conversation): The conversation
            that the thread belongs to.
        thread (helpscout.models.Thread): The thread to be updated.

    Returns:
        helpscout.models.Conversation: Conversation including freshly
            updated thread.
    """
    data = thread.to_api()
    data['reload'] = True
    return cls(
        '/conversations/%s/threads/%d.json' % (
            conversation.id, thread.id,
        ),
        data=data,
        request_type=RequestPaginator.PUT,
        singleton=True,
        session=session,
    )
0.002424